#include <Storages/MergeTree/IMergeTreeReader.h>
#include <Columns/FilterDescription.h>
#include <Columns/ColumnConst.h>
#include <Columns/ColumnsCommon.h>
#include <Common/TargetSpecific.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <base/range.h>
#include <Interpreters/castColumn.h>
#include <DataTypes/DataTypeNothing.h>
#include <bit>

#ifdef __SSE2__
#include <emmintrin.h>
#endif

#if defined(__aarch64__) && defined(__ARM_NEON)
#    include <arm_neon.h>
#    ifdef HAS_RESERVED_IDENTIFIER
#        pragma clang diagnostic ignored "-Wreserved-identifier"
#    endif
#endif

namespace DB
{

namespace ErrorCodes
{
    extern const int LOGICAL_ERROR;
    extern const int BAD_ARGUMENTS;
}
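
/// Applies the filter to each non-null column in place. If filtering leaves a column empty,
/// all columns are cleared: an empty result needs no per-column data.
/// `filter_bytes` is the precomputed number of 1s in the filter (see countBytesInFilter).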
static void filterColumns(Columns & columns, const IColumn::Filter & filter, size_t filter_bytes)
{
    for (auto & column : columns)
    {
        if (column)
        {
            assert(column->size() == filter.size());

            column = column->filter(filter, filter_bytes);

            if (column->empty())
            {
                columns.clear();
                return;
            }
        }
    }
}

static void filterColumns(Columns & columns, const FilterWithCachedCount & filter)
{
    if (filter.alwaysTrue())
        return;

    if (filter.alwaysFalse())
    {
        for (auto & col : columns)
            if (col)
                col = col->cloneEmpty();

        return;
    }

    filterColumns(columns, filter.getData(), filter.countBytesInFilter());
}

size_t MergeTreeRangeReader::ReadResult::getLastMark(const MergeTreeRangeReader::ReadResult::RangesInfo & ranges)
{
    size_t current_task_last_mark = 0;
    for (const auto & mark_range : ranges)
        current_task_last_mark = std::max(current_task_last_mark, mark_range.range.end);
    return current_task_last_mark;
}
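
/// DelayedStream accumulates consecutive read requests and performs the actual call to
/// IMergeTreeReader::readRows() as late as possible (in finalize(), or when a new request is
/// not contiguous with the delayed one). This lets several small adjacent reads collapse into
/// a single larger read starting at `current_mark` with offset `current_offset`.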
MergeTreeRangeReader::DelayedStream::DelayedStream(
    size_t from_mark,
    size_t current_task_last_mark_,
    IMergeTreeReader * merge_tree_reader_)
    : current_mark(from_mark), current_offset(0), num_delayed_rows(0)
    , current_task_last_mark(current_task_last_mark_)
    , merge_tree_reader(merge_tree_reader_)
    , index_granularity(&(merge_tree_reader->data_part_info_for_read->getIndexGranularity()))
    , continue_reading(false), is_finished(false)
{
}

size_t MergeTreeRangeReader::DelayedStream::position() const
{
    size_t num_rows_before_current_mark = index_granularity->getMarkStartingRow(current_mark);
    return num_rows_before_current_mark + current_offset + num_delayed_rows;
}

size_t MergeTreeRangeReader::DelayedStream::readRows(Columns & columns, size_t num_rows)
{
    if (num_rows)
    {
        size_t rows_read = merge_tree_reader->readRows(
            current_mark, current_task_last_mark, continue_reading, num_rows, columns);
        continue_reading = true;

        /// Zero rows_read may mean either that reading has finished,
        /// or that there are no columns we can read in the current part (for example, all columns are default).
        /// In the latter case we can't finish reading here, but that is also fine for the former case
        /// because we can finish reading by calculating the number of pending rows.
        if (0 < rows_read && rows_read < num_rows)
            is_finished = true;

        return rows_read;
    }

    return 0;
}

size_t MergeTreeRangeReader::DelayedStream::read(Columns & columns, size_t from_mark, size_t offset, size_t num_rows)
{
    size_t num_rows_before_from_mark = index_granularity->getMarkStartingRow(from_mark);
    /// We are already at exactly the required position, so because the stream is lazy
    /// we don't read anything and only increase the number of delayed rows.
    if (position() == num_rows_before_from_mark + offset)
    {
        num_delayed_rows += num_rows;
        return 0;
    }
    else
    {
        size_t read_rows = finalize(columns);

        continue_reading = false;
        current_mark = from_mark;
        current_offset = offset;
        num_delayed_rows = num_rows;

        return read_rows;
    }
}

size_t MergeTreeRangeReader::DelayedStream::finalize(Columns & columns)
{
    /// We need to skip some rows before reading.
    if (current_offset && !continue_reading)
    {
        /// First, skip whole granules by advancing the mark.
        for (size_t mark_num : collections::range(current_mark, index_granularity->getMarksCount()))
        {
            size_t mark_index_granularity = index_granularity->getMarkRows(mark_num);
            if (current_offset >= mark_index_granularity)
            {
                current_offset -= mark_index_granularity;
                current_mark++;
            }
            else
                break;
        }

        /// Skip some rows from the beginning of the granule.
        /// We don't know the physical size of rows inside a compressed granule,
        /// so we have to read them and throw them away.
        if (current_offset)
        {
            Columns tmp_columns;
            tmp_columns.resize(columns.size());
            readRows(tmp_columns, current_offset);
        }
    }

    size_t rows_to_read = num_delayed_rows;
    current_offset += num_delayed_rows;
    num_delayed_rows = 0;

    return readRows(columns, rows_to_read);
}
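
/// Stream reads rows from the half-open mark interval [from_mark, to_mark), granule by granule,
/// delegating the actual reads to a DelayedStream so that adjacent requests are merged.
/// offset_after_current_mark tracks the position inside the current granule.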
MergeTreeRangeReader::Stream::Stream(
    size_t from_mark, size_t to_mark, size_t current_task_last_mark, IMergeTreeReader * merge_tree_reader_)
    : current_mark(from_mark), offset_after_current_mark(0)
    , last_mark(to_mark)
    , merge_tree_reader(merge_tree_reader_)
    , index_granularity(&(merge_tree_reader->data_part_info_for_read->getIndexGranularity()))
    , current_mark_index_granularity(index_granularity->getMarkRows(from_mark))
    , stream(from_mark, current_task_last_mark, merge_tree_reader)
{
    size_t marks_count = index_granularity->getMarksCount();
    if (from_mark >= marks_count)
        throw Exception("Trying to create a stream to read from mark №" + toString(current_mark) + " but total marks count is "
            + toString(marks_count), ErrorCodes::LOGICAL_ERROR);

    if (last_mark > marks_count)
        throw Exception("Trying to create a stream to read up to mark №" + toString(current_mark) + " but total marks count is "
            + toString(marks_count), ErrorCodes::LOGICAL_ERROR);
}

void MergeTreeRangeReader::Stream::checkNotFinished() const
{
    if (isFinished())
        throw Exception("Cannot read out of marks range.", ErrorCodes::BAD_ARGUMENTS);
}

void MergeTreeRangeReader::Stream::checkEnoughSpaceInCurrentGranule(size_t num_rows) const
{
    if (num_rows + offset_after_current_mark > current_mark_index_granularity)
        throw Exception("Cannot read from granule more than index_granularity.", ErrorCodes::LOGICAL_ERROR);
}

size_t MergeTreeRangeReader::Stream::readRows(Columns & columns, size_t num_rows)
{
    size_t rows_read = stream.read(columns, current_mark, offset_after_current_mark, num_rows);

    if (stream.isFinished())
        finish();

    return rows_read;
}

void MergeTreeRangeReader::Stream::toNextMark()
{
    ++current_mark;

    size_t total_marks_count = index_granularity->getMarksCount();
    if (current_mark < total_marks_count)
        current_mark_index_granularity = index_granularity->getMarkRows(current_mark);
    else if (current_mark == total_marks_count)
        current_mark_index_granularity = 0; /// HACK?
    else
        throw Exception("Trying to read from mark " + toString(current_mark) + ", but total marks count is " + toString(total_marks_count), ErrorCodes::LOGICAL_ERROR);

    offset_after_current_mark = 0;
}

size_t MergeTreeRangeReader::Stream::read(Columns & columns, size_t num_rows, bool skip_remaining_rows_in_current_granule)
{
    checkEnoughSpaceInCurrentGranule(num_rows);

    if (num_rows)
    {
        checkNotFinished();

        size_t read_rows = readRows(columns, num_rows);

        offset_after_current_mark += num_rows;

        /// Start new granule; skipped_rows_after_offset is already zero.
        if (offset_after_current_mark == current_mark_index_granularity || skip_remaining_rows_in_current_granule)
            toNextMark();

        return read_rows;
    }
    else
    {
        /// Nothing to read.
        if (skip_remaining_rows_in_current_granule)
        {
            /// Skip the rest of the rows in granule and start new one.
            checkNotFinished();
            toNextMark();
        }

        return 0;
    }
}

void MergeTreeRangeReader::Stream::skip(size_t num_rows)
{
    if (num_rows)
    {
        checkNotFinished();
        checkEnoughSpaceInCurrentGranule(num_rows);

        offset_after_current_mark += num_rows;

        if (offset_after_current_mark == current_mark_index_granularity)
        {
            /// Start new granule; skipped_rows_after_offset is already zero.
            toNextMark();
        }
    }
}

size_t MergeTreeRangeReader::Stream::finalize(Columns & columns)
{
    size_t read_rows = stream.finalize(columns);

    if (stream.isFinished())
        finish();

    return read_rows;
}

void MergeTreeRangeReader::ReadResult::addGranule(size_t num_rows_)
{
    rows_per_granule.push_back(num_rows_);
    total_rows_per_granule += num_rows_;
}

void MergeTreeRangeReader::ReadResult::adjustLastGranule()
{
    size_t num_rows_to_subtract = total_rows_per_granule - num_read_rows;

    if (rows_per_granule.empty())
        throw Exception("Can't adjust last granule because no granules were added", ErrorCodes::LOGICAL_ERROR);

    if (num_rows_to_subtract > rows_per_granule.back())
        throw Exception(ErrorCodes::LOGICAL_ERROR,
            "Can't adjust last granule because it has {} rows, but trying to subtract {} rows.",
            toString(rows_per_granule.back()), toString(num_rows_to_subtract));

    rows_per_granule.back() -= num_rows_to_subtract;
    total_rows_per_granule -= num_rows_to_subtract;
}

void MergeTreeRangeReader::ReadResult::clear()
{
    /// Need to save information about the number of granules.
    num_rows_to_skip_in_last_granule += rows_per_granule.back();
    rows_per_granule.assign(rows_per_granule.size(), 0);
    total_rows_per_granule = 0;
    final_filter = FilterWithCachedCount();
    num_rows = 0;
    columns.clear();
    additional_columns.clear();
}

void MergeTreeRangeReader::ReadResult::clearFilter()
{
    /// TODO: the old implementation did not clear filter_holder here; check whether that was intentional.
    final_filter = FilterWithCachedCount();
}

#if 0
/// Currently disabled: shrinks columns according to rows_per_granule, keeping only the head of each granule.
void MergeTreeRangeReader::ReadResult::shrink(Columns & old_columns, const NumRows & rows_per_granule_previous) const
{
    for (auto & column : old_columns)
    {
        if (!column)
            continue;

        if (const auto * column_const = typeid_cast<const ColumnConst *>(column.get()))
        {
            column = column_const->cloneResized(total_rows_per_granule);
            continue;
        }

        LOG_TEST(log, "ReadResult::shrink() column size: {} total_rows_per_granule: {}",
            column->size(), total_rows_per_granule);

        auto new_column = column->cloneEmpty();
        new_column->reserve(total_rows_per_granule);
        for (size_t j = 0, pos = 0; j < rows_per_granule_previous.size(); pos += rows_per_granule_previous[j++])
        {
            if (rows_per_granule[j])
                new_column->insertRangeFrom(*column, pos, rows_per_granule[j]);
        }
        column = std::move(new_column);
    }
}
#endif

void MergeTreeRangeReader::ReadResult::checkInternalConsistency() const
{
    /// Check that filter size matches number of rows that will be read.
    assert(!final_filter.present() || final_filter.size() == total_rows_per_granule);

    assert(!final_filter.present()
        || final_filter.countBytesInFilter() == num_rows /// If filter has been applied.
        || total_rows_per_granule == num_rows /// If filter has not been applied.
    );

    assert(!additional_columns || additional_columns.rows() == num_rows);

    for (const auto & column : columns)
    {
        if (column)
            assert(column->size() == num_rows);
    }
}

std::string MergeTreeRangeReader::ReadResult::dumpInfo() const
{
    WriteBufferFromOwnString out;
    out << "num_rows: " << num_rows
        << ", columns: " << columns.size()
        << ", total_rows_per_granule: " << total_rows_per_granule
        << ", need_filter: " << need_filter;

    if (final_filter.present())
    {
        out << ", filter_size: " << final_filter.size()
            << ", filter_1s: " << final_filter.countBytesInFilter();
    }
    else
    {
        out << ", no filter";
    }

    for (size_t ci = 0; ci < columns.size(); ++ci)
    {
        out << ", column[" << ci << "]: ";
        if (!columns[ci])
            out << " nullptr";
        else
            out << " " << columns[ci]->dumpStructure();
    }

    if (additional_columns)
        out << ", additional_columns: " << additional_columns.dumpStructure();

    return out.str();
}

static std::string dumpNames(const NamesAndTypesList & columns)
{
    WriteBufferFromOwnString out;
    for (auto it = columns.begin(); it != columns.end(); ++it)
    {
        if (it != columns.begin())
            out << ", ";
        out << it->name;
    }
    return out.str();
}

void MergeTreeRangeReader::ReadResult::setFilterConstTrue()
{
    clearFilter();
}

static ColumnPtr combineFilters(ColumnPtr first, ColumnPtr second);
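
/// optimize() tries to reduce the amount of data that later PREWHERE steps and the final read
/// have to touch. The key observation: if a granule's filter ends with zeros, those trailing rows
/// can simply not be read on the next step (by shrinking rows_per_granule) instead of being read
/// and filtered out. For example, with one granule of 8 rows and filter 1,1,0,1,0,0,0,0 the last
/// 4 rows form a zero tail, so the next step only needs to read 4 rows of that granule.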
void MergeTreeRangeReader::ReadResult::optimize(ColumnPtr current_filter, bool can_read_incomplete_granules)
{
    /// Combine the new filter with the previous one if it is present.
    /// This filter has the size of total_rows_per_granule. It is applied after reading contiguous chunks from
    /// the start of each granule.
    auto combined_filter = current_filter;
    if (final_filter.present())
        combined_filter = combineFilters(final_filter.getColumn(), current_filter);

    FilterWithCachedCount filter(combined_filter);

    if (total_rows_per_granule == 0 || !filter.present())
        return;

    NumRows zero_tails;
    auto total_zero_rows_in_tails = countZeroTails(filter.getData(), zero_tails, can_read_incomplete_granules);

    LOG_TEST(log, "ReadResult::optimize() before: {}", dumpInfo());

    // checkInternalConsistency();

    // SCOPE_EXIT(checkInternalConsistency());

    SCOPE_EXIT({
        LOG_TEST(log, "ReadResult::optimize() after: {}", dumpInfo());
    });

    if (total_zero_rows_in_tails == filter.size())
    {
        clear();
        return;
    }
    else if (total_zero_rows_in_tails == 0 && filter.countBytesInFilter() == filter.size())
    {
        setFilterConstTrue();
        return;
    }
    /// Just a guess. If only a few rows may be skipped, it's better not to skip at all.
    else if (2 * total_zero_rows_in_tails > filter.size())
    {
        const NumRows rows_per_granule_previous = rows_per_granule;
        const size_t total_rows_per_granule_previous = total_rows_per_granule;

        for (auto i : collections::range(0, rows_per_granule.size()))
            rows_per_granule[i] -= zero_tails[i];

        num_rows_to_skip_in_last_granule += rows_per_granule_previous.back() - rows_per_granule.back();

        /* {
            shrink(columns, rows_per_granule_previous); /// shrink acts as filtering in such case

            auto c = additional_columns.getColumns();
            shrink(c, rows_per_granule_previous); /// shrink acts as filtering in such case
            additional_columns.setColumns(c);
        } */

        /// Check if the filter is const 1 after shrink.
        if (num_rows == total_rows_per_granule_previous /// We can apply shrink only if after the previous step the number of rows in the result
                /// matches the rows_per_granule info. Otherwise we will not be able to match newly added zeros in granule tails.
            && filter.countBytesInFilter() + total_zero_rows_in_tails == total_rows_per_granule) /// All zeros are in tails?
        {
            total_rows_per_granule = total_rows_per_granule_previous - total_zero_rows_in_tails;
            // num_rows = total_rows_per_granule;
            setFilterConstTrue();

            LOG_TEST(log, "ReadResult::optimize() after shrink {}", dumpInfo());
        }
        else
        {
            auto new_filter = ColumnUInt8::create(filter.size() - total_zero_rows_in_tails);
            IColumn::Filter & new_data = new_filter->getData();

            collapseZeroTails(filter.getData(), rows_per_granule_previous, new_data);
            total_rows_per_granule = new_filter->size();
            // num_rows = total_rows_per_granule;
            final_filter = FilterWithCachedCount(new_filter->getPtr());

            LOG_TEST(log, "ReadResult::optimize() after collapseZeroTails {}", dumpInfo());
        }

        need_filter = true;
    }
    else
    {
        /// Another guess: whether it's worth filtering at this PREWHERE step.
        if (filter.countBytesInFilter() < 0.6 * filter.size())
            need_filter = true;

        /// Check if we have rows already filtered at the previous step. In such case we must apply the filter because
        /// otherwise num_rows doesn't match total_rows_per_granule and the next read step will not know how to filter
        /// newly read columns to match the num_rows.
        if (num_rows != total_rows_per_granule)
            need_filter = true;

        final_filter = std::move(filter);
    }
}
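
/// countZeroTails() walks the filter granule by granule and records, for each granule, how many
/// trailing rows have filter value 0. collapseZeroTails() then produces a new, shorter filter with
/// those tails cut off. E.g. for two granules of 4 rows each and filter 1,0,1,0 | 1,1,0,0 the zero
/// tails are 1 and 2, and the collapsed filter is 1,0,1 | 1,1.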
size_t MergeTreeRangeReader::ReadResult::countZeroTails(const IColumn::Filter & filter_vec, NumRows & zero_tails, bool can_read_incomplete_granules) const
{
    zero_tails.resize(0);
    zero_tails.reserve(rows_per_granule.size());

    const auto * filter_data = filter_vec.data();

    size_t total_zero_rows_in_tails = 0;

    for (auto rows_to_read : rows_per_granule)
    {
        /// Count the number of zeros at the end of the filter for the rows that were read from the current granule.
        size_t zero_tail = numZerosInTail(filter_data, filter_data + rows_to_read);
        if (!can_read_incomplete_granules && zero_tail != rows_to_read)
            zero_tail = 0;
        zero_tails.push_back(zero_tail);
        total_zero_rows_in_tails += zero_tails.back();
        filter_data += rows_to_read;
    }

    return total_zero_rows_in_tails;
}

void MergeTreeRangeReader::ReadResult::collapseZeroTails(const IColumn::Filter & filter_vec, const NumRows & rows_per_granule_previous, IColumn::Filter & new_filter_vec) const
{
    const auto * filter_data = filter_vec.data();
    auto * new_filter_data = new_filter_vec.data();

    for (auto i : collections::range(0, rows_per_granule.size()))
    {
        memcpySmallAllowReadWriteOverflow15(new_filter_data, filter_data, rows_per_granule[i]);
        filter_data += rows_per_granule_previous[i];
        new_filter_data += rows_per_granule[i];
    }

    new_filter_vec.resize(new_filter_data - new_filter_vec.data());
}
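
/// numZerosInTail() counts trailing zero bytes in [begin, end). The SIMD variants below all follow
/// the same pattern: process 64 bytes at a time from the end, build a 64-bit mask with one bit per
/// byte (bit set if the byte equals zero), invert it, and use std::countl_zero to find how many of
/// the highest (i.e. last) bytes were zero. A remainder shorter than 64 bytes is handled by the
/// scalar loop at the end.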
DECLARE_AVX512BW_SPECIFIC_CODE(
size_t numZerosInTail(const UInt8 * begin, const UInt8 * end)
{
    size_t count = 0;
    const __m512i zero64 = _mm512_setzero_epi32();
    while (end - begin >= 64)
    {
        end -= 64;
        const auto * pos = end;
        UInt64 val = static_cast<UInt64>(_mm512_cmp_epi8_mask(
            _mm512_loadu_si512(reinterpret_cast<const __m512i *>(pos)),
            zero64,
            _MM_CMPINT_EQ));
        val = ~val;
        if (val == 0)
            count += 64;
        else
        {
            count += std::countl_zero(val);
            return count;
        }
    }
    while (end > begin && *(--end) == 0)
    {
        ++count;
    }
    return count;
}
) /// DECLARE_AVX512BW_SPECIFIC_CODE

DECLARE_AVX2_SPECIFIC_CODE(
size_t numZerosInTail(const UInt8 * begin, const UInt8 * end)
{
    size_t count = 0;
    const __m256i zero32 = _mm256_setzero_si256();
    while (end - begin >= 64)
    {
        end -= 64;
        const auto * pos = end;
        UInt64 val =
            (static_cast<UInt64>(_mm256_movemask_epi8(_mm256_cmpeq_epi8(
                _mm256_loadu_si256(reinterpret_cast<const __m256i *>(pos)),
                zero32))) & 0xffffffffu)
            | (static_cast<UInt64>(_mm256_movemask_epi8(_mm256_cmpeq_epi8(
                _mm256_loadu_si256(reinterpret_cast<const __m256i *>(pos + 32)),
                zero32))) << 32u);

        val = ~val;
        if (val == 0)
            count += 64;
        else
        {
            count += std::countl_zero(val);
            return count;
        }
    }
    while (end > begin && *(--end) == 0)
    {
        ++count;
    }
    return count;
}
) /// DECLARE_AVX2_SPECIFIC_CODE

size_t MergeTreeRangeReader::ReadResult::numZerosInTail(const UInt8 * begin, const UInt8 * end)
{
#if USE_MULTITARGET_CODE
    /// Check if the CPU supports AVX-512 dynamically; haveAVX512BW contains the check for haveAVX512F.
    if (isArchSupported(TargetArch::AVX512BW))
        return TargetSpecific::AVX512BW::numZerosInTail(begin, end);
    else if (isArchSupported(TargetArch::AVX2))
        return TargetSpecific::AVX2::numZerosInTail(begin, end);
#endif

    size_t count = 0;

#if defined(__SSE2__)
    const __m128i zero16 = _mm_setzero_si128();
    while (end - begin >= 64)
    {
        end -= 64;
        const auto * pos = end;
        UInt64 val =
            static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpeq_epi8(
                _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos)),
                zero16)))
            | (static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpeq_epi8(
                _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos + 16)),
                zero16))) << 16u)
            | (static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpeq_epi8(
                _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos + 32)),
                zero16))) << 32u)
            | (static_cast<UInt64>(_mm_movemask_epi8(_mm_cmpeq_epi8(
                _mm_loadu_si128(reinterpret_cast<const __m128i *>(pos + 48)),
                zero16))) << 48u);
        val = ~val;
        if (val == 0)
            count += 64;
        else
        {
            count += std::countl_zero(val);
            return count;
        }
    }
#elif defined(__aarch64__) && defined(__ARM_NEON)
    /// NEON has no movemask; instead, AND each "is zero" byte with a per-byte bit pattern and
    /// pairwise-add lanes down to a single 64-bit mask with one bit per input byte.
    const uint8x16_t bitmask = {0x01, 0x02, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80};
    while (end - begin >= 64)
    {
        end -= 64;
        const auto * src = reinterpret_cast<const unsigned char *>(end);
        const uint8x16_t p0 = vceqzq_u8(vld1q_u8(src));
        const uint8x16_t p1 = vceqzq_u8(vld1q_u8(src + 16));
        const uint8x16_t p2 = vceqzq_u8(vld1q_u8(src + 32));
        const uint8x16_t p3 = vceqzq_u8(vld1q_u8(src + 48));
        uint8x16_t t0 = vandq_u8(p0, bitmask);
        uint8x16_t t1 = vandq_u8(p1, bitmask);
        uint8x16_t t2 = vandq_u8(p2, bitmask);
        uint8x16_t t3 = vandq_u8(p3, bitmask);
        uint8x16_t sum0 = vpaddq_u8(t0, t1);
        uint8x16_t sum1 = vpaddq_u8(t2, t3);
        sum0 = vpaddq_u8(sum0, sum1);
        sum0 = vpaddq_u8(sum0, sum0);
        UInt64 val = vgetq_lane_u64(vreinterpretq_u64_u8(sum0), 0);
        val = ~val;
        if (val == 0)
            count += 64;
        else
        {
            count += std::countl_zero(val);
            return count;
        }
    }
#endif

    while (end > begin && *(--end) == 0)
    {
        ++count;
    }
    return count;
}
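
/// MergeTreeRangeReaders are organized in a chain: roughly one reader per PREWHERE step plus one
/// for the remaining columns. Each reader continues reading the same row ranges as the previous
/// one (see continueReadingChain() below), appends its own columns, executes its PREWHERE step
/// and passes the accumulated ReadResult down the chain.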
MergeTreeRangeReader::MergeTreeRangeReader(
    IMergeTreeReader * merge_tree_reader_,
    MergeTreeRangeReader * prev_reader_,
    const PrewhereExprStep * prewhere_info_,
    bool last_reader_in_chain_,
    const Names & non_const_virtual_column_names_)
    : merge_tree_reader(merge_tree_reader_)
    , index_granularity(&(merge_tree_reader->data_part_info_for_read->getIndexGranularity()))
    , prev_reader(prev_reader_)
    , prewhere_info(prewhere_info_)
    , last_reader_in_chain(last_reader_in_chain_)
    , is_initialized(true)
{
    if (prev_reader)
        result_sample_block = prev_reader->getSampleBlock();

    for (const auto & name_and_type : merge_tree_reader->getColumns())
    {
        read_sample_block.insert({name_and_type.type->createColumn(), name_and_type.type, name_and_type.name});
        result_sample_block.insert({name_and_type.type->createColumn(), name_and_type.type, name_and_type.name});
    }

    for (const auto & column_name : non_const_virtual_column_names_)
    {
        if (result_sample_block.has(column_name))
            continue;

        non_const_virtual_column_names.push_back(column_name);

        if (column_name == "_part_offset" && !prev_reader)
        {
            /// _part_offset column is filled by the first reader.
            read_sample_block.insert(ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared<DataTypeUInt64>(), column_name));
            result_sample_block.insert(ColumnWithTypeAndName(ColumnUInt64::create(), std::make_shared<DataTypeUInt64>(), column_name));
        }
    }

    if (prewhere_info)
    {
        const auto & step = *prewhere_info;
        if (step.actions)
            step.actions->execute(result_sample_block, true);

        if (step.remove_column)
            result_sample_block.erase(step.column_name);
    }
}

bool MergeTreeRangeReader::isReadingFinished() const
{
    return prev_reader ? prev_reader->isReadingFinished() : stream.isFinished();
}

size_t MergeTreeRangeReader::numReadRowsInCurrentGranule() const
{
    return prev_reader ? prev_reader->numReadRowsInCurrentGranule() : stream.numReadRowsInCurrentGranule();
}

size_t MergeTreeRangeReader::numPendingRowsInCurrentGranule() const
{
    if (prev_reader)
        return prev_reader->numPendingRowsInCurrentGranule();

    auto pending_rows = stream.numPendingRowsInCurrentGranule();

    if (pending_rows)
        return pending_rows;

    return numRowsInCurrentGranule();
}

size_t MergeTreeRangeReader::numRowsInCurrentGranule() const
{
    /// If pending_rows is zero, then the stream is not initialized.
    if (stream.current_mark_index_granularity)
        return stream.current_mark_index_granularity;

    /// We haven't read anything yet, so return the size of the first mark to be read.
    size_t first_mark = merge_tree_reader->getFirstMarkToRead();
    return index_granularity->getMarkRows(first_mark);
}

size_t MergeTreeRangeReader::currentMark() const
{
    return stream.currentMark();
}

size_t MergeTreeRangeReader::Stream::numPendingRows() const
{
    size_t rows_between_marks = index_granularity->getRowsCountInRange(current_mark, last_mark);
    return rows_between_marks - offset_after_current_mark;
}

UInt64 MergeTreeRangeReader::Stream::currentPartOffset() const
{
    return index_granularity->getMarkStartingRow(current_mark) + offset_after_current_mark;
}

UInt64 MergeTreeRangeReader::Stream::lastPartOffset() const
{
    return index_granularity->getMarkStartingRow(last_mark);
}

size_t MergeTreeRangeReader::Stream::ceilRowsToCompleteGranules(size_t rows_num) const
{
    /// FIXME suboptimal
    size_t result = 0;
    size_t from_mark = current_mark;
    while (result < rows_num && from_mark < last_mark)
        result += index_granularity->getMarkRows(from_mark++);

    return result;
}

bool MergeTreeRangeReader::isCurrentRangeFinished() const
{
    return prev_reader ? prev_reader->isCurrentRangeFinished() : stream.isFinished();
}
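
/// read() is the entry point for one step of the chain. A non-first reader first delegates to the
/// previous reader and then reads its own columns over the same granules; the first reader starts
/// reading the requested mark ranges. In both cases missing columns are filled with defaults and
/// then the PREWHERE step of this reader is executed.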
MergeTreeRangeReader::ReadResult MergeTreeRangeReader::read(size_t max_rows, MarkRanges & ranges)
{
    if (max_rows == 0)
        throw Exception("Expected at least 1 row to read, got 0.", ErrorCodes::LOGICAL_ERROR);

    ReadResult read_result(log);

    SCOPE_EXIT({
        LOG_TEST(log, "read() returned {}, sample block {}",
            read_result.dumpInfo(), this->result_sample_block.dumpNames());
    });

    if (prev_reader)
    {
        read_result = prev_reader->read(max_rows, ranges);

        size_t num_read_rows;
        Columns columns = continueReadingChain(read_result, num_read_rows);

        /// Nothing to do. Return empty result.
        if (read_result.num_rows == 0)
            return read_result;

        /// Calculate and update the number of read bytes.
        size_t total_bytes = 0;
        for (auto & column : columns)
        {
            if (column)
                total_bytes += column->byteSize();
        }
        read_result.addNumBytesRead(total_bytes);

        if (!columns.empty())
        {
            /// If all requested columns are absent in the part, num_read_rows will be 0.
            /// In this case we need to use the number of rows in the result to fill the default values and not filter the block.
            if (num_read_rows == 0)
                num_read_rows = read_result.num_rows;

            /// fillMissingColumns() must be called after reading but before any filtering because
            /// some columns (e.g. arrays) might be only partially filled and thus not be valid, and
            /// fillMissingColumns() fixes this.
            bool should_evaluate_missing_defaults;
            merge_tree_reader->fillMissingColumns(columns, should_evaluate_missing_defaults,
                                                  num_read_rows);

            if (read_result.total_rows_per_granule == num_read_rows && read_result.num_rows != num_read_rows)
            {
                /// We have a filter applied from the previous step,
                /// so we need to apply it to the newly read rows.
                assert(read_result.final_filter.present());
                assert(read_result.final_filter.countBytesInFilter() == read_result.num_rows);

                filterColumns(columns, read_result.final_filter);
            }

            /// If some columns are absent in the part, then evaluate default values.
            if (should_evaluate_missing_defaults)
            {
                Block additional_columns = prev_reader->getSampleBlock().cloneWithColumns(read_result.columns);
                for (const auto & col : read_result.additional_columns)
                    additional_columns.insert(col);

                merge_tree_reader->evaluateMissingDefaults(additional_columns, columns);
            }

            /// If columns are not empty, then apply on-the-fly alter conversions if any are required.
            merge_tree_reader->performRequiredConversions(columns);
        }

        read_result.columns.reserve(read_result.columns.size() + columns.size());
        for (auto & column : columns)
            read_result.columns.emplace_back(std::move(column));
    }
    else
    {
        read_result = startReadingChain(max_rows, ranges);
        read_result.num_rows = read_result.numReadRows();

        LOG_TEST(log, "First reader returned: {}, requested columns: {}",
            read_result.dumpInfo(), dumpNames(merge_tree_reader->getColumns()));

        if (read_result.num_rows == 0)
            return read_result;

        {
            /// Physical columns go first and then some virtual columns follow.
            size_t physical_columns_count = merge_tree_reader->getColumns().size();
            Columns physical_columns(read_result.columns.begin(), read_result.columns.begin() + physical_columns_count);

            bool should_evaluate_missing_defaults;
            merge_tree_reader->fillMissingColumns(physical_columns, should_evaluate_missing_defaults,
                                                  read_result.num_rows);

            /// If some columns are absent in the part, then evaluate default values.
            if (should_evaluate_missing_defaults)
                merge_tree_reader->evaluateMissingDefaults({}, physical_columns);

            /// If the result is not empty, then apply on-the-fly alter conversions if any are required.
            merge_tree_reader->performRequiredConversions(physical_columns);

            for (size_t i = 0; i < physical_columns.size(); ++i)
                read_result.columns[i] = std::move(physical_columns[i]);
        }

        size_t total_bytes = 0;
        for (auto & column : read_result.columns)
            total_bytes += column->byteSize();

        read_result.addNumBytesRead(total_bytes);
    }

    executePrewhereActionsAndFilterColumns(read_result);

    read_result.checkInternalConsistency();

    assert(read_result.num_rows == 0 || read_result.columns.size() == getSampleBlock().columns());

    return read_result;
}

MergeTreeRangeReader::ReadResult MergeTreeRangeReader::startReadingChain(size_t max_rows, MarkRanges & ranges)
{
    ReadResult result(log);
    result.columns.resize(merge_tree_reader->getColumns().size());

    size_t current_task_last_mark = getLastMark(ranges);

    /// The stream could be unfinished by the previous read request because of the max_rows limit.
    /// In this case it will have some rows from the previously started range. We need to save their begin and
    /// end offsets to properly fill the _part_offset column.
    UInt64 leading_begin_part_offset = 0;
    UInt64 leading_end_part_offset = 0;
    if (!stream.isFinished())
    {
        leading_begin_part_offset = stream.currentPartOffset();
        leading_end_part_offset = stream.lastPartOffset();
    }

    /// Stream is lazy. result.num_added_rows is the number of rows added to the block, which is not equal to
    /// result.num_rows_read until the call to stream.finalize(). Also result.num_added_rows may be less than
    /// result.num_rows_read if the last granule in the range is also the last in the part (so we have to adjust the last granule).
    {
        size_t space_left = max_rows;
        while (space_left && (!stream.isFinished() || !ranges.empty()))
        {
            if (stream.isFinished())
            {
                result.addRows(stream.finalize(result.columns));
                stream = Stream(ranges.front().begin, ranges.front().end, current_task_last_mark, merge_tree_reader);
                result.addRange(ranges.front());
                ranges.pop_front();
            }

            size_t current_space = space_left;

            /// If the reader can't read a part of a granule, we have to increase the number of rows to read
            /// to cover complete granules, possibly exceeding max_rows a bit.
            if (!merge_tree_reader->canReadIncompleteGranules())
                current_space = stream.ceilRowsToCompleteGranules(space_left);

            auto rows_to_read = std::min(current_space, stream.numPendingRowsInCurrentGranule());

            bool last = rows_to_read == space_left;
            result.addRows(stream.read(result.columns, rows_to_read, !last));
            result.addGranule(rows_to_read);
            space_left = (rows_to_read > space_left ? 0 : space_left - rows_to_read);
        }
    }

    result.addRows(stream.finalize(result.columns));

    /// The last granule may be incomplete.
    if (!result.rows_per_granule.empty())
        result.adjustLastGranule();

    if (read_sample_block.has("_part_offset"))
        fillPartOffsetColumn(result, leading_begin_part_offset, leading_end_part_offset);

    return result;
}
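
/// Fills the _part_offset column with the absolute row numbers (within the data part) of the rows
/// that were just read: first the leading rows left over from the range started by the previous
/// read() call, then the rows of each newly started range.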
void MergeTreeRangeReader::fillPartOffsetColumn(ReadResult & result, UInt64 leading_begin_part_offset, UInt64 leading_end_part_offset)
{
    size_t num_rows = result.numReadRows();

    auto column = ColumnUInt64::create(num_rows);
    ColumnUInt64::Container & vec = column->getData();

    UInt64 * pos = vec.data();
    UInt64 * end = &vec[num_rows];

    while (pos < end && leading_begin_part_offset < leading_end_part_offset)
        *pos++ = leading_begin_part_offset++;

    const auto & start_ranges = result.started_ranges;

    for (const auto & start_range : start_ranges)
    {
        UInt64 start_part_offset = index_granularity->getMarkStartingRow(start_range.range.begin);
        UInt64 end_part_offset = index_granularity->getMarkStartingRow(start_range.range.end);

        while (pos < end && start_part_offset < end_part_offset)
            *pos++ = start_part_offset++;
    }

    result.columns.emplace_back(std::move(column));
}

Columns MergeTreeRangeReader::continueReadingChain(const ReadResult & result, size_t & num_rows)
{
    Columns columns;
    num_rows = 0;

    /// No columns need to be read at this step? (only more filtering)
    if (merge_tree_reader->getColumns().empty())
        return columns;

    if (result.rows_per_granule.empty())
    {
        /// If zero rows were read on the previous step, then there are no more rows to read.
        /// The last granule may have fewer rows than index_granularity, so finish reading manually.
        stream.finish();
        return columns;
    }

    columns.resize(merge_tree_reader->numColumnsInResult());

    const auto & rows_per_granule = result.rows_per_granule;
    const auto & started_ranges = result.started_ranges;

    size_t current_task_last_mark = ReadResult::getLastMark(started_ranges);
    size_t next_range_to_start = 0;

    auto size = rows_per_granule.size();
    for (auto i : collections::range(0, size))
    {
        if (next_range_to_start < started_ranges.size()
            && i == started_ranges[next_range_to_start].num_granules_read_before_start)
        {
            num_rows += stream.finalize(columns);
            const auto & range = started_ranges[next_range_to_start].range;
            ++next_range_to_start;
            stream = Stream(range.begin, range.end, current_task_last_mark, merge_tree_reader);
        }

        bool last = i + 1 == size;
        num_rows += stream.read(columns, rows_per_granule[i], !last);
    }

    stream.skip(result.num_rows_to_skip_in_last_granule);
    num_rows += stream.finalize(columns);

    /// added_rows may be zero if all columns were read in prewhere and it's ok.
    if (num_rows && num_rows != result.total_rows_per_granule)
        throw Exception("RangeReader read " + toString(num_rows) + " rows, but "
            + toString(result.total_rows_per_granule) + " expected.", ErrorCodes::LOGICAL_ERROR);

    return columns;
}
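
/// The following helpers merge a coarse filter with a finer one computed on the surviving rows.
/// The second filter must have exactly as many entries as there are 1s in the first; the result
/// has the size of the first filter. For example, first = 1,0,1,1,0 and second = 0,1,1 combine
/// into 0,0,1,1,0: each 1 of the first filter is replaced by the next value of the second.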
static void checkCombinedFiltersSize(size_t bytes_in_first_filter, size_t second_filter_size)
{
    if (bytes_in_first_filter != second_filter_size)
        throw Exception(ErrorCodes::LOGICAL_ERROR,
            "Cannot combine filters because number of bytes in a first filter ({}) "
            "does not match second filter size ({})", bytes_in_first_filter, second_filter_size);
}

/// Second filter size must be equal to number of 1s in the first filter.
/// The result size is equal to first filter size.
static ColumnPtr combineFilters(ColumnPtr first, ColumnPtr second)
{
    ConstantFilterDescription first_const_descr(*first);

    if (first_const_descr.always_true)
    {
        checkCombinedFiltersSize(first->size(), second->size());
        return second;
    }

    if (first_const_descr.always_false)
    {
        checkCombinedFiltersSize(0, second->size());
        return first;
    }

    FilterDescription first_descr(*first);

    size_t bytes_in_first_filter = countBytesInFilter(*first_descr.data);
    checkCombinedFiltersSize(bytes_in_first_filter, second->size());

    ConstantFilterDescription second_const_descr(*second);

    if (second_const_descr.always_true)
        return first;

    if (second_const_descr.always_false)
        return second->cloneResized(first->size());

    FilterDescription second_descr(*second);

    MutableColumnPtr mut_first;
    if (first_descr.data_holder)
        mut_first = IColumn::mutate(std::move(first_descr.data_holder));
    else
        mut_first = IColumn::mutate(std::move(first));

    auto & first_data = typeid_cast<ColumnUInt8 *>(mut_first.get())->getData();
    const auto * second_data = second_descr.data->data();

    for (auto & val : first_data)
    {
        if (val)
        {
            val = *second_data;
            ++second_data;
        }
    }

    return mut_first;
}

void MergeTreeRangeReader::executePrewhereActionsAndFilterColumns(ReadResult & result) const
{
    result.checkInternalConsistency();

    if (!prewhere_info)
        return;

    const auto & header = read_sample_block;
    size_t num_columns = header.columns();

    /// Check that we have columns from previous steps and newly read required columns.
    if (result.columns.size() < num_columns)
        throw Exception(ErrorCodes::LOGICAL_ERROR,
            "Invalid number of columns passed to MergeTreeRangeReader. Expected {}, got {}",
            num_columns, result.columns.size());

    /// Filter computed at the current step. Its size is equal to num_rows which is <= total_rows_per_granule.
    ColumnPtr current_step_filter;
    size_t prewhere_column_pos;

    {
        /// Restore block from columns list.
        Block block;
        size_t pos = 0;

        if (prev_reader)
        {
            for (const auto & col : prev_reader->getSampleBlock())
            {
                block.insert({result.columns[pos], col.type, col.name});
                ++pos;
            }
        }

        for (auto name_and_type = header.begin(); name_and_type != header.end() && pos < result.columns.size(); ++pos, ++name_and_type)
            block.insert({result.columns[pos], name_and_type->type, name_and_type->name});

        {
            /// Columns might be projected out. We need to store them here so that default columns can be evaluated later.
            Block additional_columns = block;

            if (prewhere_info->actions)
                prewhere_info->actions->execute(block);

            result.additional_columns.clear();
            for (auto & col : additional_columns)
            {
                /// Exclude columns that are present in the result block to avoid storing them and filtering twice.
                /// TODO: also need to exclude the columns that are not needed for the next steps.
                if (block.has(col.name))
                    continue;
                result.additional_columns.insert(col);
            }
        }

        prewhere_column_pos = block.getPositionByName(prewhere_info->column_name);

        result.columns.clear();
        result.columns.reserve(block.columns());
        for (auto & col : block)
            result.columns.emplace_back(std::move(col.column));

        current_step_filter = result.columns[prewhere_column_pos];
    }

    if (prewhere_info->remove_column)
        result.columns.erase(result.columns.begin() + prewhere_column_pos);

    result.optimize(current_step_filter, merge_tree_reader->canReadIncompleteGranules());

    if (result.num_rows && (result.need_filter || prewhere_info->need_filter))
    {
        result.need_filter = false; /// We are going to apply the filter now, reset the flag before the next step.

        FilterWithCachedCount current_step_filter_with_count(current_step_filter);

        /// The filter has not been applied yet, do it now.
        filterColumns(result.columns, current_step_filter_with_count);

        if (!last_reader_in_chain)
        {
            auto additional_columns = result.additional_columns.getColumns();
            filterColumns(additional_columns, current_step_filter_with_count);
            if (!additional_columns.empty())
                result.additional_columns.setColumns(additional_columns);
            else
                result.additional_columns.clear();
        }
        else
        {
            result.additional_columns.clear();
        }

        /// If the filter is all ones, num_rows is already correct; otherwise recompute it.
        if (current_step_filter_with_count.alwaysFalse())
            result.num_rows = 0;
        else if (!current_step_filter_with_count.alwaysTrue())
            result.num_rows = current_step_filter_with_count.countBytesInFilter();
    }

    LOG_TEST(log, "After execute prewhere {}", result.dumpInfo());
}

std::string PrewhereExprInfo::dump() const
{
    WriteBufferFromOwnString s;

    for (size_t i = 0; i < steps.size(); ++i)
    {
        s << "STEP " << i << ":\n"
            << "  ACTIONS: " << (steps[i].actions ? steps[i].actions->dumpActions() : "nullptr") << "\n"
            << "  COLUMN: " << steps[i].column_name << "\n"
            << "  REMOVE_COLUMN: " << steps[i].remove_column << "\n"
            << "  NEED_FILTER: " << steps[i].need_filter << "\n";
    }

    return s.str();
}

}