// Source: ClickHouse/dbms/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.cpp
// (Web-scrape page header and interleaved blame timestamps removed — they are not part of the source.)
#include <Storages/MergeTree/IMergeTreeReader.h>
#include <Storages/MergeTree/MergeTreeReadPool.h>
#include <Storages/MergeTree/MergeTreeThreadSelectBlockInputStream.h>

namespace DB
{

MergeTreeThreadSelectBlockInputStream::MergeTreeThreadSelectBlockInputStream(
    const size_t thread_,
    const MergeTreeReadPoolPtr & pool_,
    const size_t min_marks_to_read_,
    const UInt64 max_block_size_rows_,
    size_t preferred_block_size_bytes_,
    size_t preferred_max_column_in_block_size_bytes_,
    const MergeTreeData & storage_,
    const bool use_uncompressed_cache_,
    const PrewhereInfoPtr & prewhere_info_,
    const ReaderSettings & reader_settings_,
    const Names & virt_column_names_)
    :
    MergeTreeBaseSelectBlockInputStream{storage_, prewhere_info_, max_block_size_rows_,
        preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_,
        reader_settings_, use_uncompressed_cache_, virt_column_names_},
    thread{thread_},
    pool{pool_}
{
    /// With a fixed index granularity, round min_marks_to_read up to the nearest
    /// multiple of the block size expressed in marks.
    /// If granularity is adaptive this rounding doesn't make sense (mark sizes vary);
    /// maybe it will make sense to add a setting `max_block_size_bytes` instead.
    if (max_block_size_rows && !storage.canUseAdaptiveGranularity())
    {
        const size_t fixed_index_granularity = storage.getSettings()->index_granularity;
        const size_t requested_rows = min_marks_to_read_ * fixed_index_granularity;
        /// Round the row count up to a whole number of blocks, then convert back to marks.
        const size_t rounded_rows
            = (requested_rows + max_block_size_rows - 1) / max_block_size_rows * max_block_size_rows;
        min_marks_to_read = rounded_rows / fixed_index_granularity;
    }
    else
    {
        min_marks_to_read = min_marks_to_read_;
    }

    ordered_names = getHeader().getNames();
}
/// The header this stream returns: the pool's header with prewhere actions applied
/// and virtual columns injected.
Block MergeTreeThreadSelectBlockInputStream::getHeader() const
{
    auto header = pool->getHeader();
    executePrewhereActions(header, prewhere_info);
    injectVirtualColumns(header);
    return header;
}
/// Requests read task from MergeTreeReadPool and signals whether it got one
2018-11-29 09:19:42 +00:00
bool MergeTreeThreadSelectBlockInputStream::getNewTask()
2016-11-20 12:43:20 +00:00
{
2018-04-19 15:18:26 +00:00
task = pool->getTask(min_marks_to_read, thread, ordered_names);
if (!task)
{
/** Close the files (before destroying the object).
* When many sources are created, but simultaneously reading only a few of them,
* buffers don't waste memory.
*/
reader.reset();
pre_reader.reset();
return false;
}
const std::string path = task->data_part->getFullPath();
/// Allows pool to reduce number of threads in case of too slow reads.
2019-08-03 11:02:40 +00:00
auto profile_callback = [this](ReadBufferFromFileBase::ProfileInfo info_) { pool->profileFeedback(info_); };
if (!reader)
{
auto rest_mark_ranges = pool->getRestMarks(*task->data_part, task->mark_ranges[0]);
if (use_uncompressed_cache)
owned_uncompressed_cache = storage.global_context.getUncompressedCache();
owned_mark_cache = storage.global_context.getMarkCache();
2019-10-10 16:30:30 +00:00
reader = task->data_part->getReader(task->columns, rest_mark_ranges,
owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings,
IMergeTreeReader::ValueSizeMap{}, profile_callback);
if (prewhere_info)
2019-10-10 16:30:30 +00:00
pre_reader = task->data_part->getReader(task->pre_columns, rest_mark_ranges,
owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings,
IMergeTreeReader::ValueSizeMap{}, profile_callback);
}
else
{
/// in other case we can reuse readers, anyway they will be "seeked" to required mark
if (path != last_readed_part_path)
2018-10-03 17:12:38 +00:00
{
auto rest_mark_ranges = pool->getRestMarks(*task->data_part, task->mark_ranges[0]);
/// retain avg_value_size_hints
2019-10-10 16:30:30 +00:00
reader = task->data_part->getReader(task->columns, rest_mark_ranges,
owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings,
reader->getAvgValueSizeHints(), profile_callback);
if (prewhere_info)
2019-10-10 16:30:30 +00:00
pre_reader = task->data_part->getReader(task->pre_columns, rest_mark_ranges,
owned_uncompressed_cache.get(), owned_mark_cache.get(), reader_settings,
reader->getAvgValueSizeHints(), profile_callback);
}
}
2019-10-10 16:30:30 +00:00
2018-10-04 08:52:56 +00:00
last_readed_part_path = path;
return true;
2016-11-20 12:43:20 +00:00
}
MergeTreeThreadSelectBlockInputStream::~MergeTreeThreadSelectBlockInputStream() = default;
}