2012-02-27 06:28:20 +00:00
|
|
|
|
#pragma once
|
|
|
|
|
|
|
|
|
|
#include <DB/Interpreters/Aggregator.h>
|
2015-12-01 14:09:05 +00:00
|
|
|
|
#include <DB/IO/ReadBufferFromFile.h>
|
|
|
|
|
#include <DB/IO/CompressedReadBuffer.h>
|
2012-02-27 06:28:20 +00:00
|
|
|
|
#include <DB/DataStreams/IProfilingBlockInputStream.h>
|
2015-12-01 14:09:05 +00:00
|
|
|
|
#include <DB/DataStreams/BlocksListBlockInputStream.h>
|
|
|
|
|
#include <DB/DataStreams/NativeBlockInputStream.h>
|
|
|
|
|
#include <DB/DataStreams/MergingAggregatedMemoryEfficientBlockInputStream.h>
|
2014-11-30 18:22:57 +00:00
|
|
|
|
#include <DB/DataStreams/ParallelInputsProcessor.h>
|
2015-12-01 14:09:05 +00:00
|
|
|
|
#include <common/Revision.h>
|
2012-02-27 06:28:20 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
namespace DB
|
|
|
|
|
{
|
|
|
|
|
|
|
|
|
|
using Poco::SharedPtr;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/** Aggregates several sources in parallel.
  * Performs aggregation of blocks from the different sources independently, in separate threads,
  *  then merges the results.
  * If final == false, the aggregate functions are not finalized; that is, they are not replaced
  *  by their final value, but contain an intermediate state of the computation.
  * This is needed so that aggregation can be continued afterwards
  *  (for example, by merging streams of partially aggregated data).
  */
|
|
|
|
|
class ParallelAggregatingBlockInputStream : public IProfilingBlockInputStream
{
public:
	/** The columns from key_names and the arguments of the aggregate functions
	  *  must already have been computed in the input blocks.
	  */
	ParallelAggregatingBlockInputStream(
		BlockInputStreams inputs, BlockInputStreamPtr additional_input_at_end,
		const Aggregator::Params & params_, bool final_, size_t max_threads_, size_t temporary_data_merge_threads_)
		: params(params_), aggregator(params),
		final(final_), max_threads(std::min(inputs.size(), max_threads_)), temporary_data_merge_threads(temporary_data_merge_threads_),
		keys_size(params.keys_size), aggregates_size(params.aggregates_size),
		handler(*this), processor(inputs, additional_input_at_end, max_threads, handler)
	{
		children = inputs;
		if (additional_input_at_end)
			children.push_back(additional_input_at_end);
	}

	String getName() const override { return "ParallelAggregating"; }

	/// Identifier of this stream: child IDs (sorted, since their order is irrelevant) plus the aggregator ID.
	String getID() const override
	{
		std::stringstream res;
		res << "ParallelAggregating(";

		Strings children_ids(children.size());
		for (size_t i = 0; i < children.size(); ++i)
			children_ids[i] = children[i]->getID();

		/// The order does not matter.
		std::sort(children_ids.begin(), children_ids.end());

		for (size_t i = 0; i < children_ids.size(); ++i)
			res << (i == 0 ? "" : ", ") << children_ids[i];

		res << ", " << aggregator.getID() << ")";
		return res.str();
	}

	/// Cancel the input processing. Only the first caller proceeds past the compare-exchange;
	/// subsequent (or concurrent) calls are no-ops.
	void cancel() override
	{
		bool old_val = false;
		if (!is_cancelled.compare_exchange_strong(old_val, true, std::memory_order_seq_cst, std::memory_order_relaxed))
			return;

		/// Once `executed` is set, the worker threads have finished, so there is nothing left to cancel.
		if (!executed)
			processor.cancel();
	}

protected:
	/// On the first call: runs the parallel aggregation, then builds `impl` — the stream of result blocks.
	/// Afterwards, simply forwards blocks from `impl`.
	Block readImpl() override
	{
		if (!executed)
		{
			/// Lets the aggregator abort long-running work when this stream is cancelled.
			Aggregator::CancellationHook hook = [&]() { return this->isCancelled(); };
			aggregator.setCancellationHook(hook);

			execute();

			if (isCancelled())
				return {};

			if (!aggregator.hasTemporaryFiles())
			{
				/** If all the partially aggregated data is in RAM, merge it in parallel, also in RAM.
				  */
				impl = aggregator.mergeAndConvertToBlocks(many_data, final, max_threads);
			}
			else
			{
				/** If there are temporary files with partially aggregated data on disk,
				  *  read and merge them, spending the minimum amount of memory.
				  */

				ProfileEvents::increment(ProfileEvents::ExternalAggregationMerge);

				const auto & files = aggregator.getTemporaryFiles();
				BlockInputStreams input_streams;
				for (const auto & file : files.files)
				{
					temporary_inputs.emplace_back(new TemporaryFileStream(file->path()));
					input_streams.emplace_back(temporary_inputs.back()->block_in);
				}

				LOG_TRACE(log, "Will merge " << files.files.size() << " temporary files of size "
					<< (files.sum_size_compressed / 1048576.0) << " MiB compressed, "
					<< (files.sum_size_uncompressed / 1048576.0) << " MiB uncompressed.");

				impl.reset(new MergingAggregatedMemoryEfficientBlockInputStream(
					input_streams, params, final, temporary_data_merge_threads, temporary_data_merge_threads));
			}

			executed = true;
		}

		Block res;
		if (isCancelled() || !impl)
			return res;

		return impl->read();
	}

private:
	Aggregator::Params params;
	Aggregator aggregator;
	bool final;
	size_t max_threads;			/// Capped at the number of input streams in the constructor.
	size_t temporary_data_merge_threads;

	/// Copied from `params` for convenient access when sizing per-thread buffers.
	size_t keys_size;
	size_t aggregates_size;

	/** Used if there is a limit on the maximum number of rows during aggregation,
	  *  and if group_by_overflow_mode == ANY.
	  * In that case, new keys are not added to the set; aggregation is performed only for
	  *  keys that have already managed to get into the set.
	  */
	bool no_more_keys = false;

	/// Set to true once the aggregation has run and `impl` has been built; read by cancel().
	std::atomic<bool> executed {false};

	/// For reading data that was flushed into a temporary file.
	struct TemporaryFileStream
	{
		ReadBufferFromFile file_in;
		CompressedReadBuffer compressed_in;
		BlockInputStreamPtr block_in;

		TemporaryFileStream(const std::string & path)
			: file_in(path), compressed_in(file_in), block_in(new NativeBlockInputStream(compressed_in, Revision::get())) {}
	};
	std::vector<std::unique_ptr<TemporaryFileStream>> temporary_inputs;

	Logger * log = &Logger::get("ParallelAggregatingBlockInputStream");


	/// One AggregatedDataVariants per worker thread; merged together after processing.
	ManyAggregatedDataVariants many_data;
	/// One slot per worker thread; the first non-null exception is rethrown in execute().
	Exceptions exceptions;

	/// Per-thread scratch buffers and counters, indexed by thread number.
	struct ThreadData
	{
		size_t src_rows = 0;
		size_t src_bytes = 0;

		StringRefs key;
		ConstColumnPlainPtrs key_columns;
		Aggregator::AggregateColumns aggregate_columns;
		Sizes key_sizes;

		ThreadData(size_t keys_size, size_t aggregates_size)
		{
			key.resize(keys_size);
			key_columns.resize(keys_size);
			aggregate_columns.resize(aggregates_size);
			key_sizes.resize(keys_size);
		}
	};

	std::vector<ThreadData> threads_data;


	/// Callbacks invoked by ParallelInputsProcessor from its worker threads.
	struct Handler
	{
		Handler(ParallelAggregatingBlockInputStream & parent_)
			: parent(parent_) {}

		/// Aggregate one input block into this thread's AggregatedDataVariants.
		void onBlock(Block & block, size_t thread_num)
		{
			parent.aggregator.executeOnBlock(block, *parent.many_data[thread_num],
				parent.threads_data[thread_num].key_columns, parent.threads_data[thread_num].aggregate_columns,
				parent.threads_data[thread_num].key_sizes, parent.threads_data[thread_num].key,
				parent.no_more_keys);

			parent.threads_data[thread_num].src_rows += block.rowsInFirstColumn();
			parent.threads_data[thread_num].src_bytes += block.bytes();
		}

		/// Called when one worker thread runs out of input.
		void onFinishThread(size_t thread_num)
		{
			if (!parent.isCancelled() && parent.aggregator.hasTemporaryFiles())
			{
				/// Flush the data that is still in RAM to disk as well. That makes it simpler to merge them later.
				auto & data = *parent.many_data[thread_num];

				if (data.isConvertibleToTwoLevel())
					data.convertToTwoLevel();

				size_t rows = data.sizeWithoutOverflowRow();
				if (rows)
					parent.aggregator.writeToTemporaryFile(data, rows);
			}
		}

		/// Called once after all worker threads have finished.
		void onFinish()
		{
			if (!parent.isCancelled() && parent.aggregator.hasTemporaryFiles())
			{
				/// It may happen that some data has not been flushed to disk yet,
				///  because at the time onFinishThread was called, no data had been flushed to disk at all, and later some was.
				for (auto & data : parent.many_data)
				{
					if (data->isConvertibleToTwoLevel())
						data->convertToTwoLevel();

					size_t rows = data->sizeWithoutOverflowRow();
					if (rows)
						parent.aggregator.writeToTemporaryFile(*data, rows);
				}
			}
		}

		/// Remember the exception for the given thread and cancel the remaining work;
		///  the exception is rethrown later in execute().
		void onException(std::exception_ptr & exception, size_t thread_num)
		{
			parent.exceptions[thread_num] = exception;
			parent.cancel();
		}

		ParallelAggregatingBlockInputStream & parent;
	};

	Handler handler;
	ParallelInputsProcessor<Handler> processor;


	/// Run the parallel aggregation itself: prepare the per-thread state, process all inputs,
	///  rethrow the first worker exception (if any), and log throughput statistics.
	void execute()
	{
		many_data.resize(max_threads);
		exceptions.resize(max_threads);

		for (size_t i = 0; i < max_threads; ++i)
			threads_data.emplace_back(keys_size, aggregates_size);

		LOG_TRACE(log, "Aggregating");

		Stopwatch watch;

		for (auto & elem : many_data)
			elem = new AggregatedDataVariants;

		processor.process();
		processor.wait();

		rethrowFirstException(exceptions);

		if (isCancelled())
			return;

		double elapsed_seconds = watch.elapsedSeconds();

		size_t total_src_rows = 0;
		size_t total_src_bytes = 0;
		for (size_t i = 0; i < max_threads; ++i)
		{
			size_t rows = many_data[i]->size();
			LOG_TRACE(log, std::fixed << std::setprecision(3)
				<< "Aggregated. " << threads_data[i].src_rows << " to " << rows << " rows"
				<< " (from " << threads_data[i].src_bytes / 1048576.0 << " MiB)"
				<< " in " << elapsed_seconds << " sec."
				<< " (" << threads_data[i].src_rows / elapsed_seconds << " rows/sec., "
				<< threads_data[i].src_bytes / elapsed_seconds / 1048576.0 << " MiB/sec.)");

			total_src_rows += threads_data[i].src_rows;
			total_src_bytes += threads_data[i].src_bytes;
		}
		LOG_TRACE(log, std::fixed << std::setprecision(3)
			<< "Total aggregated. " << total_src_rows << " rows (from " << total_src_bytes / 1048576.0 << " MiB)"
			<< " in " << elapsed_seconds << " sec."
			<< " (" << total_src_rows / elapsed_seconds << " rows/sec., " << total_src_bytes / elapsed_seconds / 1048576.0 << " MiB/sec.)");
	}


	/** From here, we take the ready blocks after the aggregation.
	  */
	std::unique_ptr<IBlockInputStream> impl;
};
|
|
|
|
|
|
|
|
|
|
}
|