#include <iomanip>

#include <statdaemons/Stopwatch.h>

#include <DB/DataTypes/DataTypeAggregateFunction.h>
#include <DB/Columns/ColumnsNumber.h>
#include <DB/AggregateFunctions/AggregateFunctionCount.h>

#include <DB/Interpreters/Aggregator.h>


namespace DB
{
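

/// Destroys the per-key aggregation states, unless every aggregate function has a trivial destructor.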
AggregatedDataVariants::~AggregatedDataVariants()
{
	if (aggregator && !aggregator->all_aggregates_has_trivial_destructor)
	{
		try
		{
			aggregator->destroyAllAggregateStates(*this);
		}
		catch (...)
		{
			tryLogCurrentException(__PRETTY_FUNCTION__);
		}
	}
}
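

/** One-time initialization: binds pointers to the aggregate functions, computes state sizes and offsets,
  * and, if a non-empty block is passed, resolves key/argument names to positions and builds the sample block of the result.
  */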
void Aggregator::initialize(Block & block)
{
	Poco::ScopedLock<Poco::FastMutex> lock(mutex);

	if (initialized)
		return;

	initialized = true;

	aggregate_functions.resize(aggregates_size);
	is_final.assign(aggregates_size, false);
	for (size_t i = 0; i < aggregates_size; ++i)
		aggregate_functions[i] = &*aggregates[i].function;

	/// Initialize the sizes of the states and the offsets for the aggregate functions.
	offsets_of_aggregate_states.resize(aggregates_size);
	total_size_of_aggregate_states = 0;
	all_aggregates_has_trivial_destructor = true;

	for (size_t i = 0; i < aggregates_size; ++i)
	{
		offsets_of_aggregate_states[i] = total_size_of_aggregate_states;
		total_size_of_aggregate_states += aggregates[i].function->sizeOfData();

		if (!aggregates[i].function->hasTrivialDestructor())
			all_aggregates_has_trivial_destructor = false;
	}

	/** Everything below is needed only if a non-empty block was passed.
	  * (None of it is needed in the merge method for blocks that already contain aggregate function states.)
	  */
	if (!block)
		return;

	/// Convert column names to positions if the positions are not given.
	if (keys.empty() && !key_names.empty())
		for (Names::const_iterator it = key_names.begin(); it != key_names.end(); ++it)
			keys.push_back(block.getPositionByName(*it));

	for (AggregateDescriptions::iterator it = aggregates.begin(); it != aggregates.end(); ++it)
		if (it->arguments.empty() && !it->argument_names.empty())
			for (Names::const_iterator jt = it->argument_names.begin(); jt != it->argument_names.end(); ++jt)
				it->arguments.push_back(block.getPositionByName(*jt));

	/// Create a sample block describing the result.
	if (!sample)
	{
		for (size_t i = 0; i < keys_size; ++i)
		{
			sample.insert(block.getByPosition(keys[i]).cloneEmpty());
			if (sample.getByPosition(i).column->isConst())
				sample.getByPosition(i).column = dynamic_cast<IColumnConst &>(*sample.getByPosition(i).column).convertToFullColumn();
		}

		for (size_t i = 0; i < aggregates_size; ++i)
		{
			ColumnWithNameAndType col;
			col.name = aggregates[i].column_name;

			size_t arguments_size = aggregates[i].arguments.size();
			DataTypes argument_types(arguments_size);
			for (size_t j = 0; j < arguments_size; ++j)
				argument_types[j] = block.getByPosition(aggregates[i].arguments[j]).type;

			col.type = new DataTypeAggregateFunction(aggregates[i].function, argument_types, aggregates[i].parameters);
			col.column = new ColumnAggregateFunction(aggregates[i].function);

			sample.insert(col);
		}
	}
}
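

/** Chooses the hash table layout for the given key columns: no key, a single numeric key that fits into 64 bits,
  * keys packed into 128 bits, a single String or FixedString key, or the generic hashed variant.
  */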
AggregatedDataVariants::Type Aggregator::chooseAggregationMethod(const ConstColumnPlainPtrs & key_columns, Sizes & key_sizes)
{
	bool keys_fit_128_bits = true;
	size_t keys_bytes = 0;
	key_sizes.resize(keys_size);
	for (size_t j = 0; j < keys_size; ++j)
	{
		if (!key_columns[j]->isFixed())
		{
			keys_fit_128_bits = false;
			break;
		}
		key_sizes[j] = key_columns[j]->sizeOfField();
		keys_bytes += key_sizes[j];
	}
	if (keys_bytes > 16)
		keys_fit_128_bits = false;

	/// If there are no keys.
	if (keys_size == 0)
		return AggregatedDataVariants::WITHOUT_KEY;

	/// If there is a single numeric key that fits into 64 bits.
	if (keys_size == 1 && key_columns[0]->isNumeric())
		return AggregatedDataVariants::KEY_64;

	/// If the keys fit into 128 bits, use a hash table keyed by the keys packed into 128 bits.
	if (keys_fit_128_bits)
		return AggregatedDataVariants::KEYS_128;

	/// If there is a single string key, use a hash table keyed by it.
	if (keys_size == 1 && dynamic_cast<const ColumnString *>(key_columns[0]))
		return AggregatedDataVariants::KEY_STRING;

	if (keys_size == 1 && dynamic_cast<const ColumnFixedString *>(key_columns[0]))
		return AggregatedDataVariants::KEY_FIXED_STRING;

	/// Otherwise, aggregate by a hash of the keys.
	return AggregatedDataVariants::HASHED;
}
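

/// Creates the states of all aggregate functions in a freshly allocated piece of aggregate data.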
void Aggregator::createAggregateStates(AggregateDataPtr & aggregate_data) const
{
	for (size_t j = 0; j < aggregates_size; ++j)
	{
		try
		{
			/** An exception may be thrown if there is not enough memory.
			  * So that everything is destroyed correctly afterwards, we "roll back" the states created so far.
			  * The code is not very convenient.
			  */
			aggregate_functions[j]->create(aggregate_data + offsets_of_aggregate_states[j]);
		}
		catch (...)
		{
			for (size_t rollback_j = 0; rollback_j < j; ++rollback_j)
				aggregate_functions[rollback_j]->destroy(aggregate_data + offsets_of_aggregate_states[rollback_j]);

			aggregate_data = nullptr;
			throw;
		}
	}
}
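

/** Aggregates the rows of a block with the given method: for each row, the key is looked up or inserted
  * into the hash table, states are created for new keys, and the row is added to every aggregate function.
  * With no_more_keys set, rows with unknown keys are aggregated into overflow_row (or skipped if it is not set).
  */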
template <typename Method>
void Aggregator::executeImpl(
	Method & method,
	Arena * aggregates_pool,
	size_t rows,
	ConstColumnPlainPtrs & key_columns,
	AggregateColumns & aggregate_columns,
	const Sizes & key_sizes,
	StringRefs & keys,
	bool no_more_keys,
	AggregateDataPtr overflow_row) const
{
	method.init(key_columns);

	/// For all rows.
	for (size_t i = 0; i < rows; ++i)
	{
		typename Method::iterator it;
		bool inserted;			/// Was a new key inserted, or did this key already exist?
		bool overflow = false;	/// The new key did not fit into the hash table because of no_more_keys.

		/// Get the key to insert into the hash table.
		typename Method::Key key = method.getKey(key_columns, keys_size, i, key_sizes, keys);

		if (!no_more_keys)	/// Insert.
			method.data.emplace(key, it, inserted);
		else
		{
			/// Only add if the key already exists.
			inserted = false;
			it = method.data.find(key);
			if (method.data.end() == it)
				overflow = true;
		}

		/// If the key did not fit and the data does not need to be aggregated into a separate row, there is nothing to do.
		if (overflow && !overflow_row)
			continue;

		/// If a new key was inserted, initialize the states of the aggregate functions and, possibly, something related to the key.
		if (inserted)
		{
			method.onNewKey(it, keys_size, i, keys, *aggregates_pool);

			AggregateDataPtr & aggregate_data = Method::getAggregateData(it->second);
			aggregate_data = aggregates_pool->alloc(total_size_of_aggregate_states);
			createAggregateStates(aggregate_data);
		}

		AggregateDataPtr value = !overflow ? Method::getAggregateData(it->second) : overflow_row;

		/// Add the values to the aggregate functions.
		for (size_t j = 0; j < aggregates_size; ++j)
			aggregate_functions[j]->add(value + offsets_of_aggregate_states[j], &aggregate_columns[j][0], i);
	}
}
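

/** Writes the contents of a hash table into block columns: keys go into key_columns, and every aggregate
  * is either exposed as a pointer to its state (aggregate_columns) or finalized into final_aggregate_columns.
  */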
template <typename Method>
void Aggregator::convertToBlockImpl(
	Method & method,
	ColumnPlainPtrs & key_columns,
	AggregateColumnsData & aggregate_columns,
	ColumnPlainPtrs & final_aggregate_columns,
	const Sizes & key_sizes,
	size_t start_row) const
{
	size_t j = start_row;
	for (typename Method::const_iterator it = method.data.begin(); it != method.data.end(); ++it, ++j)
	{
		method.insertKeyIntoColumns(it, key_columns, keys_size, key_sizes);

		for (size_t i = 0; i < aggregates_size; ++i)
			if (!is_final[i])
				(*aggregate_columns[i])[j] = Method::getAggregateData(it->second) + offsets_of_aggregate_states[i];
			else
				aggregate_functions[i]->insertResultInto(
					Method::getAggregateData(it->second) + offsets_of_aggregate_states[i],
					*final_aggregate_columns[i]);
	}
}
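

/** Merges the states from method_src into method_dst: for keys that already exist in the destination,
  * the source states are merged in and destroyed; for new keys, the source state is simply taken over.
  */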
template <typename Method>
void Aggregator::mergeDataImpl(
	Method & method_dst,
	Method & method_src) const
{
	for (typename Method::iterator it = method_src.data.begin(); it != method_src.data.end(); ++it)
	{
		typename Method::iterator res_it;
		bool inserted;
		method_dst.data.emplace(it->first, res_it, inserted);

		if (!inserted)
		{
			for (size_t i = 0; i < aggregates_size; ++i)
				aggregate_functions[i]->merge(
					Method::getAggregateData(res_it->second) + offsets_of_aggregate_states[i],
					Method::getAggregateData(it->second) + offsets_of_aggregate_states[i]);

			for (size_t i = 0; i < aggregates_size; ++i)
				aggregate_functions[i]->destroy(
					Method::getAggregateData(it->second) + offsets_of_aggregate_states[i]);

			Method::getAggregateData(it->second) = nullptr;
		}
		else
		{
			res_it->second = it->second;
		}
	}
}
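

/** Merges one block of previously aggregated data into the hash table: for each row, the key is looked up
  * or inserted, states are created for new keys, and the states stored in aggregate_columns are merged in.
  */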
template <typename Method>
void Aggregator::mergeStreamsImpl(
	Method & method,
	Arena * aggregates_pool,
	size_t start_row,
	size_t rows,
	ConstColumnPlainPtrs & key_columns,
	AggregateColumnsData & aggregate_columns,
	const Sizes & key_sizes,
	StringRefs & keys) const
{
	method.init(key_columns);

	/// For all rows.
	for (size_t i = start_row; i < rows; ++i)
	{
		typename Method::iterator it;
		bool inserted;	/// Was a new key inserted, or did this key already exist?

		/// Get the key to insert into the hash table.
		typename Method::Key key = method.getKey(key_columns, keys_size, i, key_sizes, keys);

		method.data.emplace(key, it, inserted);

		if (inserted)
		{
			method.onNewKey(it, keys_size, i, keys, *aggregates_pool);

			AggregateDataPtr & aggregate_data = Method::getAggregateData(it->second);
			aggregate_data = aggregates_pool->alloc(total_size_of_aggregate_states);
			createAggregateStates(aggregate_data);
		}

		/// Merge the states of the aggregate functions.
		for (size_t j = 0; j < aggregates_size; ++j)
			aggregate_functions[j]->merge(
				Method::getAggregateData(it->second) + offsets_of_aggregate_states[j],
				(*aggregate_columns[j])[i]);
	}
}
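

/// Destroys the aggregate states in the hash table for functions that were finalized; the rest are owned by ColumnAggregateFunction.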
template <typename Method>
void Aggregator::destroyImpl(
	Method & method) const
{
	for (typename Method::const_iterator it = method.data.begin(); it != method.data.end(); ++it)
	{
		for (size_t i = 0; i < aggregates_size; ++i)
			/// If the aggregate function cannot be finalized, ColumnAggregateFunction is responsible for destroying it.
			if (is_final[i])
			{
				char * data = Method::getAggregateData(it->second);

				/** If an exception (usually a memory shortage, thrown by the MemoryTracker) occurred
				  * after the key was inserted into the hash table but before all aggregate function states were created,
				  * then data will be equal to nullptr.
				  */
				if (nullptr != data)
					aggregate_functions[i]->destroy(data + offsets_of_aggregate_states[i]);
			}
	}
}
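

/** Aggregates one block into result. Returns false if aggregation should be stopped
  * (the GROUP BY limit was exceeded and group_by_overflow_mode == BREAK); otherwise returns true.
  */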
bool Aggregator::executeOnBlock(Block & block, AggregatedDataVariants & result,
	ConstColumnPlainPtrs & key_columns, AggregateColumns & aggregate_columns,
	Sizes & key_sizes, StringRefs & key,
	bool & no_more_keys)
{
	initialize(block);

	/// result will destroy the states of the aggregate functions in its destructor.
	result.aggregator = this;

	for (size_t i = 0; i < aggregates_size; ++i)
		aggregate_columns[i].resize(aggregates[i].arguments.size());

	/// Remember the columns we will work with.
	for (size_t i = 0; i < keys_size; ++i)
		key_columns[i] = block.getByPosition(keys[i]).column;

	for (size_t i = 0; i < aggregates_size; ++i)
	{
		for (size_t j = 0; j < aggregate_columns[i].size(); ++j)
		{
			aggregate_columns[i][j] = block.getByPosition(aggregates[i].arguments[j]).column;

			/** Aggregate functions expect full (materialized) columns to be passed to them.
			  * Therefore, constant columns are not allowed as arguments of aggregate functions.
			  */
			if (aggregate_columns[i][j]->isConst())
				throw Exception("Constants are not allowed as arguments of aggregate functions", ErrorCodes::ILLEGAL_COLUMN);
		}
	}

	size_t rows = block.rows();

	/// How should the aggregation be performed?
	if (result.empty())
	{
		result.init(chooseAggregationMethod(key_columns, key_sizes));
		result.keys_size = keys_size;
		result.key_sizes = key_sizes;
		LOG_TRACE(log, "Aggregation method: " << result.getMethodName());
	}

	if (overflow_row && !result.without_key)
	{
		result.without_key = result.aggregates_pool->alloc(total_size_of_aggregate_states);
		createAggregateStates(result.without_key);
	}

	if (result.type == AggregatedDataVariants::WITHOUT_KEY)
	{
		AggregatedDataWithoutKey & res = result.without_key;
		if (!res)
		{
			res = result.aggregates_pool->alloc(total_size_of_aggregate_states);
			createAggregateStates(res);
		}

		/// Optimization for the case of a single aggregate function count().
		AggregateFunctionCount * agg_count = aggregates_size == 1
			? dynamic_cast<AggregateFunctionCount *>(aggregate_functions[0])
			: NULL;

		if (agg_count)
			agg_count->addDelta(res, rows);
		else
		{
			for (size_t i = 0; i < rows; ++i)
			{
				/// Add the values.
				for (size_t j = 0; j < aggregates_size; ++j)
					aggregate_functions[j]->add(res + offsets_of_aggregate_states[j], &aggregate_columns[j][0], i);
			}
		}
	}

	AggregateDataPtr overflow_row_ptr = overflow_row ? result.without_key : nullptr;

	if (result.type == AggregatedDataVariants::KEY_64)
		executeImpl(*result.key64, result.aggregates_pool, rows, key_columns, aggregate_columns,
			result.key_sizes, key, no_more_keys, overflow_row_ptr);
	else if (result.type == AggregatedDataVariants::KEY_STRING)
		executeImpl(*result.key_string, result.aggregates_pool, rows, key_columns, aggregate_columns,
			result.key_sizes, key, no_more_keys, overflow_row_ptr);
	else if (result.type == AggregatedDataVariants::KEY_FIXED_STRING)
		executeImpl(*result.key_fixed_string, result.aggregates_pool, rows, key_columns, aggregate_columns,
			result.key_sizes, key, no_more_keys, overflow_row_ptr);
	else if (result.type == AggregatedDataVariants::KEYS_128)
		executeImpl(*result.keys128, result.aggregates_pool, rows, key_columns, aggregate_columns,
			result.key_sizes, key, no_more_keys, overflow_row_ptr);
	else if (result.type == AggregatedDataVariants::HASHED)
		executeImpl(*result.hashed, result.aggregates_pool, rows, key_columns, aggregate_columns,
			result.key_sizes, key, no_more_keys, overflow_row_ptr);
	else if (result.type != AggregatedDataVariants::WITHOUT_KEY)
		throw Exception("Unknown aggregated data variant.", ErrorCodes::UNKNOWN_AGGREGATED_DATA_VARIANT);

	/// Check the constraints.
	if (!no_more_keys && max_rows_to_group_by && result.size() > max_rows_to_group_by)
	{
		if (group_by_overflow_mode == OverflowMode::THROW)
			throw Exception("Limit for rows to GROUP BY exceeded: has " + toString(result.size())
				+ " rows, maximum: " + toString(max_rows_to_group_by),
				ErrorCodes::TOO_MUCH_ROWS);
		else if (group_by_overflow_mode == OverflowMode::BREAK)
			return false;
		else if (group_by_overflow_mode == OverflowMode::ANY)
			no_more_keys = true;
		else
			throw Exception("Logical error: unknown overflow mode", ErrorCodes::LOGICAL_ERROR);
	}

	return true;
}


/** The result is stored in RAM and must fit entirely in RAM.
  */
void Aggregator::execute(BlockInputStreamPtr stream, AggregatedDataVariants & result)
{
	StringRefs key(keys_size);
	ConstColumnPlainPtrs key_columns(keys_size);
	AggregateColumns aggregate_columns(aggregates_size);
	Sizes key_sizes;

	/** Used if there is a limit on the maximum number of rows during aggregation
	  * and group_by_overflow_mode == ANY.
	  * In that case, new keys are not added to the set; aggregation is performed only
	  * for the keys that have already made it into the set.
	  */
	bool no_more_keys = false;

	LOG_TRACE(log, "Aggregating");

	Stopwatch watch;

	size_t src_rows = 0;
	size_t src_bytes = 0;

	/// Read all the data.
	while (Block block = stream->read())
	{
		src_rows += block.rows();
		src_bytes += block.bytes();

		if (!executeOnBlock(block, result,
			key_columns, aggregate_columns, key_sizes, key,
			no_more_keys))
			break;
	}

	double elapsed_seconds = watch.elapsedSeconds();
	size_t rows = result.size();
	LOG_TRACE(log, std::fixed << std::setprecision(3)
		<< "Aggregated. " << src_rows << " to " << rows << " rows (from " << src_bytes / 1048576.0 << " MiB)"
		<< " in " << elapsed_seconds << " sec."
		<< " (" << src_rows / elapsed_seconds << " rows/sec., " << src_bytes / elapsed_seconds / 1048576.0 << " MiB/sec.)");
}
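

/** Converts the aggregated data into a block shaped like the sample block. If final is true, every aggregate
  * that can be finalized is replaced by its result; otherwise the block holds ColumnAggregateFunction columns
  * that take shared ownership of the arenas with the states.
  */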
Block Aggregator::convertToBlock(AggregatedDataVariants & data_variants, bool final)
{
	Block res = sample.cloneEmpty();
	size_t rows = data_variants.size();

	LOG_TRACE(log, "Converting aggregated data to block");

	Stopwatch watch;

	/// In what data structure is the data aggregated?
	if (data_variants.empty())
		return Block();

	ColumnPlainPtrs key_columns(keys_size);
	AggregateColumnsData aggregate_columns(aggregates_size);
	ColumnPlainPtrs final_aggregate_columns(aggregates_size);

	for (size_t i = 0; i < keys_size; ++i)
	{
		key_columns[i] = res.getByPosition(i).column;
		key_columns[i]->reserve(rows);
	}

	try
	{
		for (size_t i = 0; i < aggregates_size; ++i)
		{
			is_final[i] = final && aggregate_functions[i]->canBeFinal();
			if (!is_final[i])
			{
				/// The ColumnAggregateFunction column takes shared ownership of the arena with the states of the aggregate functions.
				ColumnAggregateFunction & column_aggregate_func = static_cast<ColumnAggregateFunction &>(*res.getByPosition(i + keys_size).column);

				for (size_t j = 0; j < data_variants.aggregates_pools.size(); ++j)
					column_aggregate_func.addArena(data_variants.aggregates_pools[j]);

				aggregate_columns[i] = &column_aggregate_func.getData();
				aggregate_columns[i]->resize(rows);
			}
			else
			{
				ColumnWithNameAndType & column = res.getByPosition(i + keys_size);
				column.type = aggregate_functions[i]->getReturnType();
				column.column = column.type->createColumn();
				column.column->reserve(rows);

				final_aggregate_columns[i] = column.column;
			}
		}

		if (data_variants.type == AggregatedDataVariants::WITHOUT_KEY || overflow_row)
		{
			AggregatedDataWithoutKey & data = data_variants.without_key;

			for (size_t i = 0; i < aggregates_size; ++i)
				if (!is_final[i])
					(*aggregate_columns[i])[0] = data + offsets_of_aggregate_states[i];
				else
					aggregate_functions[i]->insertResultInto(data + offsets_of_aggregate_states[i], *final_aggregate_columns[i]);

			if (overflow_row)
				for (size_t i = 0; i < keys_size; ++i)
					key_columns[i]->insertDefault();
		}

		size_t start_row = overflow_row ? 1 : 0;

		if (data_variants.type == AggregatedDataVariants::KEY_64)
			convertToBlockImpl(*data_variants.key64, key_columns, aggregate_columns,
				final_aggregate_columns, data_variants.key_sizes, start_row);
		else if (data_variants.type == AggregatedDataVariants::KEY_STRING)
			convertToBlockImpl(*data_variants.key_string, key_columns, aggregate_columns,
				final_aggregate_columns, data_variants.key_sizes, start_row);
		else if (data_variants.type == AggregatedDataVariants::KEY_FIXED_STRING)
			convertToBlockImpl(*data_variants.key_fixed_string, key_columns, aggregate_columns,
				final_aggregate_columns, data_variants.key_sizes, start_row);
		else if (data_variants.type == AggregatedDataVariants::KEYS_128)
			convertToBlockImpl(*data_variants.keys128, key_columns, aggregate_columns,
				final_aggregate_columns, data_variants.key_sizes, start_row);
		else if (data_variants.type == AggregatedDataVariants::HASHED)
			convertToBlockImpl(*data_variants.hashed, key_columns, aggregate_columns,
				final_aggregate_columns, data_variants.key_sizes, start_row);
		else if (data_variants.type != AggregatedDataVariants::WITHOUT_KEY)
			throw Exception("Unknown aggregated data variant.", ErrorCodes::UNKNOWN_AGGREGATED_DATA_VARIANT);
	}
	catch (...)
	{
		/** Work with the states of the aggregate functions is not sufficiently exception-safe.
		  * If some of the aggregate_columns were resized but the values were not inserted,
		  * those columns would be left in an incorrect state
		  * (ColumnAggregateFunction would try, in its destructor, to call destructors for elements that do not exist),
		  * and the destructors would also be invoked by AggregatedDataVariants.
		  * Therefore, we "roll back" them manually.
		  */
		for (size_t i = 0; i < aggregates_size; ++i)
			if (aggregate_columns[i])
				aggregate_columns[i]->clear();

		throw;
	}

	if (!final)
	{
		/// data_variants will not destroy the states of the aggregate functions in its destructor. The states are now owned by ColumnAggregateFunction.
		data_variants.aggregator = nullptr;
	}

	/// Adjust the size of the constant columns in the block.
	size_t columns = res.columns();
	for (size_t i = 0; i < columns; ++i)
		if (res.getByPosition(i).column->isConst())
			res.getByPosition(i).column = res.getByPosition(i).column->cut(0, rows);

	double elapsed_seconds = watch.elapsedSeconds();
	LOG_TRACE(log, std::fixed << std::setprecision(3)
		<< "Converted aggregated data to block. "
		<< rows << " rows, " << res.bytes() / 1048576.0 << " MiB"
		<< " in " << elapsed_seconds << " sec."
		<< " (" << rows / elapsed_seconds << " rows/sec., " << res.bytes() / elapsed_seconds / 1048576.0 << " MiB/sec.)");

	return res;
}
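

/** Merges several sets of aggregated data into one. All variants must have been aggregated with the same method;
  * the states of the merged-in variants are destroyed or taken over by the result.
  */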
AggregatedDataVariantsPtr Aggregator::merge(ManyAggregatedDataVariants & data_variants)
{
	if (data_variants.empty())
		throw Exception("Empty data passed to Aggregator::merge().", ErrorCodes::EMPTY_DATA_PASSED);

	LOG_TRACE(log, "Merging aggregated data");

	Stopwatch watch;

	AggregatedDataVariantsPtr res = data_variants[0];

	/// Merge all the aggregation results into the first one.
	size_t rows = res->size();
	for (size_t i = 1, size = data_variants.size(); i < size; ++i)
	{
		rows += data_variants[i]->size();
		AggregatedDataVariants & current = *data_variants[i];

		res->aggregates_pools.insert(res->aggregates_pools.end(), current.aggregates_pools.begin(), current.aggregates_pools.end());

		if (current.empty())
			continue;

		if (res->empty())
		{
			res = data_variants[i];
			continue;
		}

		if (res->type != current.type)
			throw Exception("Cannot merge different aggregated data variants.", ErrorCodes::CANNOT_MERGE_DIFFERENT_AGGREGATED_DATA_VARIANTS);

		/// In what data structure is the data aggregated?
		if (res->type == AggregatedDataVariants::WITHOUT_KEY || overflow_row)
		{
			AggregatedDataWithoutKey & res_data = res->without_key;
			AggregatedDataWithoutKey & current_data = current.without_key;

			for (size_t i = 0; i < aggregates_size; ++i)
				aggregate_functions[i]->merge(res_data + offsets_of_aggregate_states[i], current_data + offsets_of_aggregate_states[i]);

			for (size_t i = 0; i < aggregates_size; ++i)
				aggregate_functions[i]->destroy(current_data + offsets_of_aggregate_states[i]);
		}

		if (res->type == AggregatedDataVariants::KEY_64)
			mergeDataImpl(*res->key64, *current.key64);
		else if (res->type == AggregatedDataVariants::KEY_STRING)
			mergeDataImpl(*res->key_string, *current.key_string);
		else if (res->type == AggregatedDataVariants::KEY_FIXED_STRING)
			mergeDataImpl(*res->key_fixed_string, *current.key_fixed_string);
		else if (res->type == AggregatedDataVariants::KEYS_128)
			mergeDataImpl(*res->keys128, *current.keys128);
		else if (res->type == AggregatedDataVariants::HASHED)
			mergeDataImpl(*res->hashed, *current.hashed);
		else if (res->type != AggregatedDataVariants::WITHOUT_KEY)
			throw Exception("Unknown aggregated data variant.", ErrorCodes::UNKNOWN_AGGREGATED_DATA_VARIANT);

		/// current will not destroy the states of the aggregate functions in its destructor.
		current.aggregator = nullptr;
	}

	double elapsed_seconds = watch.elapsedSeconds();
	size_t res_rows = res->size();

	LOG_TRACE(log, std::fixed << std::setprecision(3)
		<< "Merged aggregated data. "
		<< "From " << rows << " to " << res_rows << " rows (efficiency: " << static_cast<double>(rows) / res_rows << ")"
		<< " in " << elapsed_seconds << " sec."
		<< " (" << rows / elapsed_seconds << " rows/sec.)");

	return res;
}
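

/** Merges a stream of blocks with previously aggregated data (ColumnAggregateFunction columns) into result.
  */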
void Aggregator::merge(BlockInputStreamPtr stream, AggregatedDataVariants & result)
{
	StringRefs key(keys_size);
	ConstColumnPlainPtrs key_columns(keys_size);

	AggregateColumnsData aggregate_columns(aggregates_size);

	Block empty_block;
	initialize(empty_block);

	/// result will destroy the states of the aggregate functions in its destructor.
	result.aggregator = this;

	/// Read all the data.
	while (Block block = stream->read())
	{
		LOG_TRACE(log, "Merging aggregated block");

		if (!sample)
			for (size_t i = 0; i < keys_size + aggregates_size; ++i)
				sample.insert(block.getByPosition(i).cloneEmpty());

		/// Remember the columns we will work with.
		for (size_t i = 0; i < keys_size; ++i)
			key_columns[i] = block.getByPosition(i).column;

		for (size_t i = 0; i < aggregates_size; ++i)
			aggregate_columns[i] = &dynamic_cast<ColumnAggregateFunction &>(*block.getByPosition(keys_size + i).column).getData();

		size_t rows = block.rows();

		/// How should the aggregation be performed?
		Sizes key_sizes;
		AggregatedDataVariants::Type method = chooseAggregationMethod(key_columns, key_sizes);

		if (result.empty())
		{
			result.init(method);
			result.keys_size = keys_size;
			result.key_sizes = key_sizes;
		}

		if (result.type == AggregatedDataVariants::WITHOUT_KEY || overflow_row)
		{
			AggregatedDataWithoutKey & res = result.without_key;
			if (!res)
			{
				res = result.aggregates_pool->alloc(total_size_of_aggregate_states);
				createAggregateStates(res);
			}

			/// Add the values.
			for (size_t i = 0; i < aggregates_size; ++i)
				aggregate_functions[i]->merge(res + offsets_of_aggregate_states[i], (*aggregate_columns[i])[0]);
		}

		size_t start_row = overflow_row ? 1 : 0;

		if (result.type == AggregatedDataVariants::KEY_64)
			mergeStreamsImpl(*result.key64, result.aggregates_pool, start_row, rows, key_columns, aggregate_columns, key_sizes, key);
		else if (result.type == AggregatedDataVariants::KEY_STRING)
			mergeStreamsImpl(*result.key_string, result.aggregates_pool, start_row, rows, key_columns, aggregate_columns, key_sizes, key);
		else if (result.type == AggregatedDataVariants::KEY_FIXED_STRING)
			mergeStreamsImpl(*result.key_fixed_string, result.aggregates_pool, start_row, rows, key_columns, aggregate_columns, key_sizes, key);
		else if (result.type == AggregatedDataVariants::KEYS_128)
			mergeStreamsImpl(*result.keys128, result.aggregates_pool, start_row, rows, key_columns, aggregate_columns, key_sizes, key);
		else if (result.type == AggregatedDataVariants::HASHED)
			mergeStreamsImpl(*result.hashed, result.aggregates_pool, start_row, rows, key_columns, aggregate_columns, key_sizes, key);
		else if (result.type != AggregatedDataVariants::WITHOUT_KEY)
			throw Exception("Unknown aggregated data variant.", ErrorCodes::UNKNOWN_AGGREGATED_DATA_VARIANT);

		LOG_TRACE(log, "Merged aggregated block");
	}
}
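

/** Destroys the aggregate function states still owned by result (states whose functions were not finalized
  * are left to ColumnAggregateFunction). Called from the destructor of AggregatedDataVariants.
  */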
void Aggregator::destroyAllAggregateStates(AggregatedDataVariants & result)
{
	if (result.size() == 0)
		return;

	LOG_TRACE(log, "Destroying aggregate states");

	/// In what data structure is the data aggregated?
	if (result.type == AggregatedDataVariants::WITHOUT_KEY || overflow_row)
	{
		AggregatedDataWithoutKey & res_data = result.without_key;

		for (size_t i = 0; i < aggregates_size; ++i)
			/// If the aggregate function cannot be finalized, ColumnAggregateFunction is responsible for destroying it.
			if (is_final[i])
				aggregate_functions[i]->destroy(res_data + offsets_of_aggregate_states[i]);
	}

	if (result.type == AggregatedDataVariants::KEY_64)
		destroyImpl(*result.key64);
	else if (result.type == AggregatedDataVariants::KEY_STRING)
		destroyImpl(*result.key_string);
	else if (result.type == AggregatedDataVariants::KEY_FIXED_STRING)
		destroyImpl(*result.key_fixed_string);
	else if (result.type == AggregatedDataVariants::KEYS_128)
		destroyImpl(*result.keys128);
	else if (result.type == AggregatedDataVariants::HASHED)
		destroyImpl(*result.hashed);
	else if (result.type != AggregatedDataVariants::WITHOUT_KEY)
		throw Exception("Unknown aggregated data variant.", ErrorCodes::UNKNOWN_AGGREGATED_DATA_VARIANT);
}
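

/// Returns a string identifying this aggregation by its key positions (or names) and aggregate column names.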
String Aggregator::getID() const
{
	std::stringstream res;

	if (keys.empty())
	{
		res << "key_names";
		for (size_t i = 0; i < key_names.size(); ++i)
			res << ", " << key_names[i];
	}
	else
	{
		res << "keys";
		for (size_t i = 0; i < keys.size(); ++i)
			res << ", " << keys[i];
	}

	res << ", aggregates";
	for (size_t i = 0; i < aggregates.size(); ++i)
		res << ", " << aggregates[i].column_name;

	return res.str();
}


}