2014-03-13 17:44:00 +00:00
|
|
|
|
#include <DB/Storages/MergeTree/MergeTreeDataWriter.h>
|
2014-03-27 12:32:37 +00:00
|
|
|
|
#include <DB/Storages/MergeTree/MergedBlockOutputStream.h>
|
2014-03-13 17:44:00 +00:00
|
|
|
|
#include <DB/Common/escapeForFileName.h>
|
|
|
|
|
#include <DB/DataTypes/DataTypeArray.h>
|
2014-03-27 11:32:41 +00:00
|
|
|
|
#include <DB/IO/HashingWriteBuffer.h>
|
2014-03-13 17:44:00 +00:00
|
|
|
|
|
|
|
|
|
namespace DB
|
|
|
|
|
{
|
|
|
|
|
|
2014-03-19 10:45:13 +00:00
|
|
|
|
/// Split an inserted block into one block per calendar month, so that every
/// resulting part covers a single month. Also computes the [min_date, max_date]
/// interval for each produced block.
BlocksWithDateIntervals MergeTreeDataWriter::splitBlockIntoParts(const Block & block)
{
	data.check(block, true);

	const auto & date_lut = DateLUT::instance();

	size_t row_count = block.rows();
	size_t column_count = block.columns();

	/// The raw values of the table's date column.
	const ColumnUInt16::Container_t & date_data =
		typeid_cast<const ColumnUInt16 &>(*block.getByName(data.date_column_name).column).getData();

	/// Scan for the minimum and maximum date across all rows.
	UInt16 min_date = std::numeric_limits<UInt16>::max();
	UInt16 max_date = std::numeric_limits<UInt16>::min();
	for (const UInt16 day : date_data)
	{
		min_date = (day < min_date) ? day : min_date;
		max_date = (day > max_date) ? day : max_date;
	}

	BlocksWithDateIntervals res;

	UInt16 first_month = date_lut.toFirstDayNumOfMonth(DayNum_t(min_date));
	UInt16 last_month = date_lut.toFirstDayNumOfMonth(DayNum_t(max_date));

	/// The typical case: the whole block lies within one month — nothing to split.
	if (first_month == last_month)
	{
		res.push_back(BlockWithDateInterval(block, min_date, max_date));
		return res;
	}

	/// Otherwise distribute rows into one block per month, maintaining each
	/// block's own date interval along the way.
	using BlocksByMonth = std::map<UInt16, BlockWithDateInterval *>;
	BlocksByMonth blocks_by_month;

	for (size_t row = 0; row < row_count; ++row)
	{
		const UInt16 day = date_data[row];
		UInt16 month = date_lut.toFirstDayNumOfMonth(DayNum_t(day));

		BlockWithDateInterval *& target = blocks_by_month[month];
		if (!target)
		{
			/// First row seen for this month: start an empty block with the same structure.
			res.push_back(BlockWithDateInterval());
			target = &res.back();
			target->block = block.cloneEmpty();
		}

		if (day < target->min_date)
			target->min_date = day;
		if (day > target->max_date)
			target->max_date = day;

		/// Copy the row, column by column, into the month's block.
		for (size_t col = 0; col < column_count; ++col)
			target->block.getByPosition(col).column->insert((*block.getByPosition(col).column)[row]);
	}

	return res;
}
|
|
|
|
|
|
2015-08-17 21:09:36 +00:00
|
|
|
|
/// Write a single-month block to disk as a temporary part (directory name
/// prefixed with "tmp_") and return the in-memory part descriptor.
/// The part is sorted by the primary key first, unless the table is Unsorted.
MergeTreeData::MutableDataPartPtr MergeTreeDataWriter::writeTempPart(BlockWithDateInterval & block_with_dates, Int64 temp_index)
{
	Block & block = block_with_dates.block;
	const UInt16 min_date = block_with_dates.min_date;
	const UInt16 max_date = block_with_dates.max_date;

	const auto & date_lut = DateLUT::instance();

	/// splitBlockIntoParts must have guaranteed that all rows share one month.
	const DayNum_t first_month = date_lut.toFirstDayNumOfMonth(DayNum_t(min_date));
	const DayNum_t last_month = date_lut.toFirstDayNumOfMonth(DayNum_t(max_date));

	if (first_month != last_month)
		throw Exception("Logical error: part spans more than one month.");

	/// Number of index marks: row count divided by granularity, rounded up.
	const size_t marks_count = (block.rows() + data.index_granularity - 1) / data.index_granularity;

	const String part_name = "tmp_" + ActiveDataPartSet::getPartName(
		DayNum_t(min_date), DayNum_t(max_date), temp_index, temp_index, 0);

	const String part_path = data.getFullPath() + part_name + "/";

	Poco::File(part_path).createDirectories();

	MergeTreeData::MutableDataPartPtr new_data_part = std::make_shared<MergeTreeData::DataPart>(data);
	new_data_part->name = part_name;
	new_data_part->is_temp = true;

	/// If sorting requires some columns to be computed — compute them first.
	if (data.mode != MergeTreeData::Unsorted)
		data.getPrimaryExpression()->execute(block);

	SortDescription sort_description = data.getSortDescription();

	/// Sort via a permutation, but only when the block is not already in order.
	IColumn::Permutation permutation;
	IColumn::Permutation * permutation_ptr = nullptr;

	if (data.mode != MergeTreeData::Unsorted && !isAlreadySorted(block, sort_description))
	{
		stableGetPermutation(block, sort_description, permutation);
		permutation_ptr = &permutation;
	}

	/// Write only the columns actually present in the block.
	NamesAndTypesList part_columns = data.getColumnsList().filter(block.getColumnsList().getNames());
	MergedBlockOutputStream out(data, part_path, part_columns, CompressionMethod::LZ4);

	out.writePrefix();
	out.writeWithPermutation(block, permutation_ptr);
	MergeTreeData::DataPart::Checksums checksums = out.writeSuffixAndGetChecksums();

	/// Fill in the metadata of the freshly written part.
	new_data_part->left_date = DayNum_t(min_date);
	new_data_part->right_date = DayNum_t(max_date);
	new_data_part->left = temp_index;
	new_data_part->right = temp_index;
	new_data_part->level = 0;
	new_data_part->size = marks_count;
	new_data_part->modification_time = time(0);
	new_data_part->month = first_month;
	new_data_part->columns = part_columns;
	new_data_part->checksums = checksums;
	new_data_part->index.swap(out.getIndex());
	new_data_part->size_in_bytes = MergeTreeData::DataPart::calcTotalSize(part_path);

	return new_data_part;
}
|
|
|
|
|
|
|
|
|
|
}
|