#pragma once

#include <DB/IO/WriteBufferFromFile.h>
#include <DB/IO/CompressedWriteBuffer.h>

#include <DB/Storages/StorageMergeTree.h>


namespace DB
{

/** For writing a part obtained by merging several others.
  * The data is already sorted, belongs to a single month, and is written into one part.
  */
class MergedBlockOutputStream : public IBlockOutputStream
{
public:
	MergedBlockOutputStream(StorageMergeTree & storage_,
		UInt16 min_date, UInt16 max_date, UInt64 min_part_id, UInt64 max_part_id, UInt32 level)
		: storage(storage_), marks_count(0), index_offset(0)
	{
		part_name = storage.getPartName(
			DayNum_t(min_date), DayNum_t(max_date),
			min_part_id, max_part_id, level);
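
		/// The part is first written into a temporary "tmp_<name>" directory and is renamed
		/// to its final name in writeSuffix(), so an unfinished part never appears under its real name.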
		part_tmp_path = storage.full_path + "tmp_" + part_name + "/";
		part_res_path = storage.full_path + part_name + "/";

		Poco::File(part_tmp_path).createDirectories();

		index_stream = new WriteBufferFromFile(part_tmp_path + "primary.idx", DBMS_DEFAULT_BUFFER_SIZE, O_TRUNC | O_CREAT | O_WRONLY);

		for (NamesAndTypesList::const_iterator it = storage.columns->begin(); it != storage.columns->end(); ++it)
			addStream(it->first, *it->second);
	}

	void write(const Block & block)
	{
		size_t rows = block.rows();

		/// First write the index. The index contains the primary key value for every index_granularity-th row.
		typedef std::vector<const ColumnWithNameAndType *> PrimaryColumns;
		PrimaryColumns primary_columns;

		for (size_t i = 0, size = storage.sort_descr.size(); i < size; ++i)
			primary_columns.push_back(
				!storage.sort_descr[i].column_name.empty()
				? &block.getByName(storage.sort_descr[i].column_name)
				: &block.getByPosition(storage.sort_descr[i].column_number));
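
		/// The loop below makes the index sparse: with index_granularity = 8192 (an illustrative value),
		/// the primary key is stored only for rows 0, 8192, 16384, ... (shifted by index_offset), one mark each.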

		for (size_t i = index_offset; i < rows; i += storage.index_granularity)
		{
			for (PrimaryColumns::const_iterator it = primary_columns.begin(); it != primary_columns.end(); ++it)
			{
				(*it)->type->serializeBinary((*(*it)->column)[i], *index_stream);
			}

			++marks_count;
		}

		/// The set of offset columns already written, so that columns shared by nested structures are not written multiple times.
		OffsetColumns offset_columns;

		/// Now write the data.
		for (NamesAndTypesList::const_iterator it = storage.columns->begin(); it != storage.columns->end(); ++it)
		{
			const ColumnWithNameAndType & column = block.getByName(it->first);
			writeData(column.name, *column.type, *column.column, offset_columns);
		}

		size_t written_for_last_mark = (storage.index_granularity - index_offset + rows) % storage.index_granularity;
		index_offset = (storage.index_granularity - written_for_last_mark) % storage.index_granularity;
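		/// Worked example with illustrative numbers: for index_granularity = 8192, index_offset = 0 and rows = 10000,
		/// written_for_last_mark = 10000 % 8192 = 1808 rows follow the last mark, so the next blocks must supply
		/// another 8192 - 1808 = 6384 rows (the new index_offset) before the next index entry is written.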
	}

	void writeSuffix()
	{
		/// Finish writing.
		index_stream->next();
		index_stream = NULL;

		for (ColumnStreams::iterator it = column_streams.begin(); it != column_streams.end(); ++it)
			it->second->finalize();

		column_streams.clear();

		if (marks_count == 0)
			throw Exception("Empty part", ErrorCodes::LOGICAL_ERROR);

		/// Rename the part.
		Poco::File(part_tmp_path).renameTo(part_res_path);

		/// Adding the new part to the set (and removing the source parts) is done by the caller.
	}

	/// How many marks have already been written.
	size_t marksCount()
	{
		return marks_count;
	}

private:
	StorageMergeTree & storage;
	String part_name;
	String part_tmp_path;
	String part_res_path;
	size_t marks_count;
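
	/** Each column gets a pair of files: compressed data (written through 'compressed' on top of 'plain')
	  * and marks. As writeData() shows, a mark is a pair of offsets: the position in the compressed file
	  * and the position within the current decompressed block.
	  */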
	struct ColumnStream
	{
		ColumnStream(const String & data_path, const std::string & marks_path) :
			plain(data_path, DBMS_DEFAULT_BUFFER_SIZE, O_TRUNC | O_CREAT | O_WRONLY),
			compressed(plain),
			marks(marks_path, 4096, O_TRUNC | O_CREAT | O_WRONLY) {}

		WriteBufferFromFile plain;
		CompressedWriteBuffer compressed;
		WriteBufferFromFile marks;
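
		/// Flush order matters in finalize(): 'compressed' is flushed into 'plain' first,
		/// so that all buffered data reaches the file before 'plain' itself is flushed.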
		void finalize()
		{
			compressed.next();
			plain.next();
			marks.next();
		}
	};

	typedef std::map<String, SharedPtr<ColumnStream> > ColumnStreams;
	ColumnStreams column_streams;

	SharedPtr<WriteBufferFromFile> index_stream;

	/// Offset to the first row of the block for which an index entry must be written.
	size_t index_offset;

	typedef std::set<std::string> OffsetColumns;

	void addStream(const String & name, const IDataType & type, size_t level = 0)
	{
		String escaped_column_name = escapeForFileName(name);

		/// For arrays, separate streams are used for the sizes.
		if (const DataTypeArray * type_arr = dynamic_cast<const DataTypeArray *>(&type))
		{
			String size_name = DataTypeNested::extractNestedTableName(name)
				+ ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
			String escaped_size_name = escapeForFileName(DataTypeNested::extractNestedTableName(name))
				+ ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);

			column_streams[size_name] = new ColumnStream(
				part_tmp_path + escaped_size_name + ".bin",
				part_tmp_path + escaped_size_name + ".mrk");

			addStream(name, *type_arr->getNestedType(), level + 1);
		}
		else if (const DataTypeNested * type_nested = dynamic_cast<const DataTypeNested *>(&type))
		{
			String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
			String escaped_size_name = escaped_column_name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);

			column_streams[size_name] = new ColumnStream(
				part_tmp_path + escaped_size_name + ".bin",
				part_tmp_path + escaped_size_name + ".mrk");

			const NamesAndTypesList & columns = *type_nested->getNestedTypesList();
			for (NamesAndTypesList::const_iterator it = columns.begin(); it != columns.end(); ++it)
				addStream(DataTypeNested::concatenateNestedName(name, it->first), *it->second, level + 1);
		}
		else
			column_streams[name] = new ColumnStream(
				part_tmp_path + escaped_column_name + ".bin",
				part_tmp_path + escaped_column_name + ".mrk");
	}
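
	/// Illustrative example: sibling array columns of one nested structure (say 'n.x' and 'n.y') both derive
	/// their size stream name from the nested table name 'n', so they share a single sizes file; writeData()
	/// uses offset_columns to make sure those shared sizes are written only once.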

	/// Write the data of a single column.
	void writeData(const String & name, const IDataType & type, const IColumn & column, OffsetColumns & offset_columns, size_t level = 0)
	{
		size_t size = column.size();

		/// For arrays, the sizes must be serialized first, and then the values.
		if (const DataTypeArray * type_arr = dynamic_cast<const DataTypeArray *>(&type))
		{
			String size_name = DataTypeNested::extractNestedTableName(name)
				+ ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);

			if (offset_columns.count(size_name) == 0)
			{
				offset_columns.insert(size_name);

				ColumnStream & stream = *column_streams[size_name];

				size_t prev_mark = 0;
				while (prev_mark < size)
				{
					size_t limit = 0;

					/// If there is an index_offset, the first mark goes not at the very beginning, but after that many rows.
					if (prev_mark == 0 && index_offset != 0)
					{
						limit = index_offset;
					}
					else
					{
						limit = storage.index_granularity;
						writeIntBinary(stream.plain.count(), stream.marks);
						writeIntBinary(stream.compressed.offset(), stream.marks);
					}

					type_arr->serializeOffsets(column, stream.compressed, prev_mark, limit);
					prev_mark += limit;
				}
			}
		}
		else if (const DataTypeNested * type_nested = dynamic_cast<const DataTypeNested *>(&type))
		{
			String size_name = name + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);

			ColumnStream & stream = *column_streams[size_name];

			size_t prev_mark = 0;
			while (prev_mark < size)
			{
				size_t limit = 0;

				/// If there is an index_offset, the first mark goes not at the very beginning, but after that many rows.
				if (prev_mark == 0 && index_offset != 0)
				{
					limit = index_offset;
				}
				else
				{
					limit = storage.index_granularity;
					writeIntBinary(stream.plain.count(), stream.marks);
					writeIntBinary(stream.compressed.offset(), stream.marks);
				}

				type_nested->serializeOffsets(column, stream.compressed, prev_mark, limit);
				prev_mark += limit;
			}
		}
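
		/// Finally, write the values themselves; for arrays this serializes the elements, since the
		/// offsets were already handled above.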
		{
			ColumnStream & stream = *column_streams[name];

			size_t prev_mark = 0;
			while (prev_mark < size)
			{
				size_t limit = 0;

				/// If there is an index_offset, the first mark goes not at the very beginning, but after that many rows.
				if (prev_mark == 0 && index_offset != 0)
				{
					limit = index_offset;
				}
				else
				{
					limit = storage.index_granularity;
					writeIntBinary(stream.plain.count(), stream.marks);
					writeIntBinary(stream.compressed.offset(), stream.marks);
				}

				type.serializeBinary(column, stream.compressed, prev_mark, limit);
				prev_mark += limit;
			}
		}
	}
};


typedef Poco::SharedPtr<MergedBlockOutputStream> MergedBlockOutputStreamPtr;
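
/** A rough usage sketch; the block source 'from' is illustrative, only write()/writeSuffix() come from this header:
  *
  *     MergedBlockOutputStreamPtr to = new MergedBlockOutputStream(
  *         storage, min_date, max_date, min_part_id, max_part_id, level);
  *
  *     while (Block block = from->read())    /// some IBlockInputStream yielding sorted blocks of a single month
  *         to->write(block);
  *
  *     to->writeSuffix();
  *
  * Adding the finished part to the working set (and removing the merged source parts) remains the caller's job.
  */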

}