#pragma once

#include <DB/IO/createWriteBufferFromFileBase.h>
#include <DB/IO/WriteBufferFromFile.h>
#include <DB/IO/CompressedWriteBuffer.h>
#include <DB/IO/HashingWriteBuffer.h>
#include <DB/Storages/MergeTree/MergeTreeData.h>
#include <DB/Common/escapeForFileName.h>
#include <DB/DataTypes/DataTypeNested.h>
#include <DB/DataTypes/DataTypeArray.h>
#include <DB/DataStreams/IBlockOutputStream.h>

namespace DB
{


class IMergedBlockOutputStream : public IBlockOutputStream
{
public:
    IMergedBlockOutputStream(
        MergeTreeData & storage_,
        size_t min_compress_block_size_,
        size_t max_compress_block_size_,
        CompressionMethod compression_method_,
        size_t aio_threshold_)
        : storage(storage_),
        min_compress_block_size(min_compress_block_size_),
        max_compress_block_size(max_compress_block_size_),
        aio_threshold(aio_threshold_),
        compression_method(compression_method_)
    {
    }

protected:
    using OffsetColumns = std::set<std::string>;

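    /** Everything needed to write one column: the .bin data file wrapped into a
      * hashing -> compression -> hashing chain, and the .mrk marks file with its own
      * hashing buffer, so that addToChecksums() can report sizes and hashes of both.
      */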
    struct ColumnStream
    {
        ColumnStream(
            const String & escaped_column_name_,
            const String & data_path,
            const std::string & marks_path,
            size_t max_compress_block_size,
            CompressionMethod compression_method,
            size_t estimated_size,
            size_t aio_threshold) :
            escaped_column_name(escaped_column_name_),
            plain_file(createWriteBufferFromFileBase(data_path, estimated_size, aio_threshold, max_compress_block_size)),
            plain_hashing(*plain_file), compressed_buf(plain_hashing, compression_method), compressed(compressed_buf),
            marks_file(marks_path, 4096, O_TRUNC | O_CREAT | O_WRONLY), marks(marks_file) {}

        String escaped_column_name;

        /// compressed -> compressed_buf -> plain_hashing -> plain_file
        std::unique_ptr<WriteBufferFromFileBase> plain_file;
        HashingWriteBuffer plain_hashing;
        CompressedWriteBuffer compressed_buf;
        HashingWriteBuffer compressed;

        /// marks -> marks_file
        WriteBufferFromFile marks_file;
        HashingWriteBuffer marks;

        void finalize()
        {
            compressed.next();
            plain_file->next();
            marks.next();
        }

        void sync()
        {
            plain_file->sync();
            marks_file.sync();
        }

        void addToChecksums(MergeTreeData::DataPart::Checksums & checksums, String name = "")
        {
            if (name == "")
                name = escaped_column_name;

            checksums.files[name + ".bin"].is_compressed = true;
            checksums.files[name + ".bin"].uncompressed_size = compressed.count();
            checksums.files[name + ".bin"].uncompressed_hash = compressed.getHash();
            checksums.files[name + ".bin"].file_size = plain_hashing.count();
            checksums.files[name + ".bin"].file_hash = plain_hashing.getHash();

            checksums.files[name + ".mrk"].file_size = marks.count();
            checksums.files[name + ".mrk"].file_hash = marks.getHash();
        }
    };

    using ColumnStreams = std::map<String, std::unique_ptr<ColumnStream>>;

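    /** Creates the ColumnStream(s) for one column under the given path. For an Array (or Nested)
      * column a separate stream is created for the array sizes at each nesting level, and the
      * streams for the nested type are then added recursively.
      */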
    void addStream(const String & path, const String & name, const IDataType & type, size_t estimated_size = 0, size_t level = 0, String filename = "")
    {
        String escaped_column_name;
        if (filename.size())
            escaped_column_name = escapeForFileName(filename);
        else
            escaped_column_name = escapeForFileName(name);

        /// For arrays, separate streams are used for the sizes.
        if (const DataTypeArray * type_arr = typeid_cast<const DataTypeArray *>(&type))
        {
            String size_name = DataTypeNested::extractNestedTableName(name)
                + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);
            String escaped_size_name = escapeForFileName(DataTypeNested::extractNestedTableName(name))
                + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);

            column_streams[size_name] = std::make_unique<ColumnStream>(
                escaped_size_name,
                path + escaped_size_name + ".bin",
                path + escaped_size_name + ".mrk",
                max_compress_block_size,
                compression_method,
                estimated_size,
                aio_threshold);

            addStream(path, name, *type_arr->getNestedType(), estimated_size, level + 1);
        }
        else
            column_streams[name] = std::make_unique<ColumnStream>(
                escaped_column_name,
                path + escaped_column_name + ".bin",
                path + escaped_column_name + ".mrk",
                max_compress_block_size,
                compression_method,
                estimated_size,
                aio_threshold);
    }

    /// Write the data of one column.
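    /** A mark is written every index_granularity rows (respecting index_offset for the first one)
      * and consists of two numbers: the offset in the compressed .bin file (plain_hashing.count())
      * and the offset within the decompressed block (compressed.offset()).
      */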
    void writeData(const String & name, const IDataType & type, const IColumn & column, OffsetColumns & offset_columns, size_t level = 0)
    {
        size_t size = column.size();

        /// For arrays, the sizes must be serialized first, and the values after them.
        if (const DataTypeArray * type_arr = typeid_cast<const DataTypeArray *>(&type))
        {
            String size_name = DataTypeNested::extractNestedTableName(name)
                + ARRAY_SIZES_COLUMN_NAME_SUFFIX + toString(level);

            if (offset_columns.count(size_name) == 0)
            {
                offset_columns.insert(size_name);

                ColumnStream & stream = *column_streams[size_name];

                size_t prev_mark = 0;
                while (prev_mark < size)
                {
                    size_t limit = 0;

                    /// If index_offset is set, the first mark is written not immediately, but after that many rows.
                    if (prev_mark == 0 && index_offset != 0)
                    {
                        limit = index_offset;
                    }
                    else
                    {
                        limit = storage.index_granularity;

                        /// Enough data may already have accumulated for compression into a new block.
                        if (stream.compressed.offset() >= min_compress_block_size)
                            stream.compressed.next();

                        writeIntBinary(stream.plain_hashing.count(), stream.marks);
                        writeIntBinary(stream.compressed.offset(), stream.marks);
                    }

                    type_arr->serializeOffsets(column, stream.compressed, prev_mark, limit);

                    /// So that marks point to the beginning of the next compressed block rather than to the end of the current one.
                    stream.compressed.nextIfAtEnd();

                    prev_mark += limit;
                }
            }
        }

        {
            ColumnStream & stream = *column_streams[name];

            size_t prev_mark = 0;
            while (prev_mark < size)
            {
                size_t limit = 0;

                /// If index_offset is set, the first mark is written not immediately, but after that many rows.
                if (prev_mark == 0 && index_offset != 0)
                {
                    limit = index_offset;
                }
                else
                {
                    limit = storage.index_granularity;

                    /// Enough data may already have accumulated for compression into a new block.
                    if (stream.compressed.offset() >= min_compress_block_size)
                        stream.compressed.next();

                    writeIntBinary(stream.plain_hashing.count(), stream.marks);
                    writeIntBinary(stream.compressed.offset(), stream.marks);
                }

                type.serializeBinary(column, stream.compressed, prev_mark, limit);

                /// So that marks point to the beginning of the next compressed block rather than to the end of the current one.
                stream.compressed.nextIfAtEnd();

                prev_mark += limit;
            }
        }
    }

    MergeTreeData & storage;

    ColumnStreams column_streams;

    /// The offset to the first row of the block for which an index entry must be written.
    size_t index_offset = 0;

    size_t min_compress_block_size;
    size_t max_compress_block_size;

    size_t aio_threshold;

    CompressionMethod compression_method;
};


/** For writing a single part.
  * The data belongs to a single month and is written as a single part.
  */
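/** A rough usage sketch (illustrative only; the real call sites live in the MergeTree insert and
  * merge code, and the variable names below are assumptions):
  *
  *     MergedBlockOutputStream out(data, part_tmp_path, columns, compression_method);
  *     out.write(block);    /// or out.writeWithPermutation(block, &permutation) for unsorted data
  *     MergeTreeData::DataPart::Checksums checksums = out.writeSuffixAndGetChecksums();
  *
  * The returned checksums cover primary.idx (for sorted merging modes) and every column's .bin / .mrk files.
  */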
class MergedBlockOutputStream : public IMergedBlockOutputStream
{
public:
    MergedBlockOutputStream(
        MergeTreeData & storage_,
        String part_path_,
        const NamesAndTypesList & columns_list_,
        CompressionMethod compression_method)
        : IMergedBlockOutputStream(
            storage_, storage_.context.getSettings().min_compress_block_size,
            storage_.context.getSettings().max_compress_block_size, compression_method,
            storage_.context.getSettings().min_bytes_to_use_direct_io),
        columns_list(columns_list_), part_path(part_path_)
    {
        init();
        for (const auto & it : columns_list)
            addStream(part_path, it.name, *it.type);
    }

    MergedBlockOutputStream(
        MergeTreeData & storage_,
        String part_path_,
        const NamesAndTypesList & columns_list_,
        CompressionMethod compression_method,
        const MergeTreeData::DataPart::ColumnToSize & merged_column_to_size_,
        size_t aio_threshold_)
        : IMergedBlockOutputStream(
            storage_, storage_.context.getSettings().min_compress_block_size,
            storage_.context.getSettings().max_compress_block_size, compression_method,
            aio_threshold_),
        columns_list(columns_list_), part_path(part_path_)
    {
        init();
        for (const auto & it : columns_list)
        {
            size_t estimated_size = 0;
            if (aio_threshold > 0)
            {
                auto it2 = merged_column_to_size_.find(it.name);
                if (it2 != merged_column_to_size_.end())
                    estimated_size = it2->second;
            }
            addStream(part_path, it.name, *it.type, estimated_size);
        }
    }

    std::string getPartPath() const
    {
        return part_path;
    }

    /// If the data is already sorted.
    void write(const Block & block) override
    {
        writeImpl(block, nullptr);
    }

    /** If the data is not sorted, but we have precomputed a permutation after which it becomes sorted.
      * This method is used to save memory, since the original and the sorted block do not have to be kept around at the same time.
      */
    void writeWithPermutation(const Block & block, const IColumn::Permutation * permutation)
    {
        writeImpl(block, permutation);
    }

    void writeSuffix() override
    {
        throw Exception("Method writeSuffix is not supported by MergedBlockOutputStream", ErrorCodes::NOT_IMPLEMENTED);
    }

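    /** Finalizes the part and returns its checksums. Besides the per-column .bin / .mrk files and
      * primary.idx, this also writes columns.txt (the column list) and checksums.txt into the part
      * directory; an empty part (marks_count == 0) is removed instead.
      */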
    MergeTreeData::DataPart::Checksums writeSuffixAndGetChecksums()
    {
        /// Finish writing and collect the checksums.
        MergeTreeData::DataPart::Checksums checksums;

        if (storage.merging_params.mode != MergeTreeData::MergingParams::Unsorted)
        {
            index_stream->next();
            checksums.files["primary.idx"].file_size = index_stream->count();
            checksums.files["primary.idx"].file_hash = index_stream->getHash();
            index_stream = nullptr;
        }

        for (ColumnStreams::iterator it = column_streams.begin(); it != column_streams.end(); ++it)
        {
            it->second->finalize();
            it->second->addToChecksums(checksums);
        }

        column_streams.clear();

        if (marks_count == 0)
        {
            /// The part is empty: all rows have been deleted.
            Poco::File(part_path).remove(true);
            checksums.files.clear();
            return checksums;
        }

        {
            /// Write the file with the column descriptions.
            WriteBufferFromFile out(part_path + "columns.txt", 4096);
            columns_list.writeText(out);
        }

        {
            /// Write the file with the checksums.
            WriteBufferFromFile out(part_path + "checksums.txt", 4096);
            checksums.write(out);
        }

        return checksums;
    }

    MergeTreeData::DataPart::Index & getIndex()
    {
        return index_columns;
    }

    /// How many marks have already been written.
    size_t marksCount()
    {
        return marks_count;
    }

private:
    void init()
    {
        Poco::File(part_path).createDirectories();

        if (storage.merging_params.mode != MergeTreeData::MergingParams::Unsorted)
        {
            index_file_stream = std::make_unique<WriteBufferFromFile>(
                part_path + "primary.idx", DBMS_DEFAULT_BUFFER_SIZE, O_TRUNC | O_CREAT | O_WRONLY);
            index_stream = std::make_unique<HashingWriteBuffer>(*index_file_stream);
        }
    }

    /** If a permutation is given, the column values are rearranged while they are written.
      * This avoids keeping the whole block in memory just to sort it.
      */
    void writeImpl(const Block & block, const IColumn::Permutation * permutation)
    {
        size_t rows = block.rows();

        /// The set of offset columns already written, so that offsets shared by nested structures are not written more than once.
        OffsetColumns offset_columns;

        auto sort_description = storage.getSortDescription();

        /// Here we collect the columns belonging to the primary key, in order to write the index afterwards.
        std::vector<ColumnWithTypeAndName> primary_columns(sort_description.size());
        std::map<String, size_t> primary_columns_name_to_position;

        for (size_t i = 0, size = sort_description.size(); i < size; ++i)
        {
            const auto & descr = sort_description[i];

            String name = !descr.column_name.empty()
                ? descr.column_name
                : block.getByPosition(descr.column_number).name;

            if (!primary_columns_name_to_position.emplace(name, i).second)
                throw Exception("Primary key contains duplicate columns", ErrorCodes::BAD_ARGUMENTS);

            primary_columns[i] = !descr.column_name.empty()
                ? block.getByName(descr.column_name)
                : block.getByPosition(descr.column_number);

            /// Primary key columns are permuted up front and stored in primary_columns.
            if (permutation)
                primary_columns[i].column = primary_columns[i].column->permute(*permutation, 0);
        }

        if (index_columns.empty())
        {
            index_columns.resize(sort_description.size());
            for (size_t i = 0, size = sort_description.size(); i < size; ++i)
                index_columns[i] = primary_columns[i].column.get()->cloneEmpty();
        }

        /// Now write the data.
        for (const auto & it : columns_list)
        {
            const ColumnWithTypeAndName & column = block.getByName(it.name);

            if (permutation)
            {
                auto primary_column_it = primary_columns_name_to_position.find(it.name);
                if (primary_columns_name_to_position.end() != primary_column_it)
                {
                    writeData(column.name, *column.type, *primary_columns[primary_column_it->second].column, offset_columns);
                }
                else
                {
                    /// Columns that are not part of the primary key are permuted here; the result is then released, to save memory.
                    ColumnPtr permutted_column = column.column->permute(*permutation, 0);
                    writeData(column.name, *column.type, *permutted_column, offset_columns);
                }
            }
            else
            {
                writeData(column.name, *column.type, *column.column, offset_columns);
            }
        }

        {
            /** While filling index (index_columns), disable memory tracker.
              * Because memory is allocated here (maybe in context of INSERT query),
              * but then freed in completely different place (while merging parts), where query memory_tracker is not available.
              * And otherwise it will look like excessively growing memory consumption in context of query.
              * (observed in long INSERT SELECTs)
              */
            TemporarilyDisableMemoryTracker temporarily_disable_memory_tracker;

            /// Write the index. The index contains the primary key value for every index_granularity-th row.
            for (size_t i = index_offset; i < rows; i += storage.index_granularity)
            {
                if (storage.merging_params.mode != MergeTreeData::MergingParams::Unsorted)
                {
                    for (size_t j = 0, size = primary_columns.size(); j < size; ++j)
                    {
                        const IColumn & primary_column = *primary_columns[j].column.get();
                        index_columns[j].get()->insertFrom(primary_column, i);
                        primary_columns[j].type.get()->serializeBinary(primary_column, i, *index_stream);
                    }
                }

                ++marks_count;
            }
        }

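        /// A worked example of the arithmetic below, assuming index_granularity = 8192: with
        /// index_offset == 0 and rows == 10000, marks were written at rows 0 and 8192, so
        /// written_for_last_mark == 1808 and the new index_offset == 6384, i.e. the next block's
        /// first mark comes only after 6384 more rows.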
        size_t written_for_last_mark = (storage.index_granularity - index_offset + rows) % storage.index_granularity;
        index_offset = (storage.index_granularity - written_for_last_mark) % storage.index_granularity;
    }

private:
    NamesAndTypesList columns_list;
    String part_path;

    size_t marks_count = 0;

    std::unique_ptr<WriteBufferFromFile> index_file_stream;
    std::unique_ptr<HashingWriteBuffer> index_stream;
    MergeTreeData::DataPart::Index index_columns;
};


/// Writes only the columns that are present in the block.
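/** Column streams are created lazily from the first written block and are torn down again in
  * writeSuffixAndGetChecksums() (which also resets `initialized`), presumably so the same object
  * can be reused for another set of columns.
  */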
class MergedColumnOnlyOutputStream : public IMergedBlockOutputStream
{
public:
    MergedColumnOnlyOutputStream(MergeTreeData & storage_, String part_path_, bool sync_, CompressionMethod compression_method)
        : IMergedBlockOutputStream(
            storage_, storage_.context.getSettings().min_compress_block_size,
            storage_.context.getSettings().max_compress_block_size, compression_method,
            storage_.context.getSettings().min_bytes_to_use_direct_io),
        part_path(part_path_), sync(sync_)
    {
    }

    void write(const Block & block) override
    {
        if (!initialized)
        {
            column_streams.clear();
            for (size_t i = 0; i < block.columns(); ++i)
            {
                addStream(part_path, block.getByPosition(i).name,
                    *block.getByPosition(i).type, 0, 0, block.getByPosition(i).name);
            }
            initialized = true;
        }

        size_t rows = block.rows();

        OffsetColumns offset_columns;
        for (size_t i = 0; i < block.columns(); ++i)
        {
            const ColumnWithTypeAndName & column = block.getByPosition(i);
            writeData(column.name, *column.type, *column.column, offset_columns);
        }

        size_t written_for_last_mark = (storage.index_granularity - index_offset + rows) % storage.index_granularity;
        index_offset = (storage.index_granularity - written_for_last_mark) % storage.index_granularity;
    }

    void writeSuffix() override
    {
        throw Exception("Method writeSuffix is not supported by MergedColumnOnlyOutputStream", ErrorCodes::NOT_IMPLEMENTED);
    }

    MergeTreeData::DataPart::Checksums writeSuffixAndGetChecksums()
    {
        MergeTreeData::DataPart::Checksums checksums;

        for (auto & column_stream : column_streams)
        {
            column_stream.second->finalize();
            if (sync)
                column_stream.second->sync();
            std::string column = escapeForFileName(column_stream.first);
            column_stream.second->addToChecksums(checksums, column);
        }

        column_streams.clear();
        initialized = false;

        return checksums;
    }

private:
    String part_path;

    bool initialized = false;
    bool sync;
};

}