Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-10-05 08:00:51 +00:00
polymorphic parts (development)

commit 1297991cef
parent ce36cf88ac
@@ -65,21 +65,11 @@ public:
        const ValueSizeMap & avg_value_size_hints_ = ValueSizeMap{},
        const ReadBufferFromFileBase::ProfileCallback & profile_callback_ = ReadBufferFromFileBase::ProfileCallback{}) const = 0;

        const String & part_path_,
        const MergeTreeData & storage_,
        const NamesAndTypesList & columns_list_,
        const IColumn::Permutation * permutation_,
        const String & marks_file_extension_,
        const CompressionCodecPtr & default_codec_,
        size_t max_compress_block_size_,
        size_t aio_threshold_

    virtual MergeTreeWriterPtr getWriter(
        const NamesAndTypesList & columns_list,
        const IColumn::Permutation * permutation,
        const CompressionCodecPtr & default_codec_,
        const WriterSettings & writer_settings,
        )
        const WriterSettings & writer_settings) const = 0;

    // virtual MergeTreeWriterPtr getWriter() const = 0;
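The hunk above moves writer creation behind a virtual getWriter() factory next to the existing getReader(), so each part format can hand back its own writer. A minimal standalone sketch of that polymorphic-parts idea (plain C++ with illustrative names only, not the actual ClickHouse classes):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Simplified model: each part format exposes a virtual factory method that
// returns a writer suited to its on-disk layout.
struct IPartWriter
{
    virtual ~IPartWriter() = default;
    virtual void write(const std::vector<std::string> & rows) = 0;
};

struct WidePartWriter : IPartWriter
{
    void write(const std::vector<std::string> & rows) override
    {
        std::cout << "wide: one file per column, " << rows.size() << " rows\n";
    }
};

struct CompactPartWriter : IPartWriter
{
    void write(const std::vector<std::string> & rows) override
    {
        std::cout << "compact: all columns in one file, " << rows.size() << " rows\n";
    }
};

struct IPart
{
    virtual ~IPart() = default;
    virtual std::unique_ptr<IPartWriter> getWriter() const = 0;
};

struct WidePart : IPart
{
    std::unique_ptr<IPartWriter> getWriter() const override { return std::make_unique<WidePartWriter>(); }
};

struct CompactPart : IPart
{
    std::unique_ptr<IPartWriter> getWriter() const override { return std::make_unique<CompactPartWriter>(); }
};

int main()
{
    std::unique_ptr<IPart> part = std::make_unique<CompactPart>();
    part->getWriter()->write({"a", "b", "c"});   // caller never knows the concrete format
}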
@@ -52,18 +52,18 @@ IMergeTreeDataPartWriter::ColumnStream::ColumnStream(
{
}

void IMergeTreeDataPartWriter::ColumnStream::addToChecksums(MergeTreeData::DataPart::Checksums & checksums)
{
    String name = escaped_column_name;
// void IMergeTreeDataPartWriter::ColumnStream::addToChecksums(MergeTreeData::DataPart::Checksums & checksums)
// {
//     String name = escaped_column_name;

    checksums.files[name + data_file_extension].is_compressed = true;
    checksums.files[name + data_file_extension].uncompressed_size = compressed.count();
    checksums.files[name + data_file_extension].uncompressed_hash = compressed.getHash();
    checksums.files[name + data_file_extension].file_size = plain_hashing.count();
    checksums.files[name + data_file_extension].file_hash = plain_hashing.getHash();
//     checksums.files[name + data_file_extension].is_compressed = true;
//     checksums.files[name + data_file_extension].uncompressed_size = compressed.count();
//     checksums.files[name + data_file_extension].uncompressed_hash = compressed.getHash();
//     checksums.files[name + data_file_extension].file_size = plain_hashing.count();
//     checksums.files[name + data_file_extension].file_hash = plain_hashing.getHash();

    checksums.files[name + marks_file_extension].file_size = marks.count();
    checksums.files[name + marks_file_extension].file_hash = marks.getHash();
}
//     checksums.files[name + marks_file_extension].file_size = marks.count();
//     checksums.files[name + marks_file_extension].file_hash = marks.getHash();
// }

}
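addToChecksums() above records, for every file a column stream produces (data and marks), a size and hash entry in the part's checksum map. A simplified, self-contained model of that bookkeeping (the hash function, file extensions and struct layout are stand-ins, not ClickHouse's):

#include <cstddef>
#include <functional>
#include <iostream>
#include <map>
#include <string>

// Toy per-file checksum entry and per-part checksum map.
struct FileChecksum
{
    size_t file_size = 0;
    size_t file_hash = 0;
};

struct Checksums
{
    std::map<std::string, FileChecksum> files;
};

// Record one column stream's data file and marks file in the part checksums.
void addToChecksums(Checksums & checksums,
                    const std::string & escaped_column_name,
                    const std::string & data,
                    const std::string & marks)
{
    std::hash<std::string> hasher;
    checksums.files[escaped_column_name + ".bin"] = {data.size(), hasher(data)};
    checksums.files[escaped_column_name + ".mrk"] = {marks.size(), hasher(marks)};
}

int main()
{
    Checksums checksums;
    addToChecksums(checksums, "value", "compressed column bytes", "mark bytes");
    for (const auto & [name, sum] : checksums.files)
        std::cout << name << ": " << sum.file_size << " bytes\n";
}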
@@ -47,10 +47,11 @@ public:

        void sync();

        void addToChecksums(MergeTreeData::DataPart::Checksums & checksums);
        void addToChecksums(IMergeTreeDataPart::Checksums & checksums);
    };

    using ColumnStreamPtr = std::unique_ptr<ColumnStream>;
    using ColumnStreams = std::map<String, ColumnStreamPtr>;

    IMergeTreeDataPartWriter(
        const String & part_path,
@@ -3,34 +3,14 @@

namespace DB
{

size_t MergeTreeDataPartWriterCompact::writeColumnSingleGranule(
    const ColumnWithTypeAndName & column,
    WrittenOffsetColumns & offset_columns,
    bool skip_offsets,
    IDataType::SerializeBinaryBulkStatePtr & serialization_state,
    IDataType::SerializeBinaryBulkSettings & serialize_settings,
    size_t from_row,
    size_t number_of_rows)
{
}


size_t MergeTreeDataPartWriterCompact::write(const Block & block, size_t from_mark, size_t offset,
size_t MergeTreeDataPartWriterCompact::write(const Block & block, size_t from_mark, size_t index_offset,
    const MergeTreeIndexGranularity & index_granularity,
    const Block & primary_key_block, const Block & skip_indexes_block)
{
    if (!started)
        start();

    size_t total_rows = block.rows();
    size_t current_mark = from_mark;
    size_t current_row = 0;

    IDataType::SerializeBinaryBulkSettings serialize_settings;
    serialize_settings.getter = [&ostr](IDataType::SubstreamPath) -> WriteBuffer * { return &ostr; };
    serialize_settings.position_independent_encoding = false;
    serialize_settings.low_cardinality_max_dictionary_size = 0;

    ColumnsWithTypeAndName columns_to_write(columns_list.size());
    auto it = columns_list.begin();
    for (size_t i = 0; i < columns_list.size(); ++i, ++it)
@@ -55,62 +35,52 @@ size_t MergeTreeDataPartWriterCompact::write(const Block & block, size_t from_ma
    {
        bool write_marks = true;
        size_t rows_to_write;
        if (current_row == 0 && offset != 0)
        if (current_row == 0 && index_offset != 0)
        {
            rows_to_write = offset;
            rows_to_write = index_offset;
            write_marks = false;
        }
        else
        {
            rows_to_write = index_granularity->getMarkRows(current_mark);
        }

        for (size_t i = 0; i < columns_to_write.size(); ++i)
        {
            current_row = writeColumnSingleGranule(columns_to_write[i], offset_columns, skip_offsets, serialization_states[i], serialize_settings, current_row, rows_to_write);
            rows_to_write = index_granularity.getMarkRows(current_mark);
        }

        if (write_marks)
        {
            writeMark();
            writeIntBinary(rows_to_write, stream->marks);
            for (size_t i = 0; i < columns_to_write.size(); ++i)
            {
                writeIntBinary(stream->plain_hashing.count(), stream->marks);
                writeIntBinary(stream->compressed.offset(), stream->marks);
                current_row = writeColumnSingleGranule(columns_to_write[i], current_row, rows_to_write);
            }
            ++current_mark;
        }
        else
        {
            for (size_t i = 0; i < columns_to_write.size(); ++i)
                current_row = writeColumnSingleGranule(columns_to_write[i], current_row, rows_to_write);
        }
    }

    /// We always write end granule for block in Compact parts.
    return 0;
}

size_t MergeTreeDataPartWriterCompact::writeColumnSingleGranule(const ColumnWithTypeAndName & column,
    WrittenOffsetColumns & offset_columns,
    bool skip_offsets,
    IDataType::SerializeBinaryBulkStatePtr & serialization_state,
    IDataType::SerializeBinaryBulkSettings & serialize_settings,
    size_t from_row,
    size_t number_of_rows)
size_t MergeTreeDataPartWriterCompact::writeColumnSingleGranule(const ColumnWithTypeAndName & column, size_t from_row, size_t number_of_rows)
{
    column.type->serializeBinaryBulkStatePrefix(serialize_settings, serialization_state);
    column.type->serializeBinaryBulkWithMultipleStreams(*column.column, from_row, number_of_rows, serialize_settings, serialization_state);
    column.type->serializeBinaryBulkStateSuffix(serialize_settings, serialization_state);
}
    IDataType::SerializeBinaryBulkStatePtr state;
    IDataType::SerializeBinaryBulkSettings serialize_settings;

void MergeTreeDataPartWriterWide::start()
{
    if (started)
        return;
    serialize_settings.getter = [&stream](IDataType::SubstreamPath) -> WriteBuffer * { return &stream->compressed; };
    serialize_settings.position_independent_encoding = false;
    serialize_settings.low_cardinality_max_dictionary_size = 0;

    started = true;
    column.type->serializeBinaryBulkStatePrefix(serialize_settings, state);
    column.type->serializeBinaryBulkWithMultipleStreams(*column.column, from_row, number_of_rows, serialize_settings, state);
    column.type->serializeBinaryBulkStateSuffix(serialize_settings, state);

    serialization_states.reserve(columns_list.size());
    WrittenOffsetColumns tmp_offset_columns;
    IDataType::SerializeBinaryBulkSettings settings;

    for (const auto & col : columns_list)
    {
        settings.getter = createStreamGetter(col.name, tmp_offset_columns, false);
        serialization_states.emplace_back(nullptr);
        col.type->serializeBinaryBulkStatePrefix(settings, serialization_states.back());
    }
    return from_row + number_of_rows;
}

}
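The write() loop above cuts a block into granules: if the previous block left its last granule unfinished, the first index_offset rows are written without a new mark, and every following granule gets a mark sized by index_granularity. A runnable toy model of that loop (function and variable names are illustrative, not the real writer API):

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Returns the index_offset to pass for the next block: how many rows are still
// needed to finish the granule that was started last.
size_t writeBlock(size_t block_rows, size_t from_mark, size_t index_offset,
                  const std::vector<size_t> & index_granularity)
{
    size_t current_mark = from_mark;
    size_t current_row = 0;
    while (current_row < block_rows)
    {
        bool write_marks = true;
        size_t rows_to_write;
        if (current_row == 0 && index_offset != 0)
        {
            rows_to_write = index_offset;   // finish the granule the previous block started
            write_marks = false;
        }
        else
        {
            rows_to_write = index_granularity[current_mark];
        }

        size_t written = std::min(rows_to_write, block_rows - current_row);
        std::cout << "rows [" << current_row << ", " << current_row + written
                  << "), new mark: " << (write_marks ? "yes" : "no") << "\n";

        current_row += written;
        if (write_marks)
            ++current_mark;

        if (written < rows_to_write)
            return rows_to_write - written;   // next block must finish this granule first
    }
    return 0;
}

int main()
{
    // Granules of 4 rows; the previous block had already left 2 rows of its last granule unfilled.
    size_t next_index_offset = writeBlock(8, /* from_mark */ 1, /* index_offset */ 2, {4, 4, 4, 4});
    std::cout << "index_offset for next block: " << next_index_offset << "\n";
}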
@@ -6,37 +6,17 @@ namespace DB
class MergeTreeDataPartWriterCompact : IMergeTreeDataPartWriter
{
public:
    size_t write(const Block & block, size_t from_mark, size_t offset,
    size_t write(const Block & block, size_t from_mark, size_t index_offset, const MergeTreeIndexGranularity & index_granularity,
        const Block & primary_key_block, const Block & skip_indexes_block) override;

    std::pair<size_t, size_t> writeColumn(
        const String & name,
        const IDataType & type,
        const IColumn & column,
        WrittenOffsetColumns & offset_columns,
        bool skip_offsets,
        IDataType::SerializeBinaryBulkStatePtr & serialization_state,
        size_t from_mark) override;

    /// Write single granule of one column (rows between 2 marks)
    size_t writeColumnSingleGranule(
        const ColumnWithTypeAndName & column,
        WrittenOffsetColumns & offset_columns,
        bool skip_offsets,
        IDataType::SerializeBinaryBulkStatePtr & serialization_state,
        IDataType::SerializeBinaryBulkSettings & serialize_settings,
        size_t from_row,
        size_t number_of_rows);

    void writeSingleMark()

protected:
    void start() override;

private:
    ColumnStream stream;
    MergeTreeIndexGranularity * index_granularity = nullptr;
    Columns columns_to_write;
    ColumnStreamPtr stream;
};

}
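For context, the compact writer declared above targets a layout where all columns share one data file, so a mark has to record per-column offsets instead of one offset per column file. A simplified, self-contained sketch of such a mark structure (the layout and byte arithmetic are illustrative only):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Toy mark for a compact part: one row count plus one data-file offset per column.
struct CompactMark
{
    size_t rows_in_granule = 0;
    std::vector<size_t> column_offsets;
};

int main()
{
    const std::vector<std::string> columns = {"id", "value", "comment"};
    std::vector<CompactMark> marks;

    size_t file_offset = 0;
    for (size_t rows : {8192, 8192})
    {
        CompactMark mark;
        mark.rows_in_granule = rows;
        for (size_t i = 0; i < columns.size(); ++i)
        {
            mark.column_offsets.push_back(file_offset);
            file_offset += rows * 4;   // pretend every value serializes to 4 bytes
        }
        marks.push_back(mark);
    }

    std::cout << "granules written: " << marks.size()
              << ", data file size: " << file_offset << " bytes\n";
}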
@@ -1,67 +1,210 @@
// #include <Storages/MergeTree/MergeTreeDataPartWriterWide.h>
#include <Storages/MergeTree/MergeTreeDataPartWriterWide.h>

// namespace DB
// {
namespace DB
{

// size_t MergeTreeDataPartWriterWide::write(size_t current_mark, const Block & block)
// {
//     if (!started)
//         start();
size_t MergeTreeDataPartWriterWide::write(const Block & block, size_t from_mark, size_t index_offset,
    const MergeTreeIndexGranularity & index_granularity,
    const Block & primary_key_block, const Block & skip_indexes_block)
{
    if (serialization_states.empty())
    {
        serialization_states.reserve(columns_list.size());
        WrittenOffsetColumns tmp_offset_columns;
        IDataType::SerializeBinaryBulkSettings settings;

//     size_t index_offset = 0;
//     auto it = columns_list.begin();
//     for (size_t i = 0; i < columns_list.size(); ++i, ++it)
//     {
//         const ColumnWithTypeAndName & column = block.getByName(it->name);
        for (const auto & col : columns_list)
        {
            settings.getter = createStreamGetter(col.name, tmp_offset_columns, false);
            serialization_states.emplace_back(nullptr);
            col.type->serializeBinaryBulkStatePrefix(settings, serialization_states.back());
        }
    }

//         if (permutation)
//         {
//             auto primary_column_it = primary_key_column_name_to_position.find(it->name);
//             auto skip_index_column_it = skip_indexes_column_name_to_position.find(it->name);
    WrittenOffsetColumns offset_columns;
    size_t new_index_offset = 0;

//             if (primary_key_column_name_to_position.end() != primary_column_it)
//             {
//                 const auto & primary_column = *primary_key_columns[primary_column_it->second].column;
//                 std::tie(std::ignore, index_offset) = writeColumn(column.name, *column.type, primary_column, offset_columns, false, serialization_states[i], current_mark);
//             }
//             else if (skip_indexes_column_name_to_position.end() != skip_index_column_it)
//             {
//                 const auto & index_column = *skip_indexes_columns[skip_index_column_it->second].column;
//                 std::tie(std::ignore, index_offset) = writeColumn(column.name, *column.type, index_column, offset_columns, false, serialization_states[i], current_mark);
//             }
//             else
//             {
//                 /// We rearrange the columns that are not included in the primary key here; Then the result is released - to save RAM.
//                 ColumnPtr permuted_column = column.column->permute(*permutation, 0);
//                 std::tie(std::ignore, index_offset) = writeColumn(column.name, *column.type, *permuted_column, offset_columns, false, serialization_states[i], current_mark);
//             }
//         }
//         else
//         {
//             std::tie(std::ignore, index_offset) = writeColumn(column.name, *column.type, *column.column, offset_columns, false, serialization_states[i], current_mark);
//         }
//     }
    auto it = columns_list.begin();
    for (size_t i = 0; i < columns_list.size(); ++i, ++it)
    {
        const ColumnWithTypeAndName & column = block.getByName(it->name);

//     return index_offset;
// }
        if (permutation)
        {
            if (primary_key_block.has(it->name))
            {
                const auto & primary_column = *primary_key_block.getByName(it->name).column;
                std::tie(std::ignore, new_index_offset) = writeColumn(column.name, *column.type, primary_column, offset_columns, false, serialization_states[i], from_mark, index_offset);
            }
            else if (skip_indexes_block.has(it->name))
            {
                const auto & index_column = *skip_indexes_block.getByName(it->name).column;
                std::tie(std::ignore, new_index_offset) = writeColumn(column.name, *column.type, index_column, offset_columns, false, serialization_states[i], from_mark, index_offset);
            }
            else
            {
                /// We rearrange the columns that are not included in the primary key here; Then the result is released - to save RAM.
                ColumnPtr permuted_column = column.column->permute(*permutation, 0);
                std::tie(std::ignore, new_index_offset) = writeColumn(column.name, *column.type, *permuted_column, offset_columns, false, serialization_states[i], from_mark, index_offset);
            }
        }
        else
        {
            std::tie(std::ignore, new_index_offset) = writeColumn(column.name, *column.type, *column.column, offset_columns, false, serialization_states[i], from_mark, index_offset);
        }
    }

// void MergeTreeDataPartWriterWide::start()
// {
//     if (started)
//         return;
    return new_index_offset;
}

//     started = true;
void MergeTreeDataPartWriterWide::writeSingleMark(
    const String & name,
    const IDataType & type,
    WrittenOffsetColumns & offset_columns,
    bool skip_offsets,
    size_t number_of_rows,
    DB::IDataType::SubstreamPath & path)
{
    type.enumerateStreams([&] (const IDataType::SubstreamPath & substream_path)
    {
        bool is_offsets = !substream_path.empty() && substream_path.back().type == IDataType::Substream::ArraySizes;
        if (is_offsets && skip_offsets)
            return;

//     serialization_states.reserve(columns_list.size());
//     WrittenOffsetColumns tmp_offset_columns;
//     IDataType::SerializeBinaryBulkSettings settings;
        String stream_name = IDataType::getFileNameForStream(name, substream_path);

//     for (const auto & col : columns_list)
//     {
//         settings.getter = createStreamGetter(col.name, tmp_offset_columns, false);
//         serialization_states.emplace_back(nullptr);
//         col.type->serializeBinaryBulkStatePrefix(settings, serialization_states.back());
//     }
// }
        /// Don't write offsets more than one time for Nested type.
        if (is_offsets && offset_columns.count(stream_name))
            return;

// }
        ColumnStream & stream = *column_streams[stream_name];

        /// There could already be enough data to compress into the new block.
        if (stream.compressed.offset() >= min_compress_block_size)
            stream.compressed.next();

        writeIntBinary(stream.plain_hashing.count(), stream.marks);
        writeIntBinary(stream.compressed.offset(), stream.marks);
        if (can_use_adaptive_granularity)
            writeIntBinary(number_of_rows, stream.marks);
    }, path);
}

size_t MergeTreeDataPartWriterWide::writeSingleGranule(
    const String & name,
    const IDataType & type,
    const IColumn & column,
    WrittenOffsetColumns & offset_columns,
    bool skip_offsets,
    IDataType::SerializeBinaryBulkStatePtr & serialization_state,
    IDataType::SerializeBinaryBulkSettings & serialize_settings,
    size_t from_row,
    size_t number_of_rows,
    bool write_marks)
{
    if (write_marks)
        writeSingleMark(name, type, offset_columns, skip_offsets, number_of_rows, serialize_settings.path);

    type.serializeBinaryBulkWithMultipleStreams(column, from_row, number_of_rows, serialize_settings, serialization_state);

    /// So that instead of the marks pointing to the end of the compressed block, there were marks pointing to the beginning of the next one.
    type.enumerateStreams([&] (const IDataType::SubstreamPath & substream_path)
    {
        bool is_offsets = !substream_path.empty() && substream_path.back().type == IDataType::Substream::ArraySizes;
        if (is_offsets && skip_offsets)
            return;

        String stream_name = IDataType::getFileNameForStream(name, substream_path);

        /// Don't write offsets more than one time for Nested type.
        if (is_offsets && offset_columns.count(stream_name))
            return;

        column_streams[stream_name]->compressed.nextIfAtEnd();
    }, serialize_settings.path);

    return from_row + number_of_rows;
}

/// column must not be empty. (column.size() !== 0)

std::pair<size_t, size_t> MergeTreeDataPartWriterWide::writeColumn(
    const String & name,
    const IDataType & type,
    const IColumn & column,
    WrittenOffsetColumns & offset_columns,
    bool skip_offsets,
    IDataType::SerializeBinaryBulkStatePtr & serialization_state,
    size_t from_mark)
{
    auto & settings = storage.global_context.getSettingsRef();
    IDataType::SerializeBinaryBulkSettings serialize_settings;
    serialize_settings.getter = createStreamGetter(name, offset_columns, skip_offsets);
    serialize_settings.low_cardinality_max_dictionary_size = settings.low_cardinality_max_dictionary_size;
    serialize_settings.low_cardinality_use_single_dictionary_for_part = settings.low_cardinality_use_single_dictionary_for_part != 0;

    size_t total_rows = column.size();
    size_t current_row = 0;
    size_t current_column_mark = from_mark;
    while (current_row < total_rows)
    {
        size_t rows_to_write;
        bool write_marks = true;

        /// If there is `index_offset`, then the first mark goes not immediately, but after this number of rows.
        if (current_row == 0 && index_offset != 0)
        {
            write_marks = false;
            rows_to_write = index_offset;
        }
        else
        {
            if (index_granularity.getMarksCount() <= current_column_mark)
                throw Exception(
                    "Incorrect size of index granularity expect mark " + toString(current_column_mark) + " totally have marks " + toString(index_granularity.getMarksCount()),
                    ErrorCodes::LOGICAL_ERROR);

            rows_to_write = index_granularity.getMarkRows(current_column_mark);
        }

        current_row = writeSingleGranule(
            name,
            type,
            column,
            offset_columns,
            skip_offsets,
            serialization_state,
            serialize_settings,
            current_row,
            rows_to_write,
            write_marks
        );

        if (write_marks)
            current_column_mark++;
    }

    /// Memoize offsets for Nested types, that are already written. They will not be written again for next columns of Nested structure.
    type.enumerateStreams([&] (const IDataType::SubstreamPath & substream_path)
    {
        bool is_offsets = !substream_path.empty() && substream_path.back().type == IDataType::Substream::ArraySizes;
        if (is_offsets)
        {
            String stream_name = IDataType::getFileNameForStream(name, substream_path);
            offset_columns.insert(stream_name);
        }
    }, serialize_settings.path);

    return std::make_pair(current_column_mark, current_row - total_rows);


void MergeTreeDataPartWriterWide::start()
{
    if (started)
        return;

    started = true;

}

}
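writeSingleMark() above emits, per stream, the offset of the current compressed block in the data file and the offset inside that block, plus the granule's row count when adaptive granularity is enabled. A small standalone model of that mark bookkeeping (field names, sizes and the compression arithmetic are illustrative, not the real mark file format):

#include <cstddef>
#include <iostream>
#include <vector>

// Toy mark for one column stream of a wide part.
struct Mark
{
    size_t offset_in_compressed_file = 0;
    size_t offset_in_decompressed_block = 0;
    size_t rows_in_granule = 0;   // only written when adaptive granularity is used
};

int main()
{
    std::vector<Mark> marks;
    size_t compressed_file_offset = 0;      // bytes already flushed to the .bin file
    size_t uncompressed_block_offset = 0;   // bytes buffered in the current block

    for (size_t rows : {8192, 8192, 1024})  // granule sizes of one column
    {
        marks.push_back({compressed_file_offset, uncompressed_block_offset, rows});
        // pretend every row serializes to 4 bytes and each granule flushes one compressed block
        compressed_file_offset += rows * 4 / 3;
        uncompressed_block_offset = 0;
    }

    for (const auto & m : marks)
        std::cout << m.offset_in_compressed_file << " " << m.offset_in_decompressed_block
                  << " " << m.rows_in_granule << "\n";
}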
@@ -1,28 +1,61 @@
// #include <Storages/MergeTree/IMergeTreeDataPartWriter.h>
#include <Storages/MergeTree/IMergeTreeDataPartWriter.h>

// namespace DB
// {
namespace DB
{

// class MergeTreeDataPartWriterWide : IMergeTreeDataPartWriter
// {
// public:
//     size_t write(size_t current_mark, const Block & block) override;
class MergeTreeDataPartWriterWide : IMergeTreeDataPartWriter
{
public:
    size_t write(const Block & block, size_t from_mark, size_t index_offset,
        const MergeTreeIndexGranularity & index_granularity,
        const Block & primary_key_block, const Block & skip_indexes_block) override;

//     std::pair<size_t, size_t> writeColumn(
//         const String & name,
//         const IDataType & type,
//         const IColumn & column,
//         WrittenOffsetColumns & offset_columns,
//         bool skip_offsets,
//         IDataType::SerializeBinaryBulkStatePtr & serialization_state,
//         size_t from_mark) override;
    void addStreams(const String & path, const String & name, const IDataType & type,
        const CompressionCodecPtr & codec, size_t estimated_size, bool skip_offsets);

// protected:
//     void start() override;

// private:
//     SerializationStates serialization_states;
//     NameSet permuted_columns;
// };
    IDataType::OutputStreamGetter createStreamGetter(const String & name, WrittenOffsetColumns & offset_columns, bool skip_offsets);

// }
    /// Write data of one column.
    /// Return how many marks were written and
    /// how many rows were written for last mark
    std::pair<size_t, size_t> writeColumn(
        const String & name,
        const IDataType & type,
        const IColumn & column,
        WrittenOffsetColumns & offset_columns,
        bool skip_offsets,
        IDataType::SerializeBinaryBulkStatePtr & serialization_state,
        size_t from_mark,
        size_t index_offset);

private:
    /// Write single granule of one column (rows between 2 marks)
    size_t writeSingleGranule(
        const String & name,
        const IDataType & type,
        const IColumn & column,
        WrittenOffsetColumns & offset_columns,
        bool skip_offsets,
        IDataType::SerializeBinaryBulkStatePtr & serialization_state,
        IDataType::SerializeBinaryBulkSettings & serialize_settings,
        size_t from_row,
        size_t number_of_rows,
        bool write_marks);

    /// Write mark for column
    void writeSingleMark(
        const String & name,
        const IDataType & type,
        WrittenOffsetColumns & offset_columns,
        bool skip_offsets,
        size_t number_of_rows,
        DB::IDataType::SubstreamPath & path);

    SerializationStates serialization_states;
    bool can_use_adaptive_granularity;
    ColumnStreams column_streams;

};

}
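The WrittenOffsetColumns set threaded through this writer exists because the sizes (offsets) substream of a Nested structure is shared by all of its subcolumns and must be written only once per part. A self-contained sketch of that deduplication (the stream names are made up for illustration):

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main()
{
    std::set<std::string> offset_columns;   // streams whose offsets were already written
    std::vector<std::string> nested_columns = {"nested.key", "nested.value"};

    for (const auto & column : nested_columns)
    {
        const std::string sizes_stream = "nested.size0";   // offsets stream shared by both subcolumns
        if (offset_columns.count(sizes_stream))
            std::cout << column << ": offsets already written, skipping\n";
        else
        {
            std::cout << column << ": writing offsets stream " << sizes_stream << "\n";
            offset_columns.insert(sizes_stream);
        }
    }
}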
@@ -5,6 +5,7 @@
#include <Storages/MergeTree/MergeTreeIndexGranularityInfo.h>
#include <Compression/CachedCompressedReadBuffer.h>
#include <Compression/CompressedReadBufferFromFile.h>
#include <Storages/MergeTree/MergeTreeReaderSettings.h>


namespace DB