polymorphic parts (development)

commit 55deeea608
parent d1ddfbb415
@@ -40,8 +40,6 @@ public:
      */
     Result add(MutableColumns && columns);
 
-    bool hasPendingData() { return !accumulated_columns.empty(); }
-
 private:
     size_t min_block_size_rows;
     size_t min_block_size_bytes;
@@ -131,17 +131,16 @@ void fillIndexGranularityImpl(
     size_t current_row;
     for (current_row = index_offset; current_row < rows_in_block; current_row += index_granularity_for_block)
     {
-        size_t rows_rest_in_block = rows_in_block - current_row;
-        std::cerr << "rows_rest_in_block: " << rows_rest_in_block << "\n";
-        std::cerr << "rows_rest_in_block: " << index_granularity_for_block << "\n";
+        size_t rows_left_in_block = rows_in_block - current_row;
+        if (need_finish_last_granule && rows_left_in_block < index_granularity_for_block)
 
-        /// FIXME may be remove need_finish_last_granule and do it always
-        if (need_finish_last_granule && rows_rest_in_block < index_granularity_for_block)
         {
-            if (rows_rest_in_block * 2 >= index_granularity_for_block)
-                index_granularity.appendMark(rows_rest_in_block);
+            /// If enough rows are left, create a new granule. Otherwise, extend previous granule.
+            /// So, real size of granule differs from index_granularity_for_block not more than 50%.
+            if (rows_left_in_block * 2 >= index_granularity_for_block)
+                index_granularity.appendMark(rows_left_in_block);
             else
-                index_granularity.addRowsToLastMark(rows_rest_in_block);
+                index_granularity.addRowsToLastMark(rows_left_in_block);
         }
         else
         {
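
Note (editor's illustration, not part of this commit): the tail handling above can be shown with a minimal standalone sketch. It is toy code, not the MergeTreeIndexGranularity API; the !marks.empty() guard stands in for the need_finish_last_granule flag that the real function takes as a parameter. A leftover tail of at least half the target size becomes its own mark, a smaller tail is folded into the previous mark, so no granule deviates from the target by more than 50%.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<size_t> marks;          // rows per granule
        const size_t granularity = 8192;    // plays the role of index_granularity_for_block
        const size_t rows_in_block = 21000;

        for (size_t row = 0; row < rows_in_block; row += granularity)
        {
            size_t rows_left = rows_in_block - row;
            if (rows_left < granularity && !marks.empty())
            {
                // Same rule as in the diff: a tail of at least half the target size
                // becomes its own mark, a smaller tail extends the previous mark.
                if (rows_left * 2 >= granularity)
                    marks.push_back(rows_left);
                else
                    marks.back() += rows_left;
            }
            else
                marks.push_back(rows_left < granularity ? rows_left : granularity);
        }

        for (size_t rows : marks)
            std::cout << rows << "\n";      // prints 8192, 8192, 4616
    }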
@@ -69,7 +69,6 @@ public:
         const MergeTreeIndexGranularity & index_granularity,
         bool need_finish_last_granule);
 
-    /// FIXME remove indices block
     virtual void write(
         const Block & block, const IColumn::Permutation * permutation,
         /* Blocks with already sorted index columns */
@@ -186,9 +186,13 @@ void MergeTreeDataPartCompact::loadIndexGranularity()
         readIntBinary(granularity, buffer);
         index_granularity.appendMark(granularity);
         /// Skip offsets for columns
-        buffer.seek(index_granularity_info.mark_size_in_bytes, SEEK_CUR);
+        buffer.seek(columns.size() * sizeof(MarkInCompressedFile), SEEK_CUR);
     }
 
+    std::cerr << "(loadIndexGranularity) marks: " << index_granularity.getMarksCount() << "\n";
+    std::cerr << "(loadIndexGranularity) mark size: " << index_granularity_info.mark_size_in_bytes << "\n";
+    std::cerr << "(loadIndexGranularity) marks file size: " << marks_file_size << "\n";
+
     if (index_granularity.getMarksCount() * index_granularity_info.mark_size_in_bytes != marks_file_size)
         throw Exception("Cannot read all marks from file " + marks_file_path, ErrorCodes::CANNOT_READ_ALL_DATA);
 
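
Note (editor's illustration, not part of this commit): the size check above multiplies the loaded mark count by mark_size_in_bytes and requires an exact match with the marks file size. The rough sketch below restates that arithmetic with a hypothetical helper; the per-mark layout (one MarkInCompressedFile entry per column plus a 64-bit rows count) is an assumption for illustration, and the authoritative mark_size_in_bytes formula lives in index_granularity_info.

    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <string>

    // Two offsets per column, as in ClickHouse's MarkInCompressedFile.
    struct MarkInCompressedFile
    {
        uint64_t offset_in_compressed_file;
        uint64_t offset_in_decompressed_block;
    };

    // Hypothetical helper illustrating the consistency check; assumes one
    // MarkInCompressedFile entry per column plus a 64-bit rows count per mark.
    void checkMarksFileSize(size_t marks_count, size_t columns_count, size_t marks_file_size)
    {
        size_t mark_size_in_bytes = columns_count * sizeof(MarkInCompressedFile) + sizeof(uint64_t);
        if (marks_count * mark_size_in_bytes != marks_file_size)
            throw std::runtime_error(
                "Cannot read all marks: expected " + std::to_string(marks_count * mark_size_in_bytes)
                + " bytes, got " + std::to_string(marks_file_size));
    }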
@@ -39,23 +39,38 @@ void MergeTreeDataPartWriterCompact::write(
     const Block & block, const IColumn::Permutation * permutation,
     const Block & primary_key_block, const Block & skip_indexes_block)
 {
-    UNUSED(primary_key_block);
-    UNUSED(skip_indexes_block);
-
     if (!header)
         header = block.cloneEmpty();
 
-    Block result_block = block;
+    /// Fill index granularity for this block
+    /// if it's unknown (in case of insert data or horizontal merge,
+    /// but not in case of vertical merge)
+    /// FIXME maybe it's wrong at this stage.
+    if (compute_granularity)
+        fillIndexGranularity(block);
+
+    Block result_block;
 
     if (permutation)
     {
-        auto it = columns_list.begin();
-        for (size_t i = 0; i < columns_list.size(); ++i)
+        for (const auto & it : columns_list)
         {
-            auto & column = result_block.getByName(it->name);
-            column.column = column.column->permute(*permutation, 0);
+            if (primary_key_block.has(it.name))
+                result_block.insert(primary_key_block.getByName(it.name));
+            else if (skip_indexes_block.has(it.name))
+                result_block.insert(skip_indexes_block.getByName(it.name));
+            else
+            {
+                auto column = block.getByName(it.name);
+                column.column = column.column->permute(*permutation, 0);
+                result_block.insert(column);
+            }
        }
     }
+    else
+    {
+        result_block = block;
+    }
 
     auto result = squashing.add(result_block.mutateColumns());
     if (!result.ready)
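
Note (editor's illustration, not part of this commit): the column assembly in the new write() above can be restated as a simplified, self-contained model. Toy types only; Block, ColumnWithTypeAndName and the real permute() are not reproduced. Columns that were already sorted as part of the primary key or a skip index are taken from those blocks as-is, every other column is permuted from the source block, and without a permutation the block is used unchanged.

    #include <cstddef>
    #include <map>
    #include <string>
    #include <vector>

    using ToyColumn = std::vector<int>;
    using ToyBlock = std::map<std::string, ToyColumn>;

    // Reorder a column according to a row permutation.
    ToyColumn permute(const ToyColumn & column, const std::vector<size_t> & permutation)
    {
        ToyColumn result;
        result.reserve(permutation.size());
        for (size_t row : permutation)
            result.push_back(column[row]);
        return result;
    }

    ToyBlock assembleResultBlock(
        const std::vector<std::string> & columns_list,
        const ToyBlock & block,
        const ToyBlock & primary_key_block,
        const ToyBlock & skip_indexes_block,
        const std::vector<size_t> * permutation)
    {
        if (!permutation)
            return block;                                    // no sorting needed, write as-is

        ToyBlock result_block;
        for (const auto & name : columns_list)
        {
            if (auto it = primary_key_block.find(name); it != primary_key_block.end())
                result_block[name] = it->second;             // already sorted with the primary key
            else if (auto it = skip_indexes_block.find(name); it != skip_indexes_block.end())
                result_block[name] = it->second;             // already sorted for a skip index
            else
                result_block[name] = permute(block.at(name), *permutation);
        }
        return result_block;
    }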
@@ -72,12 +87,6 @@ void MergeTreeDataPartWriterCompact::writeBlock(const Block & block)
     size_t from_mark = current_mark;
     size_t current_row = 0;
 
-    /// Fill index granularity for this block
-    /// if it's unknown (in case of insert data or horizontal merge,
-    /// but not in case of vertical merge)
-    if (compute_granularity)
-        fillIndexGranularity(block);
-
     std::cerr << "(MergeTreeDataPartWriterCompact::write) marks: " << index_granularity.getMarksCount() << "\n";
 
     for (size_t i = 0; i < index_granularity.getMarksCount(); ++i)
@@ -104,7 +104,7 @@ void MergeTreeDataPartWriterWide::write(const Block & block,
     /// but not in case of vertical merge)
     if (compute_granularity)
         fillIndexGranularity(block);
 
     std::cerr << "(MergeTreeDataPartWriterWide::write) marks_count: " << index_granularity.getMarksCount() << "\n";
 
     WrittenOffsetColumns offset_columns;