From 9c1717b42d7897cd7f1c020cc3a967160307a6e7 Mon Sep 17 00:00:00 2001
From: Anton Popov
Date: Fri, 27 Jan 2023 13:35:04 +0000
Subject: [PATCH] fix computation of granularity in vertical merges

---
 .../MergeTree/MergeTreeDataPartWriterOnDisk.cpp | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp
index 1f40177d0fa..61cb9a4e7bd 100644
--- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp
+++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp
@@ -135,6 +135,7 @@ static size_t computeIndexGranularityImpl(
 {
     size_t rows_in_block = block.rows();
     size_t index_granularity_for_block;
+
     if (!can_use_adaptive_index_granularity)
     {
         index_granularity_for_block = fixed_index_granularity_rows;
@@ -143,7 +144,9 @@
     {
         size_t block_size_in_memory = block.bytes();
         if (blocks_are_granules)
+        {
             index_granularity_for_block = rows_in_block;
+        }
         else if (block_size_in_memory >= index_granularity_bytes)
         {
             size_t granules_in_block = block_size_in_memory / index_granularity_bytes;
@@ -155,10 +158,14 @@
             index_granularity_for_block = index_granularity_bytes / size_of_row_in_bytes;
         }
     }
-    /// We should be less or equal than fixed index granularity
-    index_granularity_for_block = std::min(fixed_index_granularity_rows, index_granularity_for_block);
 
-    /// very rare case when index granularity bytes less then single row
+    /// We should be less than or equal to the fixed index granularity.
+    /// But if the block size is the granule size then do not adjust it.
+    /// Granularity greater than the fixed granularity might come from a compact part.
+    if (!blocks_are_granules)
+        index_granularity_for_block = std::min(fixed_index_granularity_rows, index_granularity_for_block);
+
+    /// Very rare case when index granularity bytes is less than a single row.
     if (index_granularity_for_block == 0)
         index_granularity_for_block = 1;
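
Below is a minimal standalone sketch of the post-patch logic, for illustration only. It is not part of the patch: the helper name computeGranularitySketch, the flattened if/else structure, and the literal sizes in main() are hypothetical. It shows the effect of the change: the std::min clamp to the fixed granularity is now skipped when blocks_are_granules is set, because during a vertical merge each block is written as exactly one granule, and a granule coming from a compact part may legitimately hold more rows than the fixed setting.

#include <algorithm>
#include <cstddef>
#include <iostream>

/// Sketch of the post-patch computation (hypothetical helper, not the ClickHouse API).
static size_t computeGranularitySketch(
    size_t rows_in_block,
    size_t block_size_in_memory,
    size_t fixed_index_granularity_rows,
    size_t index_granularity_bytes,
    bool can_use_adaptive_index_granularity,
    bool blocks_are_granules)
{
    size_t index_granularity_for_block;

    if (!can_use_adaptive_index_granularity)
    {
        index_granularity_for_block = fixed_index_granularity_rows;
    }
    else if (blocks_are_granules)
    {
        /// In a vertical merge the whole block becomes one granule.
        index_granularity_for_block = rows_in_block;
    }
    else if (block_size_in_memory >= index_granularity_bytes)
    {
        size_t granules_in_block = block_size_in_memory / index_granularity_bytes;
        index_granularity_for_block = rows_in_block / granules_in_block;
    }
    else
    {
        size_t size_of_row_in_bytes = std::max<size_t>(block_size_in_memory / rows_in_block, 1);
        index_granularity_for_block = index_granularity_bytes / size_of_row_in_bytes;
    }

    /// The key change: only clamp to the fixed granularity when the block is
    /// not itself a granule. A granule taken from a compact part may be larger
    /// than the fixed setting and must keep its row count.
    if (!blocks_are_granules)
        index_granularity_for_block = std::min(fixed_index_granularity_rows, index_granularity_for_block);

    /// Very rare case: a single row is bigger than index_granularity_bytes.
    if (index_granularity_for_block == 0)
        index_granularity_for_block = 1;

    return index_granularity_for_block;
}

int main()
{
    /// Hypothetical numbers: a 10000-row block, fixed granularity of 8192 rows,
    /// adaptive granularity threshold of 10 MiB.
    std::cout << computeGranularitySketch(10000, 1 << 20, 8192, 10 << 20, true, true) << '\n';  /// 10000 (block is a granule, not clamped)
    std::cout << computeGranularitySketch(10000, 1 << 20, 8192, 10 << 20, true, false) << '\n'; /// 8192 (clamped to the fixed granularity)
}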