Fix possible PARAMETER_OUT_OF_BOUND error during subcolumns reading from wide part in MergeTree
Commit: a5e0c45603
Parent: f06330dd88
src/DataTypes/Serializations/SerializationArray.cpp

@@ -348,6 +348,8 @@ void SerializationArray::deserializeBinaryBulkWithMultipleStreams(
 {
     auto mutable_column = column->assumeMutable();
     ColumnArray & column_array = typeid_cast<ColumnArray &>(*mutable_column);
+    size_t prev_last_offset = column_array.getOffsets().back();
+
     settings.path.push_back(Substream::ArraySizes);
 
     if (auto cached_column = getFromSubstreamsCache(cache, settings.path))
@@ -371,9 +373,9 @@ void SerializationArray::deserializeBinaryBulkWithMultipleStreams(
 
     /// Number of values corresponding with `offset_values` must be read.
     size_t last_offset = offset_values.back();
-    if (last_offset < nested_column->size())
+    if (last_offset < prev_last_offset)
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Nested column is longer than last offset");
-    size_t nested_limit = last_offset - nested_column->size();
+    size_t nested_limit = last_offset - prev_last_offset;
 
     if (unlikely(nested_limit > MAX_ARRAYS_SIZE))
         throw Exception(ErrorCodes::TOO_LARGE_ARRAY_SIZE, "Array sizes are too large: {}", nested_limit);
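A note on the change (my reading of the diff; the commit message itself only names the symptom): when subcolumns of the same array are read from a wide part, the offsets column can already hold entries from earlier reads while the nested data column of the subcolumn currently being read has not grown in step with it, so nested_column->size() is not a reliable count of already-deserialized nested values. The new prev_last_offset, captured before the array sizes are read, is that count, which keeps nested_limit from being miscomputed and later tripping the PARAMETER_OUT_OF_BOUND check in the stream reader. A minimal, self-contained sketch of the arithmetic with hypothetical numbers:

#include <cassert>
#include <cstddef>

int main()
{
    // Hypothetical state: the offsets column already ends at 950 and the next
    // read of array sizes extends it to 1000.
    size_t prev_last_offset = 950;
    size_t last_offset = 1000;

    // Size of the nested data column; assume it lags behind the offsets here,
    // as can happen when only a subcolumn of the nested data is materialized.
    size_t nested_size = 0;

    size_t old_limit = last_offset - nested_size;       // 1000 nested values requested
    size_t new_limit = last_offset - prev_last_offset;  // 50 nested values requested

    assert(old_limit == 1000);
    assert(new_limit == 50);
    return 0;
}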
New test:

@@ -0,0 +1,15 @@
+DROP TABLE IF EXISTS test;
+CREATE TABLE test
+(
+    `id` UInt64,
+    `t` Tuple(a UInt64, b Array(Tuple(c UInt64, d UInt64)))
+)
+ENGINE = MergeTree
+ORDER BY id
+SETTINGS min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1, index_granularity = 8192;
+
+INSERT INTO test SELECT number, tuple(number, arrayMap(x -> tuple(number + 1, number + 2), range(number % 10))) FROM numbers(100000);
+INSERT INTO test SELECT number, tuple(number, arrayMap(x -> tuple(number + 1, number + 2), range(number % 10))) FROM numbers(100000);
+INSERT INTO test SELECT number, tuple(number, arrayMap(x -> tuple(number + 1, number + 2), range(number % 10))) FROM numbers(100000);
+SELECT t.b, t.b.c FROM test ORDER BY id FORMAT Null;
+DROP TABLE test;
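The test pins min_rows_for_wide_part and min_bytes_for_wide_part to 1 so every insert produces a wide part, then reads both the array subcolumn t.b and the nested subcolumn t.b.c in one query, which is the access pattern the commit title describes as failing. As an optional sanity check (not part of the test, just one way to verify the setup), the part format can be inspected via system.parts:

SELECT name, part_type
FROM system.parts
WHERE database = currentDatabase() AND table = 'test' AND active;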