Mirror of https://github.com/ClickHouse/ClickHouse.git
fix reading of sparse columns with prefetch
commit da2413f7d2
parent 54a0985c42
@@ -263,6 +263,12 @@ void SerializationSparse::deserializeBinaryBulkWithMultipleStreams(
 {
     auto * state_sparse = checkAndGetState<DeserializeStateSparse>(state);
 
+    if (auto cached_column = getFromSubstreamsCache(cache, settings.path))
+    {
+        column = cached_column;
+        return;
+    }
+
     if (!settings.continuous_reading)
         state_sparse->reset();
 
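The added block is a standard read-through lookup: if an earlier read (for example, of another subcolumn of the same tuple) already assembled this column, it is reused and the streams are not touched at all. Below is a minimal, self-contained sketch of that pattern, with hypothetical simplified types standing in for ClickHouse's real ColumnPtr and SubstreamsCache interfaces:

#include <map>
#include <memory>
#include <string>

// Hypothetical stand-ins for ClickHouse's ColumnPtr and substreams cache;
// the real types live in the ISerialization machinery.
struct Column {};
using ColumnPtr = std::shared_ptr<Column>;
using SubstreamsCache = std::map<std::string, ColumnPtr>;

// A null cache or a missing entry yields nullptr, so the caller
// falls through to a real read from the streams.
ColumnPtr getFromSubstreamsCache(SubstreamsCache * cache, const std::string & path)
{
    if (!cache)
        return nullptr;
    auto it = cache->find(path);
    return it == cache->end() ? nullptr : it->second;
}

void deserialize(ColumnPtr & column, SubstreamsCache * cache, const std::string & path)
{
    if (auto cached_column = getFromSubstreamsCache(cache, path))
    {
        column = cached_column;  // reuse the column assembled by an earlier read
        return;
    }
    // ... otherwise read offsets and values from the streams ...
    column = std::make_shared<Column>();
}

int main()
{
    SubstreamsCache cache;
    ColumnPtr column;
    deserialize(column, &cache, "t.a");  // miss: falls through to the stream read
}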
@@ -281,7 +287,8 @@ void SerializationSparse::deserializeBinaryBulkWithMultipleStreams(
     size_t values_limit = offsets_data.size() - old_size;
 
     settings.path.back() = Substream::SparseElements;
-    nested->deserializeBinaryBulkWithMultipleStreams(values_column, values_limit, settings, state_sparse->nested, cache);
+    /// Do not use substream cache while reading values column, because ColumnSparse can be cached only as a whole.
+    nested->deserializeBinaryBulkWithMultipleStreams(values_column, values_limit, settings, state_sparse->nested, nullptr);
     settings.path.pop_back();
 
     if (offsets_data.size() + 1 != values_column->size())
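This one-line change is the crux of the fix: the nested values read now receives nullptr instead of cache, so the bare values column can neither be stored in nor served from the substreams cache. As the new comment explains, a ColumnSparse is valid in the cache only as a whole (offsets plus values together); caching just the values subcolumn under this path could hand a later reader an inconsistent column. A consolidated sketch of the corrected flow follows the last C++ hunk below.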
@@ -291,6 +298,7 @@ void SerializationSparse::deserializeBinaryBulkWithMultipleStreams(
     /// 'insertManyDefaults' just increases size of column.
     column_sparse.insertManyDefaults(read_rows);
     column = std::move(mutable_column);
+    addToSubstreamsCache(cache, settings.path, column);
 }
 
 /// All methods below just wrap nested serialization.
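Taken together, the three hunks give the corrected flow: return early on a whole-column cache hit, read the offsets, read the nested values with caching disabled, assemble the sparse column, and only then cache the assembled result. A self-contained sketch of that flow under the same assumptions as above (simplified hypothetical types, not ClickHouse's actual interfaces):

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

// Hypothetical, simplified stand-ins for ClickHouse's column and cache types.
struct Column
{
    std::vector<size_t> offsets;  // rows that hold non-default values
    std::vector<int> values;      // the non-default values themselves
};
using ColumnPtr = std::shared_ptr<Column>;
using SubstreamsCache = std::map<std::string, ColumnPtr>;

ColumnPtr getFromSubstreamsCache(SubstreamsCache * cache, const std::string & path)
{
    if (!cache)
        return nullptr;
    auto it = cache->find(path);
    return it == cache->end() ? nullptr : it->second;
}

void addToSubstreamsCache(SubstreamsCache * cache, const std::string & path, ColumnPtr column)
{
    if (cache)
        (*cache)[path] = std::move(column);
}

// Stand-in for the nested values deserialization. The cache parameter mirrors
// the real signature; after the fix the sparse caller always passes nullptr.
void deserializeValues(Column & col, size_t limit, SubstreamsCache * /*cache*/)
{
    col.values.assign(limit, 42);  // pretend we read `limit` values from a stream
}

void deserializeSparse(ColumnPtr & column, SubstreamsCache * cache, const std::string & path)
{
    // Hunk 1: a whole-column cache hit short-circuits all stream reads.
    if (auto cached_column = getFromSubstreamsCache(cache, path))
    {
        column = cached_column;
        return;
    }

    auto col = std::make_shared<Column>();
    col->offsets = {1, 4};  // stand-in for reading the offsets stream

    // Hunk 2: values are read with the cache disabled (nullptr), because a
    // sparse column may be cached only as an assembled whole.
    deserializeValues(*col, col->offsets.size(), nullptr);

    column = col;
    // Hunk 3: cache the fully assembled sparse column.
    addToSubstreamsCache(cache, path, column);
}

int main()
{
    SubstreamsCache cache;
    ColumnPtr first, second;
    deserializeSparse(first, &cache, "t.a");
    deserializeSparse(second, &cache, "t.a");
    std::cout << (first == second) << '\n';  // prints 1: second read is a cache hit
}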
@@ -1,4 +1,3 @@
--- Tags: no-s3-storage
 DROP TABLE IF EXISTS sparse_tuple;
 
 CREATE TABLE sparse_tuple (id UInt64, t Tuple(a UInt64, s String))
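For context (an inference, not stated in the commit): the no-s3-storage tag excludes a test from runs that use S3-backed storage, so removing it presumably lets this test exercise the remote-read/prefetch path that the serialization fix above addresses.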