fix query cache with sparse columns

Anton Popov 2023-04-06 15:59:13 +00:00
parent d7879c16e4
commit 4757d76fbf
3 changed files with 26 additions and 1 deletion


@@ -242,8 +242,9 @@ void QueryCache::Writer::finalizeWrite()
     Chunks squashed_chunks;
     size_t rows_remaining_in_squashed = 0; /// how many further rows can the last squashed chunk consume until it reaches max_block_size
 
-    for (const auto & chunk : *query_result)
+    for (auto & chunk : *query_result)
     {
+        convertToFullIfSparse(chunk);
         const size_t rows_chunk = chunk.getNumRows();
         size_t rows_chunk_processed = 0;
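
The loop now takes each chunk by mutable reference so it can be materialized in place before being sliced and squashed, keeping only full (non-sparse) columns in the cached result. As a rough, self-contained sketch of what "converting a sparse column to a full one" means (illustrative only; the SparseColumn type and convertToFull function below are hypothetical and not ClickHouse's ColumnSparse / convertToFullIfSparse API):

#include <cstddef>
#include <cstdint>
#include <vector>

// Toy model of a sparse column: only rows holding non-default values
// are stored explicitly, everything else is the default value.
struct SparseColumn
{
    size_t total_rows = 0;
    uint64_t default_value = 0;
    std::vector<size_t> offsets;   // row numbers with non-default values
    std::vector<uint64_t> values;  // the corresponding values, same length as offsets
};

// Expand to a dense vector with one entry per row, so the data can be
// sliced and appended row-by-row like any ordinary (full) column.
std::vector<uint64_t> convertToFull(const SparseColumn & sparse)
{
    std::vector<uint64_t> full(sparse.total_rows, sparse.default_value);
    for (size_t i = 0; i < sparse.offsets.size(); ++i)
        full[sparse.offsets[i]] = sparse.values[i];
    return full;
}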


@@ -0,0 +1,23 @@
-- Tags: no-parallel
DROP TABLE IF EXISTS t_cache_sparse;
SYSTEM DROP QUERY CACHE;
CREATE TABLE t_cache_sparse (id UInt64, v UInt64)
ENGINE = MergeTree ORDER BY id
SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9;
SYSTEM STOP MERGES t_cache_sparse;
INSERT INTO t_cache_sparse SELECT number, number FROM numbers(10000);
INSERT INTO t_cache_sparse SELECT number, 0 FROM numbers(10000);
SET allow_experimental_query_cache = 1;
SET use_query_cache = 1;
SET max_threads = 1;
SELECT v FROM t_cache_sparse FORMAT Null;
SELECT v FROM t_cache_sparse FORMAT Null;
SELECT count() FROM system.query_cache WHERE query LIKE 'SELECT v FROM t_cache_sparse%';
DROP TABLE t_cache_sparse;