From 39d706ba9f0c8e7f8c8d757e215f639f7d510fe2 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Fri, 5 Apr 2024 14:45:51 +0000 Subject: [PATCH] rework test --- .../__init__.py | 0 .../test.py | 53 ------------------- ...s_splitter_bug_and_index_loading.reference | 1 + ...3_parts_splitter_bug_and_index_loading.sql | 17 ++++++ 4 files changed, 18 insertions(+), 53 deletions(-) delete mode 100644 tests/integration/test_final_bug_with_pk_columns_loading/__init__.py delete mode 100644 tests/integration/test_final_bug_with_pk_columns_loading/test.py create mode 100644 tests/queries/0_stateless/03033_parts_splitter_bug_and_index_loading.reference create mode 100644 tests/queries/0_stateless/03033_parts_splitter_bug_and_index_loading.sql diff --git a/tests/integration/test_final_bug_with_pk_columns_loading/__init__.py b/tests/integration/test_final_bug_with_pk_columns_loading/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/integration/test_final_bug_with_pk_columns_loading/test.py b/tests/integration/test_final_bug_with_pk_columns_loading/test.py deleted file mode 100644 index 61559913e05..00000000000 --- a/tests/integration/test_final_bug_with_pk_columns_loading/test.py +++ /dev/null @@ -1,53 +0,0 @@ -import pytest -import logging - -from helpers.cluster import ClickHouseCluster - -cluster = ClickHouseCluster(__file__) -node = cluster.add_instance("node", stay_alive=True) - - -@pytest.fixture(scope="module") -def start_cluster(): - try: - logging.info("Starting cluster...") - cluster.start() - logging.info("Cluster started") - - yield cluster - finally: - cluster.shutdown() - - -def test_simple_query_after_index_reload(start_cluster): - node.query( - """ - create table t(a UInt32, b UInt32) engine=MergeTree order by (a, b) settings index_granularity=1; - - -- for this part the first columns is useless, so we have to use both - insert into t select 42, number from numbers_mt(100); - - -- for this part the first columns is enough - 
insert into t select number, number from numbers_mt(100); - """ - ) - - # force reloading index - node.restart_clickhouse() - - # the bug happened when we used (a, b) index values for one part and only (a) for another in PartsSplitter. even a simple count query is enough, - # because some granules were assinged to wrong layers and hence not returned from the reading step (because they were filtered out by `FilterSortedStreamByRange`) - assert ( - int( - node.query( - "select count() from t where not ignore(*)", - settings={ - "max_threads": 4, - "merge_tree_min_bytes_for_concurrent_read": 1, - "merge_tree_min_rows_for_concurrent_read": 1, - "merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability": 1, - }, - ) - ) - == 200 - ) diff --git a/tests/queries/0_stateless/03033_parts_splitter_bug_and_index_loading.reference b/tests/queries/0_stateless/03033_parts_splitter_bug_and_index_loading.reference new file mode 100644 index 00000000000..08839f6bb29 --- /dev/null +++ b/tests/queries/0_stateless/03033_parts_splitter_bug_and_index_loading.reference @@ -0,0 +1 @@ +200 diff --git a/tests/queries/0_stateless/03033_parts_splitter_bug_and_index_loading.sql b/tests/queries/0_stateless/03033_parts_splitter_bug_and_index_loading.sql new file mode 100644 index 00000000000..541ac67fd24 --- /dev/null +++ b/tests/queries/0_stateless/03033_parts_splitter_bug_and_index_loading.sql @@ -0,0 +1,17 @@ +create table t(a UInt32, b UInt32) engine=MergeTree order by (a, b) settings index_granularity=1; + +-- for this part the first column is useless, so we have to use both +insert into t select 42, number from numbers_mt(100); + +-- for this part the first column is enough +insert into t select number, number from numbers_mt(100); + +-- force reloading index +detach table t; +attach table t; + +set merge_tree_min_bytes_for_concurrent_read=1, merge_tree_min_rows_for_concurrent_read=1, 
merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=1.0, max_threads=4; + +-- the bug happened when we used (a, b) index values for one part and only (a) for another in PartsSplitter. even a simple count query is enough, +-- because some granules were assigned to wrong layers and hence not returned from the reading step (because they were filtered out by `FilterSortedStreamByRange`) +select count() from t where not ignore(*);