From a613382d48d25a4e0da4c5a69fa5b1c7e15e6f31 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Wed, 15 Sep 2021 21:18:06 +0300
Subject: [PATCH] Cover max_suspicious_broken_parts/max_suspicious_broken_parts_bytes

---
 .../__init__.py                              |   0
 .../test_max_suspicious_broken_parts/test.py | 121 ++++++++++++++++++
 2 files changed, 121 insertions(+)
 create mode 100644 tests/integration/test_max_suspicious_broken_parts/__init__.py
 create mode 100644 tests/integration/test_max_suspicious_broken_parts/test.py

diff --git a/tests/integration/test_max_suspicious_broken_parts/__init__.py b/tests/integration/test_max_suspicious_broken_parts/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/integration/test_max_suspicious_broken_parts/test.py b/tests/integration/test_max_suspicious_broken_parts/test.py
new file mode 100644
index 00000000000..31f53fdbc3c
--- /dev/null
+++ b/tests/integration/test_max_suspicious_broken_parts/test.py
@@ -0,0 +1,121 @@
+# pylint: disable=unused-argument
+# pylint: disable=redefined-outer-name
+# pylint: disable=line-too-long
+
+import pytest
+
+from helpers.client import QueryRuntimeException
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+node = cluster.add_instance('node', stay_alive=True)
+
+@pytest.fixture(scope='module', autouse=True)
+def start_cluster():
+    try:
+        cluster.start()
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+def break_part(table, part_name):
+    node.exec_in_container(['bash', '-c', f'rm /var/lib/clickhouse/data/default/{table}/{part_name}/columns.txt'])
+
+def remove_part(table, part_name):
+    node.exec_in_container(['bash', '-c', f'rm -r /var/lib/clickhouse/data/default/{table}/{part_name}'])
+
+def get_count(table):
+    return int(node.query(f'SELECT count() FROM {table}').strip())
+
+def detach_table(table):
+    node.query(f'DETACH TABLE {table}')
+def attach_table(table):
+    node.query(f'ATTACH TABLE {table}')
+
+def check_table(table):
+    rows = 900
+    per_part_rows = 90
+
+    node.query(f'INSERT INTO {table} SELECT * FROM numbers(900)')
+
+    assert get_count(table) == rows
+
+    # break one part, and check that clickhouse will be alive
+    break_part(table, '0_1_1_0')
+    rows -= per_part_rows
+    detach_table(table)
+    attach_table(table)
+    assert get_count(table) == rows
+
+    # break two parts, and check that clickhouse will not start
+    break_part(table, '1_2_2_0')
+    break_part(table, '2_3_3_0')
+    rows -= per_part_rows*2
+    detach_table(table)
+    with pytest.raises(QueryRuntimeException):
+        attach_table(table)
+
+    # now remove one part, and check
+    remove_part(table, '1_2_2_0')
+    attach_table(table)
+    assert get_count(table) == rows
+
+    node.query(f'DROP TABLE {table}')
+
+def test_max_suspicious_broken_parts():
+    node.query("""
+    CREATE TABLE test_max_suspicious_broken_parts (
+        key Int
+    )
+    ENGINE=MergeTree
+    ORDER BY key
+    PARTITION BY key%10
+    SETTINGS
+        max_suspicious_broken_parts = 1;
+    """)
+    check_table('test_max_suspicious_broken_parts')
+
+def test_max_suspicious_broken_parts_bytes():
+    node.query("""
+    CREATE TABLE test_max_suspicious_broken_parts_bytes (
+        key Int
+    )
+    ENGINE=MergeTree
+    ORDER BY key
+    PARTITION BY key%10
+    SETTINGS
+        max_suspicious_broken_parts = 10,
+        /* one part takes ~751 byte, so we allow failure of one part with these limit */
+        max_suspicious_broken_parts_bytes = 1000;
+    """)
+    check_table('test_max_suspicious_broken_parts_bytes')
+
+def test_max_suspicious_broken_parts__wide():
+    node.query("""
+    CREATE TABLE test_max_suspicious_broken_parts__wide (
+        key Int
+    )
+    ENGINE=MergeTree
+    ORDER BY key
+    PARTITION BY key%10
+    SETTINGS
+        min_bytes_for_wide_part = 0,
+        max_suspicious_broken_parts = 1;
+    """)
+    check_table('test_max_suspicious_broken_parts__wide')
+
+def test_max_suspicious_broken_parts_bytes__wide():
+    node.query("""
+    CREATE TABLE test_max_suspicious_broken_parts_bytes__wide (
+        key Int
+    )
+    ENGINE=MergeTree
+    ORDER BY key
+    PARTITION BY key%10
+    SETTINGS
+        min_bytes_for_wide_part = 0,
+        max_suspicious_broken_parts = 10,
+        /* one part takes ~750 byte, so we allow failure of one part with these limit */
+        max_suspicious_broken_parts_bytes = 1000;
+    """)
+    check_table('test_max_suspicious_broken_parts_bytes__wide')