ClickHouse/tests/integration/test_check_table/test.py

import concurrent.futures

import pytest

from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.corrupt_part_data_on_disk import corrupt_part_data_on_disk


cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", with_zookeeper=True)
node2 = cluster.add_instance("node2", with_zookeeper=True)


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
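

# Delete checksums.txt of a given part on disk; CHECK TABLE is expected to
# recount the checksums and write the file back.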
def remove_checksums_on_disk(node, database, table, part_name):
    part_path = node.query(
        f"SELECT path FROM system.parts WHERE database = '{database}' AND table = '{table}' AND name = '{part_name}'"
    ).strip()
    node.exec_in_container(
        ["bash", "-c", "rm -r {p}/checksums.txt".format(p=part_path)], privileged=True
    )
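

# Remove every file of a part on disk, simulating a completely lost part.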
def remove_part_from_disk(node, table, part_name):
    part_path = node.query(
        "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format(
            table, part_name
        )
    ).strip()
    if not part_path:
        raise Exception("Part " + part_name + " doesn't exist")
    node.exec_in_container(
        ["bash", "-c", "rm -r {p}/*".format(p=part_path)], privileged=True
    )
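

# CHECK TABLE on a non-replicated MergeTree table: lost checksums are
# recoverable, while corrupted data files are reported as broken (flag 0).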
@pytest.mark.parametrize("merge_tree_settings", [""])
def test_check_normal_table_corruption(started_cluster, merge_tree_settings):
    node1.query("DROP TABLE IF EXISTS non_replicated_mt")
    node1.query(
        f"""
        CREATE TABLE non_replicated_mt(date Date, id UInt32, value Int32)
        ENGINE = MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id
        {merge_tree_settings};
        """
    )

    node1.query(
        "INSERT INTO non_replicated_mt VALUES (toDate('2019-02-01'), 1, 10), (toDate('2019-02-01'), 2, 12)"
    )
    assert (
        node1.query(
            "CHECK TABLE non_replicated_mt PARTITION 201902",
            settings={"check_query_single_value_result": 0, "max_threads": 1},
        )
        == "201902_1_1_0\t1\t\n"
    )
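
    # Deleting checksums.txt is recoverable: CHECK TABLE recounts the
    # checksums and writes the file back to disk.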
    remove_checksums_on_disk(node1, "default", "non_replicated_mt", "201902_1_1_0")

    assert (
        node1.query(
            "CHECK TABLE non_replicated_mt",
            settings={"check_query_single_value_result": 0, "max_threads": 1},
        ).strip()
        == "201902_1_1_0\t1\tChecksums recounted and written to disk."
    )

    assert node1.query("SELECT COUNT() FROM non_replicated_mt") == "2\n"

    remove_checksums_on_disk(node1, "default", "non_replicated_mt", "201902_1_1_0")

    assert (
        node1.query(
            "CHECK TABLE non_replicated_mt PARTITION 201902",
            settings={"check_query_single_value_result": 0, "max_threads": 1},
        ).strip()
        == "201902_1_1_0\t1\tChecksums recounted and written to disk."
    )

    assert node1.query("SELECT COUNT() FROM non_replicated_mt") == "2\n"
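
    # Corrupting the data file itself is not recoverable: the part is
    # reported as broken (flag 0), and repeated checks give the same result.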
    corrupt_part_data_on_disk(
        node1, "non_replicated_mt", "201902_1_1_0", database="default"
    )

    assert node1.query(
        "CHECK TABLE non_replicated_mt",
        settings={"check_query_single_value_result": 0, "max_threads": 1},
    ).strip().split("\t")[0:2] == ["201902_1_1_0", "0"]

    assert node1.query(
        "CHECK TABLE non_replicated_mt",
        settings={"check_query_single_value_result": 0, "max_threads": 1},
    ).strip().split("\t")[0:2] == ["201902_1_1_0", "0"]

    node1.query(
        "INSERT INTO non_replicated_mt VALUES (toDate('2019-01-01'), 1, 10), (toDate('2019-01-01'), 2, 12)"
    )
    assert (
        node1.query(
            "CHECK TABLE non_replicated_mt PARTITION 201901",
            settings={"check_query_single_value_result": 0, "max_threads": 1},
        )
        == "201901_2_2_0\t1\t\n"
    )

    corrupt_part_data_on_disk(
        node1, "non_replicated_mt", "201901_2_2_0", database="default"
    )

    remove_checksums_on_disk(node1, "default", "non_replicated_mt", "201901_2_2_0")

    assert node1.query(
        "CHECK TABLE non_replicated_mt PARTITION 201901",
        settings={"check_query_single_value_result": 0, "max_threads": 1},
    ).strip().split("\t")[0:2] == ["201901_2_2_0", "0"]
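

# CHECK TABLE on a healthy replicated table, from both replicas: the whole
# table, a single partition, and a single part addressed by name.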
@pytest.mark.parametrize("merge_tree_settings, zk_path_suffix", [("", "_0")])
def test_check_replicated_table_simple(
    started_cluster, merge_tree_settings, zk_path_suffix
):
    for node in [node1, node2]:
        node.query("DROP TABLE IF EXISTS replicated_mt SYNC")
        node.query(
            """
            CREATE TABLE replicated_mt(date Date, id UInt32, value Int32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt_{zk_path_suffix}', '{replica}')
            PARTITION BY toYYYYMM(date) ORDER BY id
            {merge_tree_settings}
            """.format(
                replica=node.name,
                zk_path_suffix=zk_path_suffix,
                merge_tree_settings=merge_tree_settings,
            )
        )

    node1.query(
        "INSERT INTO replicated_mt VALUES (toDate('2019-02-01'), 1, 10), (toDate('2019-02-01'), 2, 12)"
    )

    node2.query("SYSTEM SYNC REPLICA replicated_mt")
    assert node1.query("SELECT count() from replicated_mt") == "2\n"
    assert node2.query("SELECT count() from replicated_mt") == "2\n"
    assert (
        node1.query(
            "CHECK TABLE replicated_mt",
            settings={"check_query_single_value_result": 0, "max_threads": 1},
        )
        == "201902_0_0_0\t1\t\n"
    )
    assert (
        node2.query(
            "CHECK TABLE replicated_mt",
            settings={"check_query_single_value_result": 0, "max_threads": 1},
        )
        == "201902_0_0_0\t1\t\n"
    )

    node2.query(
        "INSERT INTO replicated_mt VALUES (toDate('2019-01-02'), 3, 10), (toDate('2019-01-02'), 4, 12)"
    )

    node1.query("SYSTEM SYNC REPLICA replicated_mt")
    assert node1.query("SELECT count() from replicated_mt") == "4\n"
    assert node2.query("SELECT count() from replicated_mt") == "4\n"
    assert (
        node1.query(
            "CHECK TABLE replicated_mt PARTITION 201901",
            settings={"check_query_single_value_result": 0, "max_threads": 1},
        )
        == "201901_0_0_0\t1\t\n"
    )
    assert (
        node2.query(
            "CHECK TABLE replicated_mt PARTITION 201901",
            settings={"check_query_single_value_result": 0, "max_threads": 1},
        )
        == "201901_0_0_0\t1\t\n"
    )
    assert sorted(
        node2.query(
            "CHECK TABLE replicated_mt",
            settings={"check_query_single_value_result": 0},
        ).split("\n")
    ) == ["", "201901_0_0_0\t1\t", "201902_0_0_0\t1\t"]

    with pytest.raises(QueryRuntimeException) as exc:
        node2.query(
            "CHECK TABLE replicated_mt PART '201801_0_0_0'",
            settings={"check_query_single_value_result": 0},
        )
    assert "NO_SUCH_DATA_PART" in str(exc.value)

    assert (
        node2.query(
            "CHECK TABLE replicated_mt PART '201902_0_0_0'",
            settings={"check_query_single_value_result": 0},
        )
        == "201902_0_0_0\t1\t\n"
    )
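

# A broken part on a replicated table is not fatal: CHECK TABLE reports it,
# removes it, and the replica re-fetches the part, so no rows are lost.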
2019-07-03 20:51:13 +00:00
2023-11-23 16:22:51 +00:00
@pytest.mark.parametrize(
"merge_tree_settings, zk_path_suffix, part_file_ext",
[
(
"",
"_0",
".bin",
)
],
)
def test_check_replicated_table_corruption(
started_cluster, merge_tree_settings, zk_path_suffix, part_file_ext
):
2021-01-27 18:54:05 +00:00
for node in [node1, node2]:
2023-11-23 16:22:51 +00:00
node.query_with_retry("DROP TABLE IF EXISTS replicated_mt_1 SYNC")
2019-07-03 20:51:13 +00:00
2021-03-16 10:00:49 +00:00
node.query_with_retry(
"""
2021-01-27 18:54:05 +00:00
CREATE TABLE replicated_mt_1(date Date, id UInt32, value Int32)
2023-11-23 16:22:51 +00:00
ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt_1_{zk_path_suffix}', '{replica}')
PARTITION BY toYYYYMM(date) ORDER BY id
{merge_tree_settings}
2021-01-27 18:54:05 +00:00
""".format(
2023-11-23 16:22:51 +00:00
replica=node.name,
merge_tree_settings=merge_tree_settings,
zk_path_suffix=zk_path_suffix,
2021-01-27 18:54:05 +00:00
)
)
2021-01-27 18:54:05 +00:00
node1.query(
"INSERT INTO replicated_mt_1 VALUES (toDate('2019-02-01'), 1, 10), (toDate('2019-02-01'), 2, 12)"
)
node1.query(
"INSERT INTO replicated_mt_1 VALUES (toDate('2019-01-02'), 3, 10), (toDate('2019-01-02'), 4, 12)"
)
2021-01-27 18:54:05 +00:00
node2.query("SYSTEM SYNC REPLICA replicated_mt_1")
assert node1.query("SELECT count() from replicated_mt_1") == "4\n"
assert node2.query("SELECT count() from replicated_mt_1") == "4\n"
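
    # Corrupt the data file of the active 201901 part on node1: CHECK TABLE
    # flags it as broken and schedules a fetch from the healthy replica.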
    part_name = node1.query_with_retry(
        "SELECT name from system.parts where table = 'replicated_mt_1' and partition_id = '201901' and active = 1"
    ).strip()

    corrupt_part_data_on_disk(
        node1, "replicated_mt_1", part_name, part_file_ext, database="default"
    )

    assert node1.query(
        "CHECK TABLE replicated_mt_1 PARTITION 201901",
        settings={"check_query_single_value_result": 0, "max_threads": 1},
    ) == "{p}\t0\tPart {p} looks broken. Removing it and will try to fetch.\n".format(
        p=part_name
    )

    node1.query_with_retry("SYSTEM SYNC REPLICA replicated_mt_1")
    assert node1.query(
        "CHECK TABLE replicated_mt_1 PARTITION 201901",
        settings={"check_query_single_value_result": 0, "max_threads": 1},
    ) == "{}\t1\t\n".format(part_name)
    assert node1.query("SELECT count() from replicated_mt_1") == "4\n"
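
    # Wiping the whole part directory on node2 behaves the same way.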
    remove_part_from_disk(node2, "replicated_mt_1", part_name)
    assert node2.query(
        "CHECK TABLE replicated_mt_1 PARTITION 201901",
        settings={"check_query_single_value_result": 0, "max_threads": 1},
    ) == "{p}\t0\tPart {p} looks broken. Removing it and will try to fetch.\n".format(
        p=part_name
    )

    node1.query("SYSTEM SYNC REPLICA replicated_mt_1")
    assert node1.query(
        "CHECK TABLE replicated_mt_1 PARTITION 201901",
        settings={"check_query_single_value_result": 0, "max_threads": 1},
    ) == "{}\t1\t\n".format(part_name)
    assert node1.query("SELECT count() from replicated_mt_1") == "4\n"
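

# CHECK ALL TABLES walks every database and table; engines like Memory and
# TinyLog are created too, since they should not be checked, and concurrent
# DROPs must not break the running query.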
def test_check_all_tables(started_cluster):
    def _create_table(database, table, engine=None):
        if engine is None:
            engine = "MergeTree() PARTITION BY toYYYYMM(date) ORDER BY id SETTINGS min_bytes_for_wide_part=0"
        node1.query(
            f"CREATE TABLE {database}.{table} (date Date, id UInt32, value Int32) ENGINE = {engine}"
        )
        node1.query(
            f"INSERT INTO {database}.{table} VALUES (toDate('2019-02-01'), 1, 10), (toDate('2019-02-01'), 2, 12)"
        )
        node1.query(f"SYSTEM STOP MERGES {database}.{table}")

    for database in ["db1", "db2", "db3"]:
        node1.query(f"CREATE DATABASE {database}")
        for table in ["table1", "table2"]:
            _create_table(database, table)

    remove_checksums_on_disk(node1, "db1", "table2", "201902_1_1_0")

    # These tables should not be checked
    _create_table("db1", "table_memory", "Memory()")
    _create_table("db1", "table_log", "TinyLog()")
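
    # Slow the check down via a failpoint so that the concurrent DROP TABLE /
    # DROP DATABASE land while CHECK ALL TABLES is still running.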
    node1.query("SYSTEM ENABLE FAILPOINT check_table_query_delay_for_part")
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        futures = []
        check_future = executor.submit(
            lambda: node1.query(
                "CHECK ALL TABLES",
                settings={"check_query_single_value_result": 0, "max_threads": 16},
            )
        )
        futures.append(executor.submit(lambda: node1.query("DROP TABLE db3.table2")))
        futures.append(executor.submit(lambda: node1.query("DROP DATABASE db2")))
        for future in concurrent.futures.as_completed(futures):
            future.result()
        check_result = check_future.result()

    # Do not check databases created in other tests
    check_result = [
        res.split("\t")
        for res in check_result.split("\n")
        if res and not res.startswith("default")
    ]
    checked_tables = {f"{res[0]}.{res[1]}" for res in check_result}
    assert all(
        table in checked_tables for table in ["db1.table1", "db1.table2", "db3.table1"]
    ), checked_tables

    for check_result_row in check_result:
        assert len(check_result_row) == 5, check_result_row
        db, table, part, flag, message = check_result_row
        if db == "db1" and table == "table2" and part == "201902_1_1_0":
            assert flag == "1"
            assert message == "Checksums recounted and written to disk."
        else:
            assert flag == "1"
            assert message == ""