diff --git a/tests/integration/test_keeper_broken_logs/test.py b/tests/integration/test_keeper_broken_logs/test.py
index f75e2ae4f20..75792a5f155 100644
--- a/tests/integration/test_keeper_broken_logs/test.py
+++ b/tests/integration/test_keeper_broken_logs/test.py
@@ -5,6 +5,8 @@ import pytest
 import helpers.keeper_utils as keeper_utils
 from helpers.cluster import ClickHouseCluster
 
+from multiprocessing.dummy import Pool
+
 cluster = ClickHouseCluster(__file__)
 node1 = cluster.add_instance(
     "node1",
@@ -52,15 +54,34 @@ def get_fake_zk(nodename, timeout=30.0):
     return _fake_zk_instance
 
 
+def start_clickhouse(node):
+    node.start_clickhouse()
+
+
+def clean_start():
+    nodes = [node1, node2, node3]
+    for node in nodes:
+        node.stop_clickhouse()
+
+    p = Pool(3)
+    waiters = []
+    for node in nodes:
+        node.exec_in_container(["rm", "-rf", "/var/lib/clickhouse/coordination/log"])
+        node.exec_in_container(
+            ["rm", "-rf", "/var/lib/clickhouse/coordination/snapshots"]
+        )
+        waiters.append(p.apply_async(start_clickhouse, (node,)))
+
+    for waiter in waiters:
+        waiter.wait()
+
+
 def test_single_node_broken_log(started_cluster):
+    clean_start()
     try:
         wait_nodes()
         node1_conn = get_fake_zk("node1")
 
-        # Cleanup
-        if node1_conn.exists("/test_broken_log") != None:
-            node1_conn.delete("/test_broken_log")
-
         node1_conn.create("/test_broken_log")
         for _ in range(10):
             node1_conn.create(f"/test_broken_log/node", b"somedata1", sequence=True)
@@ -110,10 +131,12 @@ def test_single_node_broken_log(started_cluster):
         verify_nodes(node3_conn)
         assert node3_conn.get("/test_broken_log_final_node")[0] == b"somedata1"
 
-        assert (
+        node1_logs = (
             node1.exec_in_container(["ls", "/var/lib/clickhouse/coordination/log"])
-            == "changelog_1_100000.bin\nchangelog_14_100013.bin\n"
+            .strip()
+            .split("\n")
         )
+        assert len(node1_logs) == 2 and node1_logs[0] == "changelog_1_100000.bin"
         assert (
             node2.exec_in_container(["ls", "/var/lib/clickhouse/coordination/log"])
             == "changelog_1_100000.bin\n"
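
Note: below is a minimal standalone sketch of the parallel-restart pattern that clean_start() in this patch relies on, assuming only the Python standard library. multiprocessing.dummy.Pool is a thread-backed pool with the same API as multiprocessing.Pool, so apply_async() lets the slow per-node start calls overlap; start_node and its sleep are hypothetical stand-ins for node.start_clickhouse(), which blocks until the instance is up.

import time
from multiprocessing.dummy import Pool  # thread pool exposing the Pool API


def start_node(name):
    # Hypothetical stand-in for node.start_clickhouse(); blocks until
    # the (pretend) node has started.
    time.sleep(1)
    return name


nodes = ["node1", "node2", "node3"]
pool = Pool(3)  # one worker thread per node
waiters = [pool.apply_async(start_node, (n,)) for n in nodes]
for waiter in waiters:
    waiter.wait()  # block until that node's start call has returned

The three start calls run concurrently, so total wall time is roughly one second instead of three; in the test this keeps the clean restart of all Keeper nodes from tripling the setup time.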