ClickHouse/tests/integration/test_keeper_reconfig_add/test.py

#!/usr/bin/env python3
import os
import typing as tp
import pytest
import helpers.keeper_utils as ku
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
cluster = ClickHouseCluster(__file__)
CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs")
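
# node1 boots as a single-node Keeper ensemble (configs/keeper1.xml); node2 and node3
# get keeper2.xml/keeper3.xml copied in by the fixture and are added via reconfig.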
node1 = cluster.add_instance("node1", main_configs=["configs/keeper1.xml"])
node2 = cluster.add_instance("node2", stay_alive=True)
node3 = cluster.add_instance("node3", stay_alive=True)
server_join_msg = "confirms it will join"
part_of_cluster = "now this node is the part of cluster"
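# Log fragments the test greps for to confirm each new server's join handshake.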
zk1, zk2, zk3 = None, None, None


@pytest.fixture(scope="module", autouse=True)
def started_cluster():
    try:
        cluster.start()

        node2.stop_clickhouse()
        node2.copy_file_to_container(
            os.path.join(CONFIG_DIR, "keeper2.xml"),
            "/etc/clickhouse-server/config.d/keeper.xml",
        )

        node3.stop_clickhouse()
        node3.copy_file_to_container(
            os.path.join(CONFIG_DIR, "keeper3.xml"),
            "/etc/clickhouse-server/config.d/keeper.xml",
        )

        yield cluster

    finally:
        conn: tp.Optional[ku.KeeperClient]
        for conn in [zk1, zk2, zk3]:
            if conn is not None:
                conn.stop()

        cluster.shutdown()
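

# A keeper-client session (launched from the server binary) to the node's Keeper port 9181.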
def create_client(node: ClickHouseInstance):
    return ku.KeeperClient(
        cluster.server_bin_path, cluster.get_instance_ip(node.name), 9181
    )


def test_reconfig_add():
    """
    Add a second node to a single-node ensemble, then add a third node to the
    resulting two-node ensemble.
    """
    global zk1, zk2, zk3

    zk1 = create_client(node1)
    config = zk1.get("/keeper/config")
    print("Initial config", config)

    assert len(config.split("\n")) == 1
    assert "node1" in config
    assert "node2" not in config
    assert "node3" not in config
    with pytest.raises(ku.KeeperException):
        # duplicate id with different endpoint
        zk1.reconfig(joining="server.1=localhost:1337", leaving=None, new_members=None)

    with pytest.raises(ku.KeeperException):
        # duplicate endpoint
        zk1.reconfig(joining="server.8=node1:9234", leaving=None, new_members=None)

    for i in range(100):
        zk1.create(f"/test_three_{i}", "somedata")
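
    # Everything written before the reconfig should be replicated to node2 once it joins.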
    node2.start_clickhouse()
    config = zk1.reconfig(
        joining="server.2=node2:9234", leaving=None, new_members=None
    )
    ku.wait_until_connected(cluster, node2)

    print("After adding 2", config)
    assert len(config.split("\n")) == 2
    assert "node1" in config
    assert "node2" in config
    assert "node3" not in config

    zk2 = create_client(node2)
    ku.wait_configs_equal(config, zk2)
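
    # node2 must have replicated the pre-existing data and must accept new writes.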
    for i in range(100):
        assert zk2.exists(f"/test_three_{i}")
        zk2.create(f"/test_three_{100 + i}", "somedata")

    # Why not both?
    # One node processes the add_srv request; the other pulls the updated config,
    # applies it and returns true in the config update thread (without calling
    # add_srv again).
    assert node1.contains_in_log(server_join_msg) or node2.contains_in_log(
        server_join_msg
    )
    assert node2.contains_in_log(part_of_cluster)
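
    # Reconnect to node1 and sync() so subsequent reads there see the writes made
    # through node2.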
    zk1.stop()
    zk1 = create_client(node1)
    zk1.sync("/test_three_0")

    for i in range(200):
        assert zk1.exists(f"/test_three_{i}")
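
    # More data written through node2 before node3 joins; it should be replicated
    # to node3 as well.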
    for i in range(100):
        zk2.create(f"/test_four_{i}", "somedata")

    node3.start_clickhouse()
    config = zk2.reconfig(
        joining="server.3=node3:9234", leaving=None, new_members=None
    )
    ku.wait_until_connected(cluster, node3)

    print("After adding 3", config)
    assert len(config.split("\n")) == 3
    assert "node1" in config
    assert "node2" in config
    assert "node3" in config

    zk3 = create_client(node3)
    ku.wait_configs_equal(config, zk3)
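
    # node3 must likewise have replicated the pre-existing data and accept new writes.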
    for i in range(100):
        assert zk3.exists(f"/test_four_{i}")
        zk3.create(f"/test_four_{100 + i}", "somedata")

    zk1.stop()
    zk1 = create_client(node1)
    zk1.sync("/test_four_0")

    zk2.stop()
    zk2 = create_client(node2)
    zk2.sync("/test_four_0")

    for i in range(200):
        assert zk1.exists(f"/test_four_{i}")
        assert zk2.exists(f"/test_four_{i}")

    assert node3.contains_in_log(part_of_cluster)