diff --git a/src/Interpreters/ClusterDiscovery.cpp b/src/Interpreters/ClusterDiscovery.cpp
index 6f9c375c2f5..ab2ec886e76 100644
--- a/src/Interpreters/ClusterDiscovery.cpp
+++ b/src/Interpreters/ClusterDiscovery.cpp
@@ -129,9 +129,11 @@ ClusterDiscovery::ClusterDiscovery(
         if (!config.has(cluster_config_prefix))
             continue;
 
-        String zk_root = config.getString(cluster_config_prefix + ".path");
-        if (zk_root.empty())
+        String zk_name_and_root = config.getString(cluster_config_prefix + ".path");
+        if (zk_name_and_root.empty())
             throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG, "ZooKeeper path for cluster '{}' is empty", key);
+        String zk_root = zkutil::extractZooKeeperPath(zk_name_and_root, true);
+        String zk_name = zkutil::extractZooKeeperName(zk_name_and_root);
 
         const auto & password = config.getString(cluster_config_prefix + ".password", "");
         const auto & cluster_secret = config.getString(cluster_config_prefix + ".secret", "");
@@ -142,6 +144,7 @@ ClusterDiscovery::ClusterDiscovery(
             key,
             ClusterInfo(
                 /* name_= */ key,
+                /* zk_name_= */ zk_name,
                 /* zk_root_= */ zk_root,
                 /* host_name= */ config.getString(cluster_config_prefix + ".my_hostname", getFQDNOrHostName()),
                 /* username= */ config.getString(cluster_config_prefix + ".user", context->getUserName()),
@@ -288,7 +291,7 @@ bool ClusterDiscovery::updateCluster(ClusterInfo & cluster_info)
 {
     LOG_DEBUG(log, "Updating cluster '{}'", cluster_info.name);
 
-    auto zk = context->getZooKeeper();
+    auto zk = context->getDefaultOrAuxiliaryZooKeeper(cluster_info.zk_name);
 
     int start_version;
     Strings node_uuids = getNodeNames(zk, cluster_info.zk_root, cluster_info.name, &start_version, false);
@@ -381,9 +384,9 @@ void ClusterDiscovery::initialUpdate()
         throw Exception(ErrorCodes::KEEPER_EXCEPTION, "Failpoint cluster_discovery_faults is triggered");
     });
 
-    auto zk = context->getZooKeeper();
     for (auto & [_, info] : clusters_info)
     {
+        auto zk = context->getDefaultOrAuxiliaryZooKeeper(info.zk_name);
         registerInZk(zk, info);
         if (!updateCluster(info))
         {
diff --git a/src/Interpreters/ClusterDiscovery.h b/src/Interpreters/ClusterDiscovery.h
index 756ed3d8d9b..b253473ce3e 100644
--- a/src/Interpreters/ClusterDiscovery.h
+++ b/src/Interpreters/ClusterDiscovery.h
@@ -67,6 +67,7 @@ private:
     struct ClusterInfo
     {
         const String name;
+        const String zk_name;
         const String zk_root;
         NodesInfo nodes_info;
 
@@ -88,6 +89,7 @@ private:
         String cluster_secret;
 
         ClusterInfo(const String & name_,
+                    const String & zk_name_,
                     const String & zk_root_,
                     const String & host_name,
                     const String & username_,
@@ -99,6 +101,7 @@ private:
                     bool observer_mode,
                     bool invisible)
             : name(name_)
+            , zk_name(zk_name_)
             , zk_root(zk_root_)
             , current_node(host_name + ":" + toString(port), secure, shard_id)
             , current_node_is_observer(observer_mode)
diff --git a/tests/integration/test_cluster_discovery/config/config.xml b/tests/integration/test_cluster_discovery/config/config.xml
index a63ca3e5438..ed2d3b27259 100644
--- a/tests/integration/test_cluster_discovery/config/config.xml
+++ b/tests/integration/test_cluster_discovery/config/config.xml
@@ -1,11 +1,6 @@
 <clickhouse>
     <allow_experimental_cluster_discovery>1</allow_experimental_cluster_discovery>
 
     <remote_servers>
-        <test_auto_cluster>
-            <discovery>
-                <path>/clickhouse/discovery/test_auto_cluster</path>
-            </discovery>
-        </test_auto_cluster>
     </remote_servers>
 </clickhouse>
diff --git a/tests/integration/test_cluster_discovery/config/config_discovery_path.xml b/tests/integration/test_cluster_discovery/config/config_discovery_path.xml
new file mode 100644
index 00000000000..8eaecb813ab
--- /dev/null
+++ b/tests/integration/test_cluster_discovery/config/config_discovery_path.xml
@@ -0,0 +1,9 @@
+<clickhouse>
+    <remote_servers>
+        <test_auto_cluster>
+            <discovery>
+                <path>/clickhouse/discovery/test_auto_cluster</path>
+            </discovery>
+        </test_auto_cluster>
+    </remote_servers>
+</clickhouse>
diff --git a/tests/integration/test_cluster_discovery/config/config_discovery_path_auxiliary_keeper.xml b/tests/integration/test_cluster_discovery/config/config_discovery_path_auxiliary_keeper.xml
new file mode 100644
index 00000000000..3e3835dfcab
--- /dev/null
+++ b/tests/integration/test_cluster_discovery/config/config_discovery_path_auxiliary_keeper.xml
@@ -0,0 +1,9 @@
+<clickhouse>
+    <remote_servers>
+        <test_auto_cluster>
+            <discovery>
+                <path>zookeeper2:/clickhouse/discovery/test_auto_cluster</path>
+            </discovery>
+        </test_auto_cluster>
+    </remote_servers>
+</clickhouse>
diff --git a/tests/integration/test_cluster_discovery/config/config_keepers.xml b/tests/integration/test_cluster_discovery/config/config_keepers.xml
new file mode 100644
index 00000000000..4e976578223
--- /dev/null
+++ b/tests/integration/test_cluster_discovery/config/config_keepers.xml
@@ -0,0 +1,24 @@
+<clickhouse>
+    <zookeeper>
+        <node>
+            <host>zoo1</host>
+            <port>2181</port>
+        </node>
+        <node>
+            <host>zoo2</host>
+            <port>2181</port>
+        </node>
+        <node>
+            <host>zoo3</host>
+            <port>2181</port>
+        </node>
+    </zookeeper>
+    <auxiliary_zookeepers>
+        <zookeeper2>
+            <node>
+                <host>zoo1</host>
+                <port>2181</port>
+            </node>
+        </zookeeper2>
+    </auxiliary_zookeepers>
+</clickhouse>
diff --git a/tests/integration/test_cluster_discovery/config/config_observer.xml b/tests/integration/test_cluster_discovery/config/config_observer.xml
index 84d9539169e..c100c9f5348 100644
--- a/tests/integration/test_cluster_discovery/config/config_observer.xml
+++ b/tests/integration/test_cluster_discovery/config/config_observer.xml
@@ -3,7 +3,6 @@
     <remote_servers>
         <test_auto_cluster>
            <discovery>
-                <path>/clickhouse/discovery/test_auto_cluster</path>
                <observer/>
            </discovery>
        </test_auto_cluster>
diff --git a/tests/integration/test_cluster_discovery/config/config_shard1.xml b/tests/integration/test_cluster_discovery/config/config_shard1.xml
index 06a77a37263..89590bed607 100644
--- a/tests/integration/test_cluster_discovery/config/config_shard1.xml
+++ b/tests/integration/test_cluster_discovery/config/config_shard1.xml
@@ -3,7 +3,6 @@
     <remote_servers>
         <test_auto_cluster>
            <discovery>
-                <path>/clickhouse/discovery/test_auto_cluster</path>
                <shard>1</shard>
            </discovery>
        </test_auto_cluster>
diff --git a/tests/integration/test_cluster_discovery/config/config_shard3.xml b/tests/integration/test_cluster_discovery/config/config_shard3.xml
index 2c706f7c268..9badd0a6132 100644
--- a/tests/integration/test_cluster_discovery/config/config_shard3.xml
+++ b/tests/integration/test_cluster_discovery/config/config_shard3.xml
@@ -3,7 +3,6 @@
     <remote_servers>
         <test_auto_cluster>
            <discovery>
-                <path>/clickhouse/discovery/test_auto_cluster</path>
                <shard>3</shard>
            </discovery>
        </test_auto_cluster>
diff --git a/tests/integration/test_cluster_discovery/config/macros0.xml b/tests/integration/test_cluster_discovery/config/macros0.xml
new file mode 100644
index 00000000000..a4cabff3005
--- /dev/null
+++ b/tests/integration/test_cluster_discovery/config/macros0.xml
@@ -0,0 +1,6 @@
+<clickhouse>
+    <macros>
+        <shard>shard0</shard>
+        <replica>replica0</replica>
+    </macros>
+</clickhouse>
diff --git a/tests/integration/test_cluster_discovery/config/macros1.xml b/tests/integration/test_cluster_discovery/config/macros1.xml
new file mode 100644
index 00000000000..bae1ce11925
--- /dev/null
+++ b/tests/integration/test_cluster_discovery/config/macros1.xml
@@ -0,0 +1,6 @@
+<clickhouse>
+    <macros>
+        <shard>shard1</shard>
+        <replica>replica1</replica>
+    </macros>
+</clickhouse>
diff --git a/tests/integration/test_cluster_discovery/config/macros2.xml b/tests/integration/test_cluster_discovery/config/macros2.xml
new file mode 100644
index 00000000000..989555d8225
--- /dev/null
+++ b/tests/integration/test_cluster_discovery/config/macros2.xml
@@ -0,0 +1,6 @@
+<clickhouse>
+    <macros>
+        <shard>shard2</shard>
+        <replica>replica2</replica>
+    </macros>
+</clickhouse>
diff --git a/tests/integration/test_cluster_discovery/config/macros3.xml b/tests/integration/test_cluster_discovery/config/macros3.xml
new file mode 100644
index 00000000000..e0d5ea23696
--- /dev/null
+++ b/tests/integration/test_cluster_discovery/config/macros3.xml
@@ -0,0 +1,6 @@
+<clickhouse>
+    <macros>
+        <shard>shard3</shard>
+        <replica>replica3</replica>
+    </macros>
+</clickhouse>
diff --git a/tests/integration/test_cluster_discovery/config/macros4.xml b/tests/integration/test_cluster_discovery/config/macros4.xml
new file mode 100644
index 00000000000..38b4c43d1b9
--- /dev/null
+++ b/tests/integration/test_cluster_discovery/config/macros4.xml
@@ -0,0 +1,6 @@
+<clickhouse>
+    <macros>
+        <shard>shard4</shard>
+        <replica>replica4</replica>
+    </macros>
+</clickhouse>
diff --git a/tests/integration/test_cluster_discovery/config/macros_o.xml b/tests/integration/test_cluster_discovery/config/macros_o.xml
new file mode 100644
index 00000000000..4ac6241b0a6
--- /dev/null
+++ b/tests/integration/test_cluster_discovery/config/macros_o.xml
@@ -0,0 +1,6 @@
+<clickhouse>
+    <macros>
+        <shard>shard_o</shard>
+        <replica>replica_o</replica>
+    </macros>
+</clickhouse>
diff --git a/tests/integration/test_cluster_discovery/test.py b/tests/integration/test_cluster_discovery/test.py
index b47c8d06269..7eb1000b23a 100644
--- a/tests/integration/test_cluster_discovery/test.py
+++ b/tests/integration/test_cluster_discovery/test.py
@@ -20,7 +20,7 @@ shard_configs = {
 nodes = {
     node_name: cluster.add_instance(
         node_name,
-        main_configs=[shard_config],
+        main_configs=[shard_config, "config/config_discovery_path.xml"],
         stay_alive=True,
         with_zookeeper=True,
     )
@@ -119,3 +119,6 @@ def test_cluster_discovery_startup_and_stop(start_cluster):
     check_nodes_count(
         [nodes["node1"], nodes["node2"]], 2, cluster_name="two_shards", retries=1
     )
+
+    # cleanup
+    nodes["node0"].query("DROP TABLE tbl ON CLUSTER 'test_auto_cluster' SYNC")
diff --git a/tests/integration/test_cluster_discovery/test_auxiliary_keeper.py b/tests/integration/test_cluster_discovery/test_auxiliary_keeper.py
new file mode 100644
index 00000000000..d35a22ab09a
--- /dev/null
+++ b/tests/integration/test_cluster_discovery/test_auxiliary_keeper.py
@@ -0,0 +1,127 @@
+import functools
+
+import pytest
+
+from helpers.cluster import ClickHouseCluster
+
+from .common import check_on_cluster
+
+cluster = ClickHouseCluster(__file__)
+
+shard_configs = {
+    "node0": ["config/config.xml", "config/macros0.xml"],
+    "node1": ["config/config_shard1.xml", "config/macros1.xml"],
+    "node2": ["config/config.xml", "config/macros2.xml"],
+    "node3": ["config/config_shard3.xml", "config/macros3.xml"],
+    "node4": ["config/config.xml", "config/macros4.xml"],
+    "node_observer": ["config/config_observer.xml", "config/macros_o.xml"],
+}
+
+nodes = {
+    node_name: cluster.add_instance(
+        node_name,
+        main_configs=shard_config
+        + [
+            "config/config_discovery_path_auxiliary_keeper.xml",
+            "config/config_keepers.xml",
+        ],
+        stay_alive=True,
+        with_zookeeper=True,
+    )
+    for node_name, shard_config in shard_configs.items()
+}
+
+
+@pytest.fixture(scope="module")
+def start_cluster():
+    try:
+        cluster.start()
+        yield cluster
+    finally:
+        cluster.shutdown()
+
+
+def test_cluster_discovery_with_auxiliary_keeper_startup_and_stop(start_cluster):
+    """
+    Start cluster, check nodes count in system.clusters,
+    then stop/start some nodes and check that it (dis)appeared in cluster.
+    """
+
+    check_nodes_count = functools.partial(
+        check_on_cluster, what="count()", msg="Wrong nodes count in cluster"
+    )
+    check_shard_num = functools.partial(
+        check_on_cluster,
+        what="count(DISTINCT shard_num)",
+        msg="Wrong shard_num count in cluster",
+    )
+
+    total_shards = 3
+    total_nodes = 5
+
+    check_nodes_count(
+        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_nodes
+    )
+    check_shard_num(
+        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_shards
+    )
+
+    # test ON CLUSTER query
+    nodes["node0"].query(
+        "CREATE TABLE tbl ON CLUSTER 'test_auto_cluster' (x UInt64) ENGINE = ReplicatedMergeTree('zookeeper2:/clickhouse/{shard}/tbl', '{replica}') ORDER BY x"
+    )
+    nodes["node0"].query("INSERT INTO tbl VALUES (1)")
+    nodes["node1"].query("INSERT INTO tbl VALUES (2)")
+
+    assert (
+        int(
+            nodes["node_observer"]
+            .query(
+                "SELECT sum(x) FROM clusterAllReplicas(test_auto_cluster, default.tbl)"
+            )
+            .strip()
+        )
+        == 3
+    )
+
+    # Query SYSTEM DROP DNS CACHE may reload cluster configuration
+    # check that it does not affect cluster discovery
+    nodes["node1"].query("SYSTEM DROP DNS CACHE")
+    nodes["node0"].query("SYSTEM DROP DNS CACHE")
+
+    check_shard_num(
+        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_shards
+    )
+
+    nodes["node1"].stop_clickhouse(kill=True)
+    check_nodes_count(
+        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_nodes - 1
+    )
+
+    # node1 was the only node in shard '1'
+    check_shard_num(
+        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_shards - 1
+    )
+
+    nodes["node3"].stop_clickhouse()
+    check_nodes_count(
+        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_nodes - 2
+    )
+
+    nodes["node1"].start_clickhouse()
+    check_nodes_count(
+        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_nodes - 1
+    )
+
+    nodes["node3"].start_clickhouse()
+    check_nodes_count(
+        [nodes["node0"], nodes["node2"], nodes["node_observer"]], total_nodes
+    )
+
+    # regular cluster is not affected
+    check_nodes_count(
+        [nodes["node1"], nodes["node2"]], 2, cluster_name="two_shards", retries=1
+    )
+
+    # cleanup
+    nodes["node0"].query("DROP TABLE tbl ON CLUSTER 'test_auto_cluster' SYNC")