diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp
index 1039fac6883..ff50b5c1465 100644
--- a/src/Interpreters/Cluster.cpp
+++ b/src/Interpreters/Cluster.cpp
@@ -286,8 +286,9 @@ Cluster::Address Cluster::Address::fromFullString(const String & full_string)
 
 /// Implementation of Clusters class
 
-Clusters::Clusters(const Poco::Util::AbstractConfiguration & config, const Settings & settings, const String & config_prefix)
+Clusters::Clusters(const Poco::Util::AbstractConfiguration & config, const Settings & settings, MultiVersion<Macros>::Version macros, const String & config_prefix)
 {
+    this->macros_ = macros;
     updateClusters(config, settings, config_prefix);
 }
 
@@ -296,7 +297,8 @@ ClusterPtr Clusters::getCluster(const std::string & cluster_name) const
 {
     std::lock_guard lock(mutex);
 
-    auto it = impl.find(cluster_name);
+    auto expanded_cluster_name = macros_->expand(cluster_name);
+    auto it = impl.find(expanded_cluster_name);
     return (it != impl.end()) ? it->second : nullptr;
 }
 
diff --git a/src/Interpreters/Cluster.h b/src/Interpreters/Cluster.h
index 13f19f7c0ed..543d93270f7 100644
--- a/src/Interpreters/Cluster.h
+++ b/src/Interpreters/Cluster.h
@@ -2,6 +2,8 @@
 
 #include <Client/ConnectionPool.h>
 #include <Client/ConnectionPoolWithFailover.h>
+#include <Common/Macros.h>
+#include <Common/MultiVersion.h>
 
 #include <Poco/Net/SocketAddress.h>
 
@@ -287,7 +289,7 @@ using ClusterPtr = std::shared_ptr<Cluster>;
 class Clusters
 {
 public:
-    Clusters(const Poco::Util::AbstractConfiguration & config, const Settings & settings, const String & config_prefix = "remote_servers");
+    Clusters(const Poco::Util::AbstractConfiguration & config, const Settings & settings, MultiVersion<Macros>::Version macros, const String & config_prefix = "remote_servers");
 
     Clusters(const Clusters &) = delete;
     Clusters & operator=(const Clusters &) = delete;
@@ -306,6 +308,8 @@ protected:
     /// setup outside of this class, stored to prevent deleting from impl on config update
    std::unordered_set<std::string> automatic_clusters;
 
+    MultiVersion<Macros>::Version macros_;
+
     Impl impl;
     mutable std::mutex mutex;
 };
diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp
index f41d4662e99..74a123371d6 100644
--- a/src/Interpreters/Context.cpp
+++ b/src/Interpreters/Context.cpp
@@ -2265,7 +2265,7 @@ void Context::reloadClusterConfig() const
         }
 
         const auto & config = cluster_config ? *cluster_config : getConfigRef();
-        auto new_clusters = std::make_shared<Clusters>(config, settings);
+        auto new_clusters = std::make_shared<Clusters>(config, settings, getMacros());
 
         {
             std::lock_guard lock(shared->clusters_mutex);
@@ -2287,7 +2287,7 @@ std::shared_ptr<Clusters> Context::getClusters() const
     if (!shared->clusters)
    {
         const auto & config = shared->clusters_config ? *shared->clusters_config : getConfigRef();
-        shared->clusters = std::make_shared<Clusters>(config, settings);
+        shared->clusters = std::make_shared<Clusters>(config, settings, getMacros());
     }
 
     return shared->clusters;
@@ -2318,7 +2318,7 @@ void Context::setClustersConfig(const ConfigurationPtr & config, bool enable_dis
     shared->clusters_config = config;
 
     if (!shared->clusters)
-        shared->clusters = std::make_shared<Clusters>(*shared->clusters_config, settings, config_name);
+        shared->clusters = std::make_shared<Clusters>(*shared->clusters_config, settings, getMacros(), config_name);
     else
         shared->clusters->updateClusters(*shared->clusters_config, settings, config_name, old_clusters_config);
 }
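The net effect of the three files above: macro expansion moves into Clusters::getCluster itself. Clusters now stores the MultiVersion<Macros>::Version handed to it by the Context, and every lookup runs the requested name through Macros::expand before consulting the map, so a name such as {default_cluster_macro} resolves to whatever the server's <macros> section defines. A minimal sketch of that substitute-then-look-up flow, written in Python purely as an illustration (this is not the real Macros class; it ignores escaping and ClickHouse's built-in macros):

    # Toy model of the lookup Clusters::getCluster now performs: expand
    # {name} placeholders from the server's <macros> section, then look the
    # expanded string up in the cluster map.
    def expand_macros(name, macros):
        out, i = [], 0
        while i < len(name):
            if name[i] == "{":
                end = name.index("}", i)  # unbalanced braces would be a config error
                out.append(macros[name[i + 1 : end]])
                i = end + 1
            else:
                out.append(name[i])
                i += 1
        return "".join(out)

    def get_cluster(cluster_name, clusters, macros):
        return clusters.get(expand_macros(cluster_name, macros))

    # With <default_cluster_macro>cluster_simple</default_cluster_macro> configured,
    # both spellings resolve to the same cluster entry:
    clusters = {"cluster_simple": object()}
    macros = {"default_cluster_macro": "cluster_simple"}
    assert get_cluster("{default_cluster_macro}", clusters, macros) is get_cluster(
        "cluster_simple", clusters, macros
    )

Keeping the macros handle inside Clusters, rather than expanding at each call site, means every consumer of getCluster gets the same behaviour, not just the remote family of table functions.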
diff --git a/src/TableFunctions/TableFunctionRemote.cpp b/src/TableFunctions/TableFunctionRemote.cpp
index 90fbb079bb6..c97961fdb72 100644
--- a/src/TableFunctions/TableFunctionRemote.cpp
+++ b/src/TableFunctions/TableFunctionRemote.cpp
@@ -201,11 +201,10 @@ void TableFunctionRemote::parseArguments(const ASTPtr & ast_function, ContextPtr
     if (!cluster_name.empty())
     {
         /// Use an existing cluster from the main config
-        String cluster_name_expanded = context->getMacros()->expand(cluster_name);
         if (name != "clusterAllReplicas")
-            cluster = context->getCluster(cluster_name_expanded);
+            cluster = context->getCluster(cluster_name);
         else
-            cluster = context->getCluster(cluster_name_expanded)->getClusterWithReplicasAsShards(context->getSettingsRef());
+            cluster = context->getCluster(cluster_name)->getClusterWithReplicasAsShards(context->getSettingsRef());
     }
     else
     {
diff --git a/tests/integration/test_s3_cluster/configs/cluster.xml b/tests/integration/test_s3_cluster/configs/cluster.xml
index 404a15b1273..18f15763633 100644
--- a/tests/integration/test_s3_cluster/configs/cluster.xml
+++ b/tests/integration/test_s3_cluster/configs/cluster.xml
@@ -21,4 +21,7 @@
             </shard>
         </cluster_simple>
     </remote_servers>
+    <macros>
+        <default_cluster_macro>cluster_simple</default_cluster_macro>
+    </macros>
 </clickhouse>
\ No newline at end of file
diff --git a/tests/integration/test_s3_cluster/test.py b/tests/integration/test_s3_cluster/test.py
index 93708acd49c..2cbb36fcf06 100644
--- a/tests/integration/test_s3_cluster/test.py
+++ b/tests/integration/test_s3_cluster/test.py
@@ -95,6 +95,29 @@ def test_count(started_cluster):
     assert TSV(pure_s3) == TSV(s3_distibuted)
 
 
+def test_count_macro(started_cluster):
+    node = started_cluster.instances["s0_0_0"]
+
+    s3_macro = node.query(
+        """
+    SELECT count(*) from s3Cluster(
+        '{default_cluster_macro}', 'http://minio1:9001/root/data/{clickhouse,database}/*',
+        'minio', 'minio123', 'CSV',
+        'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))')"""
+    )
+    # print(s3_distibuted)
+    s3_distibuted = node.query(
+        """
+    SELECT count(*) from s3Cluster(
+        'cluster_simple', 'http://minio1:9001/root/data/{clickhouse,database}/*',
+        'minio', 'minio123', 'CSV',
+        'name String, value UInt32, polygon Array(Array(Tuple(Float64, Float64)))')"""
+    )
+    # print(s3_distibuted)
+
+    assert TSV(s3_macro) == TSV(s3_distibuted)
+
+
 def test_union_all(started_cluster):
     node = started_cluster.instances["s0_0_0"]
     pure_s3 = node.query(
diff --git a/tests/integration/test_storage_hdfs/configs/macro.xml b/tests/integration/test_storage_hdfs/configs/macro.xml
new file mode 100644
index 00000000000..c2e11b47a5e
--- /dev/null
+++ b/tests/integration/test_storage_hdfs/configs/macro.xml
@@ -0,0 +1,5 @@
+<clickhouse>
+    <macros>
+        <default_cluster_macro>test_cluster_two_shards</default_cluster_macro>
+    </macros>
+</clickhouse>
\ No newline at end of file
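Because expansion now happens inside Clusters::getCluster, TableFunctionRemote can drop its own getMacros()->expand call without changing behaviour, and cluster functions that never expanded macros themselves (s3Cluster above, hdfsCluster below) pick it up automatically. A hypothetical companion check in the same pytest style, not part of this diff, could confirm that the generic cluster() table function resolves the macro to the same cluster_simple cluster; the node name s0_0_0 and the macro name default_cluster_macro are taken from the test config above:

    # Hypothetical companion test (not in the diff): the plain cluster()
    # table function should see the same substitution, since it also goes
    # through Clusters::getCluster.
    def test_cluster_function_macro(started_cluster):
        node = started_cluster.instances["s0_0_0"]

        with_macro = node.query(
            "SELECT count() FROM cluster('{default_cluster_macro}', system.one)"
        )
        without_macro = node.query(
            "SELECT count() FROM cluster('cluster_simple', system.one)"
        )

        assert TSV(with_macro) == TSV(without_macro)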
diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py
index 2946a9ce5cc..14f69a1b88c 100644
--- a/tests/integration/test_storage_hdfs/test.py
+++ b/tests/integration/test_storage_hdfs/test.py
@@ -2,10 +2,13 @@ import os
 
 import pytest
 from helpers.cluster import ClickHouseCluster
+from helpers.test_tools import TSV
 from pyhdfs import HdfsClient
 
 cluster = ClickHouseCluster(__file__)
-node1 = cluster.add_instance("node1", with_hdfs=True)
+node1 = cluster.add_instance(
+    "node1", main_configs=["configs/macro.xml"], with_hdfs=True
+)
 
 
 @pytest.fixture(scope="module")
@@ -565,6 +568,22 @@ def test_cluster_join(started_cluster):
     assert "AMBIGUOUS_COLUMN_NAME" not in result
 
 
+def test_cluster_macro(started_cluster):
+    with_macro = node1.query(
+        """
+    SELECT id FROM hdfsCluster('{default_cluster_macro}', 'hdfs://hdfs1:9000/test_hdfsCluster/file*', 'TSV', 'id UInt32')
+    """
+    )
+
+    no_macro = node1.query(
+        """
+    SELECT id FROM hdfsCluster('test_cluster_two_shards', 'hdfs://hdfs1:9000/test_hdfsCluster/file*', 'TSV', 'id UInt32')
+    """
+    )
+
+    assert TSV(with_macro) == TSV(no_macro)
+
+
 def test_virtual_columns_2(started_cluster):
     hdfs_api = started_cluster.hdfs_api
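The hdfsCluster test mirrors the s3Cluster one: the macro comes from the new configs/macro.xml, so '{default_cluster_macro}' expands to test_cluster_two_shards and both queries must return the same rows. When a macro query does not resolve as expected, the system.macros table shows what the server actually loaded; a small hypothetical helper in the same style (the column names macro and substitution are the usual ones, but worth verifying against the ClickHouse version under test):

    # Hypothetical debugging helper (not part of the diff): list the macros the
    # server picked up, e.g. to confirm configs/macro.xml was applied to node1.
    def dump_macros(node):
        return node.query("SELECT macro, substitution FROM system.macros ORDER BY macro")

    # Expected to include a row like:  default_cluster_macro  test_cluster_two_shards
    # print(dump_macros(node1))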