Merge pull request #70877 from ClickHouse/backport/24.3/70511
Backport #70511 to 24.3: Fix `StorageTableFunction::supportsReplication` creating source storage unnecessarily
commit 49cfb6efcb

@@ -62,6 +62,7 @@ public:
     /// Avoid loading nested table by returning nullptr/false for all table functions.
     StoragePolicyPtr getStoragePolicy() const override { return nullptr; }
     bool storesDataOnDisk() const override { return false; }
+    bool supportsReplication() const override { return false; }

     String getName() const override
     {
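
To see why the added override matters: the storage behind a table function is wrapped in a proxy that builds the underlying ("nested") table lazily, which is what the comment in this hunk refers to. Forwarding supportsReplication() to the nested table would force the source storage to be created (for object-storage table functions this can mean contacting a remote endpoint) just to answer a cheap capability check, so the override returns false directly. Below is a minimal illustrative sketch of that lazy-proxy idea, using simplified, hypothetical names rather than the actual ClickHouse classes:

// Illustrative sketch only: a proxy that constructs its nested storage lazily.
// The names here (a stripped-down IStorage, a hypothetical LazyStorageProxy)
// are simplifications, not the real ClickHouse implementation.
#include <functional>
#include <memory>

struct IStorage
{
    virtual ~IStorage() = default;
    virtual bool supportsReplication() const { return false; }
};

class LazyStorageProxy : public IStorage
{
public:
    explicit LazyStorageProxy(std::function<std::shared_ptr<IStorage>()> construct_)
        : construct(std::move(construct_)) {}

    /// Answer capability checks without touching the nested table,
    /// mirroring the pattern used in the hunk above.
    bool supportsReplication() const override { return false; }

    /// The nested storage (which may open connections to S3 and the like)
    /// is only built when something genuinely needs it.
    std::shared_ptr<IStorage> getNested()
    {
        if (!nested)
            nested = construct();
        return nested;
    }

private:
    std::function<std::shared_ptr<IStorage>()> construct;
    std::shared_ptr<IStorage> nested;
};

The test changes that follow appear to exercise exactly this path: restarting a server whose table-function source is unreachable should not require the nested storage to be constructed.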

@@ -0,0 +1,16 @@
+<clickhouse>
+    <remote_servers>
+        <cluster>
+            <shard>
+                <replica>
+                    <host>node1</host>
+                    <port>9000</port>
+                </replica>
+                <replica>
+                    <host>node2</host>
+                    <port>9000</port>
+                </replica>
+            </shard>
+        </cluster>
+    </remote_servers>
+</clickhouse>

@@ -28,6 +28,10 @@ from pyspark.sql.functions import monotonically_increasing_id, row_number
 from pyspark.sql.window import Window
 from minio.deleteobjects import DeleteObject

+import helpers.client
+from helpers.cluster import ClickHouseCluster
+from helpers.network import PartitionManager
+
 from helpers.s3_tools import (
     prepare_s3_bucket,
     upload_directory,

@@ -58,10 +62,13 @@ def started_cluster():
         cluster = ClickHouseCluster(__file__, with_spark=True)
         cluster.add_instance(
             "node1",
-            main_configs=["configs/config.d/named_collections.xml"],
+            main_configs=[
+                "configs/config.d/named_collections.xml",
+            ],
             user_configs=["configs/users.d/users.xml"],
             with_minio=True,
             stay_alive=True,
+            with_zookeeper=True,
         )

         logging.info("Starting cluster...")

@@ -511,3 +518,4 @@ def test_restart_broken_table_function(started_cluster):
     upload_directory(minio_client, bucket, f"/{TABLE_NAME}", "")

     assert int(instance.query(f"SELECT count() FROM {TABLE_NAME}")) == 100
+