import time
from contextlib import contextmanager

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import assert_eq_with_retry

cluster = ClickHouseCluster(__file__)

node1 = cluster.add_instance(
    "node1", main_configs=["configs/remote_servers.xml"], with_zookeeper=True
)
node2 = cluster.add_instance(
    "node2", main_configs=["configs/remote_servers.xml"], with_zookeeper=True
)
node3 = cluster.add_instance(
    "node3", main_configs=["configs/remote_servers.xml"], with_zookeeper=True
)
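
# All three instances share the same cluster definition (configs/remote_servers.xml,
# not shown here) and a ZooKeeper instance for replication; the shard layout created
# in started_cluster() below puts two replicas of every shard on different nodes.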


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        node_to_shards = [
            (node1, [0, 2]),
            (node2, [0, 1]),
            (node3, [1, 2]),
        ]
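
        # With this layout every shard (0, 1, 2) has exactly two replicas, each hosted
        # on a different pair of nodes; replicas of the same shard share one ZooKeeper
        # path and differ only in the replica name.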

        for node, shards in node_to_shards:
            for shard in shards:
                node.query(
                    """
CREATE DATABASE shard_{shard};

CREATE TABLE shard_{shard}.replicated(date Date, id UInt32, shard_id UInt32)
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated', '{replica}') PARTITION BY toYYYYMM(date) ORDER BY id;
""".format(
                        shard=shard, replica=node.name
                    )
                )
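
            # Every node also gets a Distributed table over the whole test_cluster; the
            # empty database argument means each remote shard's default database is used
            # (presumably configured per replica in remote_servers.xml, which is not
            # shown here), and shard_id is the sharding key.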
            node.query(
                """
CREATE TABLE distributed(date Date, id UInt32, shard_id UInt32)
    ENGINE = Distributed(test_cluster, '', replicated, shard_id);
"""
            )

        # Insert some data onto different shards using the Distributed table
        to_insert = """\
2017-06-16	111	0
2017-06-16	222	1
2017-06-16	333	2
"""
        node1.query("INSERT INTO distributed FORMAT TSV", stdin=to_insert)
        time.sleep(5)

        yield cluster

    finally:
        cluster.shutdown()


def test(started_cluster):
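    # Expected placement, matching node_to_shards: a row with shard_id N lands in shard_N,
    # i.e. shard 0 on node1+node2, shard 1 on node2+node3, shard 2 on node1+node3.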
    # Check that the data has been inserted into correct tables.
    assert_eq_with_retry(node1, "SELECT id FROM shard_0.replicated", "111")
    assert_eq_with_retry(node1, "SELECT id FROM shard_2.replicated", "333")

    assert_eq_with_retry(node2, "SELECT id FROM shard_0.replicated", "111")
    assert_eq_with_retry(node2, "SELECT id FROM shard_1.replicated", "222")

    assert_eq_with_retry(node3, "SELECT id FROM shard_1.replicated", "222")
    assert_eq_with_retry(node3, "SELECT id FROM shard_2.replicated", "333")

    # Check that SELECT from the Distributed table works.
    expected_from_distributed = """\
2017-06-16	111	0
2017-06-16	222	1
2017-06-16	333	2
"""
    assert_eq_with_retry(
        node1, "SELECT * FROM distributed ORDER BY id", expected_from_distributed
    )
    assert_eq_with_retry(
        node2, "SELECT * FROM distributed ORDER BY id", expected_from_distributed
    )
    assert_eq_with_retry(
        node3, "SELECT * FROM distributed ORDER BY id", expected_from_distributed
    )

    # Now isolate node3 from other nodes and check that SELECTs on other nodes still work.
    with PartitionManager() as pm:
        pm.partition_instances(node3, node1, action="REJECT --reject-with tcp-reset")
        pm.partition_instances(node3, node2, action="REJECT --reject-with tcp-reset")
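
        # Between them, node1 and node2 still hold a replica of every shard, so their
        # Distributed queries keep returning the full result without node3.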

        assert_eq_with_retry(
            node1, "SELECT * FROM distributed ORDER BY id", expected_from_distributed
        )
        assert_eq_with_retry(
            node2, "SELECT * FROM distributed ORDER BY id", expected_from_distributed
        )
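
        # node3 keeps local replicas only of shards 1 and 2; both replicas of shard 0 are
        # now unreachable from it, so its Distributed query must fail even with retries.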

        with pytest.raises(Exception):
            print(
                node3.query_with_retry(
                    "SELECT * FROM distributed ORDER BY id", retry_count=5
                )
            )
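

# Running this file directly (outside pytest) just brings the cluster up and keeps it
# running for manual inspection until the prompt below is answered.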
if __name__ == "__main__":
    with contextmanager(started_cluster)() as cluster:
        for name, instance in list(cluster.instances.items()):
            print(name, instance.ip_address)
        input("Cluster created, press any key to destroy...")