import os
import sys
import time

import pytest

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager

cluster = ClickHouseCluster(__file__)

# Cluster with 2 shards of 2 replicas each. node_1_1 is the instance with Distributed table.
# Thus we have a shard with a local replica and a shard with remote replicas.
node_1_1 = instance_with_dist_table = cluster.add_instance(
    "node_1_1", with_zookeeper=True, main_configs=["configs/remote_servers.xml"]
)
node_1_2 = cluster.add_instance("node_1_2", with_zookeeper=True)
node_2_1 = cluster.add_instance("node_2_1", with_zookeeper=True)
node_2_2 = cluster.add_instance("node_2_2", with_zookeeper=True)
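# The topology above (two shards of two replicas) is expected to be described as
# 'test_cluster' in configs/remote_servers.xml, which the Distributed table created
# in started_cluster() points at.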


@pytest.fixture(scope="module")
def started_cluster():
    try:
        cluster.start()

        for shard in (1, 2):
            for replica in (1, 2):
                node = cluster.instances["node_{}_{}".format(shard, replica)]
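                # Note: this uses the legacy ReplicatedMergeTree syntax, where the date
                # column, primary key and index granularity are passed as engine arguments.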
                node.query(
                    """
                    CREATE TABLE replicated (d Date, x UInt32) ENGINE =
                        ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated', '{instance}', d, d, 8192)""".format(
                        shard=shard, instance=node.name
                    )
                )

        node_1_1.query(
            "CREATE TABLE distributed (d Date, x UInt32) ENGINE = "
            "Distributed('test_cluster', 'default', 'replicated')"
        )

        yield cluster

    finally:
        cluster.shutdown()


def test(started_cluster):
    with PartitionManager() as pm:
        # Block replication between the replicas of each shard, but leave distributed connections possible.
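        # (Port 9009 is the interserver HTTP port used to exchange parts between replicas;
        # the native protocol port 9000 used by distributed queries stays reachable.)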
        pm.partition_instances(node_1_1, node_1_2, port=9009)
        pm.partition_instances(node_2_1, node_2_2, port=9009)

        node_1_2.query("INSERT INTO replicated VALUES ('2017-05-08', 1)")
        node_2_2.query("INSERT INTO replicated VALUES ('2017-05-08', 2)")

        time.sleep(1)  # accrue replica delay
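        # One second of lag is enough because the queries below set
        # max_replica_delay_for_distributed_queries to 1 second.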

        assert node_1_1.query("SELECT sum(x) FROM replicated").strip() == "0"
        assert node_1_2.query("SELECT sum(x) FROM replicated").strip() == "1"
        assert node_2_1.query("SELECT sum(x) FROM replicated").strip() == "0"
        assert node_2_2.query("SELECT sum(x) FROM replicated").strip() == "2"

        # With in_order load balancing, the first replica of each shard is chosen.
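        # Both first replicas (node_1_1 and node_2_1) are empty because replication is
        # blocked, so the count is 0.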
        assert (
            instance_with_dist_table.query(
                "SELECT count() FROM distributed SETTINGS load_balancing='in_order'"
            ).strip()
            == "0"
        )

        # When max_replica_delay is set, the (stale) first replicas must be excluded.
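        # The only fresh replicas are node_1_2 and node_2_2, so the expected sum is 1 + 2 = 3.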
        assert (
            instance_with_dist_table.query(
                """
                SELECT sum(x) FROM distributed SETTINGS
                    load_balancing='in_order',
                    max_replica_delay_for_distributed_queries=1
                """
            ).strip()
            == "3"
        )
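
        # WITH TOTALS appends a totals row after an empty separator line in the output,
        # hence the expected "3\n\n3".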
        assert (
            instance_with_dist_table.query(
                """
                SELECT sum(x) FROM distributed WITH TOTALS SETTINGS
                    load_balancing='in_order',
                    max_replica_delay_for_distributed_queries=1
                """
            ).strip()
            == "3\n\n3"
        )
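
        # Cut the second replicas off from ZooKeeper so that every replica becomes stale.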
        pm.drop_instance_zk_connections(node_1_2)
        pm.drop_instance_zk_connections(node_2_2)

        # Allow the pings to ZooKeeper to time out (the polling below must run longer than the ZK session timeout).
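        # Poll until the query against system.zookeeper starts failing, i.e. until the
        # ZooKeeper session is actually gone.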
        for _ in range(30):
            try:
                node_2_2.query("SELECT * FROM system.zookeeper where path = '/'")
                time.sleep(0.5)
            except:
                break
        else:
            raise Exception("Connection with zookeeper was not lost")

        # At this point all replicas are stale, but the query must still go to second replicas which are the least stale ones.
        assert (
            instance_with_dist_table.query(
                """
                SELECT sum(x) FROM distributed SETTINGS
                    load_balancing='in_order',
                    max_replica_delay_for_distributed_queries=1
                """
            ).strip()
            == "3"
        )

        # Regression test for skip_unavailable_shards in conjunction with max_replica_delay_for_distributed_queries.
        assert (
            instance_with_dist_table.query(
                """
                SELECT sum(x) FROM distributed SETTINGS
                    load_balancing='in_order',
                    skip_unavailable_shards=1,
                    max_replica_delay_for_distributed_queries=1
                """
            ).strip()
            == "3"
        )

        # If we forbid stale replicas, the query must fail. Retry for a while, since it can
        # take a few seconds before the replicas are recognized as stale.
        for _ in range(20):
            try:
                instance_with_dist_table.query(
                    """
                    SELECT count() FROM distributed SETTINGS
                        load_balancing='in_order',
                        max_replica_delay_for_distributed_queries=1,
                        fallback_to_stale_replicas_for_distributed_queries=0
                    """
                )
                time.sleep(0.5)
            except:
                break
        else:
            raise Exception("Didn't raise when stale replicas are not allowed")

        # Now partition off the remote replica of the local shard and test that failover still works.
        pm.partition_instances(node_1_1, node_1_2, port=9000)
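
        # Shard 1 now has to fall back to its stale local replica node_1_1 (sum 0), while
        # shard 2 is still answered by node_2_2, which holds the value 2, so the total is 2.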
        assert (
            instance_with_dist_table.query(
                """
                SELECT sum(x) FROM distributed SETTINGS
                    load_balancing='in_order',
                    max_replica_delay_for_distributed_queries=1
                """
            ).strip()
            == "2"
        )