import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry


# Name of the database created on every node; all tables in this module live here.
CLICKHOUSE_DATABASE = "test"


def initialize_database(nodes, shard):
    """Create the test database and the replicated src/dest tables on each node.

    Each node in ``nodes`` is registered as a replica (replica name = node
    name) under shard-scoped ZooKeeper paths.  The aggressive cleanup
    settings presumably keep the replication log short so the suite
    exercises behavior after log truncation — confirm against the tests
    that use this helper.
    """
    ddl_template = """
    CREATE DATABASE {database};
    CREATE TABLE `{database}`.src (p UInt64, d UInt64)
    ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard1{shard}/replicated', '{replica}')
    ORDER BY d PARTITION BY p
    SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5,
    cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0;
    CREATE TABLE `{database}`.dest (p UInt64, d UInt64)
    ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test_consistent_shard2{shard}/replicated', '{replica}')
    ORDER BY d PARTITION BY p
    SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5,
    cleanup_delay_period=0, cleanup_delay_period_random_add=0, cleanup_thread_preferred_points_per_iteration=0, temporary_directories_lifetime=1;
    """
    for replica_node in nodes:
        replica_node.query(
            ddl_template.format(
                database=CLICKHOUSE_DATABASE,
                shard=shard,
                replica=replica_node.name,
            )
        )


# Two-instance cluster: both nodes load the same remote_servers config and
# require ZooKeeper (needed for ReplicatedMergeTree coordination).
cluster = ClickHouseCluster(__file__)
node1, node2 = [
    cluster.add_instance(
        name, main_configs=["configs/remote_servers.xml"], with_zookeeper=True
    )
    for name in ("node1", "node2")
]


@pytest.fixture(scope="module")
def start_cluster():
    """Module-scoped fixture: start the cluster, create the schema, yield it.

    The cluster is shut down unconditionally at module teardown via the
    ``finally`` clause.
    """
    try:
        cluster.start()
        initialize_database([node1, node2], 1)
        yield cluster
    except Exception as ex:
        print(ex)
        # Re-raise: swallowing the exception made the generator finish
        # without yielding, so pytest reported a confusing fixture error
        # instead of the real startup failure.
        raise
    finally:
        cluster.shutdown()


def test_consistent_part_after_move_partition(start_cluster):
    """Replicas stay consistent after MOVE PARTITION between replicated tables."""
    # Populate src via node1; values alternate between partitions 0 and 1.
    for value in range(100):
        node1.query(
            f"INSERT INTO `{CLICKHOUSE_DATABASE}`.src VALUES ({value} % 2, {value})"
        )

    count_src = f"SELECT COUNT(*) FROM `{CLICKHOUSE_DATABASE}`.src"
    count_dest = f"SELECT COUNT(*) FROM `{CLICKHOUSE_DATABASE}`.dest"

    # node2 must converge to node1's row counts before the move...
    assert_eq_with_retry(node2, count_src, node1.query(count_src))
    assert_eq_with_retry(node2, count_dest, node1.query(count_dest))

    node1.query(
        f"ALTER TABLE `{CLICKHOUSE_DATABASE}`.src "
        f"MOVE PARTITION 1 TO TABLE `{CLICKHOUSE_DATABASE}`.dest"
    )

    # ...and again after it, proving the moved partition replicated cleanly.
    assert_eq_with_retry(node2, count_src, node1.query(count_src))
    assert_eq_with_retry(node2, count_dest, node1.query(count_dest))
|