import pytest

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import assert_eq_with_retry
# One shared cluster object; each test pairs up its own replicas via fixtures.
cluster = ClickHouseCluster(__file__)
|
|
|
def _fill_nodes(nodes, shard):
|
|
|
|
for node in nodes:
|
|
|
|
node.query(
|
2020-09-16 04:26:10 +00:00
|
|
|
'''
|
|
|
|
CREATE DATABASE test;
|
|
|
|
|
|
|
|
CREATE TABLE real_table(date Date, id UInt32, dummy UInt32)
|
|
|
|
ENGINE = MergeTree(date, id, 8192);
|
|
|
|
|
|
|
|
CREATE TABLE other_table(date Date, id UInt32, dummy UInt32)
|
|
|
|
ENGINE = MergeTree(date, id, 8192);
|
|
|
|
|
|
|
|
CREATE TABLE test_table(date Date, id UInt32, dummy UInt32)
|
|
|
|
ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}', date, id, 8192);
|
|
|
|
'''.format(shard=shard, replica=node.name))
|
2018-08-06 10:01:08 +00:00
# Replica pair for test_normal_work (shard 1).
node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def normal_work():
    """Start the cluster, provision shard 1 on node1/node2, always shut down."""
    try:
        cluster.start()
        _fill_nodes([node1, node2], 1)
        yield cluster
    finally:
        cluster.shutdown()
def test_normal_work(normal_work):
    """Happy path: REPLACE PARTITION on node1 replicates to node2."""
    node1.query("insert into test_table values ('2017-06-16', 111, 0)")
    node1.query("insert into real_table values ('2017-06-16', 222, 0)")

    # Replicated insert reaches both nodes; real_table stays local to node1.
    assert_eq_with_retry(node1, "SELECT id FROM test_table order by id", '111')
    assert_eq_with_retry(node1, "SELECT id FROM real_table order by id", '222')
    assert_eq_with_retry(node2, "SELECT id FROM test_table order by id", '111')

    node1.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM real_table")

    # The replaced partition (id 222) replicates to node2 as well.
    assert_eq_with_retry(node1, "SELECT id FROM test_table order by id", '222')
    assert_eq_with_retry(node2, "SELECT id FROM test_table order by id", '222')
# Replica pair for test_drop_failover (shard 2).
node3 = cluster.add_instance('node3', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node4 = cluster.add_instance('node4', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def drop_failover():
    """Start the cluster, provision shard 2 on node3/node4, always shut down."""
    try:
        cluster.start()
        _fill_nodes([node3, node4], 2)
        yield cluster
    finally:
        cluster.shutdown()
def test_drop_failover(drop_failover):
    """REPLACE on node3 while node4 is cut off, then DROP before reconnection:
    node4 must recover without a lingering 'Not found part' failure and end up
    with the partition dropped."""
    node3.query("insert into test_table values ('2017-06-16', 111, 0)")
    node3.query("insert into real_table values ('2017-06-16', 222, 0)")

    assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", '111')
    assert_eq_with_retry(node3, "SELECT id FROM real_table order by id", '222')
    assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '111')

    with PartitionManager() as pm:
        # Hinder replication between replicas
        pm.partition_instances(node3, node4, port=9009)
        # Disconnect Node4 from zookeper
        pm.drop_instance_zk_connections(node4)

        node3.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM real_table")

        # Node3 replace is ok
        assert_eq_with_retry(node3, "SELECT id FROM test_table order by id", '222')
        # Network interrupted -- replace is not ok, but it's ok
        assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '111')

        # Drop partition on source node
        node3.query("ALTER TABLE test_table DROP PARTITION 201706")

    # connection restored
    node4.query_with_retry("select last_exception from system.replication_queue where type = 'REPLACE_RANGE'",
                           check_callback=lambda x: 'Not found part' not in x, sleep_time=1)
    assert 'Not found part' not in node4.query(
        "select last_exception from system.replication_queue where type = 'REPLACE_RANGE'")
    assert_eq_with_retry(node4, "SELECT id FROM test_table order by id", '')
# Replica pair for test_replace_after_replace_failover (shard 3).
node5 = cluster.add_instance('node5', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node6 = cluster.add_instance('node6', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def replace_after_replace_failover():
    """Start the cluster, provision shard 3 on node5/node6, always shut down."""
    try:
        cluster.start()
        _fill_nodes([node5, node6], 3)
        yield cluster
    finally:
        cluster.shutdown()
def test_replace_after_replace_failover(replace_after_replace_failover):
    """REPLACE on node5 while node6 is cut off, then a second REPLACE from
    other_table before reconnection: node6 must converge to the latest
    partition contents (id 333) without a lingering 'Not found part' error."""
    node5.query("insert into test_table values ('2017-06-16', 111, 0)")
    node5.query("insert into real_table values ('2017-06-16', 222, 0)")
    node5.query("insert into other_table values ('2017-06-16', 333, 0)")

    assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", '111')
    assert_eq_with_retry(node5, "SELECT id FROM real_table order by id", '222')
    assert_eq_with_retry(node5, "SELECT id FROM other_table order by id", '333')
    assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", '111')

    with PartitionManager() as pm:
        # Hinder replication between replicas
        pm.partition_instances(node5, node6, port=9009)
        # Disconnect Node6 from zookeper
        pm.drop_instance_zk_connections(node6)

        node5.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM real_table")

        # Node5 replace is ok
        assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", '222')
        # Network interrupted -- replace is not ok, but it's ok
        assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", '111')

        # Replace partition on source node
        node5.query("ALTER TABLE test_table REPLACE PARTITION 201706 FROM other_table")

        assert_eq_with_retry(node5, "SELECT id FROM test_table order by id", '333')

    node6.query_with_retry("select last_exception from system.replication_queue where type = 'REPLACE_RANGE'",
                           check_callback=lambda x: 'Not found part' not in x, sleep_time=1)
    assert 'Not found part' not in node6.query(
        "select last_exception from system.replication_queue where type = 'REPLACE_RANGE'")
    assert_eq_with_retry(node6, "SELECT id FROM test_table order by id", '333')