# ClickHouse integration test: replica recovery after detach/attach.
# Original path: dbms/tests/integration/test_recovery_replica/test.py
import time
import pytest
from helpers.cluster import ClickHouseCluster
def fill_nodes(nodes, shard):
    """Create the test database and a replicated table on every node.

    Each node gets the same ReplicatedMergeTree table under the ZooKeeper
    path ``/clickhouse/tables/test{shard}/replicated``, using the node's
    own name as the replica identifier.  The aggressive log-retention
    settings (min/max_replicated_logs_to_keep, cleanup_delay_period=0)
    make the replication log get truncated quickly, which is what the
    recovery test below relies on.

    Args:
        nodes: iterable of cluster instances exposing ``query`` and ``name``.
        shard: shard number substituted into the ZooKeeper path.
    """
    for node in nodes:
        node.query(
            '''
            CREATE DATABASE test;

            CREATE TABLE test_table(date Date, id UInt32)
            ENGINE = ReplicatedMergeTree('/clickhouse/tables/test{shard}/replicated', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;
            '''.format(shard=shard, replica=node.name))
# Two-instance cluster sharing one remote_servers config and a ZooKeeper
# ensemble, so node1 and node2 can host replicas of the same table.
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
node2 = cluster.add_instance('node2', main_configs=['configs/remote_servers.xml'], with_zookeeper=True)
@pytest.fixture(scope="module")
def start_cluster():
    """Start the two-node cluster once per module; tear it down afterwards.

    Yields:
        The running ClickHouseCluster with the test tables created on
        both nodes.
    """
    try:
        cluster.start()
        fill_nodes([node1, node2], 1)

        yield cluster

    except Exception as ex:
        print(ex)
        # Re-raise so pytest reports the real startup failure instead of
        # an obscure "fixture did not yield" error.
        raise
    finally:
        cluster.shutdown()
def test_recovery(start_cluster):
    """A detached replica must fully catch up after being re-attached.

    While node2's table is detached, node1 performs enough inserts that the
    tiny log-retention settings (see fill_nodes) let the cleanup thread
    truncate the replication log past node2's position.  On ATTACH, node2
    therefore cannot replay the log and has to recover by fetching parts;
    afterwards both replicas must agree on the row count.
    """
    node1.query("INSERT INTO test_table VALUES (1, 1)")

    time.sleep(1)

    node2.query("DETACH TABLE test_table")

    # Enough inserts to push the replication log well past the retention
    # window while node2 is away.
    for i in range(100):
        node1.query("INSERT INTO test_table VALUES (1, {})".format(i))

    # Give the cleanup thread time to trim the log.
    time.sleep(2)

    node2.query("ATTACH TABLE test_table")

    # Give node2 time to recover and fetch the missing parts.
    time.sleep(2)

    assert node1.query("SELECT count(*) FROM test_table") == node2.query("SELECT count(*) FROM test_table")