import time

import pytest

from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
def fill_nodes(nodes, shard):
    """Create the schema used by the test on every given node.

    Five databases (test, test1, test2, test3, test4) are created, each
    holding one ReplicatedMergeTree table ``test_table``.  The tables are
    identical except for the database component of their ZooKeeper path.

    :param nodes: cluster instances to initialize; each must expose
                  ``query(sql)`` and a ``name`` attribute used as the
                  replica name
    :param shard: shard number substituted into the ZooKeeper path
    """
    for node in nodes:
        # '' yields the plain 'test' database, the digits yield test1..test4.
        for suffix in ('', '1', '2', '3', '4'):
            db = 'test' + suffix
            node.query(
                '''
                CREATE DATABASE {db};

                CREATE TABLE {db}.test_table(date Date, id UInt32)
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/{db}/{shard}/replicated/test_table', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date) SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0;
                '''.format(db=db, shard=shard, replica=node.name))
# Cluster topology: three identical replicas, all with ZooKeeper enabled and
# the same remote_servers configuration.
cluster = ClickHouseCluster(__file__)


def _new_instance(name):
    # Helper keeping the three instance declarations below in sync.
    return cluster.add_instance(name, with_zookeeper=True,
                                main_configs=['configs/remote_servers.xml'])


node_1_1 = _new_instance('node_1_1')
node_1_2 = _new_instance('node_1_2')
node_1_3 = _new_instance('node_1_3')

# FIXME it's just to run flaky check
@pytest.fixture(scope="module")
def start_cluster():
    """Boot the cluster once per module, create the test schema, tear down.

    Only node_1_1 and node_1_2 receive the databases/tables; node_1_3 is
    left without local tables (the tests below rely on that to exercise
    DROP REPLICA from a node that has no local copy).
    """
    try:
        cluster.start()
        fill_nodes([node_1_1, node_1_2], 1)
        yield cluster
    except Exception as ex:
        print(ex)
    finally:
        cluster.shutdown()
def test_drop_replica(start_cluster):
    """Exercise every form of SYSTEM DROP REPLICA.

    Checks the error paths while the replica is local/active, then cuts
    node_1_1 off from ZooKeeper and drops its replica via the TABLE,
    DATABASE, blanket, and ZKPATH forms, verifying the replica znodes
    disappear (or survive) as expected.
    """
    databases = ('test', 'test1', 'test2', 'test3', 'test4')

    # Populate all five tables so the replicas have real state to drop.
    for i in range(100):
        for db in databases:
            node_1_1.query("INSERT INTO {}.test_table VALUES (1, {})".format(db, i))

    zk = cluster.get_kazoo_client('zoo1')

    # A node may never drop its own (local) replica.
    assert "can't drop local replica" in node_1_1.query_and_get_error(
        "SYSTEM DROP REPLICA 'node_1_1'")
    assert "can't drop local replica" in node_1_1.query_and_get_error(
        "SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test")
    assert "can't drop local replica" in node_1_1.query_and_get_error(
        "SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table")

    # While the replica is alive it cannot be dropped from any node.
    assert "it's active" in node_1_2.query_and_get_error(
        "SYSTEM DROP REPLICA 'node_1_1'")
    assert "it's active" in node_1_2.query_and_get_error(
        "SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test")
    assert "it's active" in node_1_2.query_and_get_error(
        "SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table")
    assert "it's active" in node_1_3.query_and_get_error(
        "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test/{shard}/replicated/test_table'".format(shard=1))

    # The ZKPATH form is rejected on nodes that hold the table locally.
    assert "There is a local table" in node_1_2.query_and_get_error(
        "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test/{shard}/replicated/test_table'".format(shard=1))
    assert "There is a local table" in node_1_1.query_and_get_error(
        "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test/{shard}/replicated/test_table'".format(shard=1))

    # A path that is not a table znode is rejected outright.
    assert "does not look like a table path" in node_1_3.query_and_get_error(
        "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test'")

    def replica_znode(db):
        # ZooKeeper node marking node_1_1's replica of <db>.test_table.
        return "/clickhouse/tables/{db}/{shard}/replicated/test_table/replicas/{replica}".format(
            db=db, shard=1, replica='node_1_1')

    with PartitionManager() as pm:
        ## make node_1_1 dead
        pm.drop_instance_zk_connections(node_1_1)
        time.sleep(10)

        # node_1_3 has no local tables, so the TABLE/DATABASE forms fail there.
        assert "doesn't exist" in node_1_3.query_and_get_error(
            "SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table")
        assert "doesn't exist" in node_1_3.query_and_get_error(
            "SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test1")

        # ... and the blanket form is a no-op on it: test3's replica survives.
        node_1_3.query("SYSTEM DROP REPLICA 'node_1_1'")
        assert zk.exists(replica_znode('test3')) is not None

        ## If you want to drop a inactive/stale replicate table that does not have a local replica, you can following syntax(ZKPATH):
        node_1_3.query(
            "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test2/{shard}/replicated/test_table'".format(shard=1))
        assert zk.exists(replica_znode('test2')) is None

        node_1_2.query("SYSTEM DROP REPLICA 'node_1_1' FROM TABLE test.test_table")
        assert zk.exists(replica_znode('test')) is None

        node_1_2.query("SYSTEM DROP REPLICA 'node_1_1' FROM DATABASE test1")
        assert zk.exists(replica_znode('test1')) is None

        node_1_3.query(
            "SYSTEM DROP REPLICA 'node_1_1' FROM ZKPATH '/clickhouse/tables/test3/{shard}/replicated/test_table'".format(shard=1))
        assert zk.exists(replica_znode('test3')) is None

        # The blanket form on a node that shares the schema clears the rest.
        node_1_2.query("SYSTEM DROP REPLICA 'node_1_1'")
        assert zk.exists(replica_znode('test4')) is None