Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-26 09:32:01 +00:00)
commit 2f8b8596a2
parent 4d0a3ee283

    better
@@ -68,6 +68,8 @@ def test_insertion_sync(started_cluster):
 2000-01-01	100500''')
     assert TSV(node2.query('SELECT date, val FROM local_table WHERE val = 100500 ORDER BY date')) == expected

+    node1.query("TRUNCATE TABLE local_table SYNC")
+    node2.query("TRUNCATE TABLE local_table SYNC")

 """
 def test_insertion_sync_fails_on_error(started_cluster):
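The two TRUNCATE ... SYNC statements added above clear local_table on both nodes; SYNC makes the query return only after the truncate has actually completed, so later tests cannot observe leftover rows. A minimal sketch of the same cleanup expressed as a pytest fixture (illustrative only, not part of the commit; node1, node2, and local_table are the objects used in this test file):

    import pytest

    @pytest.fixture
    def clean_local_table():
        yield  # run the test body first
        # Teardown: empty the table on both nodes. SYNC waits for the
        # truncate to finish before returning.
        for node in (node1, node2):
            node.query("TRUNCATE TABLE local_table SYNC")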
@@ -8,6 +8,8 @@ from helpers.cluster import ClickHouseCluster
 from helpers.test_tools import assert_eq_with_retry, assert_logs_contain
 from helpers.network import PartitionManager

+test_recover_staled_replica_run = 1
+
 cluster = ClickHouseCluster(__file__)

 main_node = cluster.add_instance('main_node', main_configs=['configs/config.xml'], user_configs=['configs/settings.xml'], with_zookeeper=True, stay_alive=True, macros={"shard": 1, "replica": 1})
@@ -36,6 +38,7 @@ def started_cluster():

 def test_create_replicated_table(started_cluster):
     main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")
+    dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")
     assert "Explicit zookeeper_path and replica_name are specified" in \
         main_node.query_and_get_error("CREATE TABLE testdb.replicated_table (d Date, k UInt64, i32 Int32) "
                                       "ENGINE=ReplicatedMergeTree('/test/tmp', 'r') ORDER BY k PARTITION BY toYYYYMM(d);")
@@ -57,10 +60,12 @@ def test_create_replicated_table(started_cluster):
     # assert without replacing uuid
     assert main_node.query("show create testdb.replicated_table") == dummy_node.query("show create testdb.replicated_table")
     main_node.query("DROP DATABASE testdb SYNC")
+    dummy_node.query("DROP DATABASE testdb SYNC")

 @pytest.mark.parametrize("engine", ['MergeTree', 'ReplicatedMergeTree'])
 def test_simple_alter_table(started_cluster, engine):
     main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');")
+    dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")
     # test_simple_alter_table
     name = "testdb.alter_test_{}".format(engine)
     main_node.query("CREATE TABLE {} "
@@ -102,6 +107,8 @@ def test_simple_alter_table(started_cluster, engine):

     assert_create_query([main_node, dummy_node, competing_node], name, expected)
     main_node.query("DROP DATABASE testdb SYNC")
+    dummy_node.query("DROP DATABASE testdb SYNC")
+    competing_node.query("DROP DATABASE testdb SYNC")

 def get_table_uuid(database, name):
     return main_node.query(f"SELECT uuid FROM system.tables WHERE database = '{database}' and name = '{name}'").strip()
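A pattern repeated throughout this commit: each test now creates the testdb Replicated database on every replica it touches and drops it on every replica with DROP DATABASE ... SYNC, so each test owns its full lifecycle and leaves no state behind for the next one. A hedged sketch of that pattern factored into helpers (the helper names are hypothetical; the queries are the ones used in the hunks above):

    def create_replicated_db(nodes, zk_path="/clickhouse/databases/test1", shard="shard1"):
        # One CREATE per replica: every node registers itself under the same
        # ZooKeeper path with its own replica name.
        for i, node in enumerate(nodes, start=1):
            node.query(f"CREATE DATABASE testdb ENGINE = Replicated('{zk_path}', '{shard}', 'replica{i}');")

    def drop_replicated_db(nodes):
        # SYNC blocks until the database is fully removed, so a later test
        # can safely recreate it under the same ZooKeeper path.
        for node in nodes:
            node.query("DROP DATABASE testdb SYNC")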
@@ -224,7 +231,7 @@ def test_alters_from_different_replicas(started_cluster):
     dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');")

     # test_alters_from_different_replicas
-    competing_node.query("CREATE DATABASE IF NOT EXISTS testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');")
+    competing_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');")

     main_node.query("CREATE TABLE testdb.concurrent_test "
                     "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) "
@@ -258,7 +265,7 @@ def test_alters_from_different_replicas(started_cluster):
     assert_create_query([main_node, competing_node], "testdb.concurrent_test", expected)

     # test_create_replica_after_delay
-    main_node.query("DROP TABLE testdb.concurrent_test")
+    main_node.query("DROP TABLE testdb.concurrent_test SYNC")
     main_node.query("CREATE TABLE testdb.concurrent_test "
                     "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) "
                     "ENGINE = ReplicatedMergeTree ORDER BY CounterID;")
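The added SYNC on the DROP TABLE matters because the very next statement recreates testdb.concurrent_test as a ReplicatedMergeTree: a plain DROP can return while cleanup is still in flight, and the recreate may then race with the removal of the old table's replication metadata. The safe drop-then-recreate sequence, as used above (columns abbreviated here for brevity):

    # Block until the drop has fully completed before recreating the table
    # under the same name (and thus the same replication metadata path).
    main_node.query("DROP TABLE testdb.concurrent_test SYNC")
    main_node.query("CREATE TABLE testdb.concurrent_test (CounterID UInt32) "
                    "ENGINE = ReplicatedMergeTree ORDER BY CounterID;")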
@@ -320,6 +327,9 @@ def test_alters_from_different_replicas(started_cluster):
     assert_eq_with_retry(dummy_node, "SELECT CounterID, StartDate, UserID FROM testdb.dist ORDER BY CounterID", expected)
     main_node.query("DROP DATABASE testdb SYNC")
     dummy_node.query("DROP DATABASE testdb SYNC")
+    competing_node.query("DROP DATABASE testdb SYNC")
+    snapshotting_node.query("DROP DATABASE testdb SYNC")
+    snapshot_recovering_node.query("DROP DATABASE testdb SYNC")

 def test_recover_staled_replica(started_cluster):
     main_node.query("CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica1');")
@@ -391,11 +401,12 @@ def test_recover_staled_replica(started_cluster):
         assert dummy_node.query("SELECT (*,).1 FROM recover.{}".format(table)) == "42\n"
     for table in ['m1', 'mt1']:
         assert dummy_node.query("SELECT count() FROM recover.{}".format(table)) == "0\n"
-    assert dummy_node.query("SELECT count() FROM system.tables WHERE database='recover_broken_tables'") == "2\n"
-    table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'mt1_29_%'").strip()
+    global test_recover_staled_replica_run
+    assert dummy_node.query("SELECT count() FROM system.tables WHERE database='recover_broken_tables'") == f"{2*test_recover_staled_replica_run}\n"
+    test_recover_staled_replica_run += 1
+    table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'mt1_29_%' LIMIT 1").strip()
     assert dummy_node.query("SELECT (*,).1 FROM recover_broken_tables.{}".format(table)) == "42\n"
-    table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'rmt5_29_%'").strip()
+    table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'rmt5_29_%' LIMIT 1").strip()
     assert dummy_node.query("SELECT (*,).1 FROM recover_broken_tables.{}".format(table)) == "42\n"

     expected = "Cleaned 6 outdated objects: dropped 1 dictionaries and 3 tables, moved 2 tables"
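test_recover_staled_replica can execute more than once in the same pytest process (for example when retried), and each run moves two more outdated tables into recover_broken_tables. The module-level counter introduced at the top of the file lets the assertion scale with the run number, and the added LIMIT 1 keeps SHOW TABLES returning a single name even when tables from several runs match the pattern. A stripped-down sketch of the counter pattern (simplified from the test above):

    test_recover_staled_replica_run = 1  # module level, shared across runs in one process

    def test_recover_staled_replica(started_cluster):
        global test_recover_staled_replica_run  # rebind the module-level name
        # Two broken tables are parked per run, so the expected count grows
        # with each execution of this test in the current process.
        expected_count = 2 * test_recover_staled_replica_run
        test_recover_staled_replica_run += 1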