Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-23 08:02:02 +00:00)
Merge pull request #49470 from ClickHouse/remove_no_delay_flag

Replace `NO DELAY` with `SYNC` in tests

Commit: f77cc89464
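Context for the change below: in ClickHouse DDL, `NO DELAY` and `SYNC` are two spellings of the same modifier — both make a DROP/TRUNCATE statement wait until the data is actually removed rather than scheduling an asynchronous (delayed) drop. `NO DELAY` is the older, deprecated spelling, and this PR switches the tests to `SYNC`. A minimal sketch of the two equivalent forms as they appear in the test harness (`node` stands for any test instance; the table name is a placeholder):

    node.query("DROP TABLE IF EXISTS tbl NO DELAY")  # old spelling, deprecated
    node.query("DROP TABLE IF EXISTS tbl SYNC")      # equivalent current spelling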
@@ -143,7 +143,7 @@ StoragePtr StorageMaterializedPostgreSQL::createTemporary() const
    if (tmp_storage)
    {
        LOG_TRACE(&Poco::Logger::get("MaterializedPostgreSQLStorage"), "Temporary table {} already exists, dropping", tmp_table_id.getNameForLogs());
-        InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), getContext(), tmp_table_id, /* no delay */true);
+        InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind::Drop, getContext(), getContext(), tmp_table_id, /* sync */true);
    }

    auto new_context = Context::createCopy(context);
@@ -204,7 +204,7 @@ class PostgresManager:
        assert materialized_database in self.instance.query("SHOW DATABASES")

    def drop_materialized_db(self, materialized_database="test_database"):
-        self.instance.query(f"DROP DATABASE IF EXISTS {materialized_database} NO DELAY")
+        self.instance.query(f"DROP DATABASE IF EXISTS {materialized_database} SYNC")
        if materialized_database in self.created_materialized_postgres_db_list:
            self.created_materialized_postgres_db_list.remove(materialized_database)
        assert materialized_database not in self.instance.query("SHOW DATABASES")
@@ -44,7 +44,7 @@ def started_cluster():


def test_create_insert(started_cluster):
-    node1.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'test_cluster' NO DELAY")
+    node1.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'test_cluster' SYNC")
    node1.query(
        """
        CREATE TABLE tbl ON CLUSTER 'test_cluster' (
@@ -1482,23 +1482,23 @@ def test_tables_dependency():

    # Drop everything in reversive order.
    def drop():
-        instance.query(f"DROP TABLE {t15} NO DELAY")
+        instance.query(f"DROP TABLE {t15} SYNC")
-        instance.query(f"DROP TABLE {t14} NO DELAY")
+        instance.query(f"DROP TABLE {t14} SYNC")
-        instance.query(f"DROP TABLE {t13} NO DELAY")
+        instance.query(f"DROP TABLE {t13} SYNC")
-        instance.query(f"DROP TABLE {t12} NO DELAY")
+        instance.query(f"DROP TABLE {t12} SYNC")
-        instance.query(f"DROP TABLE {t11} NO DELAY")
+        instance.query(f"DROP TABLE {t11} SYNC")
-        instance.query(f"DROP TABLE {t10} NO DELAY")
+        instance.query(f"DROP TABLE {t10} SYNC")
-        instance.query(f"DROP TABLE {t9} NO DELAY")
+        instance.query(f"DROP TABLE {t9} SYNC")
        instance.query(f"DROP DICTIONARY {t8}")
-        instance.query(f"DROP TABLE {t7} NO DELAY")
+        instance.query(f"DROP TABLE {t7} SYNC")
-        instance.query(f"DROP TABLE {t6} NO DELAY")
+        instance.query(f"DROP TABLE {t6} SYNC")
-        instance.query(f"DROP TABLE {t5} NO DELAY")
+        instance.query(f"DROP TABLE {t5} SYNC")
        instance.query(f"DROP DICTIONARY {t4}")
-        instance.query(f"DROP TABLE {t3} NO DELAY")
+        instance.query(f"DROP TABLE {t3} SYNC")
-        instance.query(f"DROP TABLE {t2} NO DELAY")
+        instance.query(f"DROP TABLE {t2} SYNC")
-        instance.query(f"DROP TABLE {t1} NO DELAY")
+        instance.query(f"DROP TABLE {t1} SYNC")
-        instance.query("DROP DATABASE test NO DELAY")
+        instance.query("DROP DATABASE test SYNC")
-        instance.query("DROP DATABASE test2 NO DELAY")
+        instance.query("DROP DATABASE test2 SYNC")

    drop()

@@ -65,9 +65,9 @@ def drop_after_test():
    try:
        yield
    finally:
-        node1.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster3' NO DELAY")
+        node1.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster3' SYNC")
-        node1.query("DROP TABLE IF EXISTS tbl2 ON CLUSTER 'cluster3' NO DELAY")
+        node1.query("DROP TABLE IF EXISTS tbl2 ON CLUSTER 'cluster3' SYNC")
-        node1.query("DROP DATABASE IF EXISTS mydb ON CLUSTER 'cluster3' NO DELAY")
+        node1.query("DROP DATABASE IF EXISTS mydb ON CLUSTER 'cluster3' SYNC")
        node1.query("DROP USER IF EXISTS u1, u2 ON CLUSTER 'cluster3'")

@@ -107,7 +107,7 @@ def test_replicated_table():
    )

    # Drop table on both nodes.
-    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    # Restore from backup on node2.
    node2.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}")
@@ -138,7 +138,7 @@ def test_empty_replicated_table():
    )

    # Drop table on both nodes.
-    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    # Restore from backup on node2.
    node1.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}")
@@ -172,7 +172,7 @@ def test_replicated_database():
    )

    # Drop table on both nodes.
-    node1.query("DROP DATABASE mydb ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP DATABASE mydb ON CLUSTER 'cluster' SYNC")

    # Restore from backup on node2.
    node1.query(f"RESTORE DATABASE mydb ON CLUSTER 'cluster' FROM {backup_name}")
@@ -201,7 +201,7 @@ def test_different_tables_on_nodes():
    backup_name = new_backup_name()
    node1.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}")

-    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    node2.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}")

@@ -224,7 +224,7 @@ def test_backup_restore_on_single_replica():
    backup_name = new_backup_name()
    node1.query(f"BACKUP DATABASE mydb TO {backup_name}")

-    node1.query("DROP DATABASE mydb NO DELAY")
+    node1.query("DROP DATABASE mydb SYNC")

    # Cannot restore table because it already contains data on other replicas.
    expected_error = "already contains some data"
@@ -243,7 +243,7 @@ def test_backup_restore_on_single_replica():
    )

    # Can restore table with allow_non_empty_tables=true.
-    node1.query("DROP DATABASE mydb NO DELAY")
+    node1.query("DROP DATABASE mydb SYNC")
    node1.query(
        f"RESTORE DATABASE mydb FROM {backup_name} SETTINGS allow_non_empty_tables=true"
    )
@@ -266,7 +266,7 @@ def test_table_with_parts_in_queue_considered_non_empty():
    backup_name = new_backup_name()
    node1.query(f"BACKUP DATABASE mydb TO {backup_name}")

-    node1.query("DROP DATABASE mydb NO DELAY")
+    node1.query("DROP DATABASE mydb SYNC")

    # Cannot restore table because it already contains data on other replicas.
    expected_error = "already contains some data"
@@ -295,7 +295,7 @@ def test_replicated_table_with_not_synced_insert():
    backup_name = new_backup_name()
    node1.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}")

-    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    node1.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}")
    node1.query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster' tbl")
@@ -325,7 +325,7 @@ def test_replicated_table_with_not_synced_merge():
    backup_name = new_backup_name()
    node1.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}")

-    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    node1.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}")
    node1.query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster' tbl")
@@ -348,7 +348,7 @@ def test_replicated_table_restored_into_bigger_cluster():
    backup_name = new_backup_name()
    node1.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}")

-    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    node1.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster3' FROM {backup_name}")
    node1.query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster3' tbl")
@@ -372,7 +372,7 @@ def test_replicated_table_restored_into_smaller_cluster():
    backup_name = new_backup_name()
    node1.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}")

-    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    node1.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster1' FROM {backup_name}")
    assert node1.query("SELECT * FROM tbl ORDER BY x") == TSV([111, 222])
@@ -410,7 +410,7 @@ def test_replicated_database_async():
        TSV([["BACKUP_CREATED", ""]]),
    )

-    node1.query("DROP DATABASE mydb ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP DATABASE mydb ON CLUSTER 'cluster' SYNC")

    [id, status] = node1.query(
        f"RESTORE DATABASE mydb ON CLUSTER 'cluster' FROM {backup_name} ASYNC"
@@ -454,7 +454,7 @@ def test_keeper_value_max_size():
        settings={"backup_restore_keeper_value_max_size": 50},
    )

-    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    node1.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}")
    node1.query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster' tbl")
@@ -541,7 +541,7 @@ def test_async_backups_to_same_destination(interface, on_cluster):
    assert num_failed_backups == len(ids) - 1

    # Check that the succeeded backup is all right.
-    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC")
    node1.query(f"RESTORE TABLE tbl FROM {backup_name}")
    assert node1.query("SELECT * FROM tbl") == "1\n"

@@ -568,7 +568,7 @@ def test_required_privileges():
    node1.query("GRANT BACKUP ON tbl TO u1")
    node1.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}", user="u1")

-    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    expected_error = "necessary to have grant INSERT, CREATE TABLE ON default.tbl2"
    assert expected_error in node1.query_and_get_error(
@@ -582,7 +582,7 @@ def test_required_privileges():

    assert node2.query("SELECT * FROM tbl2") == "100\n"

-    node1.query(f"DROP TABLE tbl2 ON CLUSTER 'cluster' NO DELAY")
+    node1.query(f"DROP TABLE tbl2 ON CLUSTER 'cluster' SYNC")
    node1.query("REVOKE ALL FROM u1")

    expected_error = "necessary to have grant INSERT, CREATE TABLE ON default.tbl"
@@ -703,7 +703,7 @@ def test_projection():
    backup_name = new_backup_name()
    node1.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}")

-    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query(f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    assert (
        node1.query(
@@ -755,7 +755,7 @@ def test_replicated_table_with_not_synced_def():
    backup_name = new_backup_name()
    node2.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}")

-    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    # But synced after RESTORE anyway
    node1.query(
@@ -768,7 +768,7 @@ def test_replicated_table_with_not_synced_def():
        "SELECT name, type FROM system.columns WHERE database='default' AND table='tbl'"
    ) == TSV([["x", "String"], ["y", "String"]])

-    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    node2.query(
        f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name} SETTINGS replica_num_in_backup=2"
@@ -795,7 +795,7 @@ def test_table_in_replicated_database_with_not_synced_def():
    backup_name = new_backup_name()
    node2.query(f"BACKUP DATABASE mydb ON CLUSTER 'cluster' TO {backup_name}")

-    node1.query("DROP DATABASE mydb ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP DATABASE mydb ON CLUSTER 'cluster' SYNC")

    # But synced after RESTORE anyway
    node1.query(
@@ -808,7 +808,7 @@ def test_table_in_replicated_database_with_not_synced_def():
        "SELECT name, type FROM system.columns WHERE database='mydb' AND table='tbl'"
    ) == TSV([["x", "String"], ["y", "String"]])

-    node1.query("DROP DATABASE mydb ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP DATABASE mydb ON CLUSTER 'cluster' SYNC")

    node2.query(
        f"RESTORE DATABASE mydb ON CLUSTER 'cluster' FROM {backup_name} SETTINGS replica_num_in_backup=2"
@@ -870,7 +870,7 @@ def test_mutation():
    assert has_mutation_in_backup("0000000002", backup_name, "default", "tbl")
    assert not has_mutation_in_backup("0000000003", backup_name, "default", "tbl")

-    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC")

    node1.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}")

@@ -1006,7 +1006,7 @@ def test_stop_other_host_during_backup(kill):
    node2.start_clickhouse()

    if status == "BACKUP_CREATED":
-        node1.query("DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+        node1.query("DROP TABLE tbl ON CLUSTER 'cluster' SYNC")
        node1.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}")
        assert node1.query("SELECT * FROM tbl ORDER BY x") == TSV([3, 5])
    elif status == "BACKUP_FAILED":
@@ -62,8 +62,8 @@ def drop_after_test():
    try:
        yield
    finally:
-        node0.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster' NO DELAY")
+        node0.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster' SYNC")
-        node0.query("DROP DATABASE IF EXISTS mydb ON CLUSTER 'cluster' NO DELAY")
+        node0.query("DROP DATABASE IF EXISTS mydb ON CLUSTER 'cluster' SYNC")


backup_id_counter = 0
@@ -95,7 +95,7 @@ def test_replicated_table():
    backup_name = new_backup_name()
    node0.query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}")

-    node0.query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+    node0.query(f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC")
    node0.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}")
    node0.query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster' tbl")

@@ -131,7 +131,7 @@ def test_concurrent_backups_on_same_node():
    ) == TSV([["BACKUP_CREATED", ""]] * num_concurrent_backups)

    for backup_name in backup_names:
-        node0.query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+        node0.query(f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC")
        node0.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}")
        node0.query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster' tbl")
        for i in range(num_nodes):
@@ -166,7 +166,7 @@ def test_concurrent_backups_on_different_nodes():
    ) == TSV([["BACKUP_CREATED", ""]])

    for i in range(num_concurrent_backups):
-        nodes[i].query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+        nodes[i].query(f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC")
        nodes[i].query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_names[i]}")
        nodes[i].query("SYSTEM SYNC REPLICA ON CLUSTER 'cluster' tbl")
        for j in range(num_nodes):
@@ -214,7 +214,7 @@ def test_create_or_drop_tables_during_backup(db_engine, table_engine):
        while time.time() < end_time:
            table_name = f"mydb.tbl{randint(1, num_nodes)}"
            node = nodes[randint(0, num_nodes - 1)]
-            node.query(f"DROP TABLE IF EXISTS {table_name} NO DELAY")
+            node.query(f"DROP TABLE IF EXISTS {table_name} SYNC")

    def rename_tables():
        while time.time() < end_time:
@@ -229,7 +229,7 @@ def test_create_or_drop_tables_during_backup(db_engine, table_engine):
        while time.time() < end_time:
            table_name = f"mydb.tbl{randint(1, num_nodes)}"
            node = nodes[randint(0, num_nodes - 1)]
-            node.query(f"TRUNCATE TABLE IF EXISTS {table_name} NO DELAY")
+            node.query(f"TRUNCATE TABLE IF EXISTS {table_name} SYNC")

    def make_backups():
        ids = []
@@ -320,8 +320,8 @@ def test_kill_mutation_during_backup():
            TSV([["BACKUP_CREATED", ""]]),
        )

-        node0.query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+        node0.query(f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC")
        node0.query(f"RESTORE TABLE tbl ON CLUSTER 'cluster' FROM {backup_name}")

        if n != repeat_count - 1:
-            node0.query(f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY")
+            node0.query(f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC")
@@ -84,7 +84,7 @@ def drop_after_test():
        yield
    finally:
        node0.query(
-            "DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster' NO DELAY",
+            "DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster' SYNC",
            settings={
                "distributed_ddl_task_timeout": 360,
            },
@@ -154,7 +154,7 @@ def test_concurrent_backups_on_same_node():
    # This restore part is added to confirm creating an internal backup & restore work
    # even when a concurrent backup is stopped
    nodes[0].query(
-        f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY",
+        f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC",
        settings={
            "distributed_ddl_task_timeout": 360,
        },
@@ -206,7 +206,7 @@ def test_concurrent_restores_on_same_node():
    nodes[0].query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}")

    nodes[0].query(
-        f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY",
+        f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC",
        settings={
            "distributed_ddl_task_timeout": 360,
        },
@@ -251,7 +251,7 @@ def test_concurrent_restores_on_different_node():
    nodes[0].query(f"BACKUP TABLE tbl ON CLUSTER 'cluster' TO {backup_name}")

    nodes[0].query(
-        f"DROP TABLE tbl ON CLUSTER 'cluster' NO DELAY",
+        f"DROP TABLE tbl ON CLUSTER 'cluster' SYNC",
        settings={
            "distributed_ddl_task_timeout": 360,
        },
@@ -37,7 +37,7 @@ def new_backup_name():
def check_backup_and_restore(storage_policy, backup_destination, size=1000):
    node.query(
        f"""
-        DROP TABLE IF EXISTS data NO DELAY;
+        DROP TABLE IF EXISTS data SYNC;
        CREATE TABLE data (key Int, value String, array Array(String)) Engine=MergeTree() ORDER BY tuple() SETTINGS storage_policy='{storage_policy}';
        INSERT INTO data SELECT * FROM generateRandom('key Int, value String, array Array(String)') LIMIT {size};
        BACKUP TABLE data TO {backup_destination};
@@ -47,8 +47,8 @@ def check_backup_and_restore(storage_policy, backup_destination, size=1000):
            (SELECT count(), sum(sipHash64(*)) FROM data_restored),
            'Data does not matched after BACKUP/RESTORE'
        );
-        DROP TABLE data NO DELAY;
+        DROP TABLE data SYNC;
-        DROP TABLE data_restored NO DELAY;
+        DROP TABLE data_restored SYNC;
        """
    )

@@ -25,7 +25,7 @@ def start_cluster():


def test_concurrent_backups(start_cluster):
-    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+    node.query("DROP TABLE IF EXISTS s3_test SYNC")
    columns = [f"column_{i} UInt64" for i in range(1000)]
    columns_str = ", ".join(columns)
    node.query(
@@ -390,7 +390,7 @@ def test_merge_tree_setting_override(start_cluster):

    node.query(
        f"""
-        DROP TABLE IF EXISTS {TABLE_NAME} NO DELAY;
+        DROP TABLE IF EXISTS {TABLE_NAME} SYNC;
        CREATE TABLE {TABLE_NAME} (a Int32)
        ENGINE = MergeTree()
        ORDER BY tuple()
@@ -412,7 +412,7 @@ def test_merge_tree_setting_override(start_cluster):

    node.query(
        f"""
-        DROP TABLE IF EXISTS {TABLE_NAME} NO DELAY;
+        DROP TABLE IF EXISTS {TABLE_NAME} SYNC;
        CREATE TABLE {TABLE_NAME} (a Int32)
        ENGINE = MergeTree()
        ORDER BY tuple()
@@ -30,7 +30,7 @@ def cleanup_after_test():
    try:
        yield
    finally:
-        node.query("DROP TABLE IF EXISTS encrypted_test NO DELAY")
+        node.query("DROP TABLE IF EXISTS encrypted_test SYNC")


@pytest.mark.parametrize(
@@ -294,4 +294,4 @@ def test_restart():

    assert node.query(select_query) == "(0,'data'),(1,'data')"

-    node.query("DROP TABLE encrypted_test NO DELAY;")
+    node.query("DROP TABLE encrypted_test SYNC;")
@@ -44,7 +44,7 @@ def cleanup_after_test():
    try:
        yield
    finally:
-        node1.query("DROP TABLE IF EXISTS encrypted_test ON CLUSTER 'cluster' NO DELAY")
+        node1.query("DROP TABLE IF EXISTS encrypted_test ON CLUSTER 'cluster' SYNC")


def create_table(
@@ -51,4 +51,4 @@ def test_failed_async_inserts(started_cluster):

    assert node.query(select_query) == "4\n"

-    node.query("DROP TABLE IF EXISTS async_insert_30_10_2022 NO DELAY")
+    node.query("DROP TABLE IF EXISTS async_insert_30_10_2022 SYNC")
@@ -461,7 +461,7 @@ def test_move_replace_partition_to_another_table(cluster):
        == "(512)"
    )

-    azure_query(node, f"DROP TABLE {table_clone_name} NO DELAY")
+    azure_query(node, f"DROP TABLE {table_clone_name} SYNC")
    assert azure_query(node, f"SELECT sum(id) FROM {TABLE_NAME} FORMAT Values") == "(0)"
    assert (
        azure_query(node, f"SELECT count(*) FROM {TABLE_NAME} FORMAT Values")
@@ -470,7 +470,7 @@ def test_move_replace_partition_to_another_table(cluster):

    azure_query(node, f"ALTER TABLE {TABLE_NAME} FREEZE")

-    azure_query(node, f"DROP TABLE {TABLE_NAME} NO DELAY")
+    azure_query(node, f"DROP TABLE {TABLE_NAME} SYNC")


def test_freeze_unfreeze(cluster):
@@ -410,7 +410,7 @@ def test_move_replace_partition_to_another_table(cluster):
        - FILES_OVERHEAD_METADATA_VERSION * 2,
    )

-    node.query("DROP TABLE hdfs_clone NO DELAY")
+    node.query("DROP TABLE hdfs_clone SYNC")
    assert node.query("SELECT sum(id) FROM hdfs_test FORMAT Values") == "(0)"
    assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(16384)"

@@ -138,7 +138,7 @@ def clear_minio(cluster):

def check_no_objects_after_drop(cluster, table_name="s3_test", node_name="node"):
    node = cluster.instances[node_name]
-    node.query(f"DROP TABLE IF EXISTS {table_name} NO DELAY")
+    node.query(f"DROP TABLE IF EXISTS {table_name} SYNC")
    wait_for_delete_s3_objects(cluster, 0, timeout=0)

@@ -506,7 +506,7 @@ def test_move_replace_partition_to_another_table(cluster, node_name):
        - FILES_OVERHEAD_METADATA_VERSION * 2,
    )

-    node.query("DROP TABLE s3_clone NO DELAY")
+    node.query("DROP TABLE s3_clone SYNC")
    assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)"
    assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)"

@@ -528,7 +528,7 @@ def test_move_replace_partition_to_another_table(cluster, node_name):
        - FILES_OVERHEAD_METADATA_VERSION * 2,
    )

-    node.query("DROP TABLE s3_test NO DELAY")
+    node.query("DROP TABLE s3_test SYNC")
    # Backup data should remain in S3.

    wait_for_delete_s3_objects(
@@ -592,7 +592,7 @@ def test_freeze_system_unfreeze(cluster, node_name):
    node.query("TRUNCATE TABLE s3_test")
    wait_for_delete_empty_parts(node, "s3_test")
    wait_for_delete_inactive_parts(node, "s3_test")
-    node.query("DROP TABLE s3_test_removed NO DELAY")
+    node.query("DROP TABLE s3_test_removed SYNC")
    assert (
        len(list_objects(cluster, "data/"))
        == FILES_OVERHEAD
@@ -682,7 +682,7 @@ def test_s3_disk_reads_on_unstable_connection(cluster, node_name):
@pytest.mark.parametrize("node_name", ["node"])
def test_lazy_seek_optimization_for_async_read(cluster, node_name):
    node = cluster.instances[node_name]
-    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+    node.query("DROP TABLE IF EXISTS s3_test SYNC")
    node.query(
        "CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3';"
    )
@@ -698,7 +698,7 @@ def test_lazy_seek_optimization_for_async_read(cluster, node_name):
@pytest.mark.parametrize("node_name", ["node_with_limited_disk"])
def test_cache_with_full_disk_space(cluster, node_name):
    node = cluster.instances[node_name]
-    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+    node.query("DROP TABLE IF EXISTS s3_test SYNC")
    node.query(
        "CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY value SETTINGS storage_policy='s3_with_cache_and_jbod';"
    )
@@ -753,7 +753,7 @@ def test_store_cleanup_disk_s3(cluster, node_name):
def test_cache_setting_compatibility(cluster, node_name):
    node = cluster.instances[node_name]

-    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+    node.query("DROP TABLE IF EXISTS s3_test SYNC")

    node.query(
        "CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3_cache_r', compress_marks=false, compress_primary_key=false;"
@@ -85,7 +85,7 @@ def cluster():
def drop_table(cluster):
    yield
    node = cluster.instances["node"]
-    node.query("DROP TABLE IF EXISTS s3_failover_test NO DELAY")
+    node.query("DROP TABLE IF EXISTS s3_failover_test SYNC")


# S3 request will be failed for an appropriate part file write.
@@ -77,7 +77,7 @@ def test_write_is_cached(cluster, min_rows_for_wide_part, read_requests):
    # stat = get_query_stat(node, select_query)
    # assert stat["S3ReadRequestsCount"] == read_requests # Only .bin files should be accessed from S3.

-    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+    node.query("DROP TABLE IF EXISTS s3_test SYNC")


@pytest.mark.parametrize(
@@ -126,4 +126,4 @@ def test_read_after_cache_is_wiped(
    # stat = get_query_stat(node, select_query)
    # assert stat["S3ReadRequestsCount"] == bin_files

-    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+    node.query("DROP TABLE IF EXISTS s3_test SYNC")
@@ -624,7 +624,7 @@ def test_table_override(started_cluster):
    time.sleep(5)
    query = f"select * from {materialized_database}.{table_name} order by key"
    expected = instance.query(f"select * from {table_name} order by key")
-    instance.query(f"drop table {table_name} no delay")
+    instance.query(f"drop table {table_name} sync")
    assert_eq_with_retry(instance, query, expected)

@@ -40,7 +40,7 @@ def started_cluster():

def drop_table(nodes, table_name):
    for node in nodes:
-        node.query("DROP TABLE IF EXISTS {} NO DELAY".format(table_name))
+        node.query("DROP TABLE IF EXISTS {} SYNC".format(table_name))


def create_table(
@@ -42,7 +42,7 @@ def copy_keys(instance, keys_file_name):


def create_table():
-    node1.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster' SYNC")
    node1.query(
        """
        CREATE TABLE tbl ON CLUSTER 'cluster' (
@@ -40,7 +40,7 @@ def copy_keys(instance, keys_file_name):


def create_table():
-    node1.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster' NO DELAY")
+    node1.query("DROP TABLE IF EXISTS tbl ON CLUSTER 'cluster' SYNC")
    node1.query(
        """
        CREATE TABLE tbl ON CLUSTER 'cluster' (
@@ -111,8 +111,8 @@ def test_hdfs_zero_copy_replication_insert(cluster):
            SHARDS * FILES_OVERHEAD_PER_TABLE + FILES_OVERHEAD_PER_PART_COMPACT,
        )
    finally:
-        node1.query("DROP TABLE IF EXISTS hdfs_test NO DELAY")
+        node1.query("DROP TABLE IF EXISTS hdfs_test SYNC")
-        node2.query("DROP TABLE IF EXISTS hdfs_test NO DELAY")
+        node2.query("DROP TABLE IF EXISTS hdfs_test SYNC")


@pytest.mark.parametrize(
@@ -173,7 +173,7 @@ def test_hdfs_zero_copy_replication_single_move(cluster, storage_policy, init_ob
            == "(10),(11)"
        )
    finally:
-        node1.query("DROP TABLE IF EXISTS single_node_move_test NO DELAY")
+        node1.query("DROP TABLE IF EXISTS single_node_move_test SYNC")


@pytest.mark.parametrize(
@@ -244,8 +244,8 @@ def test_hdfs_zero_copy_replication_move(cluster, storage_policy, init_objects):
            cluster, "/clickhouse1", init_objects + FILES_OVERHEAD_PER_PART_COMPACT
        )
    finally:
-        node1.query("DROP TABLE IF EXISTS move_test NO DELAY")
+        node1.query("DROP TABLE IF EXISTS move_test SYNC")
-        node2.query("DROP TABLE IF EXISTS move_test NO DELAY")
+        node2.query("DROP TABLE IF EXISTS move_test SYNC")


@pytest.mark.parametrize(("storage_policy"), ["hybrid", "tiered", "tiered_copy"])
@@ -282,8 +282,8 @@ def test_hdfs_zero_copy_with_ttl_move(cluster, storage_policy):
            == "(10),(11)"
        )
    finally:
-        node1.query("DROP TABLE IF EXISTS ttl_move_test NO DELAY")
+        node1.query("DROP TABLE IF EXISTS ttl_move_test SYNC")
-        node2.query("DROP TABLE IF EXISTS ttl_move_test NO DELAY")
+        node2.query("DROP TABLE IF EXISTS ttl_move_test SYNC")


def test_hdfs_zero_copy_with_ttl_delete(cluster):
@@ -318,5 +318,5 @@ def test_hdfs_zero_copy_with_ttl_delete(cluster):
            == "(11)"
        )
    finally:
-        node1.query("DROP TABLE IF EXISTS ttl_delete_test NO DELAY")
+        node1.query("DROP TABLE IF EXISTS ttl_delete_test SYNC")
-        node2.query("DROP TABLE IF EXISTS ttl_delete_test NO DELAY")
+        node2.query("DROP TABLE IF EXISTS ttl_delete_test SYNC")
@@ -37,7 +37,7 @@ def started_cluster():

def drop_table(nodes, table_name):
    for node in nodes:
-        node.query("DROP TABLE IF EXISTS {} NO DELAY".format(table_name))
+        node.query("DROP TABLE IF EXISTS {} SYNC".format(table_name))


# Create table with default zookeeper.
@@ -56,7 +56,7 @@ def test_s3_with_https(cluster, policy):
        == "(0,'data'),(1,'data')"
    )

-    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+    node.query("DROP TABLE IF EXISTS s3_test SYNC")

    if policy.find("proxy") != -1:
        check_proxy_logs(cluster, "proxy1")
@@ -72,7 +72,7 @@ def test_s3_with_proxy_list(cluster, policy):
        == "(0,'data'),(1,'data')"
    )

-    node.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+    node.query("DROP TABLE IF EXISTS s3_test SYNC")

    for proxy in ["proxy1", "proxy2"]:
        check_proxy_logs(cluster, proxy, ["PUT", "GET"])
@@ -149,8 +149,8 @@ def test_s3_zero_copy_replication(started_cluster, policy):
    # Based on version 21.x - after cleanup - only one merged part
    wait_for_large_objects_count(cluster, 1, timeout=60)

-    node1.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+    node1.query("DROP TABLE IF EXISTS s3_test SYNC")
-    node2.query("DROP TABLE IF EXISTS s3_test NO DELAY")
+    node2.query("DROP TABLE IF EXISTS s3_test SYNC")


@pytest.mark.skip(reason="Test is flaky (and never was stable)")
@@ -239,8 +239,8 @@ def test_s3_zero_copy_on_hybrid_storage(started_cluster):
        == "(0,'data'),(1,'data')"
    )

-    node1.query("DROP TABLE IF EXISTS hybrid_test NO DELAY")
+    node1.query("DROP TABLE IF EXISTS hybrid_test SYNC")
-    node2.query("DROP TABLE IF EXISTS hybrid_test NO DELAY")
+    node2.query("DROP TABLE IF EXISTS hybrid_test SYNC")


def insert_data_time(node, table, number_of_mb, time, start=0):
@@ -275,8 +275,8 @@ def test_s3_zero_copy_with_ttl_move(
    node1 = cluster.instances["node1"]
    node2 = cluster.instances["node2"]

-    node1.query("DROP TABLE IF EXISTS ttl_move_test NO DELAY")
+    node1.query("DROP TABLE IF EXISTS ttl_move_test SYNC")
-    node2.query("DROP TABLE IF EXISTS ttl_move_test NO DELAY")
+    node2.query("DROP TABLE IF EXISTS ttl_move_test SYNC")

    for i in range(iterations):
        node1.query(
@@ -325,8 +325,8 @@ def test_s3_zero_copy_with_ttl_move(
        == "(10),(11)"
    )

-    node1.query("DROP TABLE IF EXISTS ttl_move_test NO DELAY")
+    node1.query("DROP TABLE IF EXISTS ttl_move_test SYNC")
-    node2.query("DROP TABLE IF EXISTS ttl_move_test NO DELAY")
+    node2.query("DROP TABLE IF EXISTS ttl_move_test SYNC")


@pytest.mark.parametrize(
@@ -340,8 +340,8 @@ def test_s3_zero_copy_with_ttl_delete(started_cluster, large_data, iterations):
    node1 = cluster.instances["node1"]
    node2 = cluster.instances["node2"]

-    node1.query("DROP TABLE IF EXISTS ttl_delete_test NO DELAY")
+    node1.query("DROP TABLE IF EXISTS ttl_delete_test SYNC")
-    node2.query("DROP TABLE IF EXISTS ttl_delete_test NO DELAY")
+    node2.query("DROP TABLE IF EXISTS ttl_delete_test SYNC")

    for i in range(iterations):
        node1.query(
@@ -398,8 +398,8 @@ def test_s3_zero_copy_with_ttl_delete(started_cluster, large_data, iterations):
        == "(11)"
    )

-    node1.query("DROP TABLE IF EXISTS ttl_delete_test NO DELAY")
+    node1.query("DROP TABLE IF EXISTS ttl_delete_test SYNC")
-    node2.query("DROP TABLE IF EXISTS ttl_delete_test NO DELAY")
+    node2.query("DROP TABLE IF EXISTS ttl_delete_test SYNC")


def wait_mutations(node, table, seconds):
@@ -438,8 +438,8 @@ def s3_zero_copy_unfreeze_base(cluster, unfreeze_query_template):
    node1 = cluster.instances["node1"]
    node2 = cluster.instances["node2"]

-    node1.query("DROP TABLE IF EXISTS unfreeze_test NO DELAY")
+    node1.query("DROP TABLE IF EXISTS unfreeze_test SYNC")
-    node2.query("DROP TABLE IF EXISTS unfreeze_test NO DELAY")
+    node2.query("DROP TABLE IF EXISTS unfreeze_test SYNC")

    node1.query(
        """
@@ -489,8 +489,8 @@ def s3_zero_copy_unfreeze_base(cluster, unfreeze_query_template):

    check_objects_not_exisis(cluster, objects12)

-    node1.query("DROP TABLE IF EXISTS unfreeze_test NO DELAY")
+    node1.query("DROP TABLE IF EXISTS unfreeze_test SYNC")
-    node2.query("DROP TABLE IF EXISTS unfreeze_test NO DELAY")
+    node2.query("DROP TABLE IF EXISTS unfreeze_test SYNC")


def test_s3_zero_copy_unfreeze_alter(started_cluster):
@@ -505,8 +505,8 @@ def s3_zero_copy_drop_detached(cluster, unfreeze_query_template):
    node1 = cluster.instances["node1"]
    node2 = cluster.instances["node2"]

-    node1.query("DROP TABLE IF EXISTS drop_detached_test NO DELAY")
+    node1.query("DROP TABLE IF EXISTS drop_detached_test SYNC")
-    node2.query("DROP TABLE IF EXISTS drop_detached_test NO DELAY")
+    node2.query("DROP TABLE IF EXISTS drop_detached_test SYNC")

    node1.query(
        """
||||||
@ -600,8 +600,8 @@ def test_s3_zero_copy_concurrent_merge(started_cluster):
|
|||||||
node1 = cluster.instances["node1"]
|
node1 = cluster.instances["node1"]
|
||||||
node2 = cluster.instances["node2"]
|
node2 = cluster.instances["node2"]
|
||||||
|
|
||||||
node1.query("DROP TABLE IF EXISTS concurrent_merge NO DELAY")
|
node1.query("DROP TABLE IF EXISTS concurrent_merge SYNC")
|
||||||
node2.query("DROP TABLE IF EXISTS concurrent_merge NO DELAY")
|
node2.query("DROP TABLE IF EXISTS concurrent_merge SYNC")
|
||||||
|
|
||||||
for node in (node1, node2):
|
for node in (node1, node2):
|
||||||
node.query(
|
node.query(
|
||||||
@ -647,8 +647,8 @@ def test_s3_zero_copy_keeps_data_after_mutation(started_cluster):
|
|||||||
node1 = cluster.instances["node1"]
|
node1 = cluster.instances["node1"]
|
||||||
node2 = cluster.instances["node2"]
|
node2 = cluster.instances["node2"]
|
||||||
|
|
||||||
node1.query("DROP TABLE IF EXISTS zero_copy_mutation NO DELAY")
|
node1.query("DROP TABLE IF EXISTS zero_copy_mutation SYNC")
|
||||||
node2.query("DROP TABLE IF EXISTS zero_copy_mutation NO DELAY")
|
node2.query("DROP TABLE IF EXISTS zero_copy_mutation SYNC")
|
||||||
|
|
||||||
node1.query(
|
node1.query(
|
||||||
"""
|
"""
|
||||||
|
@ -94,7 +94,7 @@ def nats_cluster():
|
|||||||
def nats_setup_teardown():
|
def nats_setup_teardown():
|
||||||
print("NATS is available - running test")
|
print("NATS is available - running test")
|
||||||
yield # run test
|
yield # run test
|
||||||
instance.query("DROP DATABASE test NO DELAY")
|
instance.query("DROP DATABASE test SYNC")
|
||||||
instance.query("CREATE DATABASE test")
|
instance.query("CREATE DATABASE test")
|
||||||
|
|
||||||
|
|
||||||
|
@ -179,7 +179,7 @@ def test_initial_load_from_snapshot(started_cluster):
|
|||||||
|
|
||||||
cursor.execute("DROP TABLE postgresql_replica;")
|
cursor.execute("DROP TABLE postgresql_replica;")
|
||||||
postgresql_replica_check_result(result, True)
|
postgresql_replica_check_result(result, True)
|
||||||
instance.query(f"DROP TABLE test.postgresql_replica NO DELAY")
|
instance.query(f"DROP TABLE test.postgresql_replica SYNC")
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.timeout(320)
|
@pytest.mark.timeout(320)
|
||||||
@ -216,7 +216,7 @@ def test_no_connection_at_startup(started_cluster):
|
|||||||
result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;")
|
result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;")
|
||||||
cursor.execute("DROP TABLE postgresql_replica;")
|
cursor.execute("DROP TABLE postgresql_replica;")
|
||||||
postgresql_replica_check_result(result, True)
|
postgresql_replica_check_result(result, True)
|
||||||
instance.query(f"DROP TABLE test.postgresql_replica NO DELAY")
|
instance.query(f"DROP TABLE test.postgresql_replica SYNC")
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.timeout(320)
|
@pytest.mark.timeout(320)
|
||||||
@ -255,7 +255,7 @@ def test_detach_attach_is_ok(started_cluster):
|
|||||||
|
|
||||||
cursor.execute("DROP TABLE postgresql_replica;")
|
cursor.execute("DROP TABLE postgresql_replica;")
|
||||||
postgresql_replica_check_result(result, True)
|
postgresql_replica_check_result(result, True)
|
||||||
instance.query(f"DROP TABLE test.postgresql_replica NO DELAY")
|
instance.query(f"DROP TABLE test.postgresql_replica SYNC")
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.timeout(320)
|
@pytest.mark.timeout(320)
|
||||||
@ -309,7 +309,7 @@ def test_replicating_insert_queries(started_cluster):
|
|||||||
result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;")
|
result = instance.query("SELECT * FROM test.postgresql_replica ORDER BY key;")
|
||||||
cursor.execute("DROP TABLE postgresql_replica;")
|
cursor.execute("DROP TABLE postgresql_replica;")
|
||||||
postgresql_replica_check_result(result, True)
|
postgresql_replica_check_result(result, True)
|
||||||
instance.query(f"DROP TABLE test.postgresql_replica NO DELAY")
|
instance.query(f"DROP TABLE test.postgresql_replica SYNC")
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.timeout(320)
|
@pytest.mark.timeout(320)
|
||||||
@ -667,7 +667,7 @@ def test_virtual_columns(started_cluster):
|
|||||||
)
|
)
|
||||||
print(result)
|
print(result)
|
||||||
cursor.execute("DROP TABLE postgresql_replica;")
|
cursor.execute("DROP TABLE postgresql_replica;")
|
||||||
instance.query(f"DROP TABLE test.postgresql_replica NO DELAY")
|
instance.query(f"DROP TABLE test.postgresql_replica SYNC")
|
||||||
|
|
||||||
|
|
||||||
def test_abrupt_connection_loss_while_heavy_replication(started_cluster):
|
def test_abrupt_connection_loss_while_heavy_replication(started_cluster):
|
||||||
@ -702,7 +702,7 @@ def test_abrupt_connection_loss_while_heavy_replication(started_cluster):
|
|||||||
|
|
||||||
result = instance.query("SELECT count() FROM test.postgresql_replica")
|
result = instance.query("SELECT count() FROM test.postgresql_replica")
|
||||||
print(result) # Just debug
|
print(result) # Just debug
|
||||||
instance.query(f"DROP TABLE test.postgresql_replica NO DELAY")
|
instance.query(f"DROP TABLE test.postgresql_replica SYNC")
|
||||||
|
|
||||||
|
|
||||||
def test_abrupt_server_restart_while_heavy_replication(started_cluster):
|
def test_abrupt_server_restart_while_heavy_replication(started_cluster):
|
||||||
@ -720,7 +720,7 @@ def test_abrupt_server_restart_while_heavy_replication(started_cluster):
|
|||||||
create_postgres_table(cursor, table_name)
|
create_postgres_table(cursor, table_name)
|
||||||
|
|
||||||
instance.query(f"INSERT INTO postgres_database.{table_name} SELECT -1, 1")
|
instance.query(f"INSERT INTO postgres_database.{table_name} SELECT -1, 1")
|
||||||
instance.query(f"DROP TABLE IF EXISTS test.{table_name} NO DELAY")
|
instance.query(f"DROP TABLE IF EXISTS test.{table_name} SYNC")
|
||||||
create_materialized_table(
|
create_materialized_table(
|
||||||
ip=started_cluster.postgres_ip,
|
ip=started_cluster.postgres_ip,
|
||||||
port=started_cluster.postgres_port,
|
port=started_cluster.postgres_port,
|
||||||
@ -747,7 +747,7 @@ def test_abrupt_server_restart_while_heavy_replication(started_cluster):
|
|||||||
|
|
||||||
result = instance.query(f"SELECT count() FROM test.{table_name}")
|
result = instance.query(f"SELECT count() FROM test.{table_name}")
|
||||||
print(result) # Just debug
|
print(result) # Just debug
|
||||||
instance.query(f"DROP TABLE test.{table_name} NO DELAY")
|
instance.query(f"DROP TABLE test.{table_name} SYNC")
|
||||||
|
|
||||||
|
|
||||||
def test_drop_table_immediately(started_cluster):
|
def test_drop_table_immediately(started_cluster):
|
||||||
@ -771,7 +771,7 @@ def test_drop_table_immediately(started_cluster):
|
|||||||
ip=started_cluster.postgres_ip, port=started_cluster.postgres_port
|
ip=started_cluster.postgres_ip, port=started_cluster.postgres_port
|
||||||
)
|
)
|
||||||
check_tables_are_synchronized("postgresql_replica")
|
check_tables_are_synchronized("postgresql_replica")
|
||||||
instance.query(f"DROP TABLE test.postgresql_replica NO DELAY")
|
instance.query(f"DROP TABLE test.postgresql_replica SYNC")
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
@ -95,7 +95,7 @@ def rabbitmq_cluster():
|
|||||||
def rabbitmq_setup_teardown():
|
def rabbitmq_setup_teardown():
|
||||||
print("RabbitMQ is available - running test")
|
print("RabbitMQ is available - running test")
|
||||||
yield # run test
|
yield # run test
|
||||||
instance.query("DROP DATABASE test NO DELAY")
|
instance.query("DROP DATABASE test SYNC")
|
||||||
instance.query("CREATE DATABASE test")
|
instance.query("CREATE DATABASE test")
|
||||||
|
|
||||||
|
|
||||||
@ -1097,10 +1097,10 @@ def test_rabbitmq_overloaded_insert(rabbitmq_cluster):
|
|||||||
|
|
||||||
instance.query(
|
instance.query(
|
||||||
"""
|
"""
|
||||||
DROP TABLE test.consumer_overload NO DELAY;
|
DROP TABLE test.consumer_overload SYNC;
|
||||||
DROP TABLE test.view_overload NO DELAY;
|
DROP TABLE test.view_overload SYNC;
|
||||||
DROP TABLE test.rabbitmq_consume NO DELAY;
|
DROP TABLE test.rabbitmq_consume SYNC;
|
||||||
DROP TABLE test.rabbitmq_overload NO DELAY;
|
DROP TABLE test.rabbitmq_overload SYNC;
|
||||||
"""
|
"""
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -2745,7 +2745,7 @@ def test_rabbitmq_drop_mv(rabbitmq_cluster):
|
|||||||
result = instance.query("SELECT * FROM test.view ORDER BY key")
|
result = instance.query("SELECT * FROM test.view ORDER BY key")
|
||||||
rabbitmq_check_result(result, True)
|
rabbitmq_check_result(result, True)
|
||||||
|
|
||||||
instance.query("DROP VIEW test.consumer NO DELAY")
|
instance.query("DROP VIEW test.consumer SYNC")
|
||||||
time.sleep(10)
|
time.sleep(10)
|
||||||
for i in range(50, 60):
|
for i in range(50, 60):
|
||||||
channel.basic_publish(
|
channel.basic_publish(
|
||||||
|
@ -151,7 +151,7 @@ def test_rule_with_invalid_destination(started_cluster, name, engine, alter):
|
|||||||
get_command("TTL d1 TO DISK 'unknown'", "small_jbod_with_external")
|
get_command("TTL d1 TO DISK 'unknown'", "small_jbod_with_external")
|
||||||
)
|
)
|
||||||
|
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
if alter:
|
if alter:
|
||||||
node1.query(get_command(None, "small_jbod_with_external"))
|
node1.query(get_command(None, "small_jbod_with_external"))
|
||||||
@ -161,7 +161,7 @@ def test_rule_with_invalid_destination(started_cluster, name, engine, alter):
|
|||||||
get_command("TTL d1 TO VOLUME 'unknown'", "small_jbod_with_external")
|
get_command("TTL d1 TO VOLUME 'unknown'", "small_jbod_with_external")
|
||||||
)
|
)
|
||||||
|
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
if alter:
|
if alter:
|
||||||
node1.query(get_command(None, "only_jbod2"))
|
node1.query(get_command(None, "only_jbod2"))
|
||||||
@ -169,7 +169,7 @@ def test_rule_with_invalid_destination(started_cluster, name, engine, alter):
|
|||||||
with pytest.raises(QueryRuntimeException):
|
with pytest.raises(QueryRuntimeException):
|
||||||
node1.query(get_command("TTL d1 TO DISK 'jbod1'", "only_jbod2"))
|
node1.query(get_command("TTL d1 TO DISK 'jbod1'", "only_jbod2"))
|
||||||
|
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
if alter:
|
if alter:
|
||||||
node1.query(get_command(None, "only_jbod2"))
|
node1.query(get_command(None, "only_jbod2"))
|
||||||
@ -178,7 +178,7 @@ def test_rule_with_invalid_destination(started_cluster, name, engine, alter):
|
|||||||
node1.query(get_command("TTL d1 TO VOLUME 'external'", "only_jbod2"))
|
node1.query(get_command("TTL d1 TO VOLUME 'external'", "only_jbod2"))
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -253,7 +253,7 @@ def test_inserts_to_disk_work(started_cluster, name, engine, positive):
|
|||||||
|
|
||||||
finally:
|
finally:
|
||||||
try:
|
try:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@ -330,7 +330,7 @@ def test_moves_work_after_storage_policy_change(started_cluster, name, engine):
|
|||||||
)
|
)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -418,7 +418,7 @@ def test_moves_to_disk_work(started_cluster, name, engine, positive):
|
|||||||
)
|
)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -489,7 +489,7 @@ def test_moves_to_volume_work(started_cluster, name, engine):
|
|||||||
)
|
)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -570,7 +570,7 @@ def test_inserts_to_volume_work(started_cluster, name, engine, positive):
|
|||||||
)
|
)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -649,7 +649,7 @@ def test_moves_to_disk_eventually_work(started_cluster, name, engine):
|
|||||||
used_disks = get_used_disks_for_table(node1, name)
|
used_disks = get_used_disks_for_table(node1, name)
|
||||||
assert set(used_disks) == {"jbod1"}
|
assert set(used_disks) == {"jbod1"}
|
||||||
|
|
||||||
node1.query("DROP TABLE {} NO DELAY".format(name_temp))
|
node1.query("DROP TABLE {} SYNC".format(name_temp))
|
||||||
|
|
||||||
wait_parts_mover(node1, name)
|
wait_parts_mover(node1, name)
|
||||||
|
|
||||||
@ -661,8 +661,8 @@ def test_moves_to_disk_eventually_work(started_cluster, name, engine):
|
|||||||
)
|
)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name_temp))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name_temp))
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
|
|
||||||
def test_replicated_download_ttl_info(started_cluster):
|
def test_replicated_download_ttl_info(started_cluster):
|
||||||
@ -702,7 +702,7 @@ def test_replicated_download_ttl_info(started_cluster):
|
|||||||
finally:
|
finally:
|
||||||
for node in (node1, node2):
|
for node in (node1, node2):
|
||||||
try:
|
try:
|
||||||
node.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
except:
|
except:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
@ -818,7 +818,7 @@ def test_merges_to_disk_work(started_cluster, name, engine, positive):
|
|||||||
)
|
)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -932,8 +932,8 @@ def test_merges_with_full_disk_work(started_cluster, name, engine):
|
|||||||
)
|
)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name_temp))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name_temp))
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -1035,7 +1035,7 @@ def test_moves_after_merges_work(started_cluster, name, engine, positive):
|
|||||||
)
|
)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -1150,7 +1150,7 @@ def test_ttls_do_not_work_after_alter(started_cluster, name, engine, positive, b
|
|||||||
)
|
)
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -1255,7 +1255,7 @@ def test_materialize_ttl_in_partition(started_cluster, name, engine):
|
|||||||
).strip() == str(len(data))
|
).strip() == str(len(data))
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -1378,7 +1378,7 @@ def test_alter_multiple_ttls(started_cluster, name, engine, positive):
|
|||||||
assert rows_count == 3
|
assert rows_count == 3
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name))
|
node1.query("DROP TABLE IF EXISTS {name} SYNC".format(name=name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -1526,7 +1526,7 @@ def test_concurrent_alter_with_ttl_move(started_cluster, name, engine):
|
|||||||
assert node1.query("SELECT 1") == "1\n"
|
assert node1.query("SELECT 1") == "1\n"
|
||||||
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "150\n"
|
assert node1.query("SELECT COUNT() FROM {}".format(name)) == "150\n"
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name))
|
node1.query("DROP TABLE IF EXISTS {name} SYNC".format(name=name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.skip(reason="Flacky test")
|
@pytest.mark.skip(reason="Flacky test")
|
||||||
@ -1626,7 +1626,7 @@ def test_double_move_while_select(started_cluster, name, positive):
|
|||||||
).splitlines() == ["1", "2", "3", "4"]
|
).splitlines() == ["1", "2", "3", "4"]
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name))
|
node1.query("DROP TABLE IF EXISTS {name} SYNC".format(name=name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -1745,7 +1745,7 @@ def test_alter_with_merge_work(started_cluster, name, engine, positive):
|
|||||||
assert node1.query("SELECT count() FROM {name}".format(name=name)) == "6\n"
|
assert node1.query("SELECT count() FROM {name}".format(name=name)) == "6\n"
|
||||||
|
|
||||||
finally:
|
finally:
|
||||||
node1.query("DROP TABLE IF EXISTS {name} NO DELAY".format(name=name))
|
node1.query("DROP TABLE IF EXISTS {name} SYNC".format(name=name))
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
@ -1826,7 +1826,7 @@ def test_disabled_ttl_move_on_insert(started_cluster, name, dest_type, engine):
|
|||||||
|
|
||||||
finally:
|
finally:
|
||||||
try:
|
try:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@ -1909,7 +1909,7 @@ def test_ttl_move_if_exists(started_cluster, name, dest_type):
|
|||||||
|
|
||||||
finally:
|
finally:
|
||||||
try:
|
try:
|
||||||
node1.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node1.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
node2.query("DROP TABLE IF EXISTS {} NO DELAY".format(name))
|
node2.query("DROP TABLE IF EXISTS {} SYNC".format(name))
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
|
@ -55,7 +55,7 @@ def started_cluster():
|
|||||||
|
|
||||||
def drop_table(nodes, table_name):
|
def drop_table(nodes, table_name):
|
||||||
for node in nodes:
|
for node in nodes:
|
||||||
node.query("DROP TABLE IF EXISTS {} NO DELAY".format(table_name))
|
node.query("DROP TABLE IF EXISTS {} SYNC".format(table_name))
|
||||||
|
|
||||||
|
|
||||||
# Column TTL works only with wide parts, because it's very expensive to apply it for compact parts
|
# Column TTL works only with wide parts, because it's very expensive to apply it for compact parts
|
||||||
|
@ -59,7 +59,7 @@ timeout $TIMEOUT bash -c thread4 2> /dev/null &
|
|||||||
|
|
||||||
wait
|
wait
|
||||||
|
|
||||||
echo "DROP TABLE concurrent_alter_column NO DELAY" | ${CLICKHOUSE_CLIENT} # NO DELAY has effect only for Atomic database
|
echo "DROP TABLE concurrent_alter_column SYNC" | ${CLICKHOUSE_CLIENT} # SYNC has effect only for Atomic database
|
||||||
|
|
||||||
# Wait for alters and check for deadlocks (in case of deadlock this loop will not finish)
|
# Wait for alters and check for deadlocks (in case of deadlock this loop will not finish)
|
||||||
while true; do
|
while true; do
|
||||||
|
@ -70,8 +70,8 @@ timeout $TIMEOUT bash -c thread5 2> /dev/null &
|
|||||||
|
|
||||||
wait
|
wait
|
||||||
|
|
||||||
echo "DROP TABLE src NO DELAY" | ${CLICKHOUSE_CLIENT}
|
echo "DROP TABLE src SYNC" | ${CLICKHOUSE_CLIENT}
|
||||||
echo "DROP TABLE dst NO DELAY" | ${CLICKHOUSE_CLIENT}
|
echo "DROP TABLE dst SYNC" | ${CLICKHOUSE_CLIENT}
|
||||||
sleep 5
|
sleep 5
|
||||||
|
|
||||||
# Check for deadlocks
|
# Check for deadlocks
|
||||||
|
@ -6,29 +6,29 @@ DROP TABLE IF EXISTS mt;
|
|||||||
CREATE TABLE mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple();
|
CREATE TABLE mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple();
|
||||||
|
|
||||||
SELECT '---WATERMARK---';
|
SELECT '---WATERMARK---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory WATERMARK=INTERVAL '1' SECOND AS SELECT count(a), tumbleStart(wid) AS w_start, tumbleEnd(wid) AS w_end FROM mt GROUP BY tumble(timestamp, INTERVAL '3' SECOND) AS wid;
|
CREATE WINDOW VIEW wv ENGINE Memory WATERMARK=INTERVAL '1' SECOND AS SELECT count(a), tumbleStart(wid) AS w_start, tumbleEnd(wid) AS w_end FROM mt GROUP BY tumble(timestamp, INTERVAL '3' SECOND) AS wid;
|
||||||
|
|
||||||
SELECT '---With w_end---';
|
SELECT '---With w_end---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), tumbleStart(tumble(timestamp, INTERVAL '3' SECOND)) AS w_start, tumbleEnd(wid) AS w_end FROM mt GROUP BY tumble(timestamp, INTERVAL '3' SECOND) AS wid;
|
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), tumbleStart(tumble(timestamp, INTERVAL '3' SECOND)) AS w_start, tumbleEnd(wid) AS w_end FROM mt GROUP BY tumble(timestamp, INTERVAL '3' SECOND) AS wid;
|
||||||
|
|
||||||
SELECT '---WithOut w_end---';
|
SELECT '---WithOut w_end---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), tumbleStart(wid) AS w_start FROM mt GROUP BY tumble(timestamp, INTERVAL '3' SECOND) AS wid;
|
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), tumbleStart(wid) AS w_start FROM mt GROUP BY tumble(timestamp, INTERVAL '3' SECOND) AS wid;
|
||||||
|
|
||||||
SELECT '---WITH---';
|
SELECT '---WITH---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory AS WITH toDateTime('2018-01-01 00:00:00') AS date_time SELECT count(a), tumbleStart(wid) AS w_start, tumbleEnd(wid) AS w_end, date_time FROM mt GROUP BY tumble(timestamp, INTERVAL '3' SECOND) AS wid;
|
CREATE WINDOW VIEW wv ENGINE Memory AS WITH toDateTime('2018-01-01 00:00:00') AS date_time SELECT count(a), tumbleStart(wid) AS w_start, tumbleEnd(wid) AS w_end, date_time FROM mt GROUP BY tumble(timestamp, INTERVAL '3' SECOND) AS wid;
|
||||||
|
|
||||||
SELECT '---WHERE---';
|
SELECT '---WHERE---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), tumbleStart(wid) AS w_start FROM mt WHERE a != 1 GROUP BY tumble(timestamp, INTERVAL '3' SECOND) AS wid;
|
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), tumbleStart(wid) AS w_start FROM mt WHERE a != 1 GROUP BY tumble(timestamp, INTERVAL '3' SECOND) AS wid;
|
||||||
|
|
||||||
SELECT '---ORDER_BY---';
|
SELECT '---ORDER_BY---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), tumbleStart(wid) AS w_start FROM mt WHERE a != 1 GROUP BY tumble(timestamp, INTERVAL '3' SECOND) AS wid ORDER BY w_start;
|
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), tumbleStart(wid) AS w_start FROM mt WHERE a != 1 GROUP BY tumble(timestamp, INTERVAL '3' SECOND) AS wid ORDER BY w_start;
|
||||||
|
|
||||||
SELECT '---With now---';
|
SELECT '---With now---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), tumbleStart(wid) AS w_start, tumbleEnd(tumble(now(), INTERVAL '3' SECOND)) AS w_end FROM mt GROUP BY tumble(now(), INTERVAL '3' SECOND) AS wid;
|
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), tumbleStart(wid) AS w_start, tumbleEnd(tumble(now(), INTERVAL '3' SECOND)) AS w_end FROM mt GROUP BY tumble(now(), INTERVAL '3' SECOND) AS wid;
|
||||||
|
@ -6,29 +6,29 @@ DROP TABLE IF EXISTS mt;
|
|||||||
CREATE TABLE mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple();
|
CREATE TABLE mt(a Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple();
|
||||||
|
|
||||||
SELECT '---WATERMARK---';
|
SELECT '---WATERMARK---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory WATERMARK=INTERVAL '1' SECOND AS SELECT count(a), hopStart(wid) AS w_start, hopEnd(wid) AS w_end FROM mt GROUP BY hop(timestamp, INTERVAL '3' SECOND, INTERVAL '5' SECOND) AS wid;
|
CREATE WINDOW VIEW wv ENGINE Memory WATERMARK=INTERVAL '1' SECOND AS SELECT count(a), hopStart(wid) AS w_start, hopEnd(wid) AS w_end FROM mt GROUP BY hop(timestamp, INTERVAL '3' SECOND, INTERVAL '5' SECOND) AS wid;
|
||||||
|
|
||||||
SELECT '---With w_end---';
|
SELECT '---With w_end---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), hopStart(wid) AS w_start, hopEnd(wid) AS w_end FROM mt GROUP BY hop(timestamp, INTERVAL '3' SECOND, INTERVAL '5' SECOND) AS wid;
|
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), hopStart(wid) AS w_start, hopEnd(wid) AS w_end FROM mt GROUP BY hop(timestamp, INTERVAL '3' SECOND, INTERVAL '5' SECOND) AS wid;
|
||||||
|
|
||||||
SELECT '---WithOut w_end---';
|
SELECT '---WithOut w_end---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), hopStart(wid) AS w_start FROM mt GROUP BY hop(timestamp, INTERVAL '3' SECOND, INTERVAL '5' SECOND) AS wid;
|
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), hopStart(wid) AS w_start FROM mt GROUP BY hop(timestamp, INTERVAL '3' SECOND, INTERVAL '5' SECOND) AS wid;
|
||||||
|
|
||||||
SELECT '---WITH---';
|
SELECT '---WITH---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory AS WITH toDateTime('2018-01-01 00:00:00') AS date_time SELECT count(a), hopStart(wid) AS w_start, hopEnd(wid) AS w_end, date_time FROM mt GROUP BY hop(timestamp, INTERVAL '3' SECOND, INTERVAL '5' SECOND) AS wid;
|
CREATE WINDOW VIEW wv ENGINE Memory AS WITH toDateTime('2018-01-01 00:00:00') AS date_time SELECT count(a), hopStart(wid) AS w_start, hopEnd(wid) AS w_end, date_time FROM mt GROUP BY hop(timestamp, INTERVAL '3' SECOND, INTERVAL '5' SECOND) AS wid;
|
||||||
|
|
||||||
SELECT '---WHERE---';
|
SELECT '---WHERE---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), hopStart(wid) AS w_start FROM mt WHERE a != 1 GROUP BY hop(timestamp, INTERVAL '3' SECOND, INTERVAL '5' SECOND) AS wid;
|
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), hopStart(wid) AS w_start FROM mt WHERE a != 1 GROUP BY hop(timestamp, INTERVAL '3' SECOND, INTERVAL '5' SECOND) AS wid;
|
||||||
|
|
||||||
SELECT '---ORDER_BY---';
|
SELECT '---ORDER_BY---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), hopStart(wid) AS w_start FROM mt WHERE a != 1 GROUP BY hop(timestamp, INTERVAL '3' SECOND, INTERVAL '5' SECOND) AS wid ORDER BY w_start;
|
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), hopStart(wid) AS w_start FROM mt WHERE a != 1 GROUP BY hop(timestamp, INTERVAL '3' SECOND, INTERVAL '5' SECOND) AS wid ORDER BY w_start;
|
||||||
|
|
||||||
SELECT '---With now---';
|
SELECT '---With now---';
|
||||||
DROP TABLE IF EXISTS wv NO DELAY;
|
DROP TABLE IF EXISTS wv SYNC;
|
||||||
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), hopStart(wid) AS w_start, hopEnd(hop(now(), INTERVAL '1' SECOND, INTERVAL '3' SECOND)) as w_end FROM mt GROUP BY hop(now(), INTERVAL '1' SECOND, INTERVAL '3' SECOND) AS wid;
|
CREATE WINDOW VIEW wv ENGINE Memory AS SELECT count(a), hopStart(wid) AS w_start, hopEnd(hop(now(), INTERVAL '1' SECOND, INTERVAL '3' SECOND)) as w_end FROM mt GROUP BY hop(now(), INTERVAL '1' SECOND, INTERVAL '3' SECOND) AS wid;
|
||||||
|
@ -31,7 +31,7 @@ with client(name="client1>", log=log) as client1, client(
|
|||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE IF EXISTS db_01059_event_hop_watch_strict_asc.mt")
|
client1.send("DROP TABLE IF EXISTS db_01059_event_hop_watch_strict_asc.mt")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE IF EXISTS db_01059_event_hop_watch_strict_asc.wv NO DELAY")
|
client1.send("DROP TABLE IF EXISTS db_01059_event_hop_watch_strict_asc.wv SYNC")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
|
||||||
client1.send(
|
client1.send(
|
||||||
@ -71,7 +71,7 @@ with client(name="client1>", log=log) as client1, client(
|
|||||||
if match.groups()[1]:
|
if match.groups()[1]:
|
||||||
client1.send(client1.command)
|
client1.send(client1.command)
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE db_01059_event_hop_watch_strict_asc.wv NO DELAY")
|
client1.send("DROP TABLE db_01059_event_hop_watch_strict_asc.wv SYNC")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE db_01059_event_hop_watch_strict_asc.mt")
|
client1.send("DROP TABLE db_01059_event_hop_watch_strict_asc.mt")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
@ -33,9 +33,7 @@ with client(name="client1>", log=log) as client1, client(
|
|||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE IF EXISTS 01062_window_view_event_hop_watch_asc.mt")
|
client1.send("DROP TABLE IF EXISTS 01062_window_view_event_hop_watch_asc.mt")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send(
|
client1.send("DROP TABLE IF EXISTS 01062_window_view_event_hop_watch_asc.wv SYNC")
|
||||||
"DROP TABLE IF EXISTS 01062_window_view_event_hop_watch_asc.wv NO DELAY"
|
|
||||||
)
|
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
|
||||||
client1.send(
|
client1.send(
|
||||||
@ -77,7 +75,7 @@ with client(name="client1>", log=log) as client1, client(
|
|||||||
if match.groups()[1]:
|
if match.groups()[1]:
|
||||||
client1.send(client1.command)
|
client1.send(client1.command)
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE 01062_window_view_event_hop_watch_asc.wv NO DELAY")
|
client1.send("DROP TABLE 01062_window_view_event_hop_watch_asc.wv SYNC")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE 01062_window_view_event_hop_watch_asc.mt")
|
client1.send("DROP TABLE 01062_window_view_event_hop_watch_asc.mt")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
@ -33,7 +33,7 @@ with client(name="client1>", log=log) as client1, client(
|
|||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE IF EXISTS 01069_window_view_proc_tumble_watch.mt")
|
client1.send("DROP TABLE IF EXISTS 01069_window_view_proc_tumble_watch.mt")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE IF EXISTS 01069_window_view_proc_tumble_watch.wv NO DELAY")
|
client1.send("DROP TABLE IF EXISTS 01069_window_view_proc_tumble_watch.wv SYNC")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
|
||||||
client1.send(
|
client1.send(
|
||||||
@ -67,7 +67,7 @@ with client(name="client1>", log=log) as client1, client(
|
|||||||
if match.groups()[1]:
|
if match.groups()[1]:
|
||||||
client1.send(client1.command)
|
client1.send(client1.command)
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE 01069_window_view_proc_tumble_watch.wv NO DELAY")
|
client1.send("DROP TABLE 01069_window_view_proc_tumble_watch.wv SYNC")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE 01069_window_view_proc_tumble_watch.mt")
|
client1.send("DROP TABLE 01069_window_view_proc_tumble_watch.mt")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
@ -31,9 +31,9 @@ with client(name="client1>", log=log) as client1, client(
|
|||||||
|
|
||||||
client1.send("CREATE DATABASE IF NOT EXISTS 01070_window_view_watch_events")
|
client1.send("CREATE DATABASE IF NOT EXISTS 01070_window_view_watch_events")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE IF EXISTS 01070_window_view_watch_events.mt NO DELAY")
|
client1.send("DROP TABLE IF EXISTS 01070_window_view_watch_events.mt SYNC")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE IF EXISTS 01070_window_view_watch_events.wv NO DELAY")
|
client1.send("DROP TABLE IF EXISTS 01070_window_view_watch_events.wv SYNC")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
|
||||||
client1.send(
|
client1.send(
|
||||||
@ -65,7 +65,7 @@ with client(name="client1>", log=log) as client1, client(
|
|||||||
if match.groups()[1]:
|
if match.groups()[1]:
|
||||||
client1.send(client1.command)
|
client1.send(client1.command)
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE 01070_window_view_watch_events.wv NO DELAY;")
|
client1.send("DROP TABLE 01070_window_view_watch_events.wv SYNC;")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE 01070_window_view_watch_events.mt;")
|
client1.send("DROP TABLE 01070_window_view_watch_events.mt;")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
@ -35,9 +35,9 @@ with client(name="client1>", log=log) as client1, client(
|
|||||||
|
|
||||||
client1.send("CREATE DATABASE IF NOT EXISTS 01078_window_view_alter_query_watch")
|
client1.send("CREATE DATABASE IF NOT EXISTS 01078_window_view_alter_query_watch")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE IF EXISTS 01078_window_view_alter_query_watch.mt NO DELAY")
|
client1.send("DROP TABLE IF EXISTS 01078_window_view_alter_query_watch.mt SYNC")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE IF EXISTS 01078_window_view_alter_query_watch.wv NO DELAY")
|
client1.send("DROP TABLE IF EXISTS 01078_window_view_alter_query_watch.wv SYNC")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
|
||||||
client1.send(
|
client1.send(
|
||||||
@ -89,7 +89,7 @@ with client(name="client1>", log=log) as client1, client(
|
|||||||
if match.groups()[1]:
|
if match.groups()[1]:
|
||||||
client3.send(client3.command)
|
client3.send(client3.command)
|
||||||
client3.expect(prompt)
|
client3.expect(prompt)
|
||||||
client3.send("DROP TABLE 01078_window_view_alter_query_watch.wv NO DELAY;")
|
client3.send("DROP TABLE 01078_window_view_alter_query_watch.wv SYNC;")
|
||||||
client3.expect(prompt)
|
client3.expect(prompt)
|
||||||
client3.send("DROP TABLE 01078_window_view_alter_query_watch.mt;")
|
client3.send("DROP TABLE 01078_window_view_alter_query_watch.mt;")
|
||||||
client3.expect(prompt)
|
client3.expect(prompt)
|
||||||
|
@ -32,7 +32,7 @@ with client(name="client1>", log=log) as client1, client(
|
|||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE IF EXISTS 01082_window_view_watch_limit.mt")
|
client1.send("DROP TABLE IF EXISTS 01082_window_view_watch_limit.mt")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE IF EXISTS 01082_window_view_watch_limit.wv NO DELAY")
|
client1.send("DROP TABLE IF EXISTS 01082_window_view_watch_limit.wv SYNC")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
|
||||||
client1.send(
|
client1.send(
|
||||||
@ -61,7 +61,7 @@ with client(name="client1>", log=log) as client1, client(
|
|||||||
client1.expect("1 row" + end_of_block)
|
client1.expect("1 row" + end_of_block)
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
|
||||||
client1.send("DROP TABLE 01082_window_view_watch_limit.wv NO DELAY")
|
client1.send("DROP TABLE 01082_window_view_watch_limit.wv SYNC")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
client1.send("DROP TABLE 01082_window_view_watch_limit.mt")
|
client1.send("DROP TABLE 01082_window_view_watch_limit.mt")
|
||||||
client1.expect(prompt)
|
client1.expect(prompt)
|
||||||
|
@ -15,7 +15,7 @@ CREATE WINDOW VIEW test_01085.wv ENGINE Memory WATERMARK=ASCENDING AS SELECT cou
|
|||||||
|
|
||||||
SHOW tables FROM test_01085;
|
SHOW tables FROM test_01085;
|
||||||
|
|
||||||
DROP TABLE test_01085.wv NO DELAY;
|
DROP TABLE test_01085.wv SYNC;
|
||||||
SHOW tables FROM test_01085;
|
SHOW tables FROM test_01085;
|
||||||
|
|
||||||
CREATE WINDOW VIEW test_01085.wv ENGINE Memory WATERMARK=ASCENDING AS SELECT count(a) AS count, market, tumbleEnd(wid) AS w_end FROM test_01085.mt GROUP BY tumble(timestamp, INTERVAL '5' SECOND) AS wid, market;
|
CREATE WINDOW VIEW test_01085.wv ENGINE Memory WATERMARK=ASCENDING AS SELECT count(a) AS count, market, tumbleEnd(wid) AS w_end FROM test_01085.mt GROUP BY tumble(timestamp, INTERVAL '5' SECOND) AS wid, market;
|
||||||
@ -26,5 +26,5 @@ SHOW tables FROM test_01085;
|
|||||||
ATTACH TABLE test_01085.wv;
|
ATTACH TABLE test_01085.wv;
|
||||||
SHOW tables FROM test_01085;
|
SHOW tables FROM test_01085;
|
||||||
|
|
||||||
DROP TABLE test_01085.wv NO DELAY;
|
DROP TABLE test_01085.wv SYNC;
|
||||||
SHOW tables FROM test_01085;
|
SHOW tables FROM test_01085;
|
||||||
|
@ -40,7 +40,7 @@ while true; do
|
|||||||
done
|
done
|
||||||
|
|
||||||
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT market, wid FROM test_01086.\`.inner.wv\` ORDER BY market, \`windowID(timestamp, toIntervalSecond('5'), 'US/Samoa')\` as wid";
|
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT market, wid FROM test_01086.\`.inner.wv\` ORDER BY market, \`windowID(timestamp, toIntervalSecond('5'), 'US/Samoa')\` as wid";
|
||||||
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE test_01086.wv NO DELAY;"
|
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE test_01086.wv SYNC;"
|
||||||
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE test_01086.mt NO DELAY;"
|
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE test_01086.mt SYNC;"
|
||||||
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE test_01086.dst NO DELAY;"
|
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE test_01086.dst SYNC;"
|
||||||
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP DATABASE test_01086 NO DELAY;"
|
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP DATABASE test_01086 SYNC;"
|
||||||
|
@ -1,8 +1,8 @@
|
|||||||
-- Tags: no-replicated-database
|
-- Tags: no-replicated-database
|
||||||
-- Tag no-replicated-database: ON CLUSTER is not allowed
|
-- Tag no-replicated-database: ON CLUSTER is not allowed
|
||||||
|
|
||||||
DROP TABLE IF EXISTS test_repl ON CLUSTER test_shard_localhost SYNC;
|
DROP TABLE IF EXISTS test_repl ON CLUSTER test_shard_localhost NO DELAY;
|
||||||
CREATE TABLE test_repl ON CLUSTER test_shard_localhost (n UInt64) ENGINE ReplicatedMergeTree('/clickhouse/test_01181/{database}/test_repl','r1') ORDER BY tuple();
|
CREATE TABLE test_repl ON CLUSTER test_shard_localhost (n UInt64) ENGINE ReplicatedMergeTree('/clickhouse/test_01181/{database}/test_repl','r1') ORDER BY tuple();
|
||||||
DETACH TABLE test_repl ON CLUSTER test_shard_localhost SYNC;
|
DETACH TABLE test_repl ON CLUSTER test_shard_localhost NO DELAY;
|
||||||
ATTACH TABLE test_repl ON CLUSTER test_shard_localhost;
|
ATTACH TABLE test_repl ON CLUSTER test_shard_localhost;
|
||||||
DROP TABLE test_repl ON CLUSTER test_shard_localhost SYNC;
|
DROP TABLE test_repl ON CLUSTER test_shard_localhost NO DELAY;
|
||||||
|
@ -4,8 +4,8 @@
|
|||||||
|
|
||||||
SET insert_keeper_fault_injection_probability=0; -- disable fault injection; part ids are non-deterministic in case of insert retries
|
SET insert_keeper_fault_injection_probability=0; -- disable fault injection; part ids are non-deterministic in case of insert retries
|
||||||
|
|
||||||
DROP TABLE IF EXISTS execute_on_single_replica_r1 NO DELAY;
|
DROP TABLE IF EXISTS execute_on_single_replica_r1 SYNC;
|
||||||
DROP TABLE IF EXISTS execute_on_single_replica_r2 NO DELAY;
|
DROP TABLE IF EXISTS execute_on_single_replica_r2 SYNC;
|
||||||
|
|
||||||
/* that test requires fixed zookeeper path, so we cannot use ReplicatedMergeTree({database}) */
|
/* that test requires fixed zookeeper path, so we cannot use ReplicatedMergeTree({database}) */
|
||||||
CREATE TABLE execute_on_single_replica_r1 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01532/execute_on_single_replica', 'r1') ORDER BY tuple() SETTINGS execute_merges_on_single_replica_time_threshold=10;
|
CREATE TABLE execute_on_single_replica_r1 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01532/execute_on_single_replica', 'r1') ORDER BY tuple() SETTINGS execute_merges_on_single_replica_time_threshold=10;
|
||||||
@ -130,5 +130,5 @@ GROUP BY part_name
|
|||||||
ORDER BY part_name
|
ORDER BY part_name
|
||||||
FORMAT Vertical;
|
FORMAT Vertical;
|
||||||
|
|
||||||
DROP TABLE execute_on_single_replica_r1 NO DELAY;
|
DROP TABLE execute_on_single_replica_r1 SYNC;
|
||||||
DROP TABLE execute_on_single_replica_r2 NO DELAY;
|
DROP TABLE execute_on_single_replica_r2 SYNC;
|
||||||
|
@ -24,7 +24,7 @@ SELECT * FROM 01686_test WHERE key IN (123, 456, -123) ORDER BY key;
|
|||||||
SELECT '--';
|
SELECT '--';
|
||||||
SELECT * FROM 01686_test WHERE key = 'Hello'; -- { serverError 53 }
|
SELECT * FROM 01686_test WHERE key = 'Hello'; -- { serverError 53 }
|
||||||
|
|
||||||
DETACH TABLE 01686_test NO DELAY;
|
DETACH TABLE 01686_test SYNC;
|
||||||
ATTACH TABLE 01686_test;
|
ATTACH TABLE 01686_test;
|
||||||
|
|
||||||
SELECT * FROM 01686_test WHERE key IN (99, 999, 9999, -123) ORDER BY key;
|
SELECT * FROM 01686_test WHERE key IN (99, 999, 9999, -123) ORDER BY key;
|
||||||
|
@ -43,7 +43,7 @@ EOF
|
|||||||
get_table_comment_info
|
get_table_comment_info
|
||||||
|
|
||||||
echo detach table
|
echo detach table
|
||||||
$CLICKHOUSE_CLIENT --query="DETACH TABLE comment_test_table NO DELAY;"
|
$CLICKHOUSE_CLIENT --query="DETACH TABLE comment_test_table SYNC;"
|
||||||
get_table_comment_info
|
get_table_comment_info
|
||||||
|
|
||||||
echo re-attach table
|
echo re-attach table
|
||||||
|
@ -50,7 +50,7 @@ for STORAGE_POLICY in 's3_cache' 'local_cache'; do
|
|||||||
INNER JOIN system.filesystem_cache AS caches
|
INNER JOIN system.filesystem_cache AS caches
|
||||||
ON data_paths.cache_path = caches.cache_path"
|
ON data_paths.cache_path = caches.cache_path"
|
||||||
|
|
||||||
$CLICKHOUSE_CLIENT --query "DROP TABLE test_02286 NO DELAY"
|
$CLICKHOUSE_CLIENT --query "DROP TABLE test_02286 SYNC"
|
||||||
$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache"
|
$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.filesystem_cache"
|
||||||
|
|
||||||
$CLICKHOUSE_CLIENT --query "SELECT cache_path FROM system.filesystem_cache"
|
$CLICKHOUSE_CLIENT --query "SELECT cache_path FROM system.filesystem_cache"
|
||||||
|
@ -1,8 +1,8 @@
|
|||||||
|
|
||||||
-- Tags: long, replica, no-replicated-database, no-parallel
|
-- Tags: long, replica, no-replicated-database, no-parallel
|
||||||
|
|
||||||
DROP TABLE IF EXISTS part_log_profile_events_r1 NO DELAY;
|
DROP TABLE IF EXISTS part_log_profile_events_r1 SYNC;
|
||||||
DROP TABLE IF EXISTS part_log_profile_events_r2 NO DELAY;
|
DROP TABLE IF EXISTS part_log_profile_events_r2 SYNC;
|
||||||
|
|
||||||
CREATE TABLE part_log_profile_events_r1 (x UInt64)
|
CREATE TABLE part_log_profile_events_r1 (x UInt64)
|
||||||
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_02378/part_log_profile_events', 'r1')
|
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_02378/part_log_profile_events', 'r1')
|
||||||
@ -36,5 +36,5 @@ WHERE event_time > now() - INTERVAL 10 MINUTE
|
|||||||
AND event_type == 'DownloadPart'
|
AND event_type == 'DownloadPart'
|
||||||
;
|
;
|
||||||
|
|
||||||
DROP TABLE part_log_profile_events_r1 NO DELAY;
|
DROP TABLE part_log_profile_events_r1 SYNC;
|
||||||
DROP TABLE part_log_profile_events_r2 NO DELAY;
|
DROP TABLE part_log_profile_events_r2 SYNC;
|
||||||
|
@ -15,7 +15,7 @@ from pure_http_client import ClickHouseClient
|
|||||||
client = ClickHouseClient()
|
client = ClickHouseClient()
|
||||||
|
|
||||||
# test table without partition
|
# test table without partition
|
||||||
client.query("DROP TABLE IF EXISTS t_async_insert_dedup_no_part NO DELAY")
|
client.query("DROP TABLE IF EXISTS t_async_insert_dedup_no_part SYNC")
|
||||||
client.query(
|
client.query(
|
||||||
"""
|
"""
|
||||||
CREATE TABLE t_async_insert_dedup_no_part (
|
CREATE TABLE t_async_insert_dedup_no_part (
|
||||||
@ -35,7 +35,7 @@ client.query(
|
|||||||
)
|
)
|
||||||
result = client.query("select count(*) from t_async_insert_dedup_no_part")
|
result = client.query("select count(*) from t_async_insert_dedup_no_part")
|
||||||
print(result, flush=True)
|
print(result, flush=True)
|
||||||
client.query("DROP TABLE IF EXISTS t_async_insert_dedup_no_part NO DELAY")
|
client.query("DROP TABLE IF EXISTS t_async_insert_dedup_no_part SYNC")
|
||||||
|
|
||||||
|
|
||||||
# generate data and push to queue
|
# generate data and push to queue
|
||||||
@ -95,7 +95,7 @@ def fetch_and_insert_data(q, client):
|
|||||||
|
|
||||||
|
|
||||||
# main process
|
# main process
|
||||||
client.query("DROP TABLE IF EXISTS t_async_insert_dedup NO DELAY")
|
client.query("DROP TABLE IF EXISTS t_async_insert_dedup SYNC")
|
||||||
client.query(
|
client.query(
|
||||||
"""
|
"""
|
||||||
CREATE TABLE t_async_insert_dedup (
|
CREATE TABLE t_async_insert_dedup (
|
||||||
@ -161,6 +161,6 @@ result = int(result.split()[0])
|
|||||||
if result <= 0:
|
if result <= 0:
|
||||||
raise Exception(f"AsyncInsertCacheHits should > 0, but got {result}")
|
raise Exception(f"AsyncInsertCacheHits should > 0, but got {result}")
|
||||||
|
|
||||||
client.query("DROP TABLE IF EXISTS t_async_insert_dedup NO DELAY")
|
client.query("DROP TABLE IF EXISTS t_async_insert_dedup SYNC")
|
||||||
|
|
||||||
os._exit(os.EX_OK)
|
os._exit(os.EX_OK)
|
||||||
|
@ -33,5 +33,5 @@ select count() from system.filesystem_cache_log where query_id = '$query_id' AND
|
|||||||
|
|
||||||
${CLICKHOUSE_CLIENT} --multiline --multiquery -q "
|
${CLICKHOUSE_CLIENT} --multiline --multiquery -q "
|
||||||
select count() from ttt;
|
select count() from ttt;
|
||||||
drop table ttt no delay;
|
drop table ttt sync;
|
||||||
"
|
"
|
||||||
|
@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
|
|||||||
CLICKHOUSE_TEST_ZOOKEEPER_PREFIX="${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}"
|
CLICKHOUSE_TEST_ZOOKEEPER_PREFIX="${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}"
|
||||||
|
|
||||||
$CLICKHOUSE_CLIENT -n --query "
|
$CLICKHOUSE_CLIENT -n --query "
|
||||||
DROP TABLE IF EXISTS t_async_insert_cleanup NO DELAY;
|
DROP TABLE IF EXISTS t_async_insert_cleanup SYNC;
|
||||||
CREATE TABLE t_async_insert_cleanup (
|
CREATE TABLE t_async_insert_cleanup (
|
||||||
KeyID UInt32
|
KeyID UInt32
|
||||||
) Engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup', '{replica}')
|
) Engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup', '{replica}')
|
||||||
@ -27,7 +27,7 @@ old_answer=$($CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper W
|
|||||||
for i in {1..300}; do
|
for i in {1..300}; do
|
||||||
answer=$($CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper WHERE path like '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup/async_blocks%' settings allow_unrestricted_reads_from_keeper = 'true'")
|
answer=$($CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper WHERE path like '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup/async_blocks%' settings allow_unrestricted_reads_from_keeper = 'true'")
|
||||||
if [ $answer == '10' ]; then
|
if [ $answer == '10' ]; then
|
||||||
$CLICKHOUSE_CLIENT -n --query "DROP TABLE t_async_insert_cleanup NO DELAY;"
|
$CLICKHOUSE_CLIENT -n --query "DROP TABLE t_async_insert_cleanup SYNC;"
|
||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
sleep 1
|
sleep 1
|
||||||
@ -36,4 +36,4 @@ done
|
|||||||
$CLICKHOUSE_CLIENT --query "SELECT count(*) FROM t_async_insert_cleanup"
|
$CLICKHOUSE_CLIENT --query "SELECT count(*) FROM t_async_insert_cleanup"
|
||||||
echo $old_answer
|
echo $old_answer
|
||||||
$CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper WHERE path like '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup/async_blocks%' settings allow_unrestricted_reads_from_keeper = 'true'"
|
$CLICKHOUSE_CLIENT --query "SELECT count(*) FROM system.zookeeper WHERE path like '/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/t_async_insert_cleanup/async_blocks%' settings allow_unrestricted_reads_from_keeper = 'true'"
|
||||||
$CLICKHOUSE_CLIENT -n --query "DROP TABLE t_async_insert_cleanup NO DELAY;"
|
$CLICKHOUSE_CLIENT -n --query "DROP TABLE t_async_insert_cleanup SYNC;"
|
||||||
|
@ -12,4 +12,4 @@ CREATE TABLE test2 (x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{d
|
|||||||
-- The macro {server_uuid} is special, not a configuration-type macro. It's normal that it is inaccessible with the getMacro function.
|
-- The macro {server_uuid} is special, not a configuration-type macro. It's normal that it is inaccessible with the getMacro function.
|
||||||
SELECT getMacro('server_uuid'); -- { serverError NO_ELEMENTS_IN_CONFIG }
|
SELECT getMacro('server_uuid'); -- { serverError NO_ELEMENTS_IN_CONFIG }
|
||||||
|
|
||||||
DROP TABLE test NO DELAY;
|
DROP TABLE test SYNC;
|
||||||
|
Loading…
Reference in New Issue
Block a user