commit ac62356477
parent 69463f852e

    fix
@@ -55,11 +55,4 @@
    <database_atomic_delay_before_drop_table_sec>3</database_atomic_delay_before_drop_table_sec>
    <database_atomic_drop_error_cooldown_sec>0</database_atomic_drop_error_cooldown_sec>

    <zookeeper_log>
        <database>system</database>
        <table>zookeeper_log</table>
        <flush_interval_milliseconds>5000</flush_interval_milliseconds>
        <collect_interval_milliseconds>5000</collect_interval_milliseconds>
    </zookeeper_log>
</clickhouse>
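For context, the settings above shorten the delay before a dropped Atomic-database table is removed in the background (database_atomic_delay_before_drop_table_sec = 3) and, going by its name, remove the cooldown between retries of a failed drop (database_atomic_drop_error_cooldown_sec = 0), while the zookeeper_log section makes the server flush (Zoo)Keeper request/response events into system.zookeeper_log roughly every 5 seconds. Below is a minimal sketch of how an integration test could check that the log table is actually populated, assuming the usual ClickHouseCluster setup where node is a started instance loaded with this config; the helper name is made up for illustration and is not part of this commit.

def check_zookeeper_log_populated(node):
    # Force the in-memory system log buffers to be written out so recent
    # entries become visible in system.zookeeper_log.
    node.query("SYSTEM FLUSH LOGS")

    # Any ReplicatedMergeTree activity (or just the session handshake) should
    # have produced at least one logged (Zoo)Keeper request by now.
    count = int(node.query("SELECT count() FROM system.zookeeper_log"))
    assert count > 0, "expected system.zookeeper_log to contain entries"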
@@ -160,7 +160,11 @@ def test_drop_table(cluster):
            "action": "REJECT --reject-with tcp-reset",
        }
    )

    # Will drop in background with retries
    node.query("drop table test_drop_table")

    # It should not be possible to create a replica with the same path until the previous one is completely dropped
    for i in range(0, 100):
        node.query_and_get_answer_with_error(
            "create table if not exists test_drop_table (n int) "
@@ -169,11 +173,21 @@ def test_drop_table(cluster):
        )
        time.sleep(0.2)

    # Wait for drop to actually finish
    node.wait_for_log_line(
        "Removing metadata /var/lib/clickhouse/metadata_dropped/default.test_drop_table",
        timeout=60,
        look_behind_lines=1000000,
    )

    # It could leave some leftovers, remove them
    replicas = node.query_with_retry(
        "select name from system.zookeeper where path='/test/drop_table/replicas'"
    )
    if "1" in replicas and "test_drop_table" not in node.query("show tables"):
        node2.query("system drop replica '1' from table test_drop_table")

    # Just in case table was not created due to connection errors
    node.query(
        "create table if not exists test_drop_table (n int) engine=ReplicatedMergeTree('/test/drop_table', '1') "
        "order by n partition by n % 99 settings storage_policy='s3'"
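The added test steps wait for the background drop to complete by watching for the "Removing metadata ..." line in the server log, then clean up any replica entry the interrupted drop may have left under /test/drop_table/replicas in ZooKeeper. As a rough sketch of the same "wait until the replica is really gone" idea, one could instead poll the virtual system.zookeeper table; the helper below is illustrative only (its name and the use of node.query from the integration-test ClickHouseInstance wrapper are assumptions, not part of this commit).

import time


def wait_replica_dropped(node, zk_path="/test/drop_table", replica="1", timeout=60):
    # Poll system.zookeeper until the replica node disappears, i.e. the
    # background DROP TABLE has finished removing its ZooKeeper metadata.
    deadline = time.time() + timeout
    while time.time() < deadline:
        names = node.query(
            "select name from system.zookeeper where path = '%s/replicas'" % zk_path
        )
        if replica not in names.split():
            return
        time.sleep(0.5)
    raise TimeoutError("replica '%s' still present under %s/replicas" % (replica, zk_path))

With such a helper, the final recovery step would reduce to wait_replica_dropped(node) followed by the unconditional "create table if not exists ..." shown in the hunk above.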