Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-27 10:02:01 +00:00)
Merge pull request #49996 from ClickHouse/az
Fix test_insert_same_partition_and_merge failing if one Azure request attempt fails
Commit 632ab8a3d1
@@ -51,7 +51,7 @@ void WriteBufferFromAzureBlobStorage::execWithRetry(std::function<void()> func,
         if (i == num_tries - 1)
             throw;

-        LOG_DEBUG(log, "Write at attempt {} for blob `{}` failed: {}", i + 1, blob_path, e.Message);
+        LOG_DEBUG(log, "Write at attempt {} for blob `{}` failed: {} {}", i + 1, blob_path, e.what(), e.Message);
     };

     for (size_t i = 0; i < num_tries; ++i)
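For context, execWithRetry re-throws only on the final attempt and otherwise logs and retries; the hunk above additionally logs the exception's what() text alongside the Azure-specific Message field. Below is a rough Python sketch of that retry-and-log pattern, with hypothetical names (exec_with_retry, log) — it is an illustration, not the ClickHouse C++ helper itself:

    import logging

    log = logging.getLogger("write_buffer_retry")

    def exec_with_retry(func, num_tries):
        """Call func(); on failure, log the attempt and retry, re-raising only on the last try."""
        for i in range(num_tries):
            try:
                func()
                return
            except Exception as e:
                # Last attempt: propagate the error to the caller.
                if i == num_tries - 1:
                    raise
                # Otherwise record which attempt failed and the full exception text,
                # mirroring the extra detail the C++ change adds to the log line.
                log.debug("Write at attempt %d failed: %s", i + 1, e)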
@@ -203,7 +203,7 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical):
     node.query(f"SYSTEM START MERGES {TABLE_NAME}")

     # Wait for merges and old parts deletion
-    for attempt in range(0, 10):
+    for attempt in range(0, 60):
         parts_count = azure_query(
             node,
             f"SELECT COUNT(*) FROM system.parts WHERE table = '{TABLE_NAME}' FORMAT Values",
@@ -211,7 +211,7 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical):
         if parts_count == "(1)":
             break

-        if attempt == 9:
+        if attempt == 59:
             assert parts_count == "(1)"

         time.sleep(1)
@@ -232,7 +232,7 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name):
     node.query("SYSTEM START MERGES s3_test")

     # Wait for merges and old parts deletion
-    for attempt in range(0, 10):
+    for attempt in range(0, 60):
         parts_count = node.query(
             "SELECT COUNT(*) FROM system.parts WHERE table = 's3_test' and active = 1 FORMAT Values"
         )
@@ -240,7 +240,7 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name):
         if parts_count == "(1)":
             break

-        if attempt == 9:
+        if attempt == 59:
             assert parts_count == "(1)"

         time.sleep(1)
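All four test hunks make the same change: the polling loop that waits for background merges and old-part cleanup now makes 60 one-second attempts instead of 10, so a single slow or retried Azure request no longer exhausts the wait. A minimal standalone sketch of that wait loop, using hypothetical helper names (run_query, wait_for_single_part) rather than the tests' own functions:

    import time

    def wait_for_single_part(run_query, table_name, attempts=60, delay=1.0):
        """Poll system.parts until one active part remains, failing after `attempts` tries."""
        for attempt in range(attempts):
            parts_count = run_query(
                f"SELECT COUNT(*) FROM system.parts "
                f"WHERE table = '{table_name}' AND active = 1 FORMAT Values"
            )
            if parts_count == "(1)":
                return
            # On the final attempt, assert so the failure message shows the actual count.
            if attempt == attempts - 1:
                assert parts_count == "(1)"
            time.sleep(delay)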