Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-25 09:02:00 +00:00)

Merge pull request #52944 from kssenii/fix-s3-queue-flaky-test

Fix flaky test_storage_s3_queue::test_multiple_tables_streaming_sync_distributed

Commit: 65352d64c7
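In short, the diff below deflakes the test by lowering poll_size from 10 to 2 (presumably so each polling batch is small enough for both replicas to claim some files), issuing the CREATE MATERIALIZED VIEW statement as its own query on each instance (the new for inst in [instance, instance_2] loop) rather than appending it to the CREATE TABLE statement, extending the wait loop from 100 to 150 iterations, and moving the total-row-count assertion ahead of the per-engine progress checks.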
@@ -761,7 +761,7 @@ def test_multiple_tables_streaming_sync(started_cluster, mode):
 @pytest.mark.parametrize("mode", AVAILABLE_MODES)
 def test_multiple_tables_streaming_sync_distributed(started_cluster, mode):
     files_to_generate = 100
-    poll_size = 10
+    poll_size = 2
     prefix = f"test_multiple_{mode}"
     bucket = started_cluster.minio_restricted_bucket
     instance = started_cluster.instances["instance"]
@@ -785,7 +785,12 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode):
         CREATE TABLE test.s3_queue_persistent ({table_format})
         ENGINE = MergeTree()
         ORDER BY column1;
+        """
+    )
 
+    for inst in [instance, instance_2]:
+        inst.query(
+            f"""
         CREATE MATERIALIZED VIEW test.persistent_s3_queue_mv TO test.s3_queue_persistent AS
         SELECT
             *
@@ -800,7 +805,7 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode):
     def get_count(node, table_name):
         return int(run_query(node, f"SELECT count() FROM {table_name}"))
 
-    for _ in range(100):
+    for _ in range(150):
         if (
             get_count(instance, "test.s3_queue_persistent")
             + get_count(instance_2, "test.s3_queue_persistent")
@@ -816,11 +821,12 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode):
         list(map(int, l.split())) for l in run_query(instance_2, get_query).splitlines()
     ]
 
+    assert len(res1) + len(res2) == files_to_generate
+
     # Checking that all engines have made progress
     assert len(res1) > 0
     assert len(res2) > 0
 
-    assert len(res1) + len(res2) == files_to_generate
     assert {tuple(v) for v in res1 + res2} == set([tuple(i) for i in total_values])
 
     # Checking that all files were processed only once
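For context, the wait loop in the last two hunks is the standard poll-until-condition pattern. Below is a minimal standalone sketch of that pattern, not part of the commit: wait_for_condition and its parameters are hypothetical names, and the commented usage assumes the test's get_count, instance, instance_2, and files_to_generate are in scope.

import time

def wait_for_condition(check, retries=150, delay=1.0):
    # Poll check() until it returns True or the retry budget runs out.
    # Returning a bool instead of raising lets the caller keep its own
    # assertion and failure message.
    for _ in range(retries):
        if check():
            return True
        time.sleep(delay)
    return False

# Hypothetical usage mirroring the test body: wait until the two replicas
# together have consumed every generated file.
#
# assert wait_for_condition(
#     lambda: get_count(instance, "test.s3_queue_persistent")
#     + get_count(instance_2, "test.s3_queue_persistent")
#     == files_to_generate
# )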