Merge pull request #25007 from ClickHouse/fix-integration-test-s3

Maybe Minio takes too long to start in tests
alexey-milovidov 2021-06-07 02:53:28 +03:00 committed by GitHub
commit d54aaca718
2 changed files with 9 additions and 7 deletions


@@ -17,11 +17,12 @@ def run_endpoint(cluster):
     cluster.exec_in_container(container_id, ["python", "endpoint.py"], detach=True)
     # Wait for S3 endpoint start
-    for attempt in range(10):
+    num_attempts = 100
+    for attempt in range(num_attempts):
         ping_response = cluster.exec_in_container(cluster.get_container_id('resolver'),
                                                   ["curl", "-s", "http://resolver:8080/"], nothrow=True)
         if ping_response != 'OK':
-            if attempt == 9:
+            if attempt == num_attempts - 1:
                 assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
             else:
                 time.sleep(1)
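
For context, both retry hunks in this PR follow the same ping-until-OK pattern. Below is a minimal standalone sketch of that loop; the helper name wait_for_ok and the direct use of subprocess with curl (instead of the test framework's exec_in_container) are assumptions for illustration, not part of the PR:

    import subprocess
    import time

    def wait_for_ok(url, num_attempts=100, delay=1):
        # Hypothetical helper mirroring the tests' retry loop: ping the
        # endpoint until it answers 'OK', asserting on the final attempt.
        for attempt in range(num_attempts):
            # curl -s prints the response body; a refused connection yields ''.
            ping_response = subprocess.run(["curl", "-s", url],
                                           capture_output=True, text=True).stdout
            if ping_response == 'OK':
                return
            if attempt == num_attempts - 1:
                assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
            time.sleep(delay)

With num_attempts = 100 and a one-second sleep, the loop tolerates roughly 100 seconds of Minio startup instead of the previous ~10.
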
@@ -156,13 +157,13 @@ def test_move_failover(cluster):
     # There should be 2 attempts to move part.
     assert node.query("""
-        SELECT count(*) FROM system.part_log 
+        SELECT count(*) FROM system.part_log
         WHERE event_type='MovePart' AND table='s3_failover_test'
     """) == '2\n'
     # First attempt should be failed with expected error.
     exception = node.query("""
-        SELECT exception FROM system.part_log 
+        SELECT exception FROM system.part_log
         WHERE event_type='MovePart' AND table='s3_failover_test' AND notEmpty(exception)
         ORDER BY event_time
         LIMIT 1


@@ -440,11 +440,12 @@ def run_s3_mocks(started_cluster):
     # Wait for S3 mocks to start
     for mock_filename, container, port in mocks:
-        for attempt in range(10):
+        num_attempts = 100
+        for attempt in range(num_attempts):
             ping_response = started_cluster.exec_in_container(started_cluster.get_container_id(container),
                                                               ["curl", "-s", f"http://localhost:{port}/"], nothrow=True)
             if ping_response != 'OK':
-                if attempt == 9:
+                if attempt == num_attempts - 1:
                     assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
                 else:
                     time.sleep(1)
@@ -643,4 +644,4 @@ def test_storage_s3_put_gzip(started_cluster, extension, method):
     buf = io.BytesIO(get_s3_file_content(started_cluster, bucket, filename, decode=False))
     f = gzip.GzipFile(fileobj=buf, mode="rb")
     uncompressed_content = f.read().decode()
-    assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 708
\ No newline at end of file
+    assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 708
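
The assertion in this last hunk decompresses the uploaded object and sums the second CSV column. A self-contained sketch of the same check, with made-up sample data standing in for get_s3_file_content():

    import gzip
    import io

    # Hypothetical stand-in for the S3 object: gzip-compressed CSV rows "id,value".
    raw = gzip.compress(b"1,700\n2,8\n")
    buf = io.BytesIO(raw)
    f = gzip.GzipFile(fileobj=buf, mode="rb")
    uncompressed_content = f.read().decode()
    # Sum of the second comma-separated column: 700 + 8 == 708.
    assert sum(int(line.split(',')[1]) for line in uncompressed_content.splitlines()) == 708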