#!/usr/bin/env python3

import logging
import pytest
import os
import minio

from helpers.cluster import ClickHouseCluster
from helpers.mock_servers import start_s3_mock
from helpers.test_tools import assert_eq_with_retry
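

# Module-scoped cluster with two instances: "node" (bounded S3 retries via
# configs/s3_retries.xml) and "node_with_inf_s3_retries" (configured, judging by
# configs/inf_s3_retries.xml, to retry S3 requests indefinitely). Both use MinIO.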
@pytest.fixture(scope="module")
def cluster():
    try:
        cluster = ClickHouseCluster(__file__)
        cluster.add_instance(
            "node",
            main_configs=[
                "configs/storage_conf.xml",
            ],
            user_configs=[
                "configs/setting.xml",
                "configs/s3_retries.xml",
            ],
            with_minio=True,
        )
        cluster.add_instance(
            "node_with_inf_s3_retries",
            main_configs=[
                "configs/storage_conf.xml",
            ],
            user_configs=[
                "configs/setting.xml",
                "configs/inf_s3_retries.xml",
            ],
            with_minio=True,
        )
        logging.info("Starting cluster...")
        cluster.start()
        logging.info("Cluster started")

        yield cluster
    finally:
        cluster.shutdown()
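

# "broken_s3" is the mock S3 endpoint from helpers.mock_servers (reachable as
# resolver:8083) that can inject failures into S3 requests. It is reset before
# every test so injected errors do not leak between tests.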
@pytest.fixture(scope="module")
def init_broken_s3(cluster):
    yield start_s3_mock(cluster, "broken_s3", "8083")


@pytest.fixture(scope="function")
def broken_s3(init_broken_s3):
    init_broken_s3.reset()
    yield init_broken_s3
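

# setup_fake_puts(1) presumably makes the mock acknowledge uploads without
# actually storing the object, so the post-upload existence check should detect
# that the blob "suddenly disappeared" and fail the INSERT.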
def test_upload_after_check_works(cluster, broken_s3):
    node = cluster.instances["node"]

    node.query(
        """
        CREATE TABLE s3_upload_after_check_works (
            id Int64,
            data String
        ) ENGINE=MergeTree()
        ORDER BY id
        SETTINGS
            storage_policy='broken_s3'
        """
    )

    broken_s3.setup_fake_puts(1)

    error = node.query_and_get_error(
        "INSERT INTO s3_upload_after_check_works VALUES (1, 'Hello')"
    )

    assert "Code: 499" in error, error
    assert "Immediately after upload" in error, error
    assert "suddenly disappeared" in error, error
def get_multipart_counters(node, query_id, log_type="ExceptionWhileProcessing"):
    node.query("SYSTEM FLUSH LOGS")
    return [
        int(x)
        for x in node.query(
            f"""
            SELECT
                ProfileEvents['S3CreateMultipartUpload'],
                ProfileEvents['S3UploadPart'],
                ProfileEvents['S3WriteRequestsErrors'] + ProfileEvents['S3WriteRequestsThrottling'],
            FROM system.query_log
            WHERE query_id='{query_id}'
                AND type='{log_type}'
            """
        ).split()
        if x
    ]


def get_put_counters(node, query_id, log_type="ExceptionWhileProcessing"):
    node.query("SYSTEM FLUSH LOGS")
    return [
        int(x)
        for x in node.query(
            f"""
            SELECT
                ProfileEvents['S3PutObject'],
                ProfileEvents['S3WriteRequestsErrors'],
            FROM system.query_log
            WHERE query_id='{query_id}'
                AND type='{log_type}'
            """
        ).split()
        if x
    ]
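

# CreateMultipartUpload is failed with a non-retryable error, so the INSERT must
# fail before any part is uploaded: one create attempt, zero parts, one error.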
@pytest.mark.parametrize(
    "compression", ["none", "gzip", "br", "xz", "zstd", "bz2", "deflate", "lz4"]
)
def test_upload_s3_fail_create_multi_part_upload(cluster, broken_s3, compression):
    node = cluster.instances["node"]

    broken_s3.setup_at_create_multi_part_upload()

    insert_query_id = f"INSERT_INTO_TABLE_FUNCTION_FAIL_CREATE_MPU_{compression}"
    error = node.query_and_get_error(
        f"""
        INSERT INTO
            TABLE FUNCTION s3(
                'http://resolver:8083/root/data/test_upload_s3_fail_create_multi_part_upload',
                'minio', 'minio123',
                'CSV', auto, '{compression}'
            )
        SELECT
            *
        FROM system.numbers
        LIMIT 100000000
        SETTINGS
            s3_max_single_part_upload_size=100,
            s3_min_upload_part_size=100
        """,
        query_id=insert_query_id,
    )

    assert "Code: 499" in error, error
    assert "mock s3 injected unretryable error" in error, error

    create_multipart, upload_parts, s3_errors = get_multipart_counters(
        node, insert_query_id
    )
    assert create_multipart == 1
    assert upload_parts == 0
    assert s3_errors == 1
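

# Here the non-retryable error is injected into UploadPart after two successful
# parts, so the INSERT fails mid-upload with at least two parts already sent.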
@pytest.mark.parametrize(
    "compression", ["none", "gzip", "br", "xz", "zstd", "bz2", "deflate", "lz4"]
)
def test_upload_s3_fail_upload_part_when_multi_part_upload(
    cluster, broken_s3, compression
):
    node = cluster.instances["node"]

    broken_s3.setup_fake_multpartuploads()
    broken_s3.setup_at_part_upload(count=1, after=2)

    insert_query_id = f"INSERT_INTO_TABLE_FUNCTION_FAIL_UPLOAD_PART_{compression}"
    error = node.query_and_get_error(
        f"""
        INSERT INTO
            TABLE FUNCTION s3(
                'http://resolver:8083/root/data/test_upload_s3_fail_upload_part_when_multi_part_upload',
                'minio', 'minio123',
                'CSV', auto, '{compression}'
            )
        SELECT
            *
        FROM system.numbers
        LIMIT 100000000
        SETTINGS
            s3_max_single_part_upload_size=100,
            s3_min_upload_part_size=100
        """,
        query_id=insert_query_id,
    )

    assert "Code: 499" in error, error
    assert "mock s3 injected unretryable error" in error, error

    create_multipart, upload_parts, s3_errors = get_multipart_counters(
        node, insert_query_id
    )
    assert create_multipart == 1
    assert upload_parts >= 2
    assert s3_errors >= 2
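

# Retryable failures (throttling, QPS limits, connection refused) injected a
# limited number of times should be absorbed by the retry logic; once the
# injection outlasts the retry budget, the INSERT fails with the matching message.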
@pytest.mark.parametrize(
    "action_and_message",
    [
        ("slow_down", "DB::Exception: Slow Down."),
        ("qps_limit_exceeded", "DB::Exception: Please reduce your request rate."),
        ("total_qps_limit_exceeded", "DB::Exception: Please reduce your request rate."),
        (
            "connection_refused",
            "Poco::Exception. Code: 1000, e.code() = 111, Connection refused",
        ),
    ],
    ids=lambda x: x[0],
)
def test_when_error_is_retried(cluster, broken_s3, action_and_message):
    node = cluster.instances["node"]

    action, message = action_and_message

    broken_s3.setup_fake_multpartuploads()
    broken_s3.setup_at_part_upload(count=3, after=2, action=action)

    insert_query_id = f"INSERT_INTO_TABLE_{action}_RETRIED"
    node.query(
        f"""
        INSERT INTO
            TABLE FUNCTION s3(
                'http://resolver:8083/root/data/test_when_{action}_retried',
                'minio', 'minio123',
                'CSV', auto, 'none'
            )
        SELECT
            *
        FROM system.numbers
        LIMIT 1000
        SETTINGS
            s3_max_single_part_upload_size=100,
            s3_min_upload_part_size=100,
            s3_check_objects_after_upload=0
        """,
        query_id=insert_query_id,
    )

    create_multipart, upload_parts, s3_errors = get_multipart_counters(
        node, insert_query_id, log_type="QueryFinish"
    )
    assert create_multipart == 1
    assert upload_parts == 39
    assert s3_errors == 3

    broken_s3.setup_at_part_upload(count=1000, after=2, action=action)
    insert_query_id = f"INSERT_INTO_TABLE_{action}_RETRIED_1"
    error = node.query_and_get_error(
        f"""
        INSERT INTO
            TABLE FUNCTION s3(
                'http://resolver:8083/root/data/test_when_{action}_retried',
                'minio', 'minio123',
                'CSV', auto, 'none'
            )
        SELECT
            *
        FROM system.numbers
        LIMIT 1000
        SETTINGS
            s3_max_single_part_upload_size=100,
            s3_min_upload_part_size=100,
            s3_check_objects_after_upload=0
        """,
        query_id=insert_query_id,
    )

    assert "Code: 499" in error, error
    assert message in error, error
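

# Same pattern for a broken pipe during part upload: three injected failures are
# retried transparently, while a persistent failure surfaces as Poco error 32.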
def test_when_s3_broken_pipe_at_upload_is_retried(cluster, broken_s3):
    node = cluster.instances["node"]

    broken_s3.setup_fake_multpartuploads()
    broken_s3.setup_at_part_upload(
        count=3,
        after=2,
        action="broken_pipe",
    )

    insert_query_id = f"TEST_WHEN_S3_BROKEN_PIPE_AT_UPLOAD"
    node.query(
        f"""
        INSERT INTO
            TABLE FUNCTION s3(
                'http://resolver:8083/root/data/test_when_s3_broken_pipe_at_upload_is_retried',
                'minio', 'minio123',
                'CSV', auto, 'none'
            )
        SELECT
            *
        FROM system.numbers
        LIMIT 1000000
        SETTINGS
            s3_max_single_part_upload_size=100,
            s3_min_upload_part_size=1000000,
            s3_check_objects_after_upload=0
        """,
        query_id=insert_query_id,
    )

    create_multipart, upload_parts, s3_errors = get_multipart_counters(
        node, insert_query_id, log_type="QueryFinish"
    )

    assert create_multipart == 1
    assert upload_parts == 7
    assert s3_errors == 3

    broken_s3.setup_at_part_upload(
        count=1000,
        after=2,
        action="broken_pipe",
    )
    insert_query_id = f"TEST_WHEN_S3_BROKEN_PIPE_AT_UPLOAD_1"
    error = node.query_and_get_error(
        f"""
        INSERT INTO
            TABLE FUNCTION s3(
                'http://resolver:8083/root/data/test_when_s3_broken_pipe_at_upload_is_retried',
                'minio', 'minio123',
                'CSV', auto, 'none'
            )
        SELECT
            *
        FROM system.numbers
        LIMIT 1000000
        SETTINGS
            s3_max_single_part_upload_size=100,
            s3_min_upload_part_size=1000000,
            s3_check_objects_after_upload=0
        """,
        query_id=insert_query_id,
    )

    assert "Code: 1000" in error, error
    assert (
        "DB::Exception: Poco::Exception. Code: 1000, e.code() = 32, I/O error: Broken pipe"
        in error
    ), error
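

# Connection reset by peer during UploadPart, with and without the mock sending
# part of a response first ("send_something").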
@pytest.mark.parametrize("send_something", [True, False])
def test_when_s3_connection_reset_by_peer_at_upload_is_retried(
    cluster, broken_s3, send_something
):
    node = cluster.instances["node"]

    broken_s3.setup_fake_multpartuploads()
    broken_s3.setup_at_part_upload(
        count=3,
        after=2,
        action="connection_reset_by_peer",
        action_args=["1"] if send_something else ["0"],
    )

    insert_query_id = (
        f"TEST_WHEN_S3_CONNECTION_RESET_BY_PEER_AT_UPLOAD_{send_something}"
    )
    node.query(
        f"""
        INSERT INTO
            TABLE FUNCTION s3(
                'http://resolver:8083/root/data/test_when_s3_connection_reset_by_peer_at_upload_is_retried',
                'minio', 'minio123',
                'CSV', auto, 'none'
            )
        SELECT
            *
        FROM system.numbers
        LIMIT 1000
        SETTINGS
            s3_max_single_part_upload_size=100,
            s3_min_upload_part_size=100,
            s3_check_objects_after_upload=0
        """,
        query_id=insert_query_id,
    )

    create_multipart, upload_parts, s3_errors = get_multipart_counters(
        node, insert_query_id, log_type="QueryFinish"
    )

    assert create_multipart == 1
    assert upload_parts == 39
    assert s3_errors == 3

    broken_s3.setup_at_part_upload(
        count=1000,
        after=2,
        action="connection_reset_by_peer",
        action_args=["1"] if send_something else ["0"],
    )
    insert_query_id = (
        f"TEST_WHEN_S3_CONNECTION_RESET_BY_PEER_AT_UPLOAD_{send_something}_1"
    )
    error = node.query_and_get_error(
        f"""
        INSERT INTO
            TABLE FUNCTION s3(
                'http://resolver:8083/root/data/test_when_s3_connection_reset_by_peer_at_upload_is_retried',
                'minio', 'minio123',
                'CSV', auto, 'none'
            )
        SELECT
            *
        FROM system.numbers
        LIMIT 1000
        SETTINGS
            s3_max_single_part_upload_size=100,
            s3_min_upload_part_size=100,
            s3_check_objects_after_upload=0
        """,
        query_id=insert_query_id,
    )

    assert "Code: 1000" in error, error
    assert (
        "DB::Exception: Connection reset by peer." in error
        or "DB::Exception: Poco::Exception. Code: 1000, e.code() = 104, Connection reset by peer"
        in error
    ), error
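

# Same as above, but the connection reset is injected into CreateMultipartUpload
# instead of UploadPart.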
@pytest.mark.parametrize("send_something", [True, False])
def test_when_s3_connection_reset_by_peer_at_create_mpu_retried(
    cluster, broken_s3, send_something
):
    node = cluster.instances["node"]

    broken_s3.setup_fake_multpartuploads()
    broken_s3.setup_at_create_multi_part_upload(
        count=3,
        after=0,
        action="connection_reset_by_peer",
        action_args=["1"] if send_something else ["0"],
    )

    insert_query_id = (
        f"TEST_WHEN_S3_CONNECTION_RESET_BY_PEER_AT_MULTIPARTUPLOAD_{send_something}"
    )
    node.query(
        f"""
        INSERT INTO
            TABLE FUNCTION s3(
                'http://resolver:8083/root/data/test_when_s3_connection_reset_by_peer_at_create_mpu_retried',
                'minio', 'minio123',
                'CSV', auto, 'none'
            )
        SELECT
            *
        FROM system.numbers
        LIMIT 1000
        SETTINGS
            s3_max_single_part_upload_size=100,
            s3_min_upload_part_size=100,
            s3_check_objects_after_upload=0
        """,
        query_id=insert_query_id,
    )

    create_multipart, upload_parts, s3_errors = get_multipart_counters(
        node, insert_query_id, log_type="QueryFinish"
    )

    assert create_multipart == 1
    assert upload_parts == 39
    assert s3_errors == 3

    broken_s3.setup_at_create_multi_part_upload(
        count=1000,
        after=0,
        action="connection_reset_by_peer",
        action_args=["1"] if send_something else ["0"],
    )

    insert_query_id = (
        f"TEST_WHEN_S3_CONNECTION_RESET_BY_PEER_AT_MULTIPARTUPLOAD_{send_something}_1"
    )
    error = node.query_and_get_error(
        f"""
        INSERT INTO
            TABLE FUNCTION s3(
                'http://resolver:8083/root/data/test_when_s3_connection_reset_by_peer_at_create_mpu_retried',
                'minio', 'minio123',
                'CSV', auto, 'none'
            )
        SELECT
            *
        FROM system.numbers
        LIMIT 1000
        SETTINGS
            s3_max_single_part_upload_size=100,
            s3_min_upload_part_size=100,
            s3_check_objects_after_upload=0
        """,
        query_id=insert_query_id,
    )

    assert "Code: 1000" in error, error
    assert (
        "DB::Exception: Connection reset by peer." in error
        or "DB::Exception: Poco::Exception. Code: 1000, e.code() = 104, Connection reset by peer"
        in error
    ), error
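

# With effectively infinite S3 retries the INSERT never fails on its own; the
# test checks that KILL QUERY still terminates it within a bounded time.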
def test_query_is_canceled_with_inf_retries(cluster, broken_s3):
    node = cluster.instances["node_with_inf_s3_retries"]

    broken_s3.setup_at_part_upload(
        count=10000000,
        after=2,
        action="connection_refused",
    )

    insert_query_id = f"TEST_QUERY_IS_CANCELED_WITH_INF_RETRIES"
    request = node.get_query_request(
        f"""
        INSERT INTO
            TABLE FUNCTION s3(
                'http://resolver:8083/root/data/test_query_is_canceled_with_inf_retries',
                'minio', 'minio123',
                'CSV', auto, 'none'
            )
        SELECT
            *
        FROM system.numbers
        LIMIT 1000000
        SETTINGS
            s3_max_single_part_upload_size=100,
            s3_min_upload_part_size=10000,
            s3_check_objects_after_upload=0
        """,
        query_id=insert_query_id,
    )

    assert_eq_with_retry(
        node,
        f"SELECT count() FROM system.processes WHERE query_id='{insert_query_id}'",
        "1",
    )

    assert_eq_with_retry(
        node,
        f"SELECT ProfileEvents['S3WriteRequestsErrors'] > 10 FROM system.processes WHERE query_id='{insert_query_id}'",
        "1",
        retry_count=12,
        sleep_time=10,
    )

    node.query(f"KILL QUERY WHERE query_id = '{insert_query_id}' ASYNC")

    # no more than 2 minutes
    assert_eq_with_retry(
        node,
        f"SELECT count() FROM system.processes WHERE query_id='{insert_query_id}'",
        "0",
        retry_count=120,
        sleep_time=1,
    )
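

# The mock delays its answers (timeout=5). With adaptive timeouts enabled the
# first, shorter attempts presumably time out and are retried; without them the
# flat 30 s request timeout absorbs the delay with no errors.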
@pytest.mark.parametrize("node_name", ["node", "node_with_inf_s3_retries"])
def test_adaptive_timeouts(cluster, broken_s3, node_name):
    node = cluster.instances[node_name]

    broken_s3.setup_fake_puts(part_length=1)
    broken_s3.setup_slow_answers(
        timeout=5,
        count=1000000,
    )

    insert_query_id = f"TEST_ADAPTIVE_TIMEOUTS_{node_name}"
    node.query(
        f"""
        INSERT INTO
            TABLE FUNCTION s3(
                'http://resolver:8083/root/data/adaptive_timeouts',
                'minio', 'minio123',
                'CSV', auto, 'none'
            )
        SELECT
            *
        FROM system.numbers
        LIMIT 1
        SETTINGS
            s3_request_timeout_ms=30000,
            s3_check_objects_after_upload=0
        """,
        query_id=insert_query_id,
    )

    broken_s3.reset()

    put_objects, s3_errors = get_put_counters(
        node, insert_query_id, log_type="QueryFinish"
    )

    assert put_objects == 1

    s3_use_adaptive_timeouts = node.query(
        f"""
        SELECT
            value
        FROM system.settings
        WHERE
            name='s3_use_adaptive_timeouts'
        """
    ).strip()

    if node_name == "node_with_inf_s3_retries":
        # first 2 attempts failed
        assert s3_use_adaptive_timeouts == "1"
        assert s3_errors == 1
    else:
        assert s3_use_adaptive_timeouts == "0"
        assert s3_errors == 0
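

# Deletes the table's data blobs directly from MinIO and checks that a later
# read reports a clear "specified key does not exist" error for the S3 disk.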
def test_no_key_found_disk(cluster, broken_s3):
    node = cluster.instances["node"]

    node.query(
        """
        CREATE TABLE no_key_found_disk (
            id Int64
        ) ENGINE=MergeTree()
        ORDER BY id
        SETTINGS
            storage_policy='s3'
        """
    )

    uuid = node.query(
        """
        SELECT uuid
        FROM system.tables
        WHERE name = 'no_key_found_disk'
        """
    ).strip()
    assert uuid

    node.query("INSERT INTO no_key_found_disk VALUES (1)")

    data = node.query("SELECT * FROM no_key_found_disk").strip()

    assert data == "1"

    remote_paths = (
        node.query(
            f"""
            SELECT remote_path
            FROM system.remote_data_paths
            WHERE
                local_path LIKE '%{uuid}%'
                AND local_path LIKE '%.bin%'
            ORDER BY ALL
            """
        )
        .strip()
        .split()
    )

    assert len(remote_paths) > 0

    # Remove every data blob of the table directly from MinIO and make sure it
    # is really gone before querying the table again.
    for path in remote_paths:
        assert cluster.minio_client.stat_object(cluster.minio_bucket, path).size > 0
        cluster.minio_client.remove_object(cluster.minio_bucket, path)
        with pytest.raises(Exception) as exc_info:
            size = cluster.minio_client.stat_object(cluster.minio_bucket, path).size
            assert size == 0
        assert "code: NoSuchKey" in str(exc_info.value)

    error = node.query_and_get_error("SELECT * FROM no_key_found_disk").strip()

    assert (
        "DB::Exception: The specified key does not exist. This error happened for S3 disk."
        in error
    )