Merge pull request #53113 from ClickHouse/Fix_flaky_test_user_azure_query

Fix flaky test by using azure_query function

Commit: 227586acf4
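
This PR routes the cluster tests' queries through the shared azure_query retry helper instead of calling node.query directly, so transient Azurite transport failures are retried rather than failing the test run. A minimal sketch of the resulting call pattern, assuming the usual integration-test node fixture (azure_query and expect_error are taken from the diff below; the queries are placeholders):

    # Normal query: retried on the known-transient Azure transport error.
    result = azure_query(node, "SELECT count(*) FROM my_azure_table")

    # New in this PR: expect_error="true" makes the helper return the
    # server's error text (via node.query_and_get_error) so negative
    # tests can share the same retry path.
    err = azure_query(node, "SELECT * FROM missing_table", expect_error="true")
    assert "container does not exist" in err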
tests/integration/test_storage_azure_blob_storage/test.py

@@ -35,10 +35,13 @@ def cluster():
     cluster.shutdown()


-def azure_query(node, query, try_num=10, settings={}):
+def azure_query(node, query, expect_error="false", try_num=10, settings={}):
     for i in range(try_num):
         try:
-            return node.query(query, settings=settings)
+            if expect_error == "true":
+                return node.query_and_get_error(query, settings=settings)
+            else:
+                return node.query(query, settings=settings)
         except Exception as ex:
             retriable_errors = [
                 "DB::Exception: Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response",
@@ -656,7 +659,7 @@ def test_read_from_not_existing_container(cluster):
     node = cluster.instances["node"]
     query = f"select * from azureBlobStorage('http://azurite1:10000/devstoreaccount1', 'cont_not_exists', 'test_table.csv', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto')"
     expected_err_msg = "container does not exist"
-    assert expected_err_msg in node.query_and_get_error(query)
+    assert expected_err_msg in azure_query(node, query, expect_error="true")


 def test_function_signatures(cluster):
tests/integration/test_storage_azure_blob_storage/test_cluster.py

@@ -17,6 +17,7 @@ from helpers.test_tools import TSV
 from helpers.network import PartitionManager
 from helpers.mock_servers import start_mock_servers
 from helpers.test_tools import exec_query_with_retry
+from test_storage_azure_blob_storage.test import azure_query


 @pytest.fixture(scope="module")
@@ -48,26 +49,6 @@ def cluster():
     cluster.shutdown()


-def azure_query(node, query, try_num=3, settings={}):
-    for i in range(try_num):
-        try:
-            return node.query(query, settings=settings)
-        except Exception as ex:
-            retriable_errors = [
-                "DB::Exception: Azure::Core::Http::TransportException: Connection was closed by the server while trying to read a response"
-            ]
-            retry = False
-            for error in retriable_errors:
-                if error in str(ex):
-                    retry = True
-                    print(f"Try num: {i}. Having retriable error: {ex}")
-                    time.sleep(i)
-                    break
-            if not retry or i == try_num - 1:
-                raise Exception(ex)
-            continue
-
-
 def get_azure_file_content(filename):
     container_name = "cont"
     connection_string = "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;"
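
The block deleted above was a local copy (with try_num=3) of the retry helper that now lives only in test_storage_azure_blob_storage/test.py. Condensed, its control flow is the retry-with-linear-backoff pattern sketched below (a generic restatement, not the exact code; retry_on_transient is a hypothetical name):

    import time

    def retry_on_transient(fn, try_num=3, retriable=("Connection was closed by the server",)):
        # Retry fn only on known-transient errors, sleeping i seconds
        # before attempt i + 1; re-raise on the final attempt or on any
        # non-retriable error.
        for i in range(try_num):
            try:
                return fn()
            except Exception as ex:
                if not any(marker in str(ex) for marker in retriable) or i == try_num - 1:
                    raise
                print(f"Try num: {i}. Having retriable error: {ex}")
                time.sleep(i)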
@@ -89,20 +70,22 @@ def test_select_all(cluster):
     )
     print(get_azure_file_content("test_cluster_select_all.csv"))

-    pure_azure = node.query(
+    pure_azure = azure_query(
+        node,
         """
     SELECT * from azureBlobStorage(
         'http://azurite1:10000/devstoreaccount1', 'cont', 'test_cluster_select_all.csv', 'devstoreaccount1',
         'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV',
-        'auto')"""
+        'auto')""",
     )
     print(pure_azure)
-    distributed_azure = node.query(
+    distributed_azure = azure_query(
+        node,
         """
     SELECT * from azureBlobStorageCluster(
         'simple_cluster', 'http://azurite1:10000/devstoreaccount1', 'cont', 'test_cluster_select_all.csv', 'devstoreaccount1',
         'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV',
-        'auto')"""
+        'auto')""",
     )
     print(distributed_azure)
     assert TSV(pure_azure) == TSV(distributed_azure)
@@ -119,20 +102,22 @@ def test_count(cluster):
     )
     print(get_azure_file_content("test_cluster_count.csv"))

-    pure_azure = node.query(
+    pure_azure = azure_query(
+        node,
         """
     SELECT count(*) from azureBlobStorage(
         'http://azurite1:10000/devstoreaccount1', 'cont', 'test_cluster_count.csv', 'devstoreaccount1',
         'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV',
-        'auto', 'key UInt64')"""
+        'auto', 'key UInt64')""",
     )
     print(pure_azure)
-    distributed_azure = node.query(
+    distributed_azure = azure_query(
+        node,
         """
     SELECT count(*) from azureBlobStorageCluster(
         'simple_cluster', 'http://azurite1:10000/devstoreaccount1', 'cont', 'test_cluster_count.csv', 'devstoreaccount1',
         'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV',
-        'auto', 'key UInt64')"""
+        'auto', 'key UInt64')""",
     )
     print(distributed_azure)
     assert TSV(pure_azure) == TSV(distributed_azure)
@@ -148,7 +133,8 @@ def test_union_all(cluster):
         "'auto', 'a Int32, b String') VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')",
     )

-    pure_azure = node.query(
+    pure_azure = azure_query(
+        node,
         """
     SELECT * FROM
     (
@@ -163,9 +149,10 @@ def test_union_all(cluster):
         'auto', 'a Int32, b String')
     )
     ORDER BY (a)
-    """
+    """,
     )
-    azure_distributed = node.query(
+    azure_distributed = azure_query(
+        node,
         """
     SELECT * FROM
     (
@@ -182,7 +169,7 @@ def test_union_all(cluster):
         'auto', 'a Int32, b String')
     )
     ORDER BY (a)
-    """
+    """,
     )

     assert TSV(pure_azure) == TSV(azure_distributed)
@@ -197,14 +184,15 @@ def test_skip_unavailable_shards(cluster):
         "'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', "
         "'auto', 'a UInt64') VALUES (1), (2)",
     )
-    result = node.query(
+    result = azure_query(
+        node,
         """
     SELECT count(*) from azureBlobStorageCluster(
         'cluster_non_existent_port',
         'http://azurite1:10000/devstoreaccount1', 'cont', 'test_skip_unavailable.csv', 'devstoreaccount1',
         'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==')
     SETTINGS skip_unavailable_shards = 1
-    """
+    """,
     )

     assert result == "2\n"
@@ -220,13 +208,14 @@ def test_unset_skip_unavailable_shards(cluster):
         "'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', "
         "'auto', 'a UInt64') VALUES (1), (2)",
     )
-    result = node.query(
+    result = azure_query(
+        node,
         """
     SELECT count(*) from azureBlobStorageCluster(
         'cluster_non_existent_port',
         'http://azurite1:10000/devstoreaccount1', 'cont', 'test_skip_unavailable.csv', 'devstoreaccount1',
         'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==')
-    """
+    """,
     )

     assert result == "2\n"
@@ -243,19 +232,21 @@ def test_cluster_with_named_collection(cluster):
         "'auto', 'a UInt64') VALUES (1), (2)",
     )

-    pure_azure = node.query(
+    pure_azure = azure_query(
+        node,
         """
     SELECT * from azureBlobStorage(
         'http://azurite1:10000/devstoreaccount1', 'cont', 'test_cluster_with_named_collection.csv', 'devstoreaccount1',
         'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==')
-    """
+    """,
     )

-    azure_cluster = node.query(
+    azure_cluster = azure_query(
+        node,
         """
     SELECT * from azureBlobStorageCluster(
         'simple_cluster', azure_conf2, container='cont', blob_path='test_cluster_with_named_collection.csv')
-    """
+    """,
     )

     assert TSV(pure_azure) == TSV(azure_cluster)
@@ -277,12 +268,13 @@ def test_partition_parallel_readig_withcluster(cluster):
     assert "3,2,1\n" == get_azure_file_content("test_tf_1.csv")
     assert "78,43,45\n" == get_azure_file_content("test_tf_45.csv")

-    azure_cluster = node.query(
+    azure_cluster = azure_query(
+        node,
         """
     SELECT count(*) from azureBlobStorageCluster(
         'simple_cluster',
         azure_conf2, container='cont', blob_path='test_tf_*.csv', format='CSV', compression='auto', structure='column1 UInt32, column2 UInt32, column3 UInt32')
-    """
+    """,
     )

     assert azure_cluster == "3\n"