mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-10-20 15:30:50 +00:00
7b4fcc5fc5
Before this patch, if the query fails (due to "Too many simultaneous queries", for example) it will not read the external tables info, and the next request will interpret them as the beginning of a query, giving: DB::Exception: Unknown packet 11861 from client v2: reordering in executeQuery() is not enough, since the query can fail in other places before it, e.g. quotas v3: I cannot make a non-integration test (since there is no way to receive "Unknown packet" via the client, only from the server log), hence added one
27 lines
779 B
Python
27 lines
779 B
Python
import pytest
|
|
import time
|
|
|
|
from helpers.cluster import ClickHouseCluster
|
|
from helpers.client import QueryTimeoutExceedException, QueryRuntimeException
|
|
|
|
# Single-node cluster for this integration test.
# NOTE(review): ClickHouseCluster/add_instance come from the project's test
# helpers package and are not visible here — semantics assumed from usage below.
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node')
@pytest.fixture(scope="module")
def start_cluster():
    """Start the ClickHouse cluster once per module and yield it to tests.

    Shutdown runs in ``finally`` so the cluster is torn down even if startup
    raises or a test fails mid-way.
    """
    try:
        cluster.start()
        yield cluster
    finally:
        cluster.shutdown()
def test_different_versions(start_cluster):
    # Regression test: a query that fails (or times out) before the server has
    # read the external-tables info must not leave unread packets on the
    # connection; otherwise the next query on that connection is misparsed and
    # the server logs "Unknown packet ... from client" (see commit message).
    #
    # NOTE: statement order matters — the timed-out query must come first so
    # the follow-up query exercises the same connection's leftover state.
    with pytest.raises(QueryTimeoutExceedException):
        node.query("SELECT sleep(3)", timeout=1)
    # The timed-out query is presumably still counted as running for this
    # user, so with a limit of 1 concurrent query this one fails with
    # "Too many simultaneous queries" — TODO confirm against server behavior.
    with pytest.raises(QueryRuntimeException):
        node.query("SELECT 1", settings={'max_concurrent_queries_for_user': 1})
    assert node.contains_in_log('Too many simultaneous queries for user')
    # The actual regression check: no protocol desynchronization occurred.
    assert not node.contains_in_log('Unknown packet')