test_distributed_directory_monitor_split_batch_on_failure: Lower memory limit

With the changes, when running under the sanitizers, some of the internal arenas do not reach
the threshold to move from the stack to the heap, so their allocations aren't accounted for
and the memory limit is never reached.

Test with a lower limit to confirm it is enough to throw in remote queries
but not locally.
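
For reference, a minimal sketch of the pattern the test relies on (assuming the node/dist fixtures and the QueryRuntimeException helper from the surrounding integration tests; the function name here is illustrative):

import pytest
from helpers.client import QueryRuntimeException

def assert_memory_limit_hit_remotely(node, rows=100_000):
    # The settings passed with the INSERT are stored with the batch in the
    # distributed directory; 20Mi is enough for the local query over 100K rows,
    # but the batch replayed on the remote node is expected to exceed it.
    node.query(
        f'insert into dist select number/100, number from system.numbers limit {rows}',
        settings={'max_memory_usage': '20Mi'},
    )
    # "Received from" in the match asserts the exception originated on the
    # remote node, not locally.
    with pytest.raises(QueryRuntimeException,
                       match=r'Received from.*Memory limit \(for query\) exceeded'):
        node.query('system flush distributed dist')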
Raúl Marín 2021-07-27 14:36:30 +02:00
parent 7e27b773d3
commit 772cc1910a


@@ -40,8 +40,8 @@ def test_distributed_directory_monitor_split_batch_on_failure_OFF(started_cluste
 limit = 100e3
 node2.query(f'insert into dist select number/100, number from system.numbers limit {limit} offset {limit*i}', settings={
     # max_memory_usage is the limit for the batch on the remote node
-    # (local query should not be affected since 30MB is enough for 100K rows)
-    'max_memory_usage': '30Mi',
+    # (local query should not be affected since 20MB is enough for 100K rows)
+    'max_memory_usage': '20Mi',
 })
 # "Received from" is mandatory, since the exception should be thrown on the remote node.
 with pytest.raises(QueryRuntimeException, match=r'DB::Exception: Received from.*Memory limit \(for query\) exceeded: .*while pushing to view default\.mv'):
@@ -53,8 +53,8 @@ def test_distributed_directory_monitor_split_batch_on_failure_ON(started_cluster
 limit = 100e3
 node1.query(f'insert into dist select number/100, number from system.numbers limit {limit} offset {limit*i}', settings={
     # max_memory_usage is the limit for the batch on the remote node
-    # (local query should not be affected since 30MB is enough for 100K rows)
-    'max_memory_usage': '30Mi',
+    # (local query should not be affected since 20MB is enough for 100K rows)
+    'max_memory_usage': '20Mi',
 })
 node1.query('system flush distributed dist')
 assert int(node1.query('select count() from dist_data')) == 100000