From 772cc1910a884f25fc6218059c02b2f69259c97f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 27 Jul 2021 14:36:30 +0200 Subject: [PATCH] test_distributed_directory_monitor_split_batch_on_failure: Lower memory limit With the changes, when running under the sanitizers, some of the internal arenas do not reach the threshold to move from the stack to the heap, so the allocations aren't accounted and the limit won't be reached Testing with a lower limit to confirm it's enough to throw in remote queries but not locally --- .../test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/test.py b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/test.py index 9cbf8771ee5..19afb1f13c7 100644 --- a/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/test.py +++ b/tests/integration/test_distributed_directory_monitor_split_batch_on_failure/test.py @@ -40,8 +40,8 @@ def test_distributed_directory_monitor_split_batch_on_failure_OFF(started_cluste limit = 100e3 node2.query(f'insert into dist select number/100, number from system.numbers limit {limit} offset {limit*i}', settings={ # max_memory_usage is the limit for the batch on the remote node - # (local query should not be affected since 30MB is enough for 100K rows) - 'max_memory_usage': '30Mi', + # (local query should not be affected since 20MB is enough for 100K rows) + 'max_memory_usage': '20Mi', }) # "Received from" is mandatory, since the exception should be thrown on the remote node. 
with pytest.raises(QueryRuntimeException, match=r'DB::Exception: Received from.*Memory limit \(for query\) exceeded: .*while pushing to view default\.mv'): @@ -53,8 +53,8 @@ def test_distributed_directory_monitor_split_batch_on_failure_ON(started_cluster limit = 100e3 node1.query(f'insert into dist select number/100, number from system.numbers limit {limit} offset {limit*i}', settings={ # max_memory_usage is the limit for the batch on the remote node - # (local query should not be affected since 30MB is enough for 100K rows) - 'max_memory_usage': '30Mi', + # (local query should not be affected since 20MB is enough for 100K rows) + 'max_memory_usage': '20Mi', }) node1.query('system flush distributed dist') assert int(node1.query('select count() from dist_data')) == 100000