test_distributed_directory_monitor_split_batch_on_failure: Track everything
Reverting the memory reduction and forcing max_untracked_memory to 0, which should be more reliable and closer to the original test.
commit aad913291d
parent 9f32ecca89
@@ -40,8 +40,9 @@ def test_distributed_directory_monitor_split_batch_on_failure_OFF(started_cluster):
         limit = 100e3
         node2.query(f'insert into dist select number/100, number from system.numbers limit {limit} offset {limit*i}', settings={
             # max_memory_usage is the limit for the batch on the remote node
-            # (local query should not be affected since 20MB is enough for 100K rows)
-            'max_memory_usage': '20Mi',
+            # (local query should not be affected since 30MB is enough for 100K rows)
+            'max_memory_usage': '30Mi',
+            'max_untracked_memory': '0'
         })
     # "Received from" is mandatory, since the exception should be thrown on the remote node.
     with pytest.raises(QueryRuntimeException, match=r'DB::Exception: Received from.*Memory limit \(for query\) exceeded: .*while pushing to view default\.mv'):
@@ -53,8 +54,9 @@ def test_distributed_directory_monitor_split_batch_on_failure_ON(started_cluster):
         limit = 100e3
         node1.query(f'insert into dist select number/100, number from system.numbers limit {limit} offset {limit*i}', settings={
             # max_memory_usage is the limit for the batch on the remote node
-            # (local query should not be affected since 20MB is enough for 100K rows)
-            'max_memory_usage': '20Mi',
+            # (local query should not be affected since 30MB is enough for 100K rows)
+            'max_memory_usage': '30Mi',
+            'max_untracked_memory': '0'
         })
     node1.query('system flush distributed dist')
     assert int(node1.query('select count() from dist_data')) == 100000