Merge pull request #25731 from azat/fix-test

Fix 01641_memory_tracking_insert_optimize
commit 3f6fe26e9a
alexey-milovidov authored 2021-06-29 02:12:49 +03:00; committed by GitHub
3 changed files with 13 additions and 3 deletions

@@ -0,0 +1,12 @@
+drop table if exists data_01641;
+
+create table data_01641 (key Int, value String) engine=MergeTree order by (key, repeat(value, 40)) settings old_parts_lifetime=0, min_bytes_for_wide_part=0;
+
+SET max_block_size = 1000, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;
+insert into data_01641 select number, toString(number) from numbers(120000);
+
+-- This must fail, which proves that memory is tracked in the OPTIMIZE query.
+set max_memory_usage='10Mi', max_untracked_memory=0;
+optimize table data_01641 final; -- { serverError 241 }
+
+drop table data_01641;

@@ -18,7 +18,6 @@
         "functions_bad_arguments", /// Too long for TSan
         "01603_read_with_backoff_bug", /// Too long for TSan
         "01646_system_restart_replicas_smoke", /// RESTART REPLICAS can acquire too many locks, while only 64 are possible from one thread under TSan
-        "01641_memory_tracking_insert_optimize", /// INSERTing lots of rows is too heavy for TSan
         "01017_uniqCombined_memory_usage" /// Fine thresholds on memory usage
     ],
     "address-sanitizer": [
"address-sanitizer": [
@@ -71,8 +70,7 @@
         "hyperscan",
         "01193_metadata_loading",
         "01473_event_time_microseconds",
-        "01396_inactive_replica_cleanup_nodes",
-        "01641_memory_tracking_insert_optimize" /// INSERTing lots of rows is too heavy in debug build
+        "01396_inactive_replica_cleanup_nodes"
     ],
     "unbundled-build": [
         "00429",