Mirror of https://github.com/ClickHouse/ClickHouse.git, commit 77dd869414
* Add ability to log when max_partitions_per_insert_block is reached

  There are plenty of reasons to drop max_partitions_per_insert_block down from its default of 100, the main one being that even getting remotely close to this number of partitions creates a lot of merge activity that can destabilise a cluster.

  This commit adds the ability to log when the above limit is reached, rather than throw an exception, so that you can gauge the impact on users when dropping this value down.

* Update docs/ru/operations/settings/query-complexity.md

* Update docs/ru/operations/settings/query-complexity.md

* Update docs/ru/operations/settings/query-complexity.md

Co-authored-by: Nikita Taranov <nickita.taranov@gmail.com>
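As a rough sketch of how this might be used operationally (the session-level SET usage below is an assumption for illustration, not taken from the test that follows): the limit can be lowered while only logging violations, so the impact on users can be measured before enforcing it.

-- Hypothetical session-level usage: lower the partition limit but only log when it is hit.
SET max_partitions_per_insert_block = 10;
SET throw_on_max_partitions_per_insert_block = 0;
-- Inserts touching more than 10 partitions now succeed, with the overshoot noted in the server log.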
12 lines | 761 B | SQL
drop table if exists data_01593;
create table data_01593 (key Int) engine=MergeTree() order by key partition by key;
insert into data_01593 select * from numbers_mt(10);
insert into data_01593 select * from numbers_mt(10) settings max_partitions_per_insert_block=1; -- { serverError TOO_MANY_PARTS }
-- throw_on_max_partitions_per_insert_block=false means we'll just log that the limit was reached rather than throw
insert into data_01593 select * from numbers_mt(10) settings max_partitions_per_insert_block=1, throw_on_max_partitions_per_insert_block=false;
-- settings specified for INSERT are preferred over those on the SELECT
insert into data_01593 settings max_partitions_per_insert_block=100 select * from numbers_mt(10) settings max_partitions_per_insert_block=1;
drop table data_01593;
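A possible follow-up check, not part of the test above and purely illustrative: querying system.parts shows how many active parts and distinct partitions an insert actually produced (run it before the final DROP).

-- Illustrative only: count active parts and distinct partitions for the test table.
SELECT count() AS active_parts, uniqExact(partition) AS partitions
FROM system.parts
WHERE database = currentDatabase() AND table = 'data_01593' AND active;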