mirror of https://github.com/ClickHouse/ClickHouse.git
commit 0260953a47 (parent a157a5b3b3)

    better
@@ -141,7 +141,6 @@ class IColumn;
     \
     M(Bool, force_index_by_date, 0, "Throw an exception if there is a partition key in a table, and it is not used.", 0) \
     M(Bool, force_primary_key, 0, "Throw an exception if there is primary key in a table, and it is not used.", 0) \
-    M(Bool, force_max_partition_limit, 0, "Throw an exception if max_partitions_to_read is violated.", 0) \
     M(String, force_data_skipping_indices, "", "Comma separated list of strings or literals with the name of the data skipping indices that should be used during query execution, otherwise an exception will be thrown.", 0) \
     \
     M(Float, max_streams_to_max_threads_ratio, 1, "Allows you to use more sources than the number of threads - to more evenly distribute work across threads. It is assumed that this is a temporary solution, since it will be possible in the future to make the number of sources equal to the number of threads, but for each source to dynamically select available work for itself.", 0) \
@@ -354,6 +353,7 @@ class IColumn;
     M(Bool, allow_introspection_functions, false, "Allow functions for introspection of ELF and DWARF for query profiling. These functions are slow and may impose security considerations.", 0) \
     \
     M(UInt64, max_partitions_per_insert_block, 100, "Limit maximum number of partitions in single INSERTed block. Zero means unlimited. Throw exception if the block contains too many partitions. This setting is a safety threshold, because using large number of partitions is a common misconception.", 0) \
+    M(UInt64, max_partitions_to_read, 0, "Limit the max number of partitions that can be accessed in one query. 0 means unlimited.", 0) \
     M(Bool, check_query_single_value_result, true, "Return check query result as single 1/0 value", 0) \
     M(Bool, allow_drop_detached, false, "Allow ALTER TABLE ... DROP DETACHED PART[ITION] ... queries", 0) \
     \
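The M(...) entries above form an X-macro list: each entry bundles a setting's type, name, default value, description, and flags, and the list is expanded several times to generate field declarations, metadata, and so on. Below is a minimal sketch of that pattern; APPLY_FOR_SETTINGS, DECLARE_FIELD, and PRINT_FIELD are illustrative names, not ClickHouse's actual macro machinery.

    #include <cstdint>
    #include <iostream>

    // Hypothetical X-macro list shaped like the M(...) entries above.
    #define APPLY_FOR_SETTINGS(M) \
        M(uint64_t, max_partitions_to_read, 0, "Limit the max number of partitions that can be accessed in one query. 0 means unlimited.") \
        M(bool, check_query_single_value_result, true, "Return check query result as single 1/0 value")

    struct Settings
    {
        // One expansion declares a field per setting, initialized to its default.
    #define DECLARE_FIELD(TYPE, NAME, DEFAULT, DESC) TYPE NAME = DEFAULT;
        APPLY_FOR_SETTINGS(DECLARE_FIELD)
    #undef DECLARE_FIELD
    };

    int main()
    {
        // A second expansion walks the same list, e.g. to dump defaults.
        Settings s;
    #define PRINT_FIELD(TYPE, NAME, DEFAULT, DESC) \
        std::cout << #NAME << " = " << s.NAME << '\n';
        APPLY_FOR_SETTINGS(PRINT_FIELD)
    #undef PRINT_FIELD
    }

Adding a setting like max_partitions_to_read then takes only the one new M(...) line; every expansion picks it up automatically.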
@@ -707,8 +707,9 @@ QueryPlanPtr MergeTreeDataSelectExecutor::readFromParts(
     if (parts_with_ranges.empty())
         return std::make_unique<QueryPlan>();

-    auto max_partitions_to_read = data.getSettings()->max_partitions_to_read;
-    if (settings.force_max_partition_limit && max_partitions_to_read)
+    auto max_partitions_to_read
+        = settings.max_partitions_to_read.changed ? settings.max_partitions_to_read : data.getSettings()->max_partitions_to_read;
+    if (max_partitions_to_read)
     {
         std::set<String> partitions;
         for (auto & part_with_ranges : parts_with_ranges)
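The hunk above changes how the limit is resolved: a query-level max_partitions_to_read that the user explicitly changed now overrides the table-level MergeTree setting, which makes the force_max_partition_limit switch removed earlier unnecessary. A minimal, self-contained sketch of that precedence rule, where QuerySetting is a simplified stand-in for ClickHouse's real settings field type:

    #include <iostream>

    // Simplified stand-in for a query-level setting field: it remembers
    // whether the user explicitly set it (cf. settings.max_partitions_to_read.changed).
    struct QuerySetting
    {
        unsigned long long value = 0;
        bool changed = false;
    };

    // Mirrors the patched readFromParts logic: an explicitly changed query
    // setting wins; otherwise fall back to the table-level setting.
    unsigned long long resolveMaxPartitionsToRead(const QuerySetting & query_setting, unsigned long long table_setting)
    {
        return query_setting.changed ? query_setting.value : table_setting;
    }

    int main()
    {
        unsigned long long table_limit = 1;  // create table ... settings max_partitions_to_read = 1

        QuerySetting overridden{2, true};    // select ... settings max_partitions_to_read = 2
        std::cout << resolveMaxPartitionsToRead(overridden, table_limit) << '\n';  // prints 2

        QuerySetting untouched{};            // no per-query override
        std::cout << resolveMaxPartitionsToRead(untouched, table_limit) << '\n';   // prints 1
    }

A resolved value of 0 still means "no limit", matching the setting's documented default.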
@@ -1,6 +1,6 @@
-2021-01-03 1 2
-2021-01-04 3 4
-2021-01-03 1 2
-2021-01-04 3 4
-2021-01-03 1 2
-2021-01-04 3 4
+2021-01-01 1 2
+2021-01-02 4 5
+2021-01-01 1 2
+2021-01-02 4 5
+2021-01-01 1 2
+2021-01-02 4 5
@@ -2,16 +2,16 @@ drop table if exists p;

 create table p(d Date, i int, j int) engine MergeTree partition by d order by i settings max_partitions_to_read = 1;

-insert into p values (yesterday(), 1, 2), (today(), 3, 4);
+insert into p values ('2021-01-01', 1, 2), ('2021-01-02', 4, 5);

-select * from p order by i; -- default no limit
+select * from p order by i; -- { serverError 565 }

-select * from p order by i settings force_max_partition_limit = 0;
+select * from p order by i settings max_partitions_to_read = 2;

-select * from p order by i settings force_max_partition_limit = 1; -- { serverError 565 }
+select * from p order by i settings max_partitions_to_read = 0; -- unlimited

 alter table p modify setting max_partitions_to_read = 2;

-select * from p order by i settings force_max_partition_limit = 1;
+select * from p order by i;

 drop table if exists p;