commit 15b74cee54 (parent cf45181e8c)
Author: zhongyuankai
Date:   2024-05-10 08:36:13 +08:00

4 changed files with 19 additions and 13 deletions

View File

@@ -756,7 +756,7 @@ size_t getMaxArraySize()
 
 bool isLimitArraySize()
 {
     if (auto context = Context::getGlobalContextInstance())
-        return context->getServerSettings().aggregate_function_group_array_limit_size;
+        return context->getServerSettings().aggregate_function_group_array_has_limit_size;
     return false;
 }

View File

@@ -50,7 +50,7 @@ namespace DB
     M(UInt64, max_temporary_data_on_disk_size, 0, "The maximum amount of storage that could be used for external aggregation, joins or sorting., ", 0) \
     M(String, temporary_data_in_cache, "", "Cache disk name for temporary data.", 0) \
     M(UInt64, aggregate_function_group_array_max_element_size, 0xFFFFFF, "Max array element size in bytes for groupArray function. This limit is checked at serialization and help to avoid large state size.", 0) \
-    M(Bool, aggregate_function_group_array_limit_size, false, "This is set to true. Array elements are truncated when they exceed the max array element size.", 0) \
+    M(Bool, aggregate_function_group_array_has_limit_size, false, "When the max array element size is exceeded, a `Too large array size` exception will be thrown by default. When set to true, no exception will be thrown, and the excess elements will be discarded.", 0) \
     M(UInt64, max_server_memory_usage, 0, "Maximum total memory usage of the server in bytes. Zero means unlimited.", 0) \
     M(Double, max_server_memory_usage_to_ram_ratio, 0.9, "Same as max_server_memory_usage but in to RAM ratio. Allows to lower max memory on low-memory systems.", 0) \
     M(UInt64, merges_mutations_memory_usage_soft_limit, 0, "Maximum total memory usage for merges and mutations in bytes. Zero means unlimited.", 0) \
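For orientation, a minimal sketch of the behavior the new setting description promises, written in the idiom of the integration test further down (the node and table names here are illustrative, not part of this commit):

# A minimal sketch, assuming a server configured with
# aggregate_function_group_array_max_element_size = 10, as in the test config below.
# With aggregate_function_group_array_has_limit_size = false (the default), serializing
# a groupArray state larger than the element-size limit raises "Too large array size";
# with it set to true, the excess elements are silently discarded instead.
node.query(
    "CREATE TABLE t (x AggregateFunction(groupArray, Array(UInt8))) "
    "ENGINE = MergeTree ORDER BY tuple()"
)
node.query("INSERT INTO t SELECT groupArrayState([zero]) FROM zeros(11)")
# default: the insert above fails with "Too large array size";
# has_limit_size = true: it succeeds and the stored state keeps only 10 elements.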

View File

@@ -1,4 +1,4 @@
 <clickhouse>
     <aggregate_function_group_array_max_element_size>10</aggregate_function_group_array_max_element_size>
-    <aggregate_function_group_array_limit_size>false</aggregate_function_group_array_limit_size>
+    <aggregate_function_group_array_has_limit_size>false</aggregate_function_group_array_has_limit_size>
 </clickhouse>
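A quick way to sanity-check that a restarted server picked up the renamed setting; this snippet is hypothetical, not part of the commit, and assumes the system.server_settings table available in recent ClickHouse releases:

# Hypothetical check: settings declared in ServerSettings.h are surfaced in
# system.server_settings, so the new name should appear there after a restart.
value = node.query(
    "SELECT value FROM system.server_settings "
    "WHERE name = 'aggregate_function_group_array_has_limit_size'"
)
assert value.strip() == "false"  # matches the value shipped in the config above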

View File

@@ -9,6 +9,12 @@ node1 = cluster.add_instance(
     stay_alive=True,
 )
 
+node2 = cluster.add_instance(
+    "node2",
+    main_configs=["configs/group_array_max_element_size.xml"],
+    stay_alive=True,
+)
+
 
 @pytest.fixture(scope="module")
 def started_cluster():
@@ -66,30 +72,30 @@ def test_max_exement_size(started_cluster):
 
 
 def test_limit_size(started_cluster):
-    node1.query(
+    node2.query(
         "CREATE TABLE tab4 (x AggregateFunction(groupArray, Array(UInt8))) ENGINE = MergeTree ORDER BY tuple()"
     )
-    node1.query("insert into tab4 select groupArrayState([zero]) from zeros(10)")
-    assert node1.query("select length(groupArrayMerge(x)) from tab4") == "10\n"
+    node2.query("insert into tab4 select groupArrayState([zero]) from zeros(10)")
+    assert node2.query("select length(groupArrayMerge(x)) from tab4") == "10\n"
 
-    node1.replace_in_config(
+    node2.replace_in_config(
         "/etc/clickhouse-server/config.d/group_array_max_element_size.xml",
         "false",
         "true",
     )
 
-    node1.restart_clickhouse()
+    node2.restart_clickhouse()
 
-    node1.query("insert into tab4 select groupArrayState([zero]) from zeros(100)")
-    assert node1.query("select length(groupArrayMerge(x)) from tab4") == "10\n"
+    node2.query("insert into tab4 select groupArrayState([zero]) from zeros(100)")
+    assert node2.query("select length(groupArrayMerge(x)) from tab4") == "10\n"
 
-    node1.replace_in_config(
+    node2.replace_in_config(
         "/etc/clickhouse-server/config.d/group_array_max_element_size.xml",
         "true",
         "false",
     )
 
-    node1.restart_clickhouse()
+    node2.restart_clickhouse()
 
     with pytest.raises(Exception, match=r"Too large array size"):
-        node1.query("insert into tab4 select groupArrayState([zero]) from zeros(11)")
+        node2.query("insert into tab4 select groupArrayState([zero]) from zeros(11)")