Added limit on size of AST after expansion of aliases [#CLICKHOUSE-18]

Alexey Milovidov 2018-03-02 09:33:17 +03:00
parent 05893c1d44
commit 22bbc9f08c
5 changed files with 26 additions and 2 deletions

@@ -83,6 +83,7 @@ namespace ErrorCodes
     extern const int ILLEGAL_AGGREGATION;
     extern const int SUPPORT_IS_DISABLED;
     extern const int TOO_DEEP_AST;
+    extern const int TOO_BIG_AST;
     extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
 }
@@ -932,6 +933,16 @@ void ExpressionAnalyzer::normalizeTree()
     SetOfASTs tmp_set;
     MapOfASTs tmp_map;
     normalizeTreeImpl(ast, tmp_map, tmp_set, "", 0);
+
+    try
+    {
+        ast->checkSize(settings.limits.max_expanded_ast_elements);
+    }
+    catch (Exception & e)
+    {
+        e.addMessage("(after expansion of aliases)");
+        throw;
+    }
 }
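
The new ast->checkSize() call walks the fully expanded tree and throws TOO_BIG_AST once the node count exceeds the limit; addMessage() then appends "(after expansion of aliases)" so the error is distinguishable from the plain max_ast_elements check. A minimal sketch of such a recursive size check, using a simplified stand-in for the real IAST (illustrative, not the exact ClickHouse implementation):

    #include <memory>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Simplified stand-in for ClickHouse's IAST node.
    struct Node
    {
        std::vector<std::shared_ptr<Node>> children;

        // Returns the node count of this subtree. The limit is checked at
        // every level on the way back up, so the first oversized subtree
        // throws before the rest of the tree is visited.
        size_t checkSize(size_t max_size) const
        {
            size_t res = 1;
            for (const auto & child : children)
                res += child->checkSize(max_size);

            if (res > max_size)
                throw std::runtime_error("AST is too big. Maximum: " + std::to_string(max_size));

            return res;
        }
    };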
@@ -942,7 +953,8 @@ void ExpressionAnalyzer::normalizeTreeImpl(
     ASTPtr & ast, MapOfASTs & finished_asts, SetOfASTs & current_asts, std::string current_alias, size_t level)
 {
     if (level > settings.limits.max_ast_depth)
-        throw Exception("Normalized AST is too deep. Maximum: " + settings.limits.max_ast_depth.toString(), ErrorCodes::TOO_DEEP_AST);
+        throw Exception("Normalized AST is too deep. Maximum: "
+            + settings.limits.max_ast_depth.toString(), ErrorCodes::TOO_DEEP_AST);
 
     if (finished_asts.count(ast))
     {

@@ -63,6 +63,7 @@ struct Limits
     M(SettingUInt64, max_pipeline_depth, 1000, "") \
     M(SettingUInt64, max_ast_depth, 1000, "") /** Checked not during parsing, */ \
     M(SettingUInt64, max_ast_elements, 50000, "") /** but after parsing the request. */ \
+    M(SettingUInt64, max_expanded_ast_elements, 500000, "Limit after expansion of aliases.") \
     \
     /** 0 - everything is allowed. 1 - only read requests. 2 - only read requests, as well as changing settings, except for the readonly setting. */ \
     M(SettingUInt64, readonly, 0, "") \
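
Each M(...) row above is an entry in an X-macro list: the same list is expanded with different definitions of M to declare the members of struct Limits, to register setting names for SET, and so on, which is why adding max_expanded_ast_elements is a one-line change. A sketch of the pattern under assumed names (APPLY_FOR_LIMITS and DECLARE here are illustrative, not the exact macros in Limits.h):

    #include <cstdint>

    using SettingUInt64 = uint64_t;  // stand-in for the real setting wrapper type

    // Hypothetical, trimmed-down version of the list in Limits.h.
    #define APPLY_FOR_LIMITS(M) \
        M(SettingUInt64, max_ast_depth, 1000, "") \
        M(SettingUInt64, max_ast_elements, 50000, "") \
        M(SettingUInt64, max_expanded_ast_elements, 500000, "Limit after expansion of aliases.")

    struct Limits
    {
        // One member per row, initialized to the row's default value.
    #define DECLARE(TYPE, NAME, DEFAULT, DESCRIPTION) TYPE NAME = DEFAULT;
        APPLY_FOR_LIMITS(DECLARE)
    #undef DECLARE
    };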

@@ -38,7 +38,7 @@ BlockInputStreams StorageView::read(
     const unsigned /*num_streams*/)
 {
     processed_stage = QueryProcessingStage::FetchColumns;
-    BlockInputStreams res = InterpreterSelectWithUnionQuery(inner_query->clone(), context, column_names).executeWithMultipleStreams();
+    BlockInputStreams res = InterpreterSelectWithUnionQuery(inner_query, context, column_names).executeWithMultipleStreams();
 
     /// It's expected that the columns read from storage are not constant.
     /// Because method 'getSampleBlockForColumns' is used to obtain a structure of result in InterpreterSelectQuery.
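
ASTPtr is a shared pointer, so this change makes StorageView hand the interpreter its stored view query directly instead of making a fresh deep copy on every read. A minimal illustration of the difference, with a simplified node type (illustrative only):

    #include <memory>
    #include <vector>

    struct IAST;
    using ASTPtr = std::shared_ptr<IAST>;

    struct IAST
    {
        std::vector<ASTPtr> children;

        // Deep copy: every node of the subtree is duplicated.
        ASTPtr clone() const
        {
            auto res = std::make_shared<IAST>();
            for (const auto & child : children)
                res->children.push_back(child->clone());
            return res;
        }
    };

    // Passing `ast` shares one tree among all holders (cheap, but mutations
    // are visible everywhere); passing `ast->clone()` hands over a private copy.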

@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+. "$CURDIR/../shell_config.sh"
+
+exception_pattern="too big"
+
+${CLICKHOUSE_CLIENT} --max_expanded_ast_elements=500000 --query="
+select 1 as a, a+a as b, b+b as c, c+c as d, d+d as e, e+e as f, f+f as g, g+g as h, h+h as i, i+i as j, j+j as k, k+k as l, l+l as m, m+m as n, n+n as o, o+o as p, p+p as q, q+q as r, r+r as s, s+s as t, t+t as u, u+u as v, v+v as w, w+w as x, x+x as y, y+y as z
+" 2>&1 | grep -c "$exception_pattern"