Mirror of https://github.com/ClickHouse/ClickHouse.git
Merge pull request #32751 from ClickHouse/fix-32668

Apply some more optimizations to NO_QUERY ast.

Commit: 2e62f086a1
@@ -41,6 +41,9 @@ struct SelectQueryOptions
     /// It is needed because lazy normal projections require special planning in FetchColumns stage, such as adding WHERE transform.
     /// It is also used to avoid adding aggregating step when aggregate projection is chosen.
     bool is_projection_query = false;
+    /// This flag is needed for projection description.
+    /// Otherwise, keys for GROUP BY may be removed as constants.
+    bool ignore_ast_optimizations = false;
     bool ignore_alias = false;
     bool is_internal = false;
     bool is_subquery = false; // non-subquery can also have subquery_depth > 0, e.g. insert select
@@ -120,6 +123,12 @@ struct SelectQueryOptions
         return *this;
     }

+    SelectQueryOptions & ignoreASTOptimizationsAlias(bool value = true)
+    {
+        ignore_ast_optimizations = value;
+        return *this;
+    }
+
     SelectQueryOptions & setInternal(bool value = false)
     {
         is_internal = value;
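For reference, a minimal self-contained sketch (not part of the diff) of how these builder-style setters compose. The struct below is trimmed down to just the two flags involved here, and main() only demonstrates the chaining used later in getMinMaxCountProjection():

#include <iostream>

struct SelectQueryOptions
{
    bool ignore_ast_optimizations = false;
    bool ignore_alias = false;

    /// Builder-style setters return *this so calls can be chained.
    SelectQueryOptions & ignoreASTOptimizationsAlias(bool value = true)
    {
        ignore_ast_optimizations = value;
        return *this;
    }

    SelectQueryOptions & ignoreAlias(bool value = true)
    {
        ignore_alias = value;
        return *this;
    }
};

int main()
{
    /// Chaining sets both flags on the same object, as getMinMaxCountProjection() does below.
    auto options = SelectQueryOptions{}.ignoreAlias().ignoreASTOptimizationsAlias();
    std::cout << options.ignore_alias << ' ' << options.ignore_ast_optimizations << '\n';
    return 0;
}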
@@ -1123,7 +1123,7 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect(
     TreeOptimizer::optimizeIf(query, result.aliases, settings.optimize_if_chain_to_multiif);

     /// Only apply AST optimization for initial queries.
-    if (getContext()->getClientInfo().query_kind == ClientInfo::QueryKind::INITIAL_QUERY)
+    if (getContext()->getClientInfo().query_kind != ClientInfo::QueryKind::SECONDARY_QUERY && !select_options.ignore_ast_optimizations)
         TreeOptimizer::apply(query, result, tables_with_columns, getContext());

     /// array_join_alias_to_name, array_join_result_to_source.
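The rewritten condition widens AST optimization from INITIAL_QUERY only to every query kind except SECONDARY_QUERY (which is what lets NO_QUERY ASTs benefit, per the commit title), while the new ignore_ast_optimizations flag lets callers such as the projection description opt out. A standalone sketch of that predicate, using a trimmed-down stand-in for ClientInfo::QueryKind:

/// Stand-in enum for illustration only; the real enum lives in ClientInfo.
enum class QueryKind
{
    NO_QUERY,
    INITIAL_QUERY,
    SECONDARY_QUERY,
};

/// Sketch of the gating logic from the hunk above, extracted as a predicate.
static bool shouldApplyASTOptimizations(QueryKind query_kind, bool ignore_ast_optimizations)
{
    /// Previously only INITIAL_QUERY was optimized; now everything except
    /// SECONDARY_QUERY is, unless the caller explicitly opted out.
    return query_kind != QueryKind::SECONDARY_QUERY && !ignore_ast_optimizations;
}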
@@ -201,7 +201,7 @@ ProjectionDescription ProjectionDescription::getMinMaxCountProjection(
     select_expression_list->children.push_back(makeASTFunction("count"));
     select_query->setExpression(ASTProjectionSelectQuery::Expression::SELECT, std::move(select_expression_list));

-    if (partition_columns)
+    if (partition_columns && !partition_columns->children.empty())
         select_query->setExpression(ASTProjectionSelectQuery::Expression::GROUP_BY, partition_columns->clone());

     result.definition_ast = select_query;
@@ -211,7 +211,9 @@ ProjectionDescription ProjectionDescription::getMinMaxCountProjection(
     auto external_storage_holder = std::make_shared<TemporaryTableHolder>(query_context, columns, ConstraintsDescription{});
     StoragePtr storage = external_storage_holder->getTable();
     InterpreterSelectQuery select(
-        result.query_ast, query_context, storage, {}, SelectQueryOptions{QueryProcessingStage::WithMergeableState}.modify().ignoreAlias());
+        result.query_ast, query_context, storage, {},
+        /// Here we ignore ast optimizations because otherwise aggregation keys may be removed from result header as constants.
+        SelectQueryOptions{QueryProcessingStage::WithMergeableState}.modify().ignoreAlias().ignoreASTOptimizationsAlias());
     result.required_columns = select.getRequiredColumns();
     result.sample_block = select.getSampleBlock();
@@ -284,6 +284,12 @@ def test_rabbitmq_materialized_view(rabbitmq_cluster):
             ORDER BY key;
         CREATE MATERIALIZED VIEW test.consumer TO test.view AS
             SELECT * FROM test.rabbitmq;
+
+        CREATE TABLE test.view2 (key UInt64, value UInt64)
+            ENGINE = MergeTree()
+            ORDER BY key;
+        CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
+            SELECT * FROM test.rabbitmq group by (key, value);
     ''')

     credentials = pika.PlainCredentials('root', 'clickhouse')
@@ -297,14 +303,26 @@ def test_rabbitmq_materialized_view(rabbitmq_cluster):
     for message in messages:
         channel.basic_publish(exchange='mv', routing_key='', body=message)

-    while True:
+    time_limit_sec = 60
+    deadline = time.monotonic() + time_limit_sec
+
+    while time.monotonic() < deadline:
         result = instance.query('SELECT * FROM test.view ORDER BY key')
         if (rabbitmq_check_result(result)):
             break

-    connection.close()
     rabbitmq_check_result(result, True)
+
+    deadline = time.monotonic() + time_limit_sec
+
+    while time.monotonic() < deadline:
+        result = instance.query('SELECT * FROM test.view2 ORDER BY key')
+        if (rabbitmq_check_result(result)):
+            break
+
+    rabbitmq_check_result(result, True)
+    connection.close()


 def test_rabbitmq_materialized_view_with_subquery(rabbitmq_cluster):
     instance.query('''
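The test's wait loops switch from an unbounded while True to a 60-second deadline, and connection.close() moves after both checks, so a broken pipeline surfaces as a failed rabbitmq_check_result(result, True) rather than a hung job. A sketch of the same bounded-polling idea in C++ (the test itself is Python; pollUntil is an illustrative name, not a helper from the repo):

#include <chrono>
#include <functional>
#include <thread>

/// Poll a check until it passes or a deadline expires, so a broken pipeline
/// makes the follow-up assertion fail instead of hanging the test forever.
static bool pollUntil(const std::function<bool()> & check, std::chrono::seconds time_limit)
{
    const auto deadline = std::chrono::steady_clock::now() + time_limit;
    while (std::chrono::steady_clock::now() < deadline)
    {
        if (check())
            return true;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }
    return false;
}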