Merge pull request #64972 from ClickHouse/backport/24.3/64858

Backport #64858 to 24.3: Fix segfault for a very tricky case
commit 9a1fc0961a by robot-clickhouse-ci-2, 2024-06-07 15:35:08 +02:00 (committed by GitHub)
5 changed files with 78 additions and 3 deletions

@@ -166,8 +166,14 @@ static ExpressionAndSets buildExpressionAndSets(ASTPtr & ast, const NamesAndType
 {
     ExpressionAndSets result;
     auto ttl_string = queryToString(ast);
-    auto syntax_analyzer_result = TreeRewriter(context).analyze(ast, columns);
-    ExpressionAnalyzer analyzer(ast, syntax_analyzer_result, context);
+    auto context_copy = Context::createCopy(context);
+    /// FIXME All code here will work with old analyzer, however for TTL
+    /// with subqueries it's possible that new analyzer will be enabled in ::read method
+    /// of underlying storage when all other parts of infra are not ready for it
+    /// (built with old analyzer).
+    context_copy->setSetting("allow_experimental_analyzer", Field{0});
+    auto syntax_analyzer_result = TreeRewriter(context_copy).analyze(ast, columns);
+    ExpressionAnalyzer analyzer(ast, syntax_analyzer_result, context_copy);
     auto dag = analyzer.getActionsDAG(false);
     const auto * col = &dag->findInOutputs(ast->getColumnName());
@@ -177,7 +183,7 @@ static ExpressionAndSets buildExpressionAndSets(ASTPtr & ast, const NamesAndType
     dag->getOutputs() = {col};
     dag->removeUnusedActions();
-    result.expression = std::make_shared<ExpressionActions>(dag, ExpressionActionsSettings::fromContext(context));
+    result.expression = std::make_shared<ExpressionActions>(dag, ExpressionActionsSettings::fromContext(context_copy));
     result.sets = analyzer.getPreparedSets();
     return result;
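
The fix analyzes the TTL expression on a copy of the query context with allow_experimental_analyzer forced to 0, so the TTL ActionsDAG is always built by the old analyzer even when the session has the new one enabled. For illustration only, here is a minimal standalone sketch of the problematic schema, assuming a local server with the HTTP interface on 127.0.0.1:8123 and the Python requests package; it is simplified to plain MergeTree so it runs without ZooKeeper, while the integration test below uses a Replicated database and ReplicatedMergeTree, the setup the bug was reported against:

import requests

URL = "http://127.0.0.1:8123/"

def ch(query: str) -> str:
    # Run one statement over the HTTP interface; raise on server errors.
    resp = requests.post(URL, data=query)
    resp.raise_for_status()
    return resp.text

ch("CREATE TABLE 02908_main (a UInt32) ENGINE = MergeTree ORDER BY a")
# A TTL with an IN-subquery: the TTL expression is analyzed with the old
# analyzer, while reads from the source table may use the new analyzer.
ch(
    "CREATE TABLE 02908_dependent (a UInt32, ts DateTime) "
    "ENGINE = MergeTree ORDER BY a TTL ts + 1 WHERE a IN (SELECT a FROM 02908_main)"
)
ch("INSERT INTO 02908_main VALUES (1)")
ch("INSERT INTO 02908_dependent VALUES (1, now())")
print(ch("SELECT * FROM 02908_dependent"))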

@@ -0,0 +1 @@
+#!/usr/bin/env python3

@@ -0,0 +1,11 @@
+<clickhouse>
+    <profiles>
+        <default>
+            <allow_experimental_analyzer>1</allow_experimental_analyzer>
+            <allow_experimental_parallel_reading_from_replicas>1</allow_experimental_parallel_reading_from_replicas>
+            <cluster_for_parallel_replicas>default</cluster_for_parallel_replicas>
+            <max_parallel_replicas>100</max_parallel_replicas>
+            <use_hedged_requests>0</use_hedged_requests>
+        </default>
+    </profiles>
+</clickhouse>
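
This profile enables the new analyzer together with parallel replicas for the default user, which is exactly the mix the FIXME above guards against: TTL expressions built with the old analyzer while the storage read path runs with the new one. As an aside (not part of the patch), a quick Python check that the profile took effect, under the same local-server assumption as the sketch above:

import requests

# system.settings exposes the session's effective settings.
print(requests.post(
    "http://127.0.0.1:8123/",
    data="SELECT name, value FROM system.settings "
         "WHERE name IN ('allow_experimental_analyzer', 'max_parallel_replicas')",
).text)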

@@ -0,0 +1,6 @@
+<clickhouse>
+    <macros>
+        <replica>node1</replica>
+        <shard>default</shard>
+    </macros>
+</clickhouse>
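
The replica and shard macros let the test create ReplicatedMergeTree tables without spelling out ZooKeeper paths: the server substitutes {shard} and {replica} into its default path template. They can be inspected at runtime (same local-server assumption as above):

import requests

# system.macros lists the substitution macros loaded from the server config.
print(requests.post("http://127.0.0.1:8123/", data="SELECT * FROM system.macros").text)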

@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+import logging
+import random
+import string
+import time
+
+import pytest
+from multiprocessing.dummy import Pool
+from helpers.cluster import ClickHouseCluster
+import minio
+
+
+cluster = ClickHouseCluster(__file__)
+
+
+@pytest.fixture(scope="module")
+def started_cluster():
+    try:
+        cluster.add_instance(
+            "node1",
+            main_configs=["configs/node1_macro.xml"],
+            user_configs=[
+                "configs/enable_parallel_replicas.xml",
+            ],
+            with_minio=True,
+            with_zookeeper=True,
+        )
+
+        cluster.start()
+        yield cluster
+
+    finally:
+        cluster.shutdown()
+
+
+def test_replicated_db_and_ttl(started_cluster):
+    node1 = cluster.instances["node1"]
+    node1.query("DROP DATABASE default")
+    node1.query("CREATE DATABASE default ENGINE Replicated('/replicated')")
+
+    node1.query(
+        "CREATE TABLE 02908_main (a UInt32) ENGINE = ReplicatedMergeTree ORDER BY a"
+    )
+    node1.query(
+        "CREATE TABLE 02908_dependent (a UInt32, ts DateTime) ENGINE = ReplicatedMergeTree ORDER BY a TTL ts + 1 WHERE a IN (SELECT a FROM 02908_main)"
+    )
+
+    node1.query("INSERT INTO 02908_main VALUES (1)")
+    node1.query("INSERT INTO 02908_dependent VALUES (1, now())")
+
+    node1.query("SELECT * FROM 02908_dependent")