Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-12-12 09:22:05 +00:00
Fix: 02784_parallel_replicas_automatic_decision
commit f990bb2a21
parent b8d8b76dd5
@@ -899,7 +899,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
     // (2) if it's ReadFromMergeTree - run index analysis and check number of rows to read
     if (settings.parallel_replicas_min_number_of_rows_per_replica > 0)
     {
-        auto result_ptr = reading->selectRangesToRead(reading->getParts(), reading->getAlterConvertionsForParts());
+        auto result_ptr = reading->selectRangesToRead();

         UInt64 rows_to_read = result_ptr->selected_rows;
         if (table_expression_query_info.limit > 0 && table_expression_query_info.limit < rows_to_read)
@@ -909,7 +909,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
             rows_to_read = max_block_size_limited;

         const size_t number_of_replicas_to_use = rows_to_read / settings.parallel_replicas_min_number_of_rows_per_replica;
-        if (number_of_replicas_to_use > 1)
         LOG_TRACE(
             getLogger("Planner"),
             "Estimated {} rows to read. It is enough work for {} parallel replicas",
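For context, the decision these two hunks feed into is simple arithmetic: clamp the row count selected by index analysis to the query LIMIT, then divide by parallel_replicas_min_number_of_rows_per_replica to get a candidate replica count. The snippet below is a minimal standalone sketch of that arithmetic; estimateReplicasToUse and its parameters are illustrative names for this example, not ClickHouse code.

#include <cstdint>
#include <iostream>

/// Minimal standalone sketch of the estimate the hunks above work with:
/// clamp the rows selected by index analysis to the query LIMIT, then
/// divide by the per-replica minimum to get a candidate replica count.
/// Names are illustrative; this is not the ClickHouse implementation.
uint64_t estimateReplicasToUse(
    uint64_t selected_rows,        /// rows_to_read from index analysis
    uint64_t limit,                /// query LIMIT, 0 means "no limit"
    uint64_t min_rows_per_replica) /// parallel_replicas_min_number_of_rows_per_replica
{
    if (min_rows_per_replica == 0)
        return 1; /// the automatic decision is disabled, keep a single replica

    uint64_t rows_to_read = selected_rows;
    if (limit > 0 && limit < rows_to_read)
        rows_to_read = limit; /// no point reading more rows than the LIMIT needs

    return rows_to_read / min_rows_per_replica;
}

int main()
{
    /// 1'000'000 estimated rows with a 200'000-row minimum per replica allows 5 replicas;
    /// a LIMIT of 300'000 caps the useful work and drops the estimate to 1.
    std::cout << estimateReplicasToUse(1'000'000, 0, 200'000) << '\n';
    std::cout << estimateReplicasToUse(1'000'000, 300'000, 200'000) << '\n';
}

Note that with the guard removed in the second hunk, the trace line is written even when the estimate does not exceed one replica.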
@@ -1083,16 +1082,12 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
     planner.buildQueryPlanIfNeeded();

     auto expected_header = planner.getQueryPlan().getCurrentDataStream().header;
+    LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "expected_header:\n{}", expected_header.dumpStructure());

     if (!blocksHaveEqualStructure(query_plan.getCurrentDataStream().header, expected_header))
     {
         materializeBlockInplace(expected_header);

-        const Block & query_plan_header = query_plan.getCurrentDataStream().header;
-
-        LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "query_plan_header:\n{}", query_plan_header.dumpStructure());
-        LOG_DEBUG(getLogger(__PRETTY_FUNCTION__), "expected_header:\n{}", expected_header.dumpStructure());
-
         auto rename_actions_dag = ActionsDAG::makeConvertingActions(
             query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(),
             expected_header.getColumnsWithTypeAndName(),
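The third hunk sits in the code that reconciles the header of the locally built plan with the header the planner expects: the structures are compared and, if they differ, converting actions are built from the current header to the expected one. Below is a standalone sketch of that reconciliation idea in plain C++; ColumnDesc, haveEqualStructure and planRenames are made-up names for this example and only mirror the spirit of blocksHaveEqualStructure and ActionsDAG::makeConvertingActions, not their APIs.

#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

/// Illustrative stand-in for a block header: column names with their types.
/// ClickHouse's Block also carries the columns themselves; this sketch keeps
/// only what the structure comparison needs.
struct ColumnDesc
{
    std::string name;
    std::string type;
};
using Header = std::vector<ColumnDesc>;

/// Rough analogue of blocksHaveEqualStructure: same column count and the
/// same name/type at every position.
bool haveEqualStructure(const Header & a, const Header & b)
{
    if (a.size() != b.size())
        return false;
    for (size_t i = 0; i < a.size(); ++i)
        if (a[i].name != b[i].name || a[i].type != b[i].type)
            return false;
    return true;
}

/// Sketch of the converting step applied when the structures differ: keep the
/// source column at each position but give it the expected name.
std::vector<std::pair<size_t, std::string>> planRenames(const Header & current, const Header & expected)
{
    if (current.size() != expected.size())
        throw std::runtime_error("headers have different numbers of columns");

    std::vector<std::pair<size_t, std::string>> renames;
    for (size_t i = 0; i < expected.size(); ++i)
    {
        if (current[i].type != expected[i].type)
            throw std::runtime_error("column " + std::to_string(i) + " would need a cast, not just a rename");
        if (current[i].name != expected[i].name)
            renames.emplace_back(i, expected[i].name);
    }
    return renames;
}

int main()
{
    Header current  = {{"__table1.id", "UInt64"}, {"__table1.value", "String"}};
    Header expected = {{"id", "UInt64"}, {"value", "String"}};

    if (!haveEqualStructure(current, expected))
        for (const auto & [pos, name] : planRenames(current, expected))
            std::cout << "rename column #" << pos << " to " << name << '\n';
}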