try to assign header unconditionally

This commit is contained in:
yariks5s 2024-05-30 18:28:24 +00:00
parent 5a09dce95d
commit 84c8c4ca48
3 changed files with 8 additions and 13 deletions

View File

@@ -367,16 +367,13 @@ std::optional<Chain> generateViewChain(
bool check_access = !materialized_view->hasInnerTable() && materialized_view->getInMemoryMetadataPtr()->sql_security_type; bool check_access = !materialized_view->hasInnerTable() && materialized_view->getInMemoryMetadataPtr()->sql_security_type;
out = interpreter.buildChain(inner_table, inner_metadata_snapshot, insert_columns, thread_status_holder, view_counter_ms, check_access); out = interpreter.buildChain(inner_table, inner_metadata_snapshot, insert_columns, thread_status_holder, view_counter_ms, check_access);
if (interpreter.shouldAddSquashingFroStorage(inner_table)) bool table_prefers_large_blocks = inner_table->prefersLargeBlocks();
{ const auto & settings = insert_context->getSettingsRef();
bool table_prefers_large_blocks = inner_table->prefersLargeBlocks();
const auto & settings = insert_context->getSettingsRef();
out.addSource(std::make_shared<SquashingTransform>( out.addSource(std::make_shared<SquashingTransform>(
out.getInputHeader(), out.getInputHeader(),
table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size, table_prefers_large_blocks ? settings.min_insert_block_size_rows : settings.max_block_size,
table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0ULL)); table_prefers_large_blocks ? settings.min_insert_block_size_bytes : 0ULL));
}
auto counting = std::make_shared<CountingTransform>(out.getInputHeader(), current_thread, insert_context->getQuota()); auto counting = std::make_shared<CountingTransform>(out.getInputHeader(), current_thread, insert_context->getQuota());
counting->setProcessListElement(insert_context->getProcessListElement()); counting->setProcessListElement(insert_context->getProcessListElement());

View File

@@ -890,8 +890,7 @@ AsynchronousInsertQueue::PushResult TCPHandler::processAsyncInsertQuery(Asynchro
while (readDataNext()) while (readDataNext())
{ {
if (!apply_squashing.header) apply_squashing.header = state.block_for_insert;
apply_squashing.header = state.block_for_insert;
auto planned_chunk = plan_squashing.add({state.block_for_insert.getColumns(), state.block_for_insert.rows()}); auto planned_chunk = plan_squashing.add({state.block_for_insert.getColumns(), state.block_for_insert.rows()});
if (planned_chunk.hasChunkInfo()) if (planned_chunk.hasChunkInfo())
{ {

View File

@@ -1316,8 +1316,7 @@ bool PartMergerWriter::mutateOriginalPartAndPrepareProjections()
ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::MutateTaskProjectionsCalculationMicroseconds); ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::MutateTaskProjectionsCalculationMicroseconds);
Block block_to_squash = projection.calculate(cur_block, ctx->context); Block block_to_squash = projection.calculate(cur_block, ctx->context);
if (!projection_squashes[i].header) projection_squashes[i].header = block_to_squash;
projection_squashes[i].header = block_to_squash;
Chunk planned_chunk = projection_squash_plannings[i].add({block_to_squash.getColumns(), block_to_squash.rows()}); Chunk planned_chunk = projection_squash_plannings[i].add({block_to_squash.getColumns(), block_to_squash.rows()});
if (planned_chunk.hasChunkInfo()) if (planned_chunk.hasChunkInfo())