commit d86580ef04
parent aa35dee529

    try to fix segfaults
src/Interpreters/Squashing.h | 11 (Normal file → Executable file)
@@ -32,10 +32,19 @@ public:
     explicit ApplySquashing(Block header_);

     Chunk add(Chunk && input_chunk);

-    const Block header;
+    void setHeader(Block header_)
+    {
+        header = header_;
+    }
+
+    Block getHeader()
+    {
+        return header;
+    }

 private:
     Chunk accumulated_chunk;
+    Block header;

     const ChunksToSquash * getInfoFromChunk(const Chunk & chunk);
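Note: the substance of this hunk is an encapsulation change. header was a public const Block, which can never be reassigned after construction; it becomes a private non-const member reachable only through setHeader()/getHeader(). A minimal standalone sketch of the same pattern, using std::string as a stand-in for Block (an illustration, not the real ClickHouse API):

#include <iostream>
#include <string>
#include <utility>

// Stand-in for ApplySquashing: the commit moves `header` from a public
// `const Block header;` to a private, mutable member behind accessors,
// so it can be reassigned after construction (a const member forbids that).
class ApplySquashingSketch
{
public:
    explicit ApplySquashingSketch(std::string header_) : header(std::move(header_)) {}

    void setHeader(std::string header_) { header = std::move(header_); }
    const std::string & getHeader() const { return header; }

private:
    std::string header; // Block in the real code
};

int main()
{
    ApplySquashingSketch squashing("initial header");
    squashing.setHeader("replacement header"); // legal now; impossible with a const member
    std::cout << squashing.getHeader() << '\n';
    return 0;
}

One difference from the diff: the getHeader() added here returns Block by value and is non-const, so every call copies the header; the sketch returns a const reference instead, which is a common cheaper alternative.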
src/Server/TCPHandler.cpp | 4 (Normal file → Executable file)
@@ -912,9 +912,9 @@ AsynchronousInsertQueue::PushResult TCPHandler::processAsyncInsertQuery(Asynchro
         if (planned_chunk.hasChunkInfo())
             result_chunk = apply_squashing.add(std::move(planned_chunk));
         ColumnsWithTypeAndName cols;
-        if (result_chunk.hasColumns() && apply_squashing.header)
+        if (result_chunk.hasColumns() && apply_squashing.getHeader())
             for (size_t j = 0; j < result_chunk.getNumColumns(); ++j)
-                cols.push_back(ColumnWithTypeAndName(result_chunk.getColumns()[j], apply_squashing.header.getDataTypes()[j], apply_squashing.header.getNames()[j]));
+                cols.push_back(ColumnWithTypeAndName(result_chunk.getColumns()[j], apply_squashing.getHeader().getDataTypes()[j], apply_squashing.getHeader().getNames()[j]));
         auto result = Block(cols);
         return insert_queue.pushQueryWithBlock(state.parsed_query, std::move(result), query_context);
     }
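Note: with the member now private, this call site switches from apply_squashing.header to apply_squashing.getHeader(); the loop itself rebuilds a Block by pairing each squashed column with the name and type at the same index of the header. A self-contained sketch of that reassembly loop with simplified stand-in types (not the real IColumn/DataType classes):

#include <cstddef>
#include <string>
#include <vector>

// Simplified stand-ins for the column and header metadata.
using Column = std::vector<int>;
struct ColumnWithTypeAndName { Column column; std::string type; std::string name; };
struct Header { std::vector<std::string> types, names; };

// Mirrors the loop above: column j of the squashed chunk is paired with
// type j and name j taken from the squashing header.
std::vector<ColumnWithTypeAndName> rebuildBlock(const std::vector<Column> & chunk_columns,
                                                const Header & header)
{
    std::vector<ColumnWithTypeAndName> cols;
    for (size_t j = 0; j < chunk_columns.size(); ++j)
        cols.push_back({chunk_columns[j], header.types[j], header.names[j]});
    return cols;
}

int main()
{
    Header header{{"UInt64", "String"}, {"id", "value"}};
    std::vector<Column> chunk_columns{{1, 2, 3}, {4, 5, 6}};
    auto cols = rebuildBlock(chunk_columns, header);
    return cols.size() == 2 ? 0 : 1;
}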
src/Storages/MergeTree/MutateTask.cpp | 5 (Normal file → Executable file)
@@ -1331,6 +1331,7 @@ bool PartMergerWriter::mutateOriginalPartAndPrepareProjections()
                 tmp_part.finalize();
                 tmp_part.part->getDataPartStorage().commitTransaction();
                 projection_parts[projection.name].emplace_back(std::move(tmp_part.part));
+                projection_squashes[i].setHeader(block_to_squash);
             }
         }

@@ -1351,9 +1352,9 @@
         {
             Chunk projection_chunk = projection_squashes[i].add(std::move(planned_chunk));
             ColumnsWithTypeAndName cols;
-            if (projection_chunk.hasColumns() && projection_squashes[i].header)
+            if (projection_chunk.hasColumns() && projection_squashes[i].getHeader())
                 for (size_t j = 0; j < projection_chunk.getNumColumns(); ++j)
-                    cols.push_back(ColumnWithTypeAndName(projection_chunk.getColumns()[j], projection_squashes[i].header.getDataTypes()[j], projection_squashes[i].header.getNames()[j]));
+                    cols.push_back(ColumnWithTypeAndName(projection_chunk.getColumns()[j], projection_squashes[i].getHeader().getDataTypes()[j], projection_squashes[i].getHeader().getNames()[j]));

             auto temp_part = MergeTreeDataWriter::writeTempProjectionPart(
                 *ctx->data, ctx->log, Block(cols), projection, ctx->new_data_part.get(), ++block_num);
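Note: the one-line addition in the first hunk is presumably what the commit title refers to: each projection's squashing state is re-keyed to the header of the block just squashed, so the rebuild loop in the second hunk reads names and types that match the chunks it processes. A minimal sketch of that per-projection reset, again with stand-in types rather than the real MutateTask machinery:

#include <string>
#include <utility>
#include <vector>

// Stand-in squashing state with the accessors added in Squashing.h.
class SquashingSketch
{
public:
    void setHeader(std::string header_) { header = std::move(header_); }
    const std::string & getHeader() const { return header; }

private:
    std::string header;
};

int main()
{
    // One squashing state per projection, as PartMergerWriter keeps.
    std::vector<std::string> block_headers{"proj_a block header", "proj_b block header"};
    std::vector<SquashingSketch> projection_squashes(block_headers.size());

    // After each projection part is committed, re-key its squashing state to
    // the header of the block that was just squashed, so later getHeader()
    // calls see metadata matching the upcoming chunks.
    for (size_t i = 0; i < projection_squashes.size(); ++i)
        projection_squashes[i].setHeader(block_headers[i]);

    return projection_squashes[1].getHeader().empty() ? 1 : 0;
}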