Fixing build.

Commit 333fd09dbf (parent: fd97a9d885)
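Note: this commit adapts ClusterCopier, several system storages, and TableFunctionFormat to the QueryPipelineBuilder refactoring. The recurring migration, distilled from the hunks below (a sketch relying on in-tree ClickHouse headers, not a standalone program): instead of taking the ready QueryPipeline out of BlockIO, callers build a QueryPlan from InterpreterSelectQuery and lower it to a QueryPipelineBuilder, which the reworked getBlockWithAllStreamData now accepts.

    // before: Block block = getBlockWithAllStreamData(interpreter->execute().pipeline);
    InterpreterSelectQuery select(query_ast, context, SelectQueryOptions{});
    QueryPlan plan;
    select.buildQueryPlan(plan);                        // plan the SELECT explicitly
    auto builder = std::move(*plan.buildQueryPipeline(  // then lower it to a pipeline builder
        QueryPlanOptimizationSettings::fromContext(context),
        BuildQueryPipelineSettings::fromContext(context)));
    Block block = getBlockWithAllStreamData(std::move(builder));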
@@ -8,6 +8,7 @@
 #include <Common/setThreadName.h>
 #include <IO/ConnectionTimeoutsContext.h>
 #include <Interpreters/InterpreterInsertQuery.h>
 #include <Interpreters/InterpreterSelectQuery.h>
 #include <Parsers/ASTFunction.h>
 #include <Processors/Transforms/ExpressionTransform.h>
+#include <QueryPipeline/QueryPipelineBuilder.h>
@@ -15,6 +16,9 @@
 #include <Processors/Executors/PullingPipelineExecutor.h>
 #include <Processors/Executors/PushingPipelineExecutor.h>
 #include <Processors/Sources/RemoteSource.h>
+#include <Processors/QueryPlan/QueryPlan.h>
+#include <Processors/QueryPlan/BuildQueryPipelineSettings.h>
+#include <Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h>
 
 namespace DB
 {
@@ -1451,7 +1455,14 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
             local_context->setSettings(task_cluster->settings_pull);
             local_context->setSetting("skip_unavailable_shards", true);
 
-            Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_select_ast, local_context)->execute().pipeline);
+            InterpreterSelectQuery select(query_select_ast, local_context, SelectQueryOptions{});
+            QueryPlan plan;
+            select.buildQueryPlan(plan);
+            auto builder = std::move(*plan.buildQueryPipeline(
+                QueryPlanOptimizationSettings::fromContext(local_context),
+                BuildQueryPipelineSettings::fromContext(local_context)));
+
+            Block block = getBlockWithAllStreamData(std::move(builder));
             count = (block) ? block.safeGetByPosition(0).column->getUInt(0) : 0;
         }
 
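Note: the old one-liner compiled only while getBlockWithAllStreamData accepted a QueryPipeline; with the helper reworked below (Internals.cpp hunk) to take a QueryPipelineBuilder, every caller switches to the QueryPlan route, which also applies the optimization and pipeline settings of the pulling context. The same rewrite repeats in getShardPartitions further down.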
@@ -1532,27 +1543,32 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
         QueryPipeline input;
         QueryPipeline output;
         {
-            BlockIO io_select = InterpreterFactory::get(query_select_ast, context_select)->execute();
             BlockIO io_insert = InterpreterFactory::get(query_insert_ast, context_insert)->execute();
 
+            InterpreterSelectQuery select(query_select_ast, context_select, SelectQueryOptions{});
+            QueryPlan plan;
+            select.buildQueryPlan(plan);
+            auto builder = std::move(*plan.buildQueryPipeline(
+                QueryPlanOptimizationSettings::fromContext(context_select),
+                BuildQueryPipelineSettings::fromContext(context_select)));
+
             output = std::move(io_insert.pipeline);
 
             /// Add converting actions to make it possible to copy blocks with slightly different schema
-            const auto & select_block = io_select.pipeline.getHeader();
+            const auto & select_block = builder.getHeader();
             const auto & insert_block = output.getHeader();
             auto actions_dag = ActionsDAG::makeConvertingActions(
                 select_block.getColumnsWithTypeAndName(),
                 insert_block.getColumnsWithTypeAndName(),
                 ActionsDAG::MatchColumnsMode::Position);
 
             auto actions = std::make_shared<ExpressionActions>(actions_dag, ExpressionActionsSettings::fromContext(getContext()));
 
-            QueryPipelineBuilder builder;
-            builder.init(std::move(io_select.pipeline));
             builder.addSimpleTransform([&](const Block & header)
             {
                 return std::make_shared<ExpressionTransform>(header, actions);
             });
-            input = QueryPipelineBuilder::getPipeline(std::move(builder));
+            input = QueryPipelineBuilder::getPipeline2(std::move(builder));
         }
 
         /// Fail-fast optimization to abort copying when the current clean state expires
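Note: the converting step survives the rewrite unchanged; because the copier may move data between tables whose schemas differ slightly, the select header is positionally cast to the insert header before pushing. A minimal illustration of what MatchColumnsMode::Position means (select_header and insert_header are hypothetical names):

    // Positional matching: column i of the input is converted (cast and/or renamed)
    // to the name and type of column i of the output, regardless of column names.
    auto dag = ActionsDAG::makeConvertingActions(
        select_header.getColumnsWithTypeAndName(),   // e.g. { n UInt32, s String }
        insert_header.getColumnsWithTypeAndName(),   // e.g. { id UInt64, s String }
        ActionsDAG::MatchColumnsMode::Position);
    // The resulting DAG is wrapped in an ExpressionTransform on every output port
    // via builder.addSimpleTransform, exactly as in the hunk above.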
@@ -1743,10 +1759,11 @@ String ClusterCopier::getRemoteCreateTable(
     remote_context->setSettings(settings);
 
     String query = "SHOW CREATE TABLE " + getQuotedTable(table);
-    Block block = getBlockWithAllStreamData(
-        QueryPipeline(std::make_shared<RemoteSource>(
-            std::make_shared<RemoteQueryExecutor>(connection, query, InterpreterShowCreateQuery::getSampleBlock(), remote_context), false, false)));
+    QueryPipelineBuilder builder;
+    builder.init(Pipe(std::make_shared<RemoteSource>(
+        std::make_shared<RemoteQueryExecutor>(connection, query, InterpreterShowCreateQuery::getSampleBlock(), remote_context), false, false)));
+    Block block = getBlockWithAllStreamData(std::move(builder));
 
     return typeid_cast<const ColumnString &>(*block.safeGetByPosition(0).column).getDataAt(0).toString();
 }
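Note: SHOW CREATE TABLE runs on the remote replica through a RemoteQueryExecutor wrapped in a RemoteSource; the change is only in the packaging, since the helper no longer takes a QueryPipeline the source goes into a Pipe and the Pipe into a builder. A condensed sketch (the two trailing booleans are assumed to be add_aggregation_info and async_read, per the RemoteSource constructor of this era):

    auto executor = std::make_shared<RemoteQueryExecutor>(
        connection, query, InterpreterShowCreateQuery::getSampleBlock(), remote_context);
    QueryPipelineBuilder builder;
    builder.init(Pipe(std::make_shared<RemoteSource>(executor, false, false)));
    Block block = getBlockWithAllStreamData(std::move(builder));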
@@ -1858,7 +1875,14 @@ std::set<String> ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti
 
     auto local_context = Context::createCopy(context);
     local_context->setSettings(task_cluster->settings_pull);
-    Block block = getBlockWithAllStreamData(InterpreterFactory::get(query_ast, local_context)->execute().pipeline);
+    InterpreterSelectQuery select(query_ast, local_context, SelectQueryOptions{});
+    QueryPlan plan;
+    select.buildQueryPlan(plan);
+    auto builder = std::move(*plan.buildQueryPipeline(
+        QueryPlanOptimizationSettings::fromContext(local_context),
+        BuildQueryPipelineSettings::fromContext(local_context)));
+
+    Block block = getBlockWithAllStreamData(std::move(builder));
 
     if (block)
     {
@@ -5,6 +5,7 @@
 #include <Processors/Transforms/SquashingChunksTransform.h>
 #include <Storages/MergeTree/MergeTreeData.h>
 #include <Storages/extractKeyExpressionList.h>
+#include <QueryPipeline/QueryPipelineBuilder.h>
 
 namespace DB
 {
@@ -59,16 +60,14 @@ std::shared_ptr<ASTStorage> createASTStorageDistributed(
 }
 
 
-Block getBlockWithAllStreamData(QueryPipeline pipeline)
+Block getBlockWithAllStreamData(QueryPipelineBuilder builder)
 {
-    QueryPipelineBuilder builder;
-    builder.init(std::move(pipeline));
     builder.addTransform(std::make_shared<SquashingChunksTransform>(
         builder.getHeader(),
         std::numeric_limits<size_t>::max(),
         std::numeric_limits<size_t>::max()));
 
-    auto cur_pipeline = QueryPipelineBuilder::getPipeline(std::move(builder));
+    auto cur_pipeline = QueryPipelineBuilder::getPipeline2(std::move(builder));
     Block block;
     PullingPipelineExecutor executor(cur_pipeline);
     executor.pull(block);
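Note: this helper drains an arbitrary pipeline into a single Block: a SquashingChunksTransform with both thresholds at std::numeric_limits<size_t>::max() never flushes early, so everything the pipeline produces is merged into one chunk and a single executor.pull(block) suffices. getPipeline2 looks like a transitional name on this refactoring branch, presumably returning the new QueryPipeline type while getPipeline still returned the old one; that is an inference, not something the diff states. Hypothetical caller:

    // Drain a small result set into one Block (caller sketch, 'pipe' is hypothetical).
    QueryPipelineBuilder builder;
    builder.init(std::move(pipe));   // any Pipe producing the rows
    Block everything = getBlockWithAllStreamData(std::move(builder));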
@@ -161,7 +161,7 @@ std::shared_ptr<ASTStorage> createASTStorageDistributed(
     const String & cluster_name, const String & database, const String & table,
     const ASTPtr & sharding_key_ast = nullptr);
 
-Block getBlockWithAllStreamData(QueryPipeline pipeline);
+Block getBlockWithAllStreamData(QueryPipelineBuilder builder);
 
 bool isExtendedDefinitionStorage(const ASTPtr & storage_ast);
 
@@ -9,6 +9,7 @@
 #include <Interpreters/DatabaseCatalog.h>
 #include <Parsers/queryToString.h>
 #include <Processors/ISource.h>
+#include <QueryPipeline/Pipe.h>
 
 
 namespace DB
@@ -6,6 +6,7 @@
 #include <Storages/IStorage.h>
 #include <Storages/System/StorageSystemPartsBase.h>
 #include <Processors/Sources/SourceFromSingleChunk.h>
+#include <QueryPipeline/Pipe.h>
 
 namespace DB
 {
@@ -1,5 +1,6 @@
 #include <Storages/System/StorageSystemDisks.h>
 #include <Processors/Sources/SourceFromSingleChunk.h>
+#include <QueryPipeline/Pipe.h>
 #include <Interpreters/Context.h>
 #include <Disks/IDiskRemote.h>
 
@@ -36,7 +36,7 @@ protected:
 
         next += step;
 
-        progress({column->size(), column->byteSize()});
+        progress(column->size(), column->byteSize());
 
         return { Columns {std::move(column)}, block_size };
     }
@@ -94,7 +94,7 @@ protected:
         while (pos < end)
             *pos++ = curr++;
 
-        progress({column->size(), column->byteSize()});
+        progress(column->size(), column->byteSize());
 
         return { Columns {std::move(column)}, block_size };
     }
@@ -7,6 +7,7 @@
 #include <Processors/Sources/SourceFromSingleChunk.h>
 #include <Interpreters/Context.h>
 #include <base/EnumReflection.h>
+#include <QueryPipeline/Pipe.h>
 
 
 namespace DB
@@ -54,7 +54,7 @@ protected:
             }
         }
 
-        progress({column->size(), column->byteSize()});
+        progress(column->size(), column->byteSize());
 
         return { Columns {std::move(column_ptr)}, column_size };
     }
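Note: the three progress hunks are the same mechanical API change: ISource::progress apparently now takes the row and byte counts as two size_t arguments instead of a braced Progress-style struct. Inside a generate() implementation this reads:

    // Report work done for this chunk; signature assumed from this commit:
    // void ISource::progress(size_t read_rows, size_t read_bytes);
    progress(column->size(), column->byteSize());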
@@ -64,9 +64,7 @@ Block TableFunctionFormat::parseData(ColumnsDescription columns, ContextPtr cont
 
     auto read_buf = std::make_unique<ReadBufferFromString>(data);
     auto input_format = context->getInputFormat(format, *read_buf, block, context->getSettingsRef().max_block_size);
-    QueryPipelineBuilder builder;
-    builder.init(Pipe(input_format));
-    auto pipeline = std::make_unique<QueryPipeline>(QueryPipelineBuilder::getPipeline(std::move(builder)));
+    auto pipeline = std::make_unique<QueryPipeline>(input_format);
     auto reader = std::make_unique<PullingPipelineExecutor>(*pipeline);
 
     std::vector<Block> blocks;