Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-21 15:12:02 +00:00)
Merge pull request #37647 from DevTeamBK/Fix-all-CheckTriviallyCopyableMove-Errors
Fix errors of CheckTriviallyCopyableMove type
commit 2d87af2a15
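
The clang-tidy check involved here, performance-move-const-arg with its CheckTriviallyCopyableMove option, warns when std::move is applied to a trivially copyable value: the "move" compiles down to a plain copy, so the cast only obscures intent. A minimal, self-contained sketch of the pattern the check reports (the types below are simplified stand-ins, not the actual ClickHouse classes):

    #include <utility>

    struct EventHeader            // trivially copyable: only scalar members, no custom copy/move
    {
        unsigned long long timestamp = 0;
        unsigned int type = 0;
    };

    struct Event
    {
        EventHeader header;

        // Flagged when CheckTriviallyCopyableMove is enabled: std::move of a
        // trivially copyable type has no effect, the member is copied anyway.
        explicit Event(EventHeader h) : header(std::move(h)) {}

        // Equivalent and clearer:
        // explicit Event(EventHeader h) : header(h) {}
    };

    int main()
    {
        EventHeader h{1653000000ULL, 4};
        Event e(h);
        return e.header.type == 4 ? 0 : 1;
    }
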
@@ -176,8 +176,6 @@ CheckOptions:
     value: CamelCase
   - key: modernize-loop-convert.UseCxx20ReverseRanges
     value: false
-  - key: performance-move-const-arg.CheckTriviallyCopyableMove
-    value: false
   # Workaround clang-tidy bug: https://github.com/llvm/llvm-project/issues/46097
   - key: readability-identifier-naming.TypeTemplateParameterIgnoredRegexp
     value: expr-type
@@ -619,7 +619,7 @@ private:

 public:
     explicit MarkovModel(MarkovModelParameters params_)
-        : params(std::move(params_)), code_points(params.order, BEGIN) {}
+        : params(params_), code_points(params.order, BEGIN) {}

     void consume(const char * data, size_t size)
     {
@@ -830,7 +830,7 @@ private:
     MarkovModel markov_model;

 public:
-    StringModel(UInt64 seed_, MarkovModelParameters params_) : seed(seed_), markov_model(std::move(params_)) {}
+    StringModel(UInt64 seed_, MarkovModelParameters params_) : seed(seed_), markov_model(params_) {}

     void train(const IColumn & column) override
     {
@@ -911,28 +911,28 @@ namespace MySQLReplication

         MySQLBinlogEventReadBuffer event_payload(payload, checksum_signature_length);

         EventHeader event_header;
         event_header.parse(event_payload);

         switch (event_header.type)
         {
             case FORMAT_DESCRIPTION_EVENT:
             {
-                event = std::make_shared<FormatDescriptionEvent>(std::move(event_header));
+                event = std::make_shared<FormatDescriptionEvent>(event_header);
                 event->parseEvent(event_payload);
                 position.update(event);
                 break;
             }
             case ROTATE_EVENT:
             {
-                event = std::make_shared<RotateEvent>(std::move(event_header));
+                event = std::make_shared<RotateEvent>(event_header);
                 event->parseEvent(event_payload);
                 position.update(event);
                 break;
             }
             case QUERY_EVENT:
             {
-                event = std::make_shared<QueryEvent>(std::move(event_header));
+                event = std::make_shared<QueryEvent>(event_header);
                 event->parseEvent(event_payload);
                 position.update(event);

@@ -942,7 +942,7 @@ namespace MySQLReplication
                     case QUERY_EVENT_MULTI_TXN_FLAG:
                     case QUERY_EVENT_XA:
                     {
-                        event = std::make_shared<DryRunEvent>(std::move(query->header));
+                        event = std::make_shared<DryRunEvent>(query->header);
                         break;
                     }
                     default:
@@ -952,7 +952,7 @@ namespace MySQLReplication
             }
             case XID_EVENT:
             {
-                event = std::make_shared<XIDEvent>(std::move(event_header));
+                event = std::make_shared<XIDEvent>(event_header);
                 event->parseEvent(event_payload);
                 position.update(event);
                 break;
@@ -963,14 +963,14 @@ namespace MySQLReplication
                 map_event_header.parse(event_payload);
                 if (doReplicate(map_event_header.schema, map_event_header.table))
                 {
-                    event = std::make_shared<TableMapEvent>(std::move(event_header), map_event_header);
+                    event = std::make_shared<TableMapEvent>(event_header, map_event_header);
                     event->parseEvent(event_payload);
                     auto table_map = std::static_pointer_cast<TableMapEvent>(event);
                     table_maps[table_map->table_id] = table_map;
                 }
                 else
                 {
-                    event = std::make_shared<DryRunEvent>(std::move(event_header));
+                    event = std::make_shared<DryRunEvent>(event_header);
                     event->parseEvent(event_payload);
                 }
                 break;
@@ -980,9 +980,9 @@ namespace MySQLReplication
                 RowsEventHeader rows_header(event_header.type);
                 rows_header.parse(event_payload);
                 if (doReplicate(rows_header.table_id))
-                    event = std::make_shared<WriteRowsEvent>(table_maps.at(rows_header.table_id), std::move(event_header), rows_header);
+                    event = std::make_shared<WriteRowsEvent>(table_maps.at(rows_header.table_id), event_header, rows_header);
                 else
-                    event = std::make_shared<DryRunEvent>(std::move(event_header));
+                    event = std::make_shared<DryRunEvent>(event_header);

                 event->parseEvent(event_payload);
                 if (rows_header.flags & ROWS_END_OF_STATEMENT)
@@ -994,9 +994,9 @@ namespace MySQLReplication
                 RowsEventHeader rows_header(event_header.type);
                 rows_header.parse(event_payload);
                 if (doReplicate(rows_header.table_id))
-                    event = std::make_shared<DeleteRowsEvent>(table_maps.at(rows_header.table_id), std::move(event_header), rows_header);
+                    event = std::make_shared<DeleteRowsEvent>(table_maps.at(rows_header.table_id), event_header, rows_header);
                 else
-                    event = std::make_shared<DryRunEvent>(std::move(event_header));
+                    event = std::make_shared<DryRunEvent>(event_header);

                 event->parseEvent(event_payload);
                 if (rows_header.flags & ROWS_END_OF_STATEMENT)
@@ -1008,9 +1008,9 @@ namespace MySQLReplication
                 RowsEventHeader rows_header(event_header.type);
                 rows_header.parse(event_payload);
                 if (doReplicate(rows_header.table_id))
-                    event = std::make_shared<UpdateRowsEvent>(table_maps.at(rows_header.table_id), std::move(event_header), rows_header);
+                    event = std::make_shared<UpdateRowsEvent>(table_maps.at(rows_header.table_id), event_header, rows_header);
                 else
-                    event = std::make_shared<DryRunEvent>(std::move(event_header));
+                    event = std::make_shared<DryRunEvent>(event_header);

                 event->parseEvent(event_payload);
                 if (rows_header.flags & ROWS_END_OF_STATEMENT)
@@ -1019,14 +1019,14 @@ namespace MySQLReplication
             }
             case GTID_EVENT:
             {
-                event = std::make_shared<GTIDEvent>(std::move(event_header));
+                event = std::make_shared<GTIDEvent>(event_header);
                 event->parseEvent(event_payload);
                 position.update(event);
                 break;
             }
             default:
             {
-                event = std::make_shared<DryRunEvent>(std::move(event_header));
+                event = std::make_shared<DryRunEvent>(event_header);
                 event->parseEvent(event_payload);
                 break;
             }
@@ -318,8 +318,7 @@ namespace MySQLReplication
     public:
         EventHeader header;

-        EventBase(EventHeader && header_) : header(std::move(header_)) {}
-
+        EventBase(EventHeader & header_) : header(header_) {}
         virtual ~EventBase() = default;
         virtual void dump(WriteBuffer & out) const = 0;
         virtual void parseEvent(ReadBuffer & payload) { parseImpl(payload); }
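
The same pattern repeats throughout this header: constructors that took EventHeader && and std::move'd it now take EventHeader & and copy it, since a trivially copyable header gains nothing from being moved. A hedged before/after sketch under that assumption (simplified types, not the real declarations):

    #include <type_traits>
    #include <utility>

    struct Header { unsigned long long ts = 0; unsigned int type = 0; };
    static_assert(std::is_trivially_copyable_v<Header>);

    // Before: rvalue reference plus std::move, which still performs a bitwise copy.
    struct EventBefore
    {
        Header header;
        explicit EventBefore(Header && h) : header(std::move(h)) {}
    };

    // After: lvalue reference and a plain copy; same generated code, simpler call sites.
    struct EventAfter
    {
        Header header;
        explicit EventAfter(Header & h) : header(h) {}
    };

    int main()
    {
        Header h{1, 2};
        EventBefore b(std::move(h));
        EventAfter a(h);   // h is still fully usable: the "move" above was only a copy
        return (b.header.type == a.header.type) ? 0 : 1;
    }
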
@@ -332,8 +331,8 @@ namespace MySQLReplication
     class FormatDescriptionEvent : public EventBase
     {
     public:
-        FormatDescriptionEvent(EventHeader && header_)
-            : EventBase(std::move(header_)), binlog_version(0), create_timestamp(0), event_header_length(0)
+        FormatDescriptionEvent(EventHeader & header_)
+            : EventBase(header_), binlog_version(0), create_timestamp(0), event_header_length(0)
         {
         }

@@ -357,7 +356,8 @@ namespace MySQLReplication
         UInt64 position;
         String next_binlog;

-        RotateEvent(EventHeader && header_) : EventBase(std::move(header_)), position(0) {}
+        RotateEvent(EventHeader & header_) : EventBase(header_), position(0) {}
+
         void dump(WriteBuffer & out) const override;

     protected:
@@ -385,8 +385,8 @@ namespace MySQLReplication
         QueryType typ = QUERY_EVENT_DDL;
         bool transaction_complete = true;

-        QueryEvent(EventHeader && header_)
-            : EventBase(std::move(header_)), thread_id(0), exec_time(0), schema_len(0), error_code(0), status_len(0)
+        QueryEvent(EventHeader & header_)
+            : EventBase(header_), thread_id(0), exec_time(0), schema_len(0), error_code(0), status_len(0)
         {
         }

@@ -400,7 +400,8 @@ namespace MySQLReplication
     class XIDEvent : public EventBase
     {
     public:
-        XIDEvent(EventHeader && header_) : EventBase(std::move(header_)), xid(0) {}
+        XIDEvent(EventHeader & header_) : EventBase(header_), xid(0) {}
+

     protected:
         UInt64 xid;
@@ -420,6 +421,7 @@ namespace MySQLReplication
         String table;

         TableMapEventHeader(): table_id(0), flags(0), schema_len(0), table_len(0) {}
+
         void parse(ReadBuffer & payload);
     };

@@ -437,7 +439,7 @@ namespace MySQLReplication
         std::vector<UInt16> column_meta;
         Bitmap null_bitmap;

-        TableMapEvent(EventHeader && header_, const TableMapEventHeader & map_event_header) : EventBase(std::move(header_)), column_count(0)
+        TableMapEvent(EventHeader & header_, const TableMapEventHeader & map_event_header) : EventBase(header_), column_count(0)
         {
             table_id = map_event_header.table_id;
             flags = map_event_header.flags;
@@ -477,8 +479,8 @@ namespace MySQLReplication
         String table;
         Row rows;

-        RowsEvent(std::shared_ptr<TableMapEvent> table_map_, EventHeader && header_, const RowsEventHeader & rows_header)
-            : EventBase(std::move(header_)), number_columns(0), table_map(table_map_)
+        RowsEvent(std::shared_ptr<TableMapEvent> table_map_, EventHeader & header_, const RowsEventHeader & rows_header)
+            : EventBase(header_), number_columns(0), table_map(table_map_)
         {
             table_id = rows_header.table_id;
             flags = rows_header.flags;
@@ -496,7 +498,6 @@ namespace MySQLReplication

         void parseImpl(ReadBuffer & payload) override;
         void parseRow(ReadBuffer & payload, Bitmap & bitmap);
-
     private:
         std::shared_ptr<TableMapEvent> table_map;
     };
@@ -504,24 +505,24 @@ namespace MySQLReplication
     class WriteRowsEvent : public RowsEvent
     {
     public:
-        WriteRowsEvent(std::shared_ptr<TableMapEvent> table_map_, EventHeader && header_, const RowsEventHeader & rows_header)
-            : RowsEvent(table_map_, std::move(header_), rows_header) {}
+        WriteRowsEvent(std::shared_ptr<TableMapEvent> table_map_, EventHeader & header_, const RowsEventHeader & rows_header)
+            : RowsEvent(table_map_, header_, rows_header) {}
         MySQLEventType type() const override { return MYSQL_WRITE_ROWS_EVENT; }
     };

     class DeleteRowsEvent : public RowsEvent
     {
     public:
-        DeleteRowsEvent(std::shared_ptr<TableMapEvent> table_map_, EventHeader && header_, const RowsEventHeader & rows_header)
-            : RowsEvent(table_map_, std::move(header_), rows_header) {}
+        DeleteRowsEvent(std::shared_ptr<TableMapEvent> table_map_, EventHeader & header_, const RowsEventHeader & rows_header)
+            : RowsEvent(table_map_, header_, rows_header) {}
         MySQLEventType type() const override { return MYSQL_DELETE_ROWS_EVENT; }
     };

     class UpdateRowsEvent : public RowsEvent
     {
     public:
-        UpdateRowsEvent(std::shared_ptr<TableMapEvent> table_map_, EventHeader && header_, const RowsEventHeader & rows_header)
-            : RowsEvent(table_map_, std::move(header_), rows_header) {}
+        UpdateRowsEvent(std::shared_ptr<TableMapEvent> table_map_, EventHeader & header_, const RowsEventHeader & rows_header)
+            : RowsEvent(table_map_, header_, rows_header) {}
         MySQLEventType type() const override { return MYSQL_UPDATE_ROWS_EVENT; }
     };

@@ -531,7 +532,7 @@ namespace MySQLReplication
         UInt8 commit_flag;
         GTID gtid;

-        GTIDEvent(EventHeader && header_) : EventBase(std::move(header_)), commit_flag(0) {}
+        GTIDEvent(EventHeader & header_) : EventBase(header_), commit_flag(0) {}
         void dump(WriteBuffer & out) const override;

     protected:
@@ -541,7 +542,8 @@ namespace MySQLReplication
     class DryRunEvent : public EventBase
     {
     public:
-        DryRunEvent(EventHeader && header_) : EventBase(std::move(header_)) {}
+        DryRunEvent(EventHeader & header_) : EventBase(header_) {}
+
         void dump(WriteBuffer & out) const override;

     protected:
@@ -11,7 +11,7 @@ static const auto DISK_CHECK_ERROR_RETRY_TIME = 3;

 DiskLocalCheckThread::DiskLocalCheckThread(DiskLocal * disk_, ContextPtr context_, UInt64 local_disk_check_period_ms)
     : WithContext(context_)
-    , disk(std::move(disk_))
+    , disk(disk_)
     , check_period_ms(local_disk_check_period_ms)
     , log(&Poco::Logger::get(fmt::format("DiskLocalCheckThread({})", disk->getName())))
 {
@@ -333,7 +333,7 @@ public:
         }

         size_t col_key_size = sub_map_column->size();
-        auto column = is_const? ColumnConst::create(std::move(sub_map_column), std::move(col_key_size)) : std::move(sub_map_column);
+        auto column = is_const? ColumnConst::create(std::move(sub_map_column), col_key_size) : std::move(sub_map_column);

         ColumnsWithTypeAndName new_arguments =
         {
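
The change above keeps std::move where it transfers real ownership (the column pointer) and drops it on col_key_size, a size_t for which std::move is a no-op. A small illustration of that distinction using standard types in place of ClickHouse's ColumnConst (the helper below is hypothetical):

    #include <cstddef>
    #include <memory>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for a create(column, size) factory:
    // takes ownership of the data and remembers the row count.
    struct ConstColumn
    {
        std::shared_ptr<std::vector<int>> data;
        std::size_t rows = 0;
    };

    ConstColumn makeConst(std::shared_ptr<std::vector<int>> column, std::size_t rows)
    {
        // Moving the shared_ptr avoids an atomic refcount bump: worthwhile.
        // "Moving" rows would be a plain copy of an integer, so it is passed as-is.
        return ConstColumn{std::move(column), rows};
    }

    int main()
    {
        auto column = std::make_shared<std::vector<int>>(std::vector<int>{1, 2, 3});
        std::size_t rows = column->size();
        ConstColumn c = makeConst(std::move(column), rows);   // no std::move(rows)
        return c.rows == 3 ? 0 : 1;
    }
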
@@ -480,7 +480,7 @@ public:
         }

         size_t col_key_size = sub_map_column->size();
-        auto column = is_const? ColumnConst::create(std::move(sub_map_column), std::move(col_key_size)) : std::move(sub_map_column);
+        auto column = is_const? ColumnConst::create(std::move(sub_map_column), col_key_size) : std::move(sub_map_column);

         new_arguments = {
         {
@@ -588,7 +588,7 @@ void Aggregator::compileAggregateFunctionsIfNeeded()
             .aggregate_data_offset = offset_of_aggregate_function
         };

-        functions_to_compile.emplace_back(std::move(function_to_compile));
+        functions_to_compile.emplace_back(function_to_compile);

         functions_description += function->getDescription();
         functions_description += ' ';
@@ -109,7 +109,7 @@ void TablesStatusResponse::read(ReadBuffer & in, UInt64 server_protocol_revision

         TableStatus status;
         status.read(in);
-        table_states_by_id.emplace(std::move(table_name), std::move(status));
+        table_states_by_id.emplace(std::move(table_name), status);
     }
 }

@@ -205,7 +205,7 @@ namespace
         else
             return false;

-        res_all_limits.emplace_back(std::move(limits));
+        res_all_limits.emplace_back(limits);
         return true;
     };

@@ -42,7 +42,7 @@ ExecutingGraph::Edge & ExecutingGraph::addEdge(Edges & edges, Edge edge, const I
             from->getName());

     edge.to = it->second;
-    auto & added_edge = edges.emplace_back(std::move(edge));
+    auto & added_edge = edges.emplace_back(edge);
     added_edge.update_info.id = &added_edge;
     return added_edge;
 }
@@ -66,7 +66,7 @@ bool ExecutingGraph::addEdges(uint64_t node)
             const IProcessor * to = &it->getOutputPort().getProcessor();
             auto output_port_number = to->getOutputPortNumber(&it->getOutputPort());
             Edge edge(0, true, from_input, output_port_number, &nodes[node]->post_updated_input_ports);
-            auto & added_edge = addEdge(nodes[node]->back_edges, std::move(edge), from, to);
+            auto & added_edge = addEdge(nodes[node]->back_edges, edge, from, to);
             it->setUpdateInfo(&added_edge.update_info);
         }
     }
@@ -84,7 +84,7 @@ bool ExecutingGraph::addEdges(uint64_t node)
             const IProcessor * to = &it->getInputPort().getProcessor();
             auto input_port_number = to->getInputPortNumber(&it->getInputPort());
             Edge edge(0, false, input_port_number, from_output, &nodes[node]->post_updated_output_ports);
-            auto & added_edge = addEdge(nodes[node]->direct_edges, std::move(edge), from, to);
+            auto & added_edge = addEdge(nodes[node]->direct_edges, edge, from, to);
             it->setUpdateInfo(&added_edge.update_info);
         }
     }
@@ -79,7 +79,7 @@ AggregatingStep::AggregatingStep(
     : ITransformingStep(input_stream_, appendGroupingColumn(params_.getHeader(final_), grouping_sets_params_), getTraits(), false)
     , params(std::move(params_))
     , grouping_sets_params(std::move(grouping_sets_params_))
-    , final(std::move(final_))
+    , final(final_)
     , max_block_size(max_block_size_)
     , aggregation_in_order_max_block_bytes(aggregation_in_order_max_block_bytes_)
     , merge_threads(merge_threads_)
@@ -40,7 +40,7 @@ CreatingSetStep::CreatingSetStep(
     , WithContext(context_)
     , description(std::move(description_))
     , subquery_for_set(std::move(subquery_for_set_))
-    , network_transfer_limits(std::move(network_transfer_limits_))
+    , network_transfer_limits(network_transfer_limits_)
 {
 }

@@ -5,9 +5,9 @@ namespace DB
 {

 ITransformingStep::ITransformingStep(DataStream input_stream, Block output_header, Traits traits, bool collect_processors_)
-    : transform_traits(std::move(traits.transform_traits))
+    : transform_traits(traits.transform_traits)
     , collect_processors(collect_processors_)
-    , data_stream_traits(std::move(traits.data_stream_traits))
+    , data_stream_traits(traits.data_stream_traits)
 {
     input_streams.emplace_back(std::move(input_stream));
     output_stream = createOutputStream(input_streams.front(), std::move(output_header), data_stream_traits);
@@ -195,7 +195,6 @@ void ReadFromRemote::addPipe(Pipes & pipes, const ClusterProxy::SelectStreamFact

         remote_query_executor = std::make_shared<RemoteQueryExecutor>(
             shard.shard_info.pool, query_string, shard.header, context, throttler, scalars, external_tables, stage);
-
         remote_query_executor->setLogger(log);
         remote_query_executor->setPoolMode(PoolMode::GET_MANY);

@@ -244,7 +243,7 @@ ReadFromParallelRemoteReplicasStep::ReadFromParallelRemoteReplicasStep(
     : ISourceStep(DataStream{.header = std::move(header_)})
     , coordinator(std::move(coordinator_))
     , shard(std::move(shard_))
-    , stage(std::move(stage_))
+    , stage(stage_)
     , main_table(std::move(main_table_))
     , table_func_ptr(table_func_ptr_)
     , context(context_)
@@ -323,7 +322,7 @@ void ReadFromParallelRemoteReplicasStep::addPipeForSingeReplica(Pipes & pipes, s

     remote_query_executor = std::make_shared<RemoteQueryExecutor>(
         pool, query_string, shard.header, context, throttler, scalars, external_tables, stage,
-        RemoteQueryExecutor::Extension{.parallel_reading_coordinator = coordinator, .replica_info = std::move(replica_info)});
+        RemoteQueryExecutor::Extension{.parallel_reading_coordinator = coordinator, .replica_info = replica_info});

     remote_query_executor->setLogger(log);

@@ -29,7 +29,7 @@ CreatingSetsTransform::CreatingSetsTransform(
     : IAccumulatingTransform(std::move(in_header_), std::move(out_header_))
     , WithContext(context_)
     , subquery(std::move(subquery_for_set_))
-    , network_transfer_limits(std::move(network_transfer_limits_))
+    , network_transfer_limits(network_transfer_limits_)
 {
 }

@@ -86,7 +86,7 @@ RemoteQueryExecutorReadContext::RemoteQueryExecutorReadContext(IConnections & co
     }

     auto routine = RemoteQueryExecutorRoutine{connections, *this};
-    fiber = boost::context::fiber(std::allocator_arg_t(), stack, std::move(routine));
+    fiber = boost::context::fiber(std::allocator_arg_t(), stack, routine);
 }

 void RemoteQueryExecutorReadContext::setConnectionFD(int fd, Poco::Timespan timeout, const std::string & fd_description)
@@ -175,7 +175,7 @@ HTTPRequestHandlerFactoryPtr createStaticHandlerFactory(IServer & server, const
     std::string response_content = server.config().getRawString(config_prefix + ".handler.response_content", "Ok.\n");
     std::string response_content_type = server.config().getString(config_prefix + ".handler.content_type", "text/plain; charset=UTF-8");
     auto factory = std::make_shared<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(
-        server, std::move(response_content), std::move(status), std::move(response_content_type));
+        server, std::move(response_content), status, std::move(response_content_type));

     factory->addFiltersFromConfig(server.config(), config_prefix);

@@ -760,7 +760,7 @@ void TCPHandler::processTablesStatusRequest()
         else
             status.is_replicated = false; //-V1048

-        response.table_states_by_id.emplace(table_name, std::move(status));
+        response.table_states_by_id.emplace(table_name, status);
     }

@@ -293,10 +293,10 @@ MergeTask::StageRuntimeContextPtr MergeTask::ExecuteAndFinalizeHorizontalPart::g
     new_ctx->column_sizes = std::move(ctx->column_sizes);
     new_ctx->compression_codec = std::move(ctx->compression_codec);
     new_ctx->tmp_disk = std::move(ctx->tmp_disk);
-    new_ctx->it_name_and_type = std::move(ctx->it_name_and_type);
-    new_ctx->column_num_for_vertical_merge = std::move(ctx->column_num_for_vertical_merge);
-    new_ctx->read_with_direct_io = std::move(ctx->read_with_direct_io);
-    new_ctx->need_sync = std::move(ctx->need_sync);
+    new_ctx->it_name_and_type = ctx->it_name_and_type;
+    new_ctx->column_num_for_vertical_merge = ctx->column_num_for_vertical_merge;
+    new_ctx->read_with_direct_io = ctx->read_with_direct_io;
+    new_ctx->need_sync = ctx->need_sync;

     ctx.reset();
     return new_ctx;
@@ -306,7 +306,7 @@ MergeTask::StageRuntimeContextPtr MergeTask::VerticalMergeStage::getContextForNe
 {
     auto new_ctx = std::make_shared<MergeProjectionsRuntimeContext>();

-    new_ctx->need_sync = std::move(ctx->need_sync);
+    new_ctx->need_sync = ctx->need_sync;

     ctx.reset();
     return new_ctx;
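
The MergeTask hunks apply the same rule when handing state from one stage context to the next: std::move stays on members that own resources and is dropped for scalars, where a move is only a copy. A sketch under those assumptions, with hypothetical context structs rather than the real MergeTask types:

    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    struct HorizontalStageCtx
    {
        std::vector<std::string> column_names;   // owns heap memory: moving it is a real win
        std::shared_ptr<int> tmp_disk;           // refcounted handle: moving skips an atomic bump
        bool need_sync = false;                  // scalar: "moving" it is just a copy
        bool read_with_direct_io = false;
    };

    struct VerticalStageCtx
    {
        std::vector<std::string> column_names;
        std::shared_ptr<int> tmp_disk;
        bool need_sync = false;
        bool read_with_direct_io = false;
    };

    std::shared_ptr<VerticalStageCtx> nextStage(std::shared_ptr<HorizontalStageCtx> ctx)
    {
        auto new_ctx = std::make_shared<VerticalStageCtx>();
        new_ctx->column_names = std::move(ctx->column_names);   // keep std::move: transfers the buffer
        new_ctx->tmp_disk = std::move(ctx->tmp_disk);           // keep std::move: steals the handle
        new_ctx->need_sync = ctx->need_sync;                    // no std::move: trivially copyable
        new_ctx->read_with_direct_io = ctx->read_with_direct_io;
        ctx.reset();
        return new_ctx;
    }

    int main()
    {
        auto ctx = std::make_shared<HorizontalStageCtx>();
        ctx->column_names = {"a", "b"};
        ctx->need_sync = true;
        auto next = nextStage(std::move(ctx));
        return (next->column_names.size() == 2 && next->need_sync) ? 0 : 1;
    }
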
@@ -639,7 +639,7 @@ MergeTreeBaseSelectProcessor::Status MergeTreeBaseSelectProcessor::performReques
         .partition_id = std::move(partition_id),
         .part_name = std::move(part_name),
         .projection_name = std::move(projection_name),
-        .block_range = std::move(block_range),
+        .block_range = block_range,
         .mark_ranges = std::move(requested_ranges)
     };

@@ -1501,7 +1501,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(
         LOG_TRACE(log, "Found (RIGHT) boundary mark: {}", searched_right);

         if (result_range.begin < result_range.end && may_be_true_in_range(result_range))
-            res.emplace_back(std::move(result_range));
+            res.emplace_back(result_range);

         LOG_TRACE(log, "Found {} range in {} steps", res.empty() ? "empty" : "continuous", steps);
     }
@@ -21,7 +21,7 @@ try
     /// If we need to read few rows, set one range per task to reduce number of read data.
     if (has_limit_below_one_block)
     {
-        mark_ranges_for_task = { std::move(all_mark_ranges.front()) };
+        mark_ranges_for_task = { all_mark_ranges.front() };
         all_mark_ranges.pop_front();
     }
     else
@@ -72,7 +72,7 @@ MergeTreeReaderCompact::MergeTreeReaderCompact(
             read_only_offsets[i] = (position != std::nullopt);
         }

-        column_positions[i] = std::move(position);
+        column_positions[i] = position;
     }

     /// Do not use max_read_buffer_size, but try to lower buffer size with maximal size of granule to avoid reading much data.
@@ -26,7 +26,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor(
     std::optional<ParallelReadingExtension> extension_)
     : MergeTreeBaseSelectProcessor{
         storage_snapshot_->getSampleBlockForColumns(required_columns_),
-        storage_, storage_snapshot_, prewhere_info_, std::move(actions_settings), max_block_size_rows_,
+        storage_, storage_snapshot_, prewhere_info_, actions_settings, max_block_size_rows_,
         preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_,
         reader_settings_, use_uncompressed_cache_, virt_column_names_, extension_},
     required_columns{std::move(required_columns_)},
@@ -29,7 +29,7 @@ MergeTreeThreadSelectProcessor::MergeTreeThreadSelectProcessor(
     std::optional<ParallelReadingExtension> extension_)
     :
     MergeTreeBaseSelectProcessor{
-        pool_->getHeader(), storage_, storage_snapshot_, prewhere_info_, std::move(actions_settings), max_block_size_rows_,
+        pool_->getHeader(), storage_, storage_snapshot_, prewhere_info_, actions_settings, max_block_size_rows_,
         preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_,
         reader_settings_, use_uncompressed_cache_, virt_column_names_, extension_},
     thread{thread_},
@@ -37,7 +37,7 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream(
         metadata_snapshot_,
         indices_to_recalc,
         default_codec,
-        std::move(writer_settings),
+        writer_settings,
         index_granularity);

     auto * writer_on_disk = dynamic_cast<MergeTreeDataPartWriterOnDisk *>(writer.get());
@@ -22,7 +22,7 @@ ReplicatedMergeTreePartHeader ReplicatedMergeTreePartHeader::fromColumnsAndCheck
 {
     auto columns_hash = getSipHash(columns_znode);
     auto checksums = MinimalisticDataPartChecksums::deserializeFrom(checksums_znode);
-    return ReplicatedMergeTreePartHeader(std::move(columns_hash), std::move(checksums));
+    return ReplicatedMergeTreePartHeader(columns_hash, std::move(checksums));
 }

 ReplicatedMergeTreePartHeader ReplicatedMergeTreePartHeader::fromColumnsAndChecksums(
@@ -2503,7 +2503,7 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo
         source_queue.emplace_back();
         auto & info = source_queue.back();
         info.data = std::move(res.data);
-        info.stat = std::move(res.stat);
+        info.stat = res.stat;
         try
         {
             info.parsed_entry = LogEntry::parse(info.data, info.stat);
@@ -83,7 +83,7 @@ void StorageS3Settings::loadFromConfig(const String & config_elem, const Poco::U
             rw_settings.max_single_part_upload_size = get_uint_for_key(key, "max_single_part_upload_size", true, settings.s3_max_single_part_upload_size);
             rw_settings.max_connections = get_uint_for_key(key, "max_connections", true, settings.s3_max_connections);

-            s3_settings.emplace(endpoint, S3Settings{std::move(auth_settings), std::move(rw_settings)});
+            s3_settings.emplace(endpoint, S3Settings{std::move(auth_settings), rw_settings});
         }
     }
 }
@@ -24,19 +24,19 @@ static DB::MySQLReplication::BinlogEventPtr parseSingleEventBody(
    {
        case DB::MySQLReplication::FORMAT_DESCRIPTION_EVENT:
        {
-            event = std::make_shared<DB::MySQLReplication::FormatDescriptionEvent>(std::move(header));
+            event = std::make_shared<DB::MySQLReplication::FormatDescriptionEvent>(header);
            event->parseEvent(*event_payload);
            break;
        }
        case DB::MySQLReplication::ROTATE_EVENT:
        {
-            event = std::make_shared<DB::MySQLReplication::RotateEvent>(std::move(header));
+            event = std::make_shared<DB::MySQLReplication::RotateEvent>(header);
            event->parseEvent(*event_payload);
            break;
        }
        case DB::MySQLReplication::QUERY_EVENT:
        {
-            event = std::make_shared<DB::MySQLReplication::QueryEvent>(std::move(header));
+            event = std::make_shared<DB::MySQLReplication::QueryEvent>(header);
            event->parseEvent(*event_payload);

            auto query = std::static_pointer_cast<DB::MySQLReplication::QueryEvent>(event);
@@ -45,7 +45,7 @@ static DB::MySQLReplication::BinlogEventPtr parseSingleEventBody(
                case DB::MySQLReplication::QUERY_EVENT_MULTI_TXN_FLAG:
                case DB::MySQLReplication::QUERY_EVENT_XA:
                {
-                    event = std::make_shared<DB::MySQLReplication::DryRunEvent>(std::move(query->header));
+                    event = std::make_shared<DB::MySQLReplication::DryRunEvent>(query->header);
                    break;
                }
                default:
@@ -55,7 +55,7 @@ static DB::MySQLReplication::BinlogEventPtr parseSingleEventBody(
        }
        case DB::MySQLReplication::XID_EVENT:
        {
-            event = std::make_shared<DB::MySQLReplication::XIDEvent>(std::move(header));
+            event = std::make_shared<DB::MySQLReplication::XIDEvent>(header);
            event->parseEvent(*event_payload);
            break;
        }
@@ -63,7 +63,7 @@ static DB::MySQLReplication::BinlogEventPtr parseSingleEventBody(
        {
            DB::MySQLReplication::TableMapEventHeader map_event_header;
            map_event_header.parse(*event_payload);
-            event = std::make_shared<DB::MySQLReplication::TableMapEvent>(std::move(header), map_event_header);
+            event = std::make_shared<DB::MySQLReplication::TableMapEvent>(header, map_event_header);
            event->parseEvent(*event_payload);
            last_table_map_event = std::static_pointer_cast<DB::MySQLReplication::TableMapEvent>(event);
            break;
@@ -73,7 +73,7 @@ static DB::MySQLReplication::BinlogEventPtr parseSingleEventBody(
        {
            DB::MySQLReplication::RowsEventHeader rows_header(header.type);
            rows_header.parse(*event_payload);
-            event = std::make_shared<DB::MySQLReplication::WriteRowsEvent>(last_table_map_event, std::move(header), rows_header);
+            event = std::make_shared<DB::MySQLReplication::WriteRowsEvent>(last_table_map_event, header, rows_header);
            event->parseEvent(*event_payload);
            break;
        }
@@ -82,7 +82,7 @@ static DB::MySQLReplication::BinlogEventPtr parseSingleEventBody(
        {
            DB::MySQLReplication::RowsEventHeader rows_header(header.type);
            rows_header.parse(*event_payload);
-            event = std::make_shared<DB::MySQLReplication::DeleteRowsEvent>(last_table_map_event, std::move(header), rows_header);
+            event = std::make_shared<DB::MySQLReplication::DeleteRowsEvent>(last_table_map_event, header, rows_header);
            event->parseEvent(*event_payload);
            break;
        }
@@ -91,19 +91,19 @@ static DB::MySQLReplication::BinlogEventPtr parseSingleEventBody(
        {
            DB::MySQLReplication::RowsEventHeader rows_header(header.type);
            rows_header.parse(*event_payload);
-            event = std::make_shared<DB::MySQLReplication::UpdateRowsEvent>(last_table_map_event, std::move(header), rows_header);
+            event = std::make_shared<DB::MySQLReplication::UpdateRowsEvent>(last_table_map_event, header, rows_header);
            event->parseEvent(*event_payload);
            break;
        }
        case DB::MySQLReplication::GTID_EVENT:
        {
-            event = std::make_shared<DB::MySQLReplication::GTIDEvent>(std::move(header));
+            event = std::make_shared<DB::MySQLReplication::GTIDEvent>(header);
            event->parseEvent(*event_payload);
            break;
        }
        default:
        {
-            event = std::make_shared<DB::MySQLReplication::DryRunEvent>(std::move(header));
+            event = std::make_shared<DB::MySQLReplication::DryRunEvent>(header);
            event->parseEvent(*event_payload);
            break;
        }