Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-27 18:12:02 +00:00)
Fixed clang-tidy-CheckTriviallyCopyableMove-errors
This commit is contained in:
parent 05d2b13510
commit b7eb6bbd38
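For context: the clang-tidy check named in the commit title flags std::move applied to trivially copyable values, where the cast degenerates to a plain copy and only obscures intent, so the fix throughout this diff is to pass such arguments directly. Below is a minimal sketch of the pattern, using a hypothetical EventHeader type rather than the actual ClickHouse classes:

#include <memory>
#include <utility>

// Hypothetical stand-in: trivially copyable, so "moving" it is just a copy.
struct EventHeader
{
    unsigned long long timestamp = 0;
    unsigned int type = 0;
};

struct FormatDescriptionEvent
{
    explicit FormatDescriptionEvent(EventHeader header_) : header(header_) {}
    EventHeader header;
};

int main()
{
    EventHeader event_header{1234, 15};

    // Before: flagged by the check, because std::move on a trivially copyable
    // argument still performs a copy and falsely suggests ownership transfer.
    auto flagged = std::make_shared<FormatDescriptionEvent>(std::move(event_header));

    // After (the pattern applied throughout this commit): pass the value directly.
    auto fixed = std::make_shared<FormatDescriptionEvent>(event_header);

    return (flagged && fixed) ? 0 : 1;
}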
@@ -918,21 +918,21 @@ namespace MySQLReplication
{
case FORMAT_DESCRIPTION_EVENT:
{
- event = std::make_shared<FormatDescriptionEvent>(std::move(event_header));
+ event = std::make_shared<FormatDescriptionEvent>(event_header);
event->parseEvent(event_payload);
position.update(event);
break;
}
case ROTATE_EVENT:
{
- event = std::make_shared<RotateEvent>(std::move(event_header));
+ event = std::make_shared<RotateEvent>(event_header);
event->parseEvent(event_payload);
position.update(event);
break;
}
case QUERY_EVENT:
{
- event = std::make_shared<QueryEvent>(std::move(event_header));
+ event = std::make_shared<QueryEvent>(event_header);
event->parseEvent(event_payload);
position.update(event);

@@ -942,7 +942,7 @@ namespace MySQLReplication
case QUERY_EVENT_MULTI_TXN_FLAG:
case QUERY_EVENT_XA:
{
- event = std::make_shared<DryRunEvent>(std::move(query->header));
+ event = std::make_shared<DryRunEvent>(query->header);
break;
}
default:
@@ -952,7 +952,7 @@ namespace MySQLReplication
}
case XID_EVENT:
{
- event = std::make_shared<XIDEvent>(std::move(event_header));
+ event = std::make_shared<XIDEvent>(event_header);
event->parseEvent(event_payload);
position.update(event);
break;
@@ -963,14 +963,14 @@ namespace MySQLReplication
map_event_header.parse(event_payload);
if (doReplicate(map_event_header.schema, map_event_header.table))
{
- event = std::make_shared<TableMapEvent>(std::move(event_header), map_event_header);
+ event = std::make_shared<TableMapEvent>(event_header, map_event_header);
event->parseEvent(event_payload);
auto table_map = std::static_pointer_cast<TableMapEvent>(event);
table_maps[table_map->table_id] = table_map;
}
else
{
- event = std::make_shared<DryRunEvent>(std::move(event_header));
+ event = std::make_shared<DryRunEvent>(event_header);
event->parseEvent(event_payload);
}
break;
@@ -982,7 +982,7 @@ namespace MySQLReplication
if (doReplicate(rows_header.table_id))
event = std::make_shared<WriteRowsEvent>(table_maps.at(rows_header.table_id), std::move(event_header), rows_header);
else
- event = std::make_shared<DryRunEvent>(std::move(event_header));
+ event = std::make_shared<DryRunEvent>(event_header);

event->parseEvent(event_payload);
if (rows_header.flags & ROWS_END_OF_STATEMENT)
@@ -996,7 +996,7 @@ namespace MySQLReplication
if (doReplicate(rows_header.table_id))
event = std::make_shared<DeleteRowsEvent>(table_maps.at(rows_header.table_id), std::move(event_header), rows_header);
else
- event = std::make_shared<DryRunEvent>(std::move(event_header));
+ event = std::make_shared<DryRunEvent>(event_header);

event->parseEvent(event_payload);
if (rows_header.flags & ROWS_END_OF_STATEMENT)
@@ -1008,9 +1008,9 @@ namespace MySQLReplication
RowsEventHeader rows_header(event_header.type);
rows_header.parse(event_payload);
if (doReplicate(rows_header.table_id))
- event = std::make_shared<UpdateRowsEvent>(table_maps.at(rows_header.table_id), std::move(event_header), rows_header);
+ event = std::make_shared<UpdateRowsEvent>(table_maps.at(rows_header.table_id), event_header, rows_header);
else
- event = std::make_shared<DryRunEvent>(std::move(event_header));
+ event = std::make_shared<DryRunEvent>(event_header);

event->parseEvent(event_payload);
if (rows_header.flags & ROWS_END_OF_STATEMENT)
@@ -1019,14 +1019,14 @@ namespace MySQLReplication
}
case GTID_EVENT:
{
- event = std::make_shared<GTIDEvent>(std::move(event_header));
+ event = std::make_shared<GTIDEvent>(event_header);
event->parseEvent(event_payload);
position.update(event);
break;
}
default:
{
- event = std::make_shared<DryRunEvent>(std::move(event_header));
+ event = std::make_shared<DryRunEvent>(event_header);
event->parseEvent(event_payload);
break;
}
@@ -11,7 +11,7 @@ static const auto DISK_CHECK_ERROR_RETRY_TIME = 3;

DiskLocalCheckThread::DiskLocalCheckThread(DiskLocal * disk_, ContextPtr context_, UInt64 local_disk_check_period_ms)
: WithContext(context_)
- , disk(std::move(disk_))
+ , disk(disk_)
, check_period_ms(local_disk_check_period_ms)
, log(&Poco::Logger::get(fmt::format("DiskLocalCheckThread({})", disk->getName())))
{
@@ -333,7 +333,7 @@ public:
}

size_t col_key_size = sub_map_column->size();
- auto column = is_const? ColumnConst::create(std::move(sub_map_column), std::move(col_key_size)) : std::move(sub_map_column);
+ auto column = is_const? ColumnConst::create(std::move(sub_map_column), col_key_size) : std::move(sub_map_column);

ColumnsWithTypeAndName new_arguments =
{
@@ -480,7 +480,7 @@ public:
}

size_t col_key_size = sub_map_column->size();
- auto column = is_const? ColumnConst::create(std::move(sub_map_column), std::move(col_key_size)) : std::move(sub_map_column);
+ auto column = is_const? ColumnConst::create(std::move(sub_map_column), col_key_size) : std::move(sub_map_column);

new_arguments = {
{
@@ -588,7 +588,7 @@ void Aggregator::compileAggregateFunctionsIfNeeded()
.aggregate_data_offset = offset_of_aggregate_function
};

- functions_to_compile.emplace_back(std::move(function_to_compile));
+ functions_to_compile.emplace_back(function_to_compile);

functions_description += function->getDescription();
functions_description += ' ';
@@ -109,7 +109,7 @@ void TablesStatusResponse::read(ReadBuffer & in, UInt64 server_protocol_revision

TableStatus status;
status.read(in);
- table_states_by_id.emplace(std::move(table_name), std::move(status));
+ table_states_by_id.emplace(std::move(table_name), status);
}
}

@@ -205,7 +205,7 @@ namespace
else
return false;

- res_all_limits.emplace_back(std::move(limits));
+ res_all_limits.emplace_back(limits);
return true;
};

@@ -42,7 +42,7 @@ ExecutingGraph::Edge & ExecutingGraph::addEdge(Edges & edges, Edge edge, const I
from->getName());

edge.to = it->second;
- auto & added_edge = edges.emplace_back(std::move(edge));
+ auto & added_edge = edges.emplace_back(edge);
added_edge.update_info.id = &added_edge;
return added_edge;
}
@@ -66,7 +66,7 @@ bool ExecutingGraph::addEdges(uint64_t node)
const IProcessor * to = &it->getOutputPort().getProcessor();
auto output_port_number = to->getOutputPortNumber(&it->getOutputPort());
Edge edge(0, true, from_input, output_port_number, &nodes[node]->post_updated_input_ports);
- auto & added_edge = addEdge(nodes[node]->back_edges, std::move(edge), from, to);
+ auto & added_edge = addEdge(nodes[node]->back_edges, edge, from, to);
it->setUpdateInfo(&added_edge.update_info);
}
}
@@ -84,7 +84,7 @@ bool ExecutingGraph::addEdges(uint64_t node)
const IProcessor * to = &it->getInputPort().getProcessor();
auto input_port_number = to->getInputPortNumber(&it->getInputPort());
Edge edge(0, false, input_port_number, from_output, &nodes[node]->post_updated_output_ports);
- auto & added_edge = addEdge(nodes[node]->direct_edges, std::move(edge), from, to);
+ auto & added_edge = addEdge(nodes[node]->direct_edges, edge, from, to);
it->setUpdateInfo(&added_edge.update_info);
}
}
@@ -66,7 +66,7 @@ AggregatingStep::AggregatingStep(
: ITransformingStep(input_stream_, appendGroupingColumn(params_.getHeader(final_), grouping_sets_params_), getTraits(), false)
, params(std::move(params_))
, grouping_sets_params(std::move(grouping_sets_params_))
- , final(std::move(final_))
+ , final(final_)
, max_block_size(max_block_size_)
, aggregation_in_order_max_block_bytes(aggregation_in_order_max_block_bytes_)
, merge_threads(merge_threads_)
@@ -40,7 +40,7 @@ CreatingSetStep::CreatingSetStep(
, WithContext(context_)
, description(std::move(description_))
, subquery_for_set(std::move(subquery_for_set_))
- , network_transfer_limits(std::move(network_transfer_limits_))
+ , network_transfer_limits(network_transfer_limits_)
{
}

@@ -5,9 +5,9 @@ namespace DB
{

ITransformingStep::ITransformingStep(DataStream input_stream, Block output_header, Traits traits, bool collect_processors_)
- : transform_traits(std::move(traits.transform_traits))
+ : transform_traits(traits.transform_traits)
, collect_processors(collect_processors_)
- , data_stream_traits(std::move(traits.data_stream_traits))
+ , data_stream_traits(traits.data_stream_traits)
{
input_streams.emplace_back(std::move(input_stream));
output_stream = createOutputStream(input_streams.front(), std::move(output_header), data_stream_traits);
@@ -198,7 +198,7 @@ void ReadFromRemote::addPipe(Pipes & pipes, const ClusterProxy::IStreamFactory::

remote_query_executor = std::make_shared<RemoteQueryExecutor>(
pool ? pool : shard.pool, query_string, shard.header, context, throttler, scalars, external_tables, stage,
- RemoteQueryExecutor::Extension{.parallel_reading_coordinator = std::move(coordinator), .replica_info = std::move(replica_info)});
+ RemoteQueryExecutor::Extension{.parallel_reading_coordinator = std::move(coordinator), .replica_info = replica_info});

remote_query_executor->setLogger(log);

@@ -29,7 +29,7 @@ CreatingSetsTransform::CreatingSetsTransform(
: IAccumulatingTransform(std::move(in_header_), std::move(out_header_))
, WithContext(context_)
, subquery(std::move(subquery_for_set_))
- , network_transfer_limits(std::move(network_transfer_limits_))
+ , network_transfer_limits(network_transfer_limits_)
{
}

@@ -86,7 +86,7 @@ RemoteQueryExecutorReadContext::RemoteQueryExecutorReadContext(IConnections & co
}

auto routine = RemoteQueryExecutorRoutine{connections, *this};
- fiber = boost::context::fiber(std::allocator_arg_t(), stack, std::move(routine));
+ fiber = boost::context::fiber(std::allocator_arg_t(), stack, routine);
}

void RemoteQueryExecutorReadContext::setConnectionFD(int fd, Poco::Timespan timeout, const std::string & fd_description)
@@ -175,7 +175,7 @@ HTTPRequestHandlerFactoryPtr createStaticHandlerFactory(IServer & server, const
std::string response_content = server.config().getRawString(config_prefix + ".handler.response_content", "Ok.\n");
std::string response_content_type = server.config().getString(config_prefix + ".handler.content_type", "text/plain; charset=UTF-8");
auto factory = std::make_shared<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(
- server, std::move(response_content), std::move(status), std::move(response_content_type));
+ server, std::move(response_content), status, std::move(response_content_type));

factory->addFiltersFromConfig(server.config(), config_prefix);

@@ -760,7 +760,7 @@ void TCPHandler::processTablesStatusRequest()
else
status.is_replicated = false; //-V1048

- response.table_states_by_id.emplace(table_name, std::move(status));
+ response.table_states_by_id.emplace(table_name, status);
}

@@ -293,10 +293,10 @@ MergeTask::StageRuntimeContextPtr MergeTask::ExecuteAndFinalizeHorizontalPart::g
new_ctx->column_sizes = std::move(ctx->column_sizes);
new_ctx->compression_codec = std::move(ctx->compression_codec);
new_ctx->tmp_disk = std::move(ctx->tmp_disk);
- new_ctx->it_name_and_type = std::move(ctx->it_name_and_type);
- new_ctx->column_num_for_vertical_merge = std::move(ctx->column_num_for_vertical_merge);
- new_ctx->read_with_direct_io = std::move(ctx->read_with_direct_io);
- new_ctx->need_sync = std::move(ctx->need_sync);
+ new_ctx->it_name_and_type = ctx->it_name_and_type;
+ new_ctx->column_num_for_vertical_merge = ctx->column_num_for_vertical_merge;
+ new_ctx->read_with_direct_io = ctx->read_with_direct_io;
+ new_ctx->need_sync = ctx->need_sync;

ctx.reset();
return new_ctx;
@@ -306,7 +306,7 @@ MergeTask::StageRuntimeContextPtr MergeTask::VerticalMergeStage::getContextForNe
{
auto new_ctx = std::make_shared<MergeProjectionsRuntimeContext>();

- new_ctx->need_sync = std::move(ctx->need_sync);
+ new_ctx->need_sync = ctx->need_sync;

ctx.reset();
return new_ctx;
@@ -644,7 +644,7 @@ MergeTreeBaseSelectProcessor::Status MergeTreeBaseSelectProcessor::performReques
.partition_id = std::move(partition_id),
.part_name = std::move(part_name),
.projection_name = std::move(projection_name),
- .block_range = std::move(block_range),
+ .block_range = block_range,
.mark_ranges = std::move(requested_ranges)
};

@@ -1496,7 +1496,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(
LOG_TRACE(log, "Found (RIGHT) boundary mark: {}", searched_right);

if (result_range.begin < result_range.end && may_be_true_in_range(result_range))
- res.emplace_back(std::move(result_range));
+ res.emplace_back(result_range);

LOG_TRACE(log, "Found {} range in {} steps", res.empty() ? "empty" : "continuous", steps);
}
@@ -21,7 +21,7 @@ try
/// If we need to read few rows, set one range per task to reduce number of read data.
if (has_limit_below_one_block)
{
- mark_ranges_for_task = { std::move(all_mark_ranges.front()) };
+ mark_ranges_for_task = { all_mark_ranges.front() };
all_mark_ranges.pop_front();
}
else
@@ -72,7 +72,7 @@ MergeTreeReaderCompact::MergeTreeReaderCompact(
read_only_offsets[i] = (position != std::nullopt);
}

- column_positions[i] = std::move(position);
+ column_positions[i] = position;
}

/// Do not use max_read_buffer_size, but try to lower buffer size with maximal size of granule to avoid reading much data.
@@ -26,7 +26,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor(
std::optional<ParallelReadingExtension> extension_)
: MergeTreeBaseSelectProcessor{
storage_snapshot_->getSampleBlockForColumns(required_columns_),
- storage_, storage_snapshot_, prewhere_info_, std::move(actions_settings), max_block_size_rows_,
+ storage_, storage_snapshot_, prewhere_info_, actions_settings, max_block_size_rows_,
preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_,
reader_settings_, use_uncompressed_cache_, virt_column_names_, extension_},
required_columns{std::move(required_columns_)},
@@ -29,7 +29,7 @@ MergeTreeThreadSelectProcessor::MergeTreeThreadSelectProcessor(
std::optional<ParallelReadingExtension> extension_)
:
MergeTreeBaseSelectProcessor{
- pool_->getHeader(), storage_, storage_snapshot_, prewhere_info_, std::move(actions_settings), max_block_size_rows_,
+ pool_->getHeader(), storage_, storage_snapshot_, prewhere_info_, actions_settings, max_block_size_rows_,
preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_,
reader_settings_, use_uncompressed_cache_, virt_column_names_, extension_},
thread{thread_},
@@ -37,7 +37,7 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream(
metadata_snapshot_,
indices_to_recalc,
default_codec,
- std::move(writer_settings),
+ writer_settings,
index_granularity);

auto * writer_on_disk = dynamic_cast<MergeTreeDataPartWriterOnDisk *>(writer.get());
@@ -22,7 +22,7 @@ ReplicatedMergeTreePartHeader ReplicatedMergeTreePartHeader::fromColumnsAndCheck
{
auto columns_hash = getSipHash(columns_znode);
auto checksums = MinimalisticDataPartChecksums::deserializeFrom(checksums_znode);
- return ReplicatedMergeTreePartHeader(std::move(columns_hash), std::move(checksums));
+ return ReplicatedMergeTreePartHeader(columns_hash, std::move(checksums));
}

ReplicatedMergeTreePartHeader ReplicatedMergeTreePartHeader::fromColumnsAndChecksums(
@@ -2503,7 +2503,7 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo
source_queue.emplace_back();
auto & info = source_queue.back();
info.data = std::move(res.data);
- info.stat = std::move(res.stat);
+ info.stat = res.stat;
try
{
info.parsed_entry = LogEntry::parse(info.data, info.stat);
@@ -83,7 +83,7 @@ void StorageS3Settings::loadFromConfig(const String & config_elem, const Poco::U
rw_settings.max_single_part_upload_size = get_uint_for_key(key, "max_single_part_upload_size", true, settings.s3_max_single_part_upload_size);
rw_settings.max_connections = get_uint_for_key(key, "max_connections", true, settings.s3_max_connections);

- s3_settings.emplace(endpoint, S3Settings{std::move(auth_settings), std::move(rw_settings)});
+ s3_settings.emplace(endpoint, S3Settings{std::move(auth_settings), rw_settings});
}
}
}