Fixed clang-tidy CheckTriviallyCopyableMove errors

This commit is contained in:
HeenaBansal2009 2022-05-30 11:09:03 -07:00
parent 05d2b13510
commit b7eb6bbd38
26 changed files with 46 additions and 46 deletions

View File

@ -918,21 +918,21 @@ namespace MySQLReplication
{ {
case FORMAT_DESCRIPTION_EVENT: case FORMAT_DESCRIPTION_EVENT:
{ {
event = std::make_shared<FormatDescriptionEvent>(std::move(event_header)); event = std::make_shared<FormatDescriptionEvent>(event_header);
event->parseEvent(event_payload); event->parseEvent(event_payload);
position.update(event); position.update(event);
break; break;
} }
case ROTATE_EVENT: case ROTATE_EVENT:
{ {
event = std::make_shared<RotateEvent>(std::move(event_header)); event = std::make_shared<RotateEvent>(event_header);
event->parseEvent(event_payload); event->parseEvent(event_payload);
position.update(event); position.update(event);
break; break;
} }
case QUERY_EVENT: case QUERY_EVENT:
{ {
event = std::make_shared<QueryEvent>(std::move(event_header)); event = std::make_shared<QueryEvent>(event_header);
event->parseEvent(event_payload); event->parseEvent(event_payload);
position.update(event); position.update(event);
@ -942,7 +942,7 @@ namespace MySQLReplication
case QUERY_EVENT_MULTI_TXN_FLAG: case QUERY_EVENT_MULTI_TXN_FLAG:
case QUERY_EVENT_XA: case QUERY_EVENT_XA:
{ {
event = std::make_shared<DryRunEvent>(std::move(query->header)); event = std::make_shared<DryRunEvent>(query->header);
break; break;
} }
default: default:
@ -952,7 +952,7 @@ namespace MySQLReplication
} }
case XID_EVENT: case XID_EVENT:
{ {
event = std::make_shared<XIDEvent>(std::move(event_header)); event = std::make_shared<XIDEvent>(event_header);
event->parseEvent(event_payload); event->parseEvent(event_payload);
position.update(event); position.update(event);
break; break;
@ -963,14 +963,14 @@ namespace MySQLReplication
map_event_header.parse(event_payload); map_event_header.parse(event_payload);
if (doReplicate(map_event_header.schema, map_event_header.table)) if (doReplicate(map_event_header.schema, map_event_header.table))
{ {
event = std::make_shared<TableMapEvent>(std::move(event_header), map_event_header); event = std::make_shared<TableMapEvent>(event_header, map_event_header);
event->parseEvent(event_payload); event->parseEvent(event_payload);
auto table_map = std::static_pointer_cast<TableMapEvent>(event); auto table_map = std::static_pointer_cast<TableMapEvent>(event);
table_maps[table_map->table_id] = table_map; table_maps[table_map->table_id] = table_map;
} }
else else
{ {
event = std::make_shared<DryRunEvent>(std::move(event_header)); event = std::make_shared<DryRunEvent>(event_header);
event->parseEvent(event_payload); event->parseEvent(event_payload);
} }
break; break;
@ -982,7 +982,7 @@ namespace MySQLReplication
if (doReplicate(rows_header.table_id)) if (doReplicate(rows_header.table_id))
event = std::make_shared<WriteRowsEvent>(table_maps.at(rows_header.table_id), std::move(event_header), rows_header); event = std::make_shared<WriteRowsEvent>(table_maps.at(rows_header.table_id), std::move(event_header), rows_header);
else else
event = std::make_shared<DryRunEvent>(std::move(event_header)); event = std::make_shared<DryRunEvent>(event_header);
event->parseEvent(event_payload); event->parseEvent(event_payload);
if (rows_header.flags & ROWS_END_OF_STATEMENT) if (rows_header.flags & ROWS_END_OF_STATEMENT)
@ -996,7 +996,7 @@ namespace MySQLReplication
if (doReplicate(rows_header.table_id)) if (doReplicate(rows_header.table_id))
event = std::make_shared<DeleteRowsEvent>(table_maps.at(rows_header.table_id), std::move(event_header), rows_header); event = std::make_shared<DeleteRowsEvent>(table_maps.at(rows_header.table_id), std::move(event_header), rows_header);
else else
event = std::make_shared<DryRunEvent>(std::move(event_header)); event = std::make_shared<DryRunEvent>(event_header);
event->parseEvent(event_payload); event->parseEvent(event_payload);
if (rows_header.flags & ROWS_END_OF_STATEMENT) if (rows_header.flags & ROWS_END_OF_STATEMENT)
@ -1008,9 +1008,9 @@ namespace MySQLReplication
RowsEventHeader rows_header(event_header.type); RowsEventHeader rows_header(event_header.type);
rows_header.parse(event_payload); rows_header.parse(event_payload);
if (doReplicate(rows_header.table_id)) if (doReplicate(rows_header.table_id))
event = std::make_shared<UpdateRowsEvent>(table_maps.at(rows_header.table_id), std::move(event_header), rows_header); event = std::make_shared<UpdateRowsEvent>(table_maps.at(rows_header.table_id), event_header, rows_header);
else else
event = std::make_shared<DryRunEvent>(std::move(event_header)); event = std::make_shared<DryRunEvent>(event_header);
event->parseEvent(event_payload); event->parseEvent(event_payload);
if (rows_header.flags & ROWS_END_OF_STATEMENT) if (rows_header.flags & ROWS_END_OF_STATEMENT)
@ -1019,14 +1019,14 @@ namespace MySQLReplication
} }
case GTID_EVENT: case GTID_EVENT:
{ {
event = std::make_shared<GTIDEvent>(std::move(event_header)); event = std::make_shared<GTIDEvent>(event_header);
event->parseEvent(event_payload); event->parseEvent(event_payload);
position.update(event); position.update(event);
break; break;
} }
default: default:
{ {
event = std::make_shared<DryRunEvent>(std::move(event_header)); event = std::make_shared<DryRunEvent>(event_header);
event->parseEvent(event_payload); event->parseEvent(event_payload);
break; break;
} }

View File

@ -11,7 +11,7 @@ static const auto DISK_CHECK_ERROR_RETRY_TIME = 3;
DiskLocalCheckThread::DiskLocalCheckThread(DiskLocal * disk_, ContextPtr context_, UInt64 local_disk_check_period_ms) DiskLocalCheckThread::DiskLocalCheckThread(DiskLocal * disk_, ContextPtr context_, UInt64 local_disk_check_period_ms)
: WithContext(context_) : WithContext(context_)
, disk(std::move(disk_)) , disk(disk_)
, check_period_ms(local_disk_check_period_ms) , check_period_ms(local_disk_check_period_ms)
, log(&Poco::Logger::get(fmt::format("DiskLocalCheckThread({})", disk->getName()))) , log(&Poco::Logger::get(fmt::format("DiskLocalCheckThread({})", disk->getName())))
{ {

View File

@ -333,7 +333,7 @@ public:
} }
size_t col_key_size = sub_map_column->size(); size_t col_key_size = sub_map_column->size();
auto column = is_const? ColumnConst::create(std::move(sub_map_column), std::move(col_key_size)) : std::move(sub_map_column); auto column = is_const? ColumnConst::create(std::move(sub_map_column), col_key_size) : std::move(sub_map_column);
ColumnsWithTypeAndName new_arguments = ColumnsWithTypeAndName new_arguments =
{ {
@ -480,7 +480,7 @@ public:
} }
size_t col_key_size = sub_map_column->size(); size_t col_key_size = sub_map_column->size();
auto column = is_const? ColumnConst::create(std::move(sub_map_column), std::move(col_key_size)) : std::move(sub_map_column); auto column = is_const? ColumnConst::create(std::move(sub_map_column), col_key_size) : std::move(sub_map_column);
new_arguments = { new_arguments = {
{ {

View File

@ -588,7 +588,7 @@ void Aggregator::compileAggregateFunctionsIfNeeded()
.aggregate_data_offset = offset_of_aggregate_function .aggregate_data_offset = offset_of_aggregate_function
}; };
functions_to_compile.emplace_back(std::move(function_to_compile)); functions_to_compile.emplace_back(function_to_compile);
functions_description += function->getDescription(); functions_description += function->getDescription();
functions_description += ' '; functions_description += ' ';

View File

@ -109,7 +109,7 @@ void TablesStatusResponse::read(ReadBuffer & in, UInt64 server_protocol_revision
TableStatus status; TableStatus status;
status.read(in); status.read(in);
table_states_by_id.emplace(std::move(table_name), std::move(status)); table_states_by_id.emplace(std::move(table_name), status);
} }
} }

View File

@ -205,7 +205,7 @@ namespace
else else
return false; return false;
res_all_limits.emplace_back(std::move(limits)); res_all_limits.emplace_back(limits);
return true; return true;
}; };

View File

@ -42,7 +42,7 @@ ExecutingGraph::Edge & ExecutingGraph::addEdge(Edges & edges, Edge edge, const I
from->getName()); from->getName());
edge.to = it->second; edge.to = it->second;
auto & added_edge = edges.emplace_back(std::move(edge)); auto & added_edge = edges.emplace_back(edge);
added_edge.update_info.id = &added_edge; added_edge.update_info.id = &added_edge;
return added_edge; return added_edge;
} }
@ -66,7 +66,7 @@ bool ExecutingGraph::addEdges(uint64_t node)
const IProcessor * to = &it->getOutputPort().getProcessor(); const IProcessor * to = &it->getOutputPort().getProcessor();
auto output_port_number = to->getOutputPortNumber(&it->getOutputPort()); auto output_port_number = to->getOutputPortNumber(&it->getOutputPort());
Edge edge(0, true, from_input, output_port_number, &nodes[node]->post_updated_input_ports); Edge edge(0, true, from_input, output_port_number, &nodes[node]->post_updated_input_ports);
auto & added_edge = addEdge(nodes[node]->back_edges, std::move(edge), from, to); auto & added_edge = addEdge(nodes[node]->back_edges, edge, from, to);
it->setUpdateInfo(&added_edge.update_info); it->setUpdateInfo(&added_edge.update_info);
} }
} }
@ -84,7 +84,7 @@ bool ExecutingGraph::addEdges(uint64_t node)
const IProcessor * to = &it->getInputPort().getProcessor(); const IProcessor * to = &it->getInputPort().getProcessor();
auto input_port_number = to->getInputPortNumber(&it->getInputPort()); auto input_port_number = to->getInputPortNumber(&it->getInputPort());
Edge edge(0, false, input_port_number, from_output, &nodes[node]->post_updated_output_ports); Edge edge(0, false, input_port_number, from_output, &nodes[node]->post_updated_output_ports);
auto & added_edge = addEdge(nodes[node]->direct_edges, std::move(edge), from, to); auto & added_edge = addEdge(nodes[node]->direct_edges, edge, from, to);
it->setUpdateInfo(&added_edge.update_info); it->setUpdateInfo(&added_edge.update_info);
} }
} }

View File

@ -66,7 +66,7 @@ AggregatingStep::AggregatingStep(
: ITransformingStep(input_stream_, appendGroupingColumn(params_.getHeader(final_), grouping_sets_params_), getTraits(), false) : ITransformingStep(input_stream_, appendGroupingColumn(params_.getHeader(final_), grouping_sets_params_), getTraits(), false)
, params(std::move(params_)) , params(std::move(params_))
, grouping_sets_params(std::move(grouping_sets_params_)) , grouping_sets_params(std::move(grouping_sets_params_))
, final(std::move(final_)) , final(final_)
, max_block_size(max_block_size_) , max_block_size(max_block_size_)
, aggregation_in_order_max_block_bytes(aggregation_in_order_max_block_bytes_) , aggregation_in_order_max_block_bytes(aggregation_in_order_max_block_bytes_)
, merge_threads(merge_threads_) , merge_threads(merge_threads_)

View File

@ -40,7 +40,7 @@ CreatingSetStep::CreatingSetStep(
, WithContext(context_) , WithContext(context_)
, description(std::move(description_)) , description(std::move(description_))
, subquery_for_set(std::move(subquery_for_set_)) , subquery_for_set(std::move(subquery_for_set_))
, network_transfer_limits(std::move(network_transfer_limits_)) , network_transfer_limits(network_transfer_limits_)
{ {
} }

View File

@ -5,9 +5,9 @@ namespace DB
{ {
ITransformingStep::ITransformingStep(DataStream input_stream, Block output_header, Traits traits, bool collect_processors_) ITransformingStep::ITransformingStep(DataStream input_stream, Block output_header, Traits traits, bool collect_processors_)
: transform_traits(std::move(traits.transform_traits)) : transform_traits(traits.transform_traits)
, collect_processors(collect_processors_) , collect_processors(collect_processors_)
, data_stream_traits(std::move(traits.data_stream_traits)) , data_stream_traits(traits.data_stream_traits)
{ {
input_streams.emplace_back(std::move(input_stream)); input_streams.emplace_back(std::move(input_stream));
output_stream = createOutputStream(input_streams.front(), std::move(output_header), data_stream_traits); output_stream = createOutputStream(input_streams.front(), std::move(output_header), data_stream_traits);

View File

@ -198,7 +198,7 @@ void ReadFromRemote::addPipe(Pipes & pipes, const ClusterProxy::IStreamFactory::
remote_query_executor = std::make_shared<RemoteQueryExecutor>( remote_query_executor = std::make_shared<RemoteQueryExecutor>(
pool ? pool : shard.pool, query_string, shard.header, context, throttler, scalars, external_tables, stage, pool ? pool : shard.pool, query_string, shard.header, context, throttler, scalars, external_tables, stage,
RemoteQueryExecutor::Extension{.parallel_reading_coordinator = std::move(coordinator), .replica_info = std::move(replica_info)}); RemoteQueryExecutor::Extension{.parallel_reading_coordinator = std::move(coordinator), .replica_info = replica_info});
remote_query_executor->setLogger(log); remote_query_executor->setLogger(log);

View File

@ -29,7 +29,7 @@ CreatingSetsTransform::CreatingSetsTransform(
: IAccumulatingTransform(std::move(in_header_), std::move(out_header_)) : IAccumulatingTransform(std::move(in_header_), std::move(out_header_))
, WithContext(context_) , WithContext(context_)
, subquery(std::move(subquery_for_set_)) , subquery(std::move(subquery_for_set_))
, network_transfer_limits(std::move(network_transfer_limits_)) , network_transfer_limits(network_transfer_limits_)
{ {
} }

View File

@ -86,7 +86,7 @@ RemoteQueryExecutorReadContext::RemoteQueryExecutorReadContext(IConnections & co
} }
auto routine = RemoteQueryExecutorRoutine{connections, *this}; auto routine = RemoteQueryExecutorRoutine{connections, *this};
fiber = boost::context::fiber(std::allocator_arg_t(), stack, std::move(routine)); fiber = boost::context::fiber(std::allocator_arg_t(), stack, routine);
} }
void RemoteQueryExecutorReadContext::setConnectionFD(int fd, Poco::Timespan timeout, const std::string & fd_description) void RemoteQueryExecutorReadContext::setConnectionFD(int fd, Poco::Timespan timeout, const std::string & fd_description)

View File

@ -175,7 +175,7 @@ HTTPRequestHandlerFactoryPtr createStaticHandlerFactory(IServer & server, const
std::string response_content = server.config().getRawString(config_prefix + ".handler.response_content", "Ok.\n"); std::string response_content = server.config().getRawString(config_prefix + ".handler.response_content", "Ok.\n");
std::string response_content_type = server.config().getString(config_prefix + ".handler.content_type", "text/plain; charset=UTF-8"); std::string response_content_type = server.config().getString(config_prefix + ".handler.content_type", "text/plain; charset=UTF-8");
auto factory = std::make_shared<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>( auto factory = std::make_shared<HandlingRuleHTTPHandlerFactory<StaticRequestHandler>>(
server, std::move(response_content), std::move(status), std::move(response_content_type)); server, std::move(response_content), status, std::move(response_content_type));
factory->addFiltersFromConfig(server.config(), config_prefix); factory->addFiltersFromConfig(server.config(), config_prefix);

View File

@ -760,7 +760,7 @@ void TCPHandler::processTablesStatusRequest()
else else
status.is_replicated = false; //-V1048 status.is_replicated = false; //-V1048
response.table_states_by_id.emplace(table_name, std::move(status)); response.table_states_by_id.emplace(table_name, status);
} }

View File

@ -293,10 +293,10 @@ MergeTask::StageRuntimeContextPtr MergeTask::ExecuteAndFinalizeHorizontalPart::g
new_ctx->column_sizes = std::move(ctx->column_sizes); new_ctx->column_sizes = std::move(ctx->column_sizes);
new_ctx->compression_codec = std::move(ctx->compression_codec); new_ctx->compression_codec = std::move(ctx->compression_codec);
new_ctx->tmp_disk = std::move(ctx->tmp_disk); new_ctx->tmp_disk = std::move(ctx->tmp_disk);
new_ctx->it_name_and_type = std::move(ctx->it_name_and_type); new_ctx->it_name_and_type = ctx->it_name_and_type;
new_ctx->column_num_for_vertical_merge = std::move(ctx->column_num_for_vertical_merge); new_ctx->column_num_for_vertical_merge = ctx->column_num_for_vertical_merge;
new_ctx->read_with_direct_io = std::move(ctx->read_with_direct_io); new_ctx->read_with_direct_io = ctx->read_with_direct_io;
new_ctx->need_sync = std::move(ctx->need_sync); new_ctx->need_sync = ctx->need_sync;
ctx.reset(); ctx.reset();
return new_ctx; return new_ctx;
@ -306,7 +306,7 @@ MergeTask::StageRuntimeContextPtr MergeTask::VerticalMergeStage::getContextForNe
{ {
auto new_ctx = std::make_shared<MergeProjectionsRuntimeContext>(); auto new_ctx = std::make_shared<MergeProjectionsRuntimeContext>();
new_ctx->need_sync = std::move(ctx->need_sync); new_ctx->need_sync = ctx->need_sync;
ctx.reset(); ctx.reset();
return new_ctx; return new_ctx;

View File

@ -644,7 +644,7 @@ MergeTreeBaseSelectProcessor::Status MergeTreeBaseSelectProcessor::performReques
.partition_id = std::move(partition_id), .partition_id = std::move(partition_id),
.part_name = std::move(part_name), .part_name = std::move(part_name),
.projection_name = std::move(projection_name), .projection_name = std::move(projection_name),
.block_range = std::move(block_range), .block_range = block_range,
.mark_ranges = std::move(requested_ranges) .mark_ranges = std::move(requested_ranges)
}; };

View File

@ -1496,7 +1496,7 @@ MarkRanges MergeTreeDataSelectExecutor::markRangesFromPKRange(
LOG_TRACE(log, "Found (RIGHT) boundary mark: {}", searched_right); LOG_TRACE(log, "Found (RIGHT) boundary mark: {}", searched_right);
if (result_range.begin < result_range.end && may_be_true_in_range(result_range)) if (result_range.begin < result_range.end && may_be_true_in_range(result_range))
res.emplace_back(std::move(result_range)); res.emplace_back(result_range);
LOG_TRACE(log, "Found {} range in {} steps", res.empty() ? "empty" : "continuous", steps); LOG_TRACE(log, "Found {} range in {} steps", res.empty() ? "empty" : "continuous", steps);
} }

View File

@ -21,7 +21,7 @@ try
/// If we need to read few rows, set one range per task to reduce number of read data. /// If we need to read few rows, set one range per task to reduce number of read data.
if (has_limit_below_one_block) if (has_limit_below_one_block)
{ {
mark_ranges_for_task = { std::move(all_mark_ranges.front()) }; mark_ranges_for_task = { all_mark_ranges.front() };
all_mark_ranges.pop_front(); all_mark_ranges.pop_front();
} }
else else

View File

@ -72,7 +72,7 @@ MergeTreeReaderCompact::MergeTreeReaderCompact(
read_only_offsets[i] = (position != std::nullopt); read_only_offsets[i] = (position != std::nullopt);
} }
column_positions[i] = std::move(position); column_positions[i] = position;
} }
/// Do not use max_read_buffer_size, but try to lower buffer size with maximal size of granule to avoid reading much data. /// Do not use max_read_buffer_size, but try to lower buffer size with maximal size of granule to avoid reading much data.

View File

@ -26,7 +26,7 @@ MergeTreeSelectProcessor::MergeTreeSelectProcessor(
std::optional<ParallelReadingExtension> extension_) std::optional<ParallelReadingExtension> extension_)
: MergeTreeBaseSelectProcessor{ : MergeTreeBaseSelectProcessor{
storage_snapshot_->getSampleBlockForColumns(required_columns_), storage_snapshot_->getSampleBlockForColumns(required_columns_),
storage_, storage_snapshot_, prewhere_info_, std::move(actions_settings), max_block_size_rows_, storage_, storage_snapshot_, prewhere_info_, actions_settings, max_block_size_rows_,
preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_, preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_,
reader_settings_, use_uncompressed_cache_, virt_column_names_, extension_}, reader_settings_, use_uncompressed_cache_, virt_column_names_, extension_},
required_columns{std::move(required_columns_)}, required_columns{std::move(required_columns_)},

View File

@ -29,7 +29,7 @@ MergeTreeThreadSelectProcessor::MergeTreeThreadSelectProcessor(
std::optional<ParallelReadingExtension> extension_) std::optional<ParallelReadingExtension> extension_)
: :
MergeTreeBaseSelectProcessor{ MergeTreeBaseSelectProcessor{
pool_->getHeader(), storage_, storage_snapshot_, prewhere_info_, std::move(actions_settings), max_block_size_rows_, pool_->getHeader(), storage_, storage_snapshot_, prewhere_info_, actions_settings, max_block_size_rows_,
preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_, preferred_block_size_bytes_, preferred_max_column_in_block_size_bytes_,
reader_settings_, use_uncompressed_cache_, virt_column_names_, extension_}, reader_settings_, use_uncompressed_cache_, virt_column_names_, extension_},
thread{thread_}, thread{thread_},

View File

@ -37,7 +37,7 @@ MergedColumnOnlyOutputStream::MergedColumnOnlyOutputStream(
metadata_snapshot_, metadata_snapshot_,
indices_to_recalc, indices_to_recalc,
default_codec, default_codec,
std::move(writer_settings), writer_settings,
index_granularity); index_granularity);
auto * writer_on_disk = dynamic_cast<MergeTreeDataPartWriterOnDisk *>(writer.get()); auto * writer_on_disk = dynamic_cast<MergeTreeDataPartWriterOnDisk *>(writer.get());

View File

@ -22,7 +22,7 @@ ReplicatedMergeTreePartHeader ReplicatedMergeTreePartHeader::fromColumnsAndCheck
{ {
auto columns_hash = getSipHash(columns_znode); auto columns_hash = getSipHash(columns_znode);
auto checksums = MinimalisticDataPartChecksums::deserializeFrom(checksums_znode); auto checksums = MinimalisticDataPartChecksums::deserializeFrom(checksums_znode);
return ReplicatedMergeTreePartHeader(std::move(columns_hash), std::move(checksums)); return ReplicatedMergeTreePartHeader(columns_hash, std::move(checksums));
} }
ReplicatedMergeTreePartHeader ReplicatedMergeTreePartHeader::fromColumnsAndChecksums( ReplicatedMergeTreePartHeader ReplicatedMergeTreePartHeader::fromColumnsAndChecksums(

View File

@ -2503,7 +2503,7 @@ void StorageReplicatedMergeTree::cloneReplica(const String & source_replica, Coo
source_queue.emplace_back(); source_queue.emplace_back();
auto & info = source_queue.back(); auto & info = source_queue.back();
info.data = std::move(res.data); info.data = std::move(res.data);
info.stat = std::move(res.stat); info.stat = res.stat;
try try
{ {
info.parsed_entry = LogEntry::parse(info.data, info.stat); info.parsed_entry = LogEntry::parse(info.data, info.stat);

View File

@ -83,7 +83,7 @@ void StorageS3Settings::loadFromConfig(const String & config_elem, const Poco::U
rw_settings.max_single_part_upload_size = get_uint_for_key(key, "max_single_part_upload_size", true, settings.s3_max_single_part_upload_size); rw_settings.max_single_part_upload_size = get_uint_for_key(key, "max_single_part_upload_size", true, settings.s3_max_single_part_upload_size);
rw_settings.max_connections = get_uint_for_key(key, "max_connections", true, settings.s3_max_connections); rw_settings.max_connections = get_uint_for_key(key, "max_connections", true, settings.s3_max_connections);
s3_settings.emplace(endpoint, S3Settings{std::move(auth_settings), std::move(rw_settings)}); s3_settings.emplace(endpoint, S3Settings{std::move(auth_settings), rw_settings});
} }
} }
} }