Use setSetting() with implicit conversion of true/false to Field.

This commit is contained in:
Vitaly Baranov 2021-08-23 08:57:00 +03:00
parent 05247c7eef
commit ff1fad8521
6 changed files with 7 additions and 7 deletions

View File

@@ -70,7 +70,7 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream(
// Do not deduplicate insertions into MV if the main insertion is Ok
if (disable_deduplication_for_children)
insert_context->setSetting("insert_deduplicate", Field{false});
insert_context->setSetting("insert_deduplicate", false);
// Separate min_insert_block_size_rows/min_insert_block_size_bytes for children
if (insert_settings.min_insert_block_size_rows_for_materialized_views)

View File

@@ -297,7 +297,7 @@ void registerDictionarySourceExecutablePool(DictionarySourceFactory & factory)
/** Currently parallel parsing input format cannot read exactly max_block_size rows from input,
* so it will be blocked on ReadBufferFromFileDescriptor because this file descriptor represent pipe that does not have eof.
*/
context->setSetting("input_format_parallel_parsing", Field{false});
context->setSetting("input_format_parallel_parsing", false);
String settings_config_prefix = config_prefix + ".executable_pool";

View File

@@ -1267,7 +1267,7 @@ void TCPHandler::receiveQuery()
/// compatibility.
if (query_kind == ClientInfo::QueryKind::SECONDARY_QUERY)
{
query_context->setSetting("normalize_function_names", Field(0));
query_context->setSetting("normalize_function_names", false);
}
}

View File

@@ -1211,8 +1211,8 @@ MergeTreeData::MutableDataPartPtr MergeTreeDataMergerMutator::mutatePartToTempor
context_for_reading->setSetting("max_streams_to_max_threads_ratio", 1);
context_for_reading->setSetting("max_threads", 1);
/// Allow mutations to work when force_index_by_date or force_primary_key is on.
context_for_reading->setSetting("force_index_by_date", Field(0));
context_for_reading->setSetting("force_primary_key", Field(0));
context_for_reading->setSetting("force_index_by_date", false);
context_for_reading->setSetting("force_primary_key", false);
MutationCommands commands_for_part;
for (const auto & command : commands)

View File

@@ -196,7 +196,7 @@ String StorageRabbitMQ::getTableBasedName(String name, const StorageID & table_i
std::shared_ptr<Context> StorageRabbitMQ::addSettings(ContextPtr local_context) const
{
auto modified_context = Context::createCopy(local_context);
modified_context->setSetting("input_format_skip_unknown_fields", Field{true});
modified_context->setSetting("input_format_skip_unknown_fields", true);
modified_context->setSetting("input_format_allow_errors_ratio", 0.);
modified_context->setSetting("input_format_allow_errors_num", rabbitmq_settings->rabbitmq_skip_broken_messages.value);

View File

@@ -209,7 +209,7 @@ Pipe StorageMerge::read(
* since there is no certainty that it works when one of table is MergeTree and other is not.
*/
auto modified_context = Context::createCopy(local_context);
modified_context->setSetting("optimize_move_to_prewhere", Field{false});
modified_context->setSetting("optimize_move_to_prewhere", false);
/// What will be result structure depending on query processed stage in source tables?
Block header = getHeaderForProcessingStage(*this, column_names, metadata_snapshot, query_info, local_context, processed_stage);