diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 887c5cb86bc..3e613532f3a 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -209,8 +209,8 @@ std::vector Client::loadWarningMessages() {} /* query_parameters */, "" /* query_id */, QueryProcessingStage::Complete, - &global_context->getSettingsRef(), - &global_context->getClientInfo(), false, {}); + &client_context->getSettingsRef(), + &client_context->getClientInfo(), false, {}); while (true) { Packet packet = connection->receivePacket(); @@ -306,9 +306,6 @@ void Client::initialize(Poco::Util::Application & self) if (env_password && !config().has("password")) config().setString("password", env_password); - // global_context->setApplicationType(Context::ApplicationType::CLIENT); - global_context->setQueryParameters(query_parameters); - /// settings and limits could be specified in config file, but passed settings has higher priority for (const auto & setting : global_context->getSettingsRef().allUnchanged()) { @@ -382,7 +379,7 @@ try showWarnings(); /// Set user password complexity rules - auto & access_control = global_context->getAccessControl(); + auto & access_control = client_context->getAccessControl(); access_control.setPasswordComplexityRules(connection->getPasswordComplexityRules()); if (is_interactive && !delayed_interactive) @@ -459,7 +456,7 @@ void Client::connect() << connection_parameters.host << ":" << connection_parameters.port << (!connection_parameters.user.empty() ? " as user " + connection_parameters.user : "") << "." << std::endl; - connection = Connection::createConnection(connection_parameters, global_context); + connection = Connection::createConnection(connection_parameters, client_context); if (max_client_network_bandwidth) { @@ -528,7 +525,7 @@ void Client::connect() } } - if (!global_context->getSettingsRef().use_client_time_zone) + if (!client_context->getSettingsRef().use_client_time_zone) { const auto & time_zone = connection->getServerTimezone(connection_parameters.timeouts); if (!time_zone.empty()) @@ -611,7 +608,7 @@ void Client::printChangedSettings() const } }; - print_changes(global_context->getSettingsRef().changes(), "settings"); + print_changes(client_context->getSettingsRef().changes(), "settings"); print_changes(cmd_merge_tree_settings.changes(), "MergeTree settings"); } @@ -709,7 +706,7 @@ bool Client::processWithFuzzing(const String & full_query) { const char * begin = full_query.data(); orig_ast = parseQuery(begin, begin + full_query.size(), - global_context->getSettingsRef(), + client_context->getSettingsRef(), /*allow_multi_statements=*/ true); } catch (const Exception & e) @@ -733,7 +730,7 @@ bool Client::processWithFuzzing(const String & full_query) } // Kusto is not a subject for fuzzing (yet) - if (global_context->getSettingsRef().dialect == DB::Dialect::kusto) + if (client_context->getSettingsRef().dialect == DB::Dialect::kusto) { return true; } @@ -1166,6 +1163,11 @@ void Client::processOptions(const OptionsDescription & options_description, if (options.count("opentelemetry-tracestate")) global_context->getClientTraceContext().tracestate = options["opentelemetry-tracestate"].as(); + + /// In case of clickhouse-client the `client_context` can be just an alias for the `global_context`. + /// (There is no need to copy the context because clickhouse-client has no background tasks so it won't use that context in parallel.) 
+ client_context = global_context; + initClientContext(); } @@ -1205,11 +1207,6 @@ void Client::processConfig() pager = config().getString("pager", ""); setDefaultFormatsAndCompressionFromConfiguration(); - - global_context->setClientName(std::string(DEFAULT_CLIENT_NAME)); - global_context->setQueryKindInitial(); - global_context->setQuotaClientKey(config().getString("quota_key", "")); - global_context->setQueryKind(query_kind); } diff --git a/programs/client/Client.h b/programs/client/Client.h index 229608f787d..9571440d6ba 100644 --- a/programs/client/Client.h +++ b/programs/client/Client.h @@ -16,7 +16,6 @@ public: int main(const std::vector & /*args*/) override; protected: - Poco::Util::LayeredConfiguration & getClientConfiguration() override; bool processWithFuzzing(const String & full_query) override; diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 48e0cca7b73..ce0e179939d 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -295,6 +295,8 @@ void LocalServer::cleanup() if (suggest) suggest.reset(); + client_context.reset(); + if (global_context) { global_context->shutdown(); @@ -436,7 +438,7 @@ void LocalServer::connect() in = input.get(); } connection = LocalConnection::createConnection( - connection_parameters, global_context, in, need_render_progress, need_render_profile_events, server_display_name); + connection_parameters, client_context, in, need_render_progress, need_render_profile_events, server_display_name); } @@ -497,8 +499,6 @@ try initTTYBuffer(toProgressOption(getClientConfiguration().getString("progress", "default"))); ASTAlterCommand::setFormatAlterCommandsWithParentheses(true); - applyCmdSettings(global_context); - /// try to load user defined executable functions, throw on error and die try { @@ -510,6 +510,11 @@ try throw; } + /// Must be called after we stopped initializing the global context and changing its settings. + /// After this point the global context must be stayed almost unchanged till shutdown, + /// and all necessary changes must be made to the client context instead. + createClientContext(); + if (is_interactive) { clearTerminal(); @@ -735,6 +740,9 @@ void LocalServer::processConfig() /// Load global settings from default_profile and system_profile. global_context->setDefaultProfiles(getClientConfiguration()); + /// Command-line parameters can override settings from the default profile. + applyCmdSettings(global_context); + /// We load temporary database first, because projections need it. DatabaseCatalog::instance().initializeAndLoadTemporaryDatabase(); @@ -778,10 +786,6 @@ void LocalServer::processConfig() server_display_name = getClientConfiguration().getString("display_name", ""); prompt_by_server_display_name = getClientConfiguration().getRawString("prompt_by_server_display_name.default", ":) "); - - global_context->setQueryKindInitial(); - global_context->setQueryKind(query_kind); - global_context->setQueryParameters(query_parameters); } @@ -860,6 +864,16 @@ void LocalServer::applyCmdOptions(ContextMutablePtr context) } +void LocalServer::createClientContext() +{ + /// In case of clickhouse-local it's necessary to use a separate context for client-related purposes. + /// We can't just change the global context because it is used in background tasks (for example, in merges) + /// which don't expect that the global context can suddenly change. 
+ client_context = Context::createCopy(global_context); + initClientContext(); +} + + void LocalServer::processOptions(const OptionsDescription &, const CommandLineOptions & options, const std::vector &, const std::vector &) { if (options.count("table")) diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index 0715f358313..ae9980311e1 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -31,7 +31,6 @@ public: int main(const std::vector & /*args*/) override; protected: - Poco::Util::LayeredConfiguration & getClientConfiguration() override; void connect() override; @@ -50,7 +49,6 @@ protected: void processConfig() override; void readArguments(int argc, char ** argv, Arguments & common_arguments, std::vector &, std::vector &) override; - void updateLoggerLevel(const String & logs_level) override; private: @@ -67,6 +65,8 @@ private: void applyCmdOptions(ContextMutablePtr context); void applyCmdSettings(ContextMutablePtr context); + void createClientContext(); + ServerSettings server_settings; std::optional status; diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index b1fe2554988..51fe5ee6ec2 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -1740,7 +1740,7 @@ QueryAnalyzer::QueryTreeNodesWithNames QueryAnalyzer::resolveQualifiedMatcher(Qu const auto * tuple_data_type = typeid_cast(result_type.get()); if (!tuple_data_type) throw Exception(ErrorCodes::UNSUPPORTED_METHOD, - "Qualified matcher {} find non compound expression {} with type {}. Expected tuple or array of tuples. In scope {}", + "Qualified matcher {} found a non-compound expression {} with type {}. Expected a tuple or an array of tuples. In scope {}", matcher_node->formatASTForErrorMessage(), expression_query_tree_node->formatASTForErrorMessage(), expression_query_tree_node->getResultType()->getName(), diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 04af9db7afe..78d8e7e3d6a 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -477,7 +477,7 @@ void ClientBase::sendExternalTables(ASTPtr parsed_query) std::vector data; for (auto & table : external_tables) - data.emplace_back(table.getData(global_context)); + data.emplace_back(table.getData(client_context)); connection->sendExternalTablesData(data); } @@ -690,10 +690,10 @@ try /// intermixed with data with parallel formatting. /// It may increase code complexity significantly. if (!extras_into_stdout || select_only_into_file) - output_format = global_context->getOutputFormatParallelIfPossible( + output_format = client_context->getOutputFormatParallelIfPossible( current_format, out_file_buf ? *out_file_buf : *out_buf, block); else - output_format = global_context->getOutputFormat( + output_format = client_context->getOutputFormat( current_format, out_file_buf ? 
*out_file_buf : *out_buf, block); output_format->setAutoFlush(); @@ -772,6 +772,15 @@ void ClientBase::adjustSettings() global_context->setSettings(settings); } +void ClientBase::initClientContext() +{ + client_context->setClientName(std::string(DEFAULT_CLIENT_NAME)); + client_context->setQuotaClientKey(getClientConfiguration().getString("quota_key", "")); + client_context->setQueryKindInitial(); + client_context->setQueryKind(query_kind); + client_context->setQueryParameters(query_parameters); +} + bool ClientBase::isRegularFile(int fd) { struct stat file_stat; @@ -962,7 +971,7 @@ void ClientBase::processTextAsSingleQuery(const String & full_query) /// client-side. Thus we need to parse the query. const char * begin = full_query.data(); auto parsed_query = parseQuery(begin, begin + full_query.size(), - global_context->getSettingsRef(), + client_context->getSettingsRef(), /*allow_multi_statements=*/ false); if (!parsed_query) @@ -985,7 +994,7 @@ void ClientBase::processTextAsSingleQuery(const String & full_query) /// But for asynchronous inserts we don't extract data, because it's needed /// to be done on server side in that case (for coalescing the data from multiple inserts on server side). const auto * insert = parsed_query->as(); - if (insert && isSyncInsertWithData(*insert, global_context)) + if (insert && isSyncInsertWithData(*insert, client_context)) query_to_execute = full_query.substr(0, insert->data - full_query.data()); else query_to_execute = full_query; @@ -1103,7 +1112,7 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa } } - const auto & settings = global_context->getSettingsRef(); + const auto & settings = client_context->getSettingsRef(); const Int32 signals_before_stop = settings.partial_result_on_first_cancel ? 2 : 1; int retries_left = 10; @@ -1118,10 +1127,10 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa connection_parameters.timeouts, query, query_parameters, - global_context->getCurrentQueryId(), + client_context->getCurrentQueryId(), query_processing_stage, - &global_context->getSettingsRef(), - &global_context->getClientInfo(), + &client_context->getSettingsRef(), + &client_context->getClientInfo(), true, [&](const Progress & progress) { onProgress(progress); }); @@ -1308,7 +1317,7 @@ void ClientBase::onProgress(const Progress & value) void ClientBase::onTimezoneUpdate(const String & tz) { - global_context->setSetting("session_timezone", tz); + client_context->setSetting("session_timezone", tz); } @@ -1504,13 +1513,13 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de void ClientBase::setInsertionTable(const ASTInsertQuery & insert_query) { - if (!global_context->hasInsertionTable() && insert_query.table) + if (!client_context->hasInsertionTable() && insert_query.table) { String table = insert_query.table->as().shortName(); if (!table.empty()) { String database = insert_query.database ? 
insert_query.database->as().shortName() : ""; - global_context->setInsertionTable(StorageID(database, table)); + client_context->setInsertionTable(StorageID(database, table)); } } } @@ -1561,7 +1570,7 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars const auto & parsed_insert_query = parsed_query->as(); if ((!parsed_insert_query.data && !parsed_insert_query.infile) && (is_interactive || (!stdin_is_a_tty && !isStdinNotEmptyAndValid(std_in)))) { - const auto & settings = global_context->getSettingsRef(); + const auto & settings = client_context->getSettingsRef(); if (settings.throw_if_no_data_to_insert) throw Exception(ErrorCodes::NO_DATA_TO_INSERT, "No data to insert"); else @@ -1575,10 +1584,10 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars connection_parameters.timeouts, query, query_parameters, - global_context->getCurrentQueryId(), + client_context->getCurrentQueryId(), query_processing_stage, - &global_context->getSettingsRef(), - &global_context->getClientInfo(), + &client_context->getSettingsRef(), + &client_context->getClientInfo(), true, [&](const Progress & progress) { onProgress(progress); }); @@ -1626,7 +1635,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des /// Set callback to be called on file progress. if (tty_buf) - progress_indication.setFileProgressCallback(global_context, *tty_buf); + progress_indication.setFileProgressCallback(client_context, *tty_buf); } /// If data fetched from file (maybe compressed file) @@ -1660,10 +1669,10 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des } StorageFile::CommonArguments args{ - WithContext(global_context), + WithContext(client_context), parsed_insert_query->table_id, current_format, - getFormatSettings(global_context), + getFormatSettings(client_context), compression_method, columns_for_storage_file, ConstraintsDescription{}, @@ -1671,7 +1680,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des {}, String{}, }; - StoragePtr storage = std::make_shared(in_file, global_context->getUserFilesPath(), args); + StoragePtr storage = std::make_shared(in_file, client_context->getUserFilesPath(), args); storage->startup(); SelectQueryInfo query_info; @@ -1682,16 +1691,16 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des storage->read( plan, sample.getNames(), - storage->getStorageSnapshot(metadata, global_context), + storage->getStorageSnapshot(metadata, client_context), query_info, - global_context, + client_context, {}, - global_context->getSettingsRef().max_block_size, + client_context->getSettingsRef().max_block_size, getNumberOfPhysicalCPUCores()); auto builder = plan.buildQueryPipeline( - QueryPlanOptimizationSettings::fromContext(global_context), - BuildQueryPipelineSettings::fromContext(global_context)); + QueryPlanOptimizationSettings::fromContext(client_context), + BuildQueryPipelineSettings::fromContext(client_context)); QueryPlanResourceHolder resources; auto pipe = QueryPipelineBuilder::getPipe(std::move(*builder), resources); @@ -1752,14 +1761,14 @@ void ClientBase::sendDataFrom(ReadBuffer & buf, Block & sample, const ColumnsDes current_format = insert->format; } - auto source = global_context->getInputFormat(current_format, buf, sample, insert_format_max_block_size); + auto source = client_context->getInputFormat(current_format, buf, sample, insert_format_max_block_size); Pipe pipe(source); if 
(columns_description.hasDefaults()) { pipe.addSimpleTransform([&](const Block & header) { - return std::make_shared(header, columns_description, *source, global_context); + return std::make_shared(header, columns_description, *source, client_context); }); } @@ -1921,12 +1930,12 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin if (is_interactive) { - global_context->setCurrentQueryId(""); + client_context->setCurrentQueryId(""); // Generate a new query_id for (const auto & query_id_format : query_id_formats) { writeString(query_id_format.first, std_out); - writeString(fmt::format(fmt::runtime(query_id_format.second), fmt::arg("query_id", global_context->getCurrentQueryId())), std_out); + writeString(fmt::format(fmt::runtime(query_id_format.second), fmt::arg("query_id", client_context->getCurrentQueryId())), std_out); writeChar('\n', std_out); std_out.next(); } @@ -1953,7 +1962,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin auto password = auth_data->getPassword(); if (password) - global_context->getAccessControl().checkPasswordComplexityRules(*password); + client_context->getAccessControl().checkPasswordComplexityRules(*password); } } } @@ -1968,15 +1977,15 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin std::optional old_settings; SCOPE_EXIT_SAFE({ if (old_settings) - global_context->setSettings(*old_settings); + client_context->setSettings(*old_settings); }); auto apply_query_settings = [&](const IAST & settings_ast) { if (!old_settings) - old_settings.emplace(global_context->getSettingsRef()); - global_context->applySettingsChanges(settings_ast.as()->changes); - global_context->resetSettingsToDefaultValue(settings_ast.as()->default_settings); + old_settings.emplace(client_context->getSettingsRef()); + client_context->applySettingsChanges(settings_ast.as()->changes); + client_context->resetSettingsToDefaultValue(settings_ast.as()->default_settings); }; const auto * insert = parsed_query->as(); @@ -2009,7 +2018,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin if (insert && insert->select) insert->tryFindInputFunction(input_function); - bool is_async_insert_with_inlined_data = global_context->getSettingsRef().async_insert && insert && insert->hasInlinedData(); + bool is_async_insert_with_inlined_data = client_context->getSettingsRef().async_insert && insert && insert->hasInlinedData(); if (is_async_insert_with_inlined_data) { @@ -2044,9 +2053,9 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin if (change.name == "profile") current_profile = change.value.safeGet(); else - global_context->applySettingChange(change); + client_context->applySettingChange(change); } - global_context->resetSettingsToDefaultValue(set_query->default_settings); + client_context->resetSettingsToDefaultValue(set_query->default_settings); /// Query parameters inside SET queries should be also saved on the client side /// to override their previous definitions set with --param_* arguments @@ -2054,7 +2063,7 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin for (const auto & [name, value] : set_query->query_parameters) query_parameters.insert_or_assign(name, value); - global_context->addQueryParameters(NameToNameMap{set_query->query_parameters.begin(), set_query->query_parameters.end()}); + client_context->addQueryParameters(NameToNameMap{set_query->query_parameters.begin(), 
set_query->query_parameters.end()}); } if (const auto * use_query = parsed_query->as()) { @@ -2131,8 +2140,8 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( if (this_query_begin >= all_queries_end) return MultiQueryProcessingStage::QUERIES_END; - unsigned max_parser_depth = static_cast(global_context->getSettingsRef().max_parser_depth); - unsigned max_parser_backtracks = static_cast(global_context->getSettingsRef().max_parser_backtracks); + unsigned max_parser_depth = static_cast(client_context->getSettingsRef().max_parser_depth); + unsigned max_parser_backtracks = static_cast(client_context->getSettingsRef().max_parser_backtracks); // If there are only comments left until the end of file, we just // stop. The parser can't handle this situation because it always @@ -2152,7 +2161,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( try { parsed_query = parseQuery(this_query_end, all_queries_end, - global_context->getSettingsRef(), + client_context->getSettingsRef(), /*allow_multi_statements=*/ true); } catch (const Exception & e) @@ -2195,7 +2204,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText( { this_query_end = find_first_symbols<'\n'>(insert_ast->data, all_queries_end); insert_ast->end = this_query_end; - query_to_execute_end = isSyncInsertWithData(*insert_ast, global_context) ? insert_ast->data : this_query_end; + query_to_execute_end = isSyncInsertWithData(*insert_ast, client_context) ? insert_ast->data : this_query_end; } query_to_execute = all_queries_text.substr(this_query_begin - all_queries_text.data(), query_to_execute_end - this_query_begin); @@ -2404,13 +2413,13 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text) // , where the inline data is delimited by semicolon and not by a // newline. auto * insert_ast = parsed_query->as(); - if (insert_ast && isSyncInsertWithData(*insert_ast, global_context)) + if (insert_ast && isSyncInsertWithData(*insert_ast, client_context)) { this_query_end = insert_ast->end; adjustQueryEnd( this_query_end, all_queries_end, - static_cast(global_context->getSettingsRef().max_parser_depth), - static_cast(global_context->getSettingsRef().max_parser_backtracks)); + static_cast(client_context->getSettingsRef().max_parser_depth), + static_cast(client_context->getSettingsRef().max_parser_backtracks)); } // Report error. @@ -2541,10 +2550,10 @@ void ClientBase::runInteractive() if (load_suggestions) { /// Load suggestion data from the server. 
- if (global_context->getApplicationType() == Context::ApplicationType::CLIENT) - suggest->load(global_context, connection_parameters, getClientConfiguration().getInt("suggestion_limit"), wait_for_suggestions_to_load); - else if (global_context->getApplicationType() == Context::ApplicationType::LOCAL) - suggest->load(global_context, connection_parameters, getClientConfiguration().getInt("suggestion_limit"), wait_for_suggestions_to_load); + if (client_context->getApplicationType() == Context::ApplicationType::CLIENT) + suggest->load(client_context, connection_parameters, getClientConfiguration().getInt("suggestion_limit"), wait_for_suggestions_to_load); + else if (client_context->getApplicationType() == Context::ApplicationType::LOCAL) + suggest->load(client_context, connection_parameters, getClientConfiguration().getInt("suggestion_limit"), wait_for_suggestions_to_load); } if (home_path.empty()) @@ -2682,7 +2691,7 @@ void ClientBase::runInteractive() { // If a separate connection loading suggestions failed to open a new session, // use the main session to receive them. - suggest->load(*connection, connection_parameters.timeouts, getClientConfiguration().getInt("suggestion_limit"), global_context->getClientInfo()); + suggest->load(*connection, connection_parameters.timeouts, getClientConfiguration().getInt("suggestion_limit"), client_context->getClientInfo()); } try @@ -2731,10 +2740,10 @@ bool ClientBase::processMultiQueryFromFile(const String & file_name) if (!getClientConfiguration().has("log_comment")) { - Settings settings = global_context->getSettings(); + Settings settings = client_context->getSettings(); /// NOTE: cannot use even weakly_canonical() since it fails for /dev/stdin due to resolving of "pipe:[X]" settings.log_comment = fs::absolute(fs::path(file_name)); - global_context->setSettings(settings); + client_context->setSettings(settings); } return executeMultiQuery(queries_from_file); diff --git a/src/Client/ClientBase.h b/src/Client/ClientBase.h index 986990aecaa..e3d5fe8dc97 100644 --- a/src/Client/ClientBase.h +++ b/src/Client/ClientBase.h @@ -206,6 +206,9 @@ protected: /// Adjust some settings after command line options and config had been processed. void adjustSettings(); + /// Initializes the client context. + void initClientContext(); + void setDefaultFormatsAndCompressionFromConfiguration(); void initTTYBuffer(ProgressOption progress); @@ -215,6 +218,9 @@ protected: SharedContextHolder shared_context; ContextMutablePtr global_context; + /// Client context is a context used only by the client to parse queries, process query parameters and to connect to clickhouse-server. + ContextMutablePtr client_context; + LoggerPtr fatal_log; Poco::AutoPtr fatal_channel_ptr; Poco::AutoPtr fatal_console_channel_ptr; diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 86e04b2ab4e..c7a1a7e2739 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -186,7 +186,7 @@ class IColumn; M(Bool, allow_suspicious_ttl_expressions, false, "Reject TTL expressions that don't depend on any of table's columns. It indicates a user error most of the time.", 0) \ M(Bool, allow_suspicious_variant_types, false, "In CREATE TABLE statement allows specifying Variant type with similar variant types (for example, with different numeric or date types). Enabling this setting may introduce some ambiguity when working with values with similar types.", 0) \ M(Bool, allow_suspicious_primary_key, false, "Forbid suspicious PRIMARY KEY/ORDER BY for MergeTree (i.e. 
SimpleAggregateFunction)", 0) \ - M(Bool, compile_expressions, false, "Compile some scalar functions and operators to native code.", 0) \ + M(Bool, compile_expressions, true, "Compile some scalar functions and operators to native code.", 0) \ M(UInt64, min_count_to_compile_expression, 3, "The number of identical expressions before they are JIT-compiled", 0) \ M(Bool, compile_aggregate_expressions, true, "Compile aggregate functions to native code.", 0) \ M(UInt64, min_count_to_compile_aggregate_expression, 3, "The number of identical aggregate expressions before they are JIT-compiled", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 9faf77e9087..87eaeff0ca9 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -57,6 +57,7 @@ String ClickHouseVersion::toString() const /// Note: please check if the key already exists to prevent duplicate entries. static std::initializer_list> settings_changes_history_initializer = { + {"24.8", {{"compile_expressions", false, true, "We believe that the LLVM infrastructure behind the JIT compiler is stable enough to enable this setting by default."}}}, {"24.7", {{"output_format_parquet_write_page_index", false, true, "Add a possibility to write page index into parquet files."}, {"output_format_binary_encode_types_in_binary_format", false, false, "Added new setting to allow to write type names in binary format in RowBinaryWithNamesAndTypes output format"}, {"input_format_binary_decode_types_in_binary_format", false, false, "Added new setting to allow to read type names in binary format in RowBinaryWithNamesAndTypes input format"}, @@ -80,7 +81,7 @@ static std::initializer_listgetFileName(); } - size_t getFileSize() override { return impl->getFileSize(); } + std::optional tryGetFileSize() override { return impl->tryGetFileSize(); } String getInfoForLog() override { return impl->getInfoForLog(); } diff --git a/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp b/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp index da1ea65f2ea..ba864035777 100644 --- a/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp +++ b/src/Disks/IO/ReadBufferFromAzureBlobStorage.cpp @@ -253,16 +253,15 @@ void ReadBufferFromAzureBlobStorage::initialize() initialized = true; } -size_t ReadBufferFromAzureBlobStorage::getFileSize() +std::optional ReadBufferFromAzureBlobStorage::tryGetFileSize() { if (!blob_client) blob_client = std::make_unique(blob_container_client->GetBlobClient(path)); - if (file_size.has_value()) - return *file_size; + if (!file_size) + file_size = blob_client->GetProperties().Value.BlobSize; - file_size = blob_client->GetProperties().Value.BlobSize; - return *file_size; + return file_size; } size_t ReadBufferFromAzureBlobStorage::readBigAt(char * to, size_t n, size_t range_begin, const std::function & /*progress_callback*/) const diff --git a/src/Disks/IO/ReadBufferFromAzureBlobStorage.h b/src/Disks/IO/ReadBufferFromAzureBlobStorage.h index d328195cc26..f407f27e099 100644 --- a/src/Disks/IO/ReadBufferFromAzureBlobStorage.h +++ b/src/Disks/IO/ReadBufferFromAzureBlobStorage.h @@ -42,7 +42,7 @@ public: bool supportsRightBoundedReads() const override { return true; } - size_t getFileSize() override; + std::optional tryGetFileSize() override; size_t readBigAt(char * to, size_t n, size_t range_begin, const std::function & progress_callback) const override; diff --git a/src/Disks/IO/ReadBufferFromRemoteFSGather.h b/src/Disks/IO/ReadBufferFromRemoteFSGather.h index e36365a8174..9f1cb681f1a 100644 
--- a/src/Disks/IO/ReadBufferFromRemoteFSGather.h +++ b/src/Disks/IO/ReadBufferFromRemoteFSGather.h @@ -41,7 +41,7 @@ public: void setReadUntilEnd() override { setReadUntilPosition(getFileSize()); } - size_t getFileSize() override { return getTotalSize(blobs_to_read); } + std::optional tryGetFileSize() override { return getTotalSize(blobs_to_read); } size_t getFileOffsetOfBufferEnd() const override { return file_offset_of_buffer_end; } diff --git a/src/IO/Archives/LibArchiveReader.cpp b/src/IO/Archives/LibArchiveReader.cpp index e3fe63fa40d..31bad4d6638 100644 --- a/src/IO/Archives/LibArchiveReader.cpp +++ b/src/IO/Archives/LibArchiveReader.cpp @@ -321,7 +321,7 @@ public: off_t getPosition() override { throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "getPosition not supported when reading from archive"); } String getFileName() const override { return handle.getFileName(); } - size_t getFileSize() override { return handle.getFileInfo().uncompressed_size; } + std::optional tryGetFileSize() override { return handle.getFileInfo().uncompressed_size; } Handle releaseHandle() && { return std::move(handle); } diff --git a/src/IO/Archives/ZipArchiveReader.cpp b/src/IO/Archives/ZipArchiveReader.cpp index 2a9b7a43519..12b07d550c2 100644 --- a/src/IO/Archives/ZipArchiveReader.cpp +++ b/src/IO/Archives/ZipArchiveReader.cpp @@ -317,7 +317,7 @@ public: String getFileName() const override { return handle.getFileName(); } - size_t getFileSize() override { return handle.getFileInfo().uncompressed_size; } + std::optional tryGetFileSize() override { return handle.getFileInfo().uncompressed_size; } /// Releases owned handle to pass it to an enumerator. HandleHolder releaseHandle() && diff --git a/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp b/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp index f8c00d62732..6c4bd09b76f 100644 --- a/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp +++ b/src/IO/AsynchronousReadBufferFromFileDescriptor.cpp @@ -244,7 +244,7 @@ void AsynchronousReadBufferFromFileDescriptor::rewind() file_offset_of_buffer_end = 0; } -size_t AsynchronousReadBufferFromFileDescriptor::getFileSize() +std::optional AsynchronousReadBufferFromFileDescriptor::tryGetFileSize() { return getSizeFromFileDescriptor(fd, getFileName()); } diff --git a/src/IO/AsynchronousReadBufferFromFileDescriptor.h b/src/IO/AsynchronousReadBufferFromFileDescriptor.h index 82659b1aca7..097979fbe00 100644 --- a/src/IO/AsynchronousReadBufferFromFileDescriptor.h +++ b/src/IO/AsynchronousReadBufferFromFileDescriptor.h @@ -68,7 +68,7 @@ public: /// Seek to the beginning, discarding already read data if any. Useful to reread file that changes on every read. 
void rewind(); - size_t getFileSize() override; + std::optional tryGetFileSize() override; size_t getFileOffsetOfBufferEnd() const override { return file_offset_of_buffer_end; } diff --git a/src/IO/ConcatSeekableReadBuffer.h b/src/IO/ConcatSeekableReadBuffer.h index c8c16c5d887..609f0dc25b8 100644 --- a/src/IO/ConcatSeekableReadBuffer.h +++ b/src/IO/ConcatSeekableReadBuffer.h @@ -21,7 +21,7 @@ public: off_t seek(off_t off, int whence) override; off_t getPosition() override; - size_t getFileSize() override { return total_size; } + std::optional tryGetFileSize() override { return total_size; } private: bool nextImpl() override; diff --git a/src/IO/MMapReadBufferFromFileDescriptor.cpp b/src/IO/MMapReadBufferFromFileDescriptor.cpp index f27828f71b2..83dd192de54 100644 --- a/src/IO/MMapReadBufferFromFileDescriptor.cpp +++ b/src/IO/MMapReadBufferFromFileDescriptor.cpp @@ -87,7 +87,7 @@ off_t MMapReadBufferFromFileDescriptor::seek(off_t offset, int whence) return new_pos; } -size_t MMapReadBufferFromFileDescriptor::getFileSize() +std::optional MMapReadBufferFromFileDescriptor::tryGetFileSize() { return getSizeFromFileDescriptor(getFD(), getFileName()); } diff --git a/src/IO/MMapReadBufferFromFileDescriptor.h b/src/IO/MMapReadBufferFromFileDescriptor.h index f774538374a..de44ec3f9d8 100644 --- a/src/IO/MMapReadBufferFromFileDescriptor.h +++ b/src/IO/MMapReadBufferFromFileDescriptor.h @@ -38,7 +38,7 @@ public: int getFD() const; - size_t getFileSize() override; + std::optional tryGetFileSize() override; size_t readBigAt(char * to, size_t n, size_t offset, const std::function &) const override; bool supportsReadAt() override { return true; } diff --git a/src/IO/ParallelReadBuffer.cpp b/src/IO/ParallelReadBuffer.cpp index e6771235a8e..89cff670e37 100644 --- a/src/IO/ParallelReadBuffer.cpp +++ b/src/IO/ParallelReadBuffer.cpp @@ -152,7 +152,7 @@ off_t ParallelReadBuffer::seek(off_t offset, int whence) return offset; } -size_t ParallelReadBuffer::getFileSize() +std::optional ParallelReadBuffer::tryGetFileSize() { return file_size; } diff --git a/src/IO/ParallelReadBuffer.h b/src/IO/ParallelReadBuffer.h index cfeec2b3677..8852472a8bc 100644 --- a/src/IO/ParallelReadBuffer.h +++ b/src/IO/ParallelReadBuffer.h @@ -33,7 +33,7 @@ public: ~ParallelReadBuffer() override { finishAndWait(); } off_t seek(off_t off, int whence) override; - size_t getFileSize() override; + std::optional tryGetFileSize() override; off_t getPosition() override; const SeekableReadBuffer & getReadBuffer() const { return input; } diff --git a/src/IO/ReadBufferFromEmptyFile.h b/src/IO/ReadBufferFromEmptyFile.h index f21f2f507dc..7808ef62fd9 100644 --- a/src/IO/ReadBufferFromEmptyFile.h +++ b/src/IO/ReadBufferFromEmptyFile.h @@ -19,7 +19,8 @@ private: std::string getFileName() const override { return ""; } off_t seek(off_t /*off*/, int /*whence*/) override { return 0; } off_t getPosition() override { return 0; } - size_t getFileSize() override { return 0; } + std::optional tryGetFileSize() override { return 0; } + size_t getFileOffsetOfBufferEnd() const override { return 0; } }; } diff --git a/src/IO/ReadBufferFromEncryptedFile.h b/src/IO/ReadBufferFromEncryptedFile.h index 3626daccb3e..213d242bb91 100644 --- a/src/IO/ReadBufferFromEncryptedFile.h +++ b/src/IO/ReadBufferFromEncryptedFile.h @@ -30,7 +30,7 @@ public: void setReadUntilEnd() override { in->setReadUntilEnd(); } - size_t getFileSize() override { return in->getFileSize(); } + std::optional tryGetFileSize() override { return in->tryGetFileSize(); } private: bool nextImpl() 
override; diff --git a/src/IO/ReadBufferFromFileBase.cpp b/src/IO/ReadBufferFromFileBase.cpp index 4ac3f984f78..b7a1438cff8 100644 --- a/src/IO/ReadBufferFromFileBase.cpp +++ b/src/IO/ReadBufferFromFileBase.cpp @@ -5,11 +5,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int UNKNOWN_FILE_SIZE; -} - ReadBufferFromFileBase::ReadBufferFromFileBase() : BufferWithOwnMemory(0) { } @@ -26,11 +21,9 @@ ReadBufferFromFileBase::ReadBufferFromFileBase( ReadBufferFromFileBase::~ReadBufferFromFileBase() = default; -size_t ReadBufferFromFileBase::getFileSize() +std::optional ReadBufferFromFileBase::tryGetFileSize() { - if (file_size) - return *file_size; - throw Exception(ErrorCodes::UNKNOWN_FILE_SIZE, "Cannot find out file size for read buffer"); + return file_size; } void ReadBufferFromFileBase::setProgressCallback(ContextPtr context) diff --git a/src/IO/ReadBufferFromFileBase.h b/src/IO/ReadBufferFromFileBase.h index 9870d8bbe43..c98dcd5a93e 100644 --- a/src/IO/ReadBufferFromFileBase.h +++ b/src/IO/ReadBufferFromFileBase.h @@ -50,7 +50,7 @@ public: clock_type = clock_type_; } - size_t getFileSize() override; + std::optional tryGetFileSize() override; void setProgressCallback(ContextPtr context); diff --git a/src/IO/ReadBufferFromFileDecorator.cpp b/src/IO/ReadBufferFromFileDecorator.cpp index 9ac0fb4e475..8a6468b9bd0 100644 --- a/src/IO/ReadBufferFromFileDecorator.cpp +++ b/src/IO/ReadBufferFromFileDecorator.cpp @@ -52,9 +52,9 @@ bool ReadBufferFromFileDecorator::nextImpl() return result; } -size_t ReadBufferFromFileDecorator::getFileSize() +std::optional ReadBufferFromFileDecorator::tryGetFileSize() { - return getFileSizeFromReadBuffer(*impl); + return tryGetFileSizeFromReadBuffer(*impl); } } diff --git a/src/IO/ReadBufferFromFileDecorator.h b/src/IO/ReadBufferFromFileDecorator.h index 6e62c7f741b..69f029c5cf7 100644 --- a/src/IO/ReadBufferFromFileDecorator.h +++ b/src/IO/ReadBufferFromFileDecorator.h @@ -27,7 +27,7 @@ public: ReadBuffer & getWrappedReadBuffer() { return *impl; } - size_t getFileSize() override; + std::optional tryGetFileSize() override; protected: std::unique_ptr impl; diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index 76a80f145e7..51a1a5d8d93 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -253,7 +253,7 @@ void ReadBufferFromFileDescriptor::rewind() file_offset_of_buffer_end = 0; } -size_t ReadBufferFromFileDescriptor::getFileSize() +std::optional ReadBufferFromFileDescriptor::tryGetFileSize() { return getSizeFromFileDescriptor(fd, getFileName()); } diff --git a/src/IO/ReadBufferFromFileDescriptor.h b/src/IO/ReadBufferFromFileDescriptor.h index db256ef91c7..6083e744c95 100644 --- a/src/IO/ReadBufferFromFileDescriptor.h +++ b/src/IO/ReadBufferFromFileDescriptor.h @@ -69,7 +69,7 @@ public: /// Seek to the beginning, discarding already read data if any. Useful to reread file that changes on every read. 
void rewind(); - size_t getFileSize() override; + std::optional tryGetFileSize() override; bool checkIfActuallySeekable() override; diff --git a/src/IO/ReadBufferFromS3.cpp b/src/IO/ReadBufferFromS3.cpp index bf97e959ee0..6972bae64b4 100644 --- a/src/IO/ReadBufferFromS3.cpp +++ b/src/IO/ReadBufferFromS3.cpp @@ -311,15 +311,15 @@ off_t ReadBufferFromS3::seek(off_t offset_, int whence) return offset; } -size_t ReadBufferFromS3::getFileSize() +std::optional ReadBufferFromS3::tryGetFileSize() { if (file_size) - return *file_size; + return file_size; auto object_size = S3::getObjectSize(*client_ptr, bucket, key, version_id); file_size = object_size; - return *file_size; + return file_size; } off_t ReadBufferFromS3::getPosition() diff --git a/src/IO/ReadBufferFromS3.h b/src/IO/ReadBufferFromS3.h index c6625c2d632..ff04f78ce7b 100644 --- a/src/IO/ReadBufferFromS3.h +++ b/src/IO/ReadBufferFromS3.h @@ -63,7 +63,7 @@ public: off_t getPosition() override; - size_t getFileSize() override; + std::optional tryGetFileSize() override; void setReadUntilPosition(size_t position) override; void setReadUntilEnd() override; diff --git a/src/IO/ReadWriteBufferFromHTTP.cpp b/src/IO/ReadWriteBufferFromHTTP.cpp index b753e66da48..4b2e6580f9b 100644 --- a/src/IO/ReadWriteBufferFromHTTP.cpp +++ b/src/IO/ReadWriteBufferFromHTTP.cpp @@ -72,7 +72,6 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; extern const int CANNOT_SEEK_THROUGH_FILE; extern const int SEEK_POSITION_OUT_OF_BOUND; - extern const int UNKNOWN_FILE_SIZE; } std::unique_ptr ReadWriteBufferFromHTTP::CallResult::transformToReadBuffer(size_t buf_size) && @@ -121,15 +120,33 @@ void ReadWriteBufferFromHTTP::prepareRequest(Poco::Net::HTTPRequest & request, s credentials.authenticate(request); } -size_t ReadWriteBufferFromHTTP::getFileSize() +std::optional ReadWriteBufferFromHTTP::tryGetFileSize() { if (!file_info) - file_info = getFileInfo(); + { + try + { + file_info = getFileInfo(); + } + catch (const HTTPException &) + { + return std::nullopt; + } + catch (const NetException &) + { + return std::nullopt; + } + catch (const Poco::Net::NetException &) + { + return std::nullopt; + } + catch (const Poco::IOException &) + { + return std::nullopt; + } + } - if (file_info->file_size) - return *file_info->file_size; - - throw Exception(ErrorCodes::UNKNOWN_FILE_SIZE, "Cannot find out file size for: {}", initial_uri.toString()); + return file_info->file_size; } bool ReadWriteBufferFromHTTP::supportsReadAt() @@ -311,12 +328,12 @@ void ReadWriteBufferFromHTTP::doWithRetries(std::function && callable, error_message = e.displayText(); exception = std::current_exception(); } - catch (DB::NetException & e) + catch (NetException & e) { error_message = e.displayText(); exception = std::current_exception(); } - catch (DB::HTTPException & e) + catch (HTTPException & e) { if (!isRetriableError(e.getHTTPStatus())) is_retriable = false; @@ -324,7 +341,7 @@ void ReadWriteBufferFromHTTP::doWithRetries(std::function && callable, error_message = e.displayText(); exception = std::current_exception(); } - catch (DB::Exception & e) + catch (Exception & e) { is_retriable = false; @@ -683,7 +700,19 @@ std::optional ReadWriteBufferFromHTTP::tryGetLastModificationTime() { file_info = getFileInfo(); } - catch (...) 
+ catch (const HTTPException &) + { + return std::nullopt; + } + catch (const NetException &) + { + return std::nullopt; + } + catch (const Poco::Net::NetException &) + { + return std::nullopt; + } + catch (const Poco::IOException &) { return std::nullopt; } @@ -704,7 +733,7 @@ ReadWriteBufferFromHTTP::HTTPFileInfo ReadWriteBufferFromHTTP::getFileInfo() { getHeadResponse(response); } - catch (HTTPException & e) + catch (const HTTPException & e) { /// Maybe the web server doesn't support HEAD requests. /// E.g. webhdfs reports status 400. diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index f496fe3ddcd..1c9bda53008 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -118,7 +118,7 @@ private: std::unique_ptr initialize(); - size_t getFileSize() override; + std::optional tryGetFileSize() override; bool supportsReadAt() override; diff --git a/src/IO/WithFileSize.cpp b/src/IO/WithFileSize.cpp index 3660d962c08..cbbcab83de2 100644 --- a/src/IO/WithFileSize.cpp +++ b/src/IO/WithFileSize.cpp @@ -13,41 +13,47 @@ namespace ErrorCodes extern const int UNKNOWN_FILE_SIZE; } -template -static size_t getFileSize(T & in) +size_t WithFileSize::getFileSize() { - if (auto * with_file_size = dynamic_cast(&in)) - { - return with_file_size->getFileSize(); - } + if (auto maybe_size = tryGetFileSize()) + return *maybe_size; throw Exception(ErrorCodes::UNKNOWN_FILE_SIZE, "Cannot find out file size"); } -size_t getFileSizeFromReadBuffer(ReadBuffer & in) +template +static std::optional tryGetFileSize(T & in) { - if (auto * delegate = dynamic_cast(&in)) - { - return getFileSize(delegate->getWrappedReadBuffer()); - } - else if (auto * compressed = dynamic_cast(&in)) - { - return getFileSize(compressed->getWrappedReadBuffer()); - } + if (auto * with_file_size = dynamic_cast(&in)) + return with_file_size->tryGetFileSize(); - return getFileSize(in); + return std::nullopt; +} + +template +static size_t getFileSize(T & in) +{ + if (auto maybe_size = tryGetFileSize(in)) + return *maybe_size; + + throw Exception(ErrorCodes::UNKNOWN_FILE_SIZE, "Cannot find out file size"); } std::optional tryGetFileSizeFromReadBuffer(ReadBuffer & in) { - try - { - return getFileSizeFromReadBuffer(in); - } - catch (...) 
- { - return std::nullopt; - } + if (auto * delegate = dynamic_cast(&in)) + return tryGetFileSize(delegate->getWrappedReadBuffer()); + else if (auto * compressed = dynamic_cast(&in)) + return tryGetFileSize(compressed->getWrappedReadBuffer()); + return tryGetFileSize(in); +} + +size_t getFileSizeFromReadBuffer(ReadBuffer & in) +{ + if (auto maybe_size = tryGetFileSizeFromReadBuffer(in)) + return *maybe_size; + + throw Exception(ErrorCodes::UNKNOWN_FILE_SIZE, "Cannot find out file size"); } bool isBufferWithFileSize(const ReadBuffer & in) diff --git a/src/IO/WithFileSize.h b/src/IO/WithFileSize.h index 0ae3af98ea0..e5dc383fab0 100644 --- a/src/IO/WithFileSize.h +++ b/src/IO/WithFileSize.h @@ -10,15 +10,16 @@ class ReadBuffer; class WithFileSize { public: - virtual size_t getFileSize() = 0; + /// Returns nullopt if couldn't find out file size; + virtual std::optional tryGetFileSize() = 0; virtual ~WithFileSize() = default; + + size_t getFileSize(); }; bool isBufferWithFileSize(const ReadBuffer & in); size_t getFileSizeFromReadBuffer(ReadBuffer & in); - -/// Return nullopt if couldn't find out file size; std::optional tryGetFileSizeFromReadBuffer(ReadBuffer & in); size_t getDataOffsetMaybeCompressed(const ReadBuffer & in); diff --git a/src/Interpreters/SubstituteColumnOptimizer.cpp b/src/Interpreters/SubstituteColumnOptimizer.cpp index c4aef89fed2..925ded15857 100644 --- a/src/Interpreters/SubstituteColumnOptimizer.cpp +++ b/src/Interpreters/SubstituteColumnOptimizer.cpp @@ -13,10 +13,6 @@ namespace DB { -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} namespace { @@ -237,16 +233,8 @@ void SubstituteColumnOptimizer::perform() const auto & compare_graph = metadata_snapshot->getConstraints().getGraph(); - // Fill aliases - if (select_query->select()) - { - auto * list = select_query->refSelect()->as(); - if (!list) - throw Exception(ErrorCodes::LOGICAL_ERROR, "List of selected columns must be ASTExpressionList"); - - for (ASTPtr & ast : list->children) - ast->setAlias(ast->getAliasOrColumnName()); - } + if (compare_graph.getNumOfComponents() == 0) + return; auto run_for_all = [&](const auto func) { diff --git a/src/Interpreters/SubstituteColumnOptimizer.h b/src/Interpreters/SubstituteColumnOptimizer.h index 28aa8be0801..ecb65cd7707 100644 --- a/src/Interpreters/SubstituteColumnOptimizer.h +++ b/src/Interpreters/SubstituteColumnOptimizer.h @@ -15,7 +15,7 @@ struct StorageInMemoryMetadata; using StorageMetadataPtr = std::shared_ptr; /// Optimizer that tries to replace columns to equal columns (according to constraints) -/// with lower size (according to compressed and uncomressed size). +/// with lower size (according to compressed and uncompressed sizes). class SubstituteColumnOptimizer { public: diff --git a/src/Parsers/IAST.h b/src/Parsers/IAST.h index ee70fed0f07..4f8edac8597 100644 --- a/src/Parsers/IAST.h +++ b/src/Parsers/IAST.h @@ -66,7 +66,7 @@ public: /** Set the alias. */ virtual void setAlias(const String & /*to*/) { - throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't set alias of {}", getColumnName()); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't set alias of {} of {}", getColumnName(), getID()); } /** Get the text that identifies this element. 
*/ diff --git a/src/Storages/Cache/ExternalDataSourceCache.h b/src/Storages/Cache/ExternalDataSourceCache.h index 4c8c7974005..3b4eff28307 100644 --- a/src/Storages/Cache/ExternalDataSourceCache.h +++ b/src/Storages/Cache/ExternalDataSourceCache.h @@ -53,7 +53,7 @@ public: bool nextImpl() override; off_t seek(off_t off, int whence) override; off_t getPosition() override; - size_t getFileSize() override { return remote_file_size; } + std::optional tryGetFileSize() override { return remote_file_size; } private: std::unique_ptr local_file_holder; diff --git a/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.cpp b/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.cpp index 21df7e35284..3bbc4e8a2ea 100644 --- a/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.cpp +++ b/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.cpp @@ -91,9 +91,9 @@ void AsynchronousReadBufferFromHDFS::prefetch(Priority priority) } -size_t AsynchronousReadBufferFromHDFS::getFileSize() +std::optional AsynchronousReadBufferFromHDFS::tryGetFileSize() { - return impl->getFileSize(); + return impl->tryGetFileSize(); } String AsynchronousReadBufferFromHDFS::getFileName() const diff --git a/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.h b/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.h index 5aef92315a4..9846d74453b 100644 --- a/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.h +++ b/src/Storages/ObjectStorage/HDFS/AsynchronousReadBufferFromHDFS.h @@ -35,7 +35,7 @@ public: void prefetch(Priority priority) override; - size_t getFileSize() override; + std::optional tryGetFileSize() override; String getFileName() const override; diff --git a/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.cpp b/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.cpp index be339d021dc..bf6f9db722c 100644 --- a/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.cpp +++ b/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.cpp @@ -31,7 +31,7 @@ namespace ErrorCodes } -struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory +struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory, public WithFileSize { String hdfs_uri; String hdfs_file_path; @@ -90,7 +90,7 @@ struct ReadBufferFromHDFS::ReadBufferFromHDFSImpl : public BufferWithOwnMemory tryGetFileSize() override { return file_size; } @@ -191,9 +191,9 @@ ReadBufferFromHDFS::ReadBufferFromHDFS( ReadBufferFromHDFS::~ReadBufferFromHDFS() = default; -size_t ReadBufferFromHDFS::getFileSize() +std::optional ReadBufferFromHDFS::tryGetFileSize() { - return impl->getFileSize(); + return impl->tryGetFileSize(); } bool ReadBufferFromHDFS::nextImpl() diff --git a/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.h b/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.h index d9671e7e445..5363f07967b 100644 --- a/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.h +++ b/src/Storages/ObjectStorage/HDFS/ReadBufferFromHDFS.h @@ -40,7 +40,7 @@ public: off_t getPosition() override; - size_t getFileSize() override; + std::optional tryGetFileSize() override; size_t getFileOffsetOfBufferEnd() const override; diff --git a/tests/clickhouse-test b/tests/clickhouse-test index a29c786e998..55ce88917f4 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -779,7 +779,7 @@ class SettingsRandomizer: "filesystem_prefetch_step_bytes": lambda: random.choice( [0, "100Mi"] ), # 0 means 'auto' - # "compile_expressions": lambda: random.randint(0, 1), - this setting has a bug: 
https://github.com/ClickHouse/ClickHouse/issues/51264 + "compile_expressions": lambda: random.randint(0, 1), "compile_aggregate_expressions": lambda: random.randint(0, 1), "compile_sort_description": lambda: random.randint(0, 1), "merge_tree_coarse_index_granularity": lambda: random.randint(2, 32), diff --git a/tests/queries/0_stateless/01622_constraints_simple_optimization.reference b/tests/queries/0_stateless/01622_constraints_simple_optimization.reference index d267df2237f..84c872856ff 100644 --- a/tests/queries/0_stateless/01622_constraints_simple_optimization.reference +++ b/tests/queries/0_stateless/01622_constraints_simple_optimization.reference @@ -32,10 +32,10 @@ 1 1 0 -SELECT count() AS `count()` +SELECT count() FROM constraint_test_constants WHERE (b > 100) OR (c > 100) -SELECT count() AS `count()` +SELECT count() FROM constraint_test_constants WHERE c > 100 QUERY id: 0 @@ -53,7 +53,7 @@ QUERY id: 0 COLUMN id: 6, column_name: c, result_type: Int64, source_id: 3 CONSTANT id: 7, constant_value: UInt64_100, constant_value_type: UInt8 SETTINGS allow_experimental_analyzer=1 -SELECT count() AS `count()` +SELECT count() FROM constraint_test_constants WHERE c > 100 QUERY id: 0 @@ -71,7 +71,7 @@ QUERY id: 0 COLUMN id: 6, column_name: c, result_type: Int64, source_id: 3 CONSTANT id: 7, constant_value: UInt64_100, constant_value_type: UInt8 SETTINGS allow_experimental_analyzer=1 -SELECT count() AS `count()` +SELECT count() FROM constraint_test_constants QUERY id: 0 PROJECTION COLUMNS diff --git a/tests/queries/0_stateless/01623_constraints_column_swap.reference b/tests/queries/0_stateless/01623_constraints_column_swap.reference index 555a4c93f70..d504a86365b 100644 --- a/tests/queries/0_stateless/01623_constraints_column_swap.reference +++ b/tests/queries/0_stateless/01623_constraints_column_swap.reference @@ -1,6 +1,6 @@ SELECT - (b AS `cityHash64(a)`) + 10 AS `plus(cityHash64(a), 10)`, - (b AS b) + 3 AS `plus(b, 3)` + (b AS `cityHash64(a)`) + 10, + (b AS b) + 3 FROM column_swap_test_test WHERE b = 1 QUERY id: 0 @@ -59,8 +59,8 @@ QUERY id: 0 CONSTANT id: 14, constant_value: UInt64_1, constant_value_type: UInt8 SETTINGS allow_experimental_analyzer=1 SELECT - (b AS `cityHash64(a)`) + 10 AS `plus(cityHash64(a), 10)`, - (b AS b) + 3 AS `plus(b, 3)` + (b AS `cityHash64(a)`) + 10, + (b AS b) + 3 FROM column_swap_test_test WHERE b = 0 QUERY id: 0 @@ -89,8 +89,8 @@ QUERY id: 0 CONSTANT id: 14, constant_value: UInt64_0, constant_value_type: UInt8 SETTINGS allow_experimental_analyzer=1 SELECT - (b AS `cityHash64(a)`) + 10 AS `plus(cityHash64(a), 10)`, - (b AS b) + 3 AS `plus(b, 3)` + (b AS `cityHash64(a)`) + 10, + (b AS b) + 3 FROM column_swap_test_test WHERE b = 0 QUERY id: 0 @@ -119,8 +119,8 @@ QUERY id: 0 CONSTANT id: 14, constant_value: UInt64_0, constant_value_type: UInt8 SETTINGS allow_experimental_analyzer=1 SELECT - (b AS `cityHash64(a)`) + 10 AS `plus(cityHash64(a), 10)`, - (b AS b) + 3 AS `plus(b, 3)` + (b AS `cityHash64(a)`) + 10, + (b AS b) + 3 FROM column_swap_test_test WHERE b = 1 QUERY id: 0 @@ -148,7 +148,7 @@ QUERY id: 0 COLUMN id: 13, column_name: b, result_type: UInt64, source_id: 5 CONSTANT id: 14, constant_value: UInt64_1, constant_value_type: UInt8 SETTINGS allow_experimental_analyzer=1 -SELECT (b AS `cityHash64(a)`) + 10 AS `plus(cityHash64(a), 10)` +SELECT (b AS `cityHash64(a)`) + 10 FROM column_swap_test_test WHERE b = 0 QUERY id: 0 @@ -171,8 +171,8 @@ QUERY id: 0 CONSTANT id: 10, constant_value: UInt64_0, constant_value_type: UInt8 SETTINGS allow_experimental_analyzer=1 
SELECT - (cityHash64(a) AS `cityHash64(a)`) + 10 AS `plus(cityHash64(a), 10)`, - a AS a + (cityHash64(a) AS `cityHash64(a)`) + 10, + a FROM column_swap_test_test WHERE cityHash64(a) = 0 QUERY id: 0 @@ -203,8 +203,8 @@ QUERY id: 0 CONSTANT id: 15, constant_value: UInt64_0, constant_value_type: UInt8 SETTINGS allow_experimental_analyzer=1 SELECT - (cityHash64(a) AS b) + 10 AS `plus(b, 10)`, - a AS a + (cityHash64(a) AS b) + 10, + a FROM column_swap_test_test WHERE cityHash64(a) = 0 QUERY id: 0 diff --git a/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh b/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh index 2f512697868..1027f18fc83 100755 --- a/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh +++ b/tests/queries/0_stateless/02481_parquet_list_monotonically_increasing_offsets.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash -# Tags: no-ubsan, no-fasttest, no-tsan +# Tags: no-ubsan, no-fasttest, no-tsan, no-msan, no-asan # It is too slow under TSan +# It eats too much memory under ASan or MSan CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh diff --git a/tests/queries/0_stateless/02982_aggregation_states_destruction.reference b/tests/queries/0_stateless/02982_aggregation_states_destruction.reference deleted file mode 100644 index d00491fd7e5..00000000000 --- a/tests/queries/0_stateless/02982_aggregation_states_destruction.reference +++ /dev/null @@ -1 +0,0 @@ -1 diff --git a/tests/queries/0_stateless/02982_aggregation_states_destruction.sh b/tests/queries/0_stateless/02982_aggregation_states_destruction.sh deleted file mode 100755 index 84183606d48..00000000000 --- a/tests/queries/0_stateless/02982_aggregation_states_destruction.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash -# Tags: no-random-settings - -CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -# shellcheck source=../shell_config.sh -. 
"$CURDIR"/../shell_config.sh - - -query_id="02982_$RANDOM" -$CLICKHOUSE_CLIENT --query_id $query_id --log_query_threads 1 --query="select number, uniq(number) from numbers_mt(1e7) group by number limit 100 format Null;" - -$CLICKHOUSE_CLIENT -q "system flush logs;" - -$CLICKHOUSE_CLIENT -q "select count() > 0 from system.query_thread_log where query_id = '$query_id' and current_database = currentDatabase() and thread_name = 'AggregDestruct';" diff --git a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh index 723b11ad620..0c585d36348 100755 --- a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh +++ b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh @@ -6,8 +6,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function test { - $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_binary_encode_types_in_binary_format=1 -q "select $1 as value format RowBinaryWithNamesAndTypes" | $CLICKHOUSE_LOCAL --input-format RowBinaryWithNamesAndTypes --input_format_binary_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" - $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_native_encode_types_in_binary_format=1 -q "select $1 as value format Native" | $CLICKHOUSE_LOCAL --input-format Native --input_format_native_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" + $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_binary_encode_types_in_binary_format=1 -q "select $1 as value format RowBinaryWithNamesAndTypes" | $CLICKHOUSE_LOCAL --input-format RowBinaryWithNamesAndTypes --input_format_binary_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" + $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_native_encode_types_in_binary_format=1 -q "select $1 as value format Native" | $CLICKHOUSE_LOCAL --input-format Native --input_format_native_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" } test "materialize(42)::UInt8" diff --git a/tests/queries/0_stateless/03201_variant_null_map_subcolumn.sh b/tests/queries/0_stateless/03201_variant_null_map_subcolumn.sh index 8231691e184..57dc36d8a8f 100755 --- a/tests/queries/0_stateless/03201_variant_null_map_subcolumn.sh +++ b/tests/queries/0_stateless/03201_variant_null_map_subcolumn.sh @@ -17,8 +17,7 @@ function test() $CH_CLIENT -q "select v.UInt64.null, v.\`Array(Variant(String, UInt64))\`.null, v.\`Array(Variant(String, UInt64))\`.size0, v.\`Array(Variant(String, UInt64))\`.UInt64.null from test order by id" $CH_CLIENT -q "select v.\`Array(Variant(String, UInt64))\`.null, v.\`Array(Variant(String, UInt64))\`.size0, v.\`Array(Variant(String, UInt64))\`.UInt64.null, v.\`Array(Variant(String, UInt64))\`.String.null from test order by id" $CH_CLIENT -q "select id from test where v.UInt64 is null order by id" - - $CH_CLIENT -q "insert into test select number, multiIf(number % 3 == 2, NULL, number % 3 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10))) from numbers(1000000) settings min_insert_block_size_rows=100000" + $CH_CLIENT -q "insert into test select number, 
multiIf(number % 3 == 2, NULL, number % 3 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10))) from numbers(250000) settings min_insert_block_size_rows=100000, min_insert_block_size_bytes=0" $CH_CLIENT -q "select v, v.UInt64.null, v.\`Array(Variant(String, UInt64))\`.null, v.\`Array(Variant(String, UInt64))\`.size0, v.\`Array(Variant(String, UInt64))\`.UInt64.null from test order by id format Null" $CH_CLIENT -q "select v.UInt64.null, v.\`Array(Variant(String, UInt64))\`.null, v.\`Array(Variant(String, UInt64))\`.size0, v.\`Array(Variant(String, UInt64))\`.UInt64.null from test order by id format Null" $CH_CLIENT -q "select v.\`Array(Variant(String, UInt64))\`.null, v.\`Array(Variant(String, UInt64))\`.size0, v.\`Array(Variant(String, UInt64))\`.UInt64.null, v.\`Array(Variant(String, UInt64))\`.String.null from test order by id format Null" @@ -41,4 +40,3 @@ echo "MergeTree wide" $CH_CLIENT -q "create table test (id UInt64, v Variant(UInt64, Array(Variant(String, UInt64)))) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;" test $CH_CLIENT -q "drop table test;" - diff --git a/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.reference b/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.reference new file mode 100644 index 00000000000..11277a62b06 --- /dev/null +++ b/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.reference @@ -0,0 +1 @@ +Hello world diff --git a/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.sh b/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.sh new file mode 100755 index 00000000000..00efd1f4591 --- /dev/null +++ b/tests/queries/0_stateless/03206_no_exceptions_clickhouse_local.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +CLICKHOUSE_TERMINATE_ON_ANY_EXCEPTION=1 ${CLICKHOUSE_LOCAL} --query "SELECT * FROM table" --input-format CSV <<<"Hello, world" diff --git a/tests/queries/0_stateless/03212_optimize_with_constraints_logical_error.reference b/tests/queries/0_stateless/03212_optimize_with_constraints_logical_error.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03212_optimize_with_constraints_logical_error.sql b/tests/queries/0_stateless/03212_optimize_with_constraints_logical_error.sql new file mode 100644 index 00000000000..16a27af986b --- /dev/null +++ b/tests/queries/0_stateless/03212_optimize_with_constraints_logical_error.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +EXPLAIN SYNTAX +WITH 1 AS compound_value SELECT * APPLY (x -> compound_value.*) +FROM test_table WHERE x > 0 +SETTINGS convert_query_to_cnf = true, optimize_using_constraints = true, optimize_substitute_columns = true; -- { serverError UNKNOWN_IDENTIFIER } + +DROP TABLE test_table;
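
A minimal, self-contained sketch of the tryGetFileSize()/getFileSize() split that the buffer changes above converge on: the virtual hook now returns std::optional<size_t> and the throwing accessor becomes a non-virtual convenience wrapper. This is illustrative only, not ClickHouse code: ExampleReadBuffer, the use of std::runtime_error in place of DB::Exception, and the hard-coded probe result are assumptions made to keep the sketch runnable.

#include <cstddef>
#include <optional>
#include <stdexcept>

/// Simplified model of the refactored interface: subclasses implement the
/// non-throwing probe, callers that require a size go through the wrapper.
class WithFileSize
{
public:
    virtual ~WithFileSize() = default;

    /// Returns std::nullopt if the size cannot be determined
    /// (e.g. a failed HEAD request or stat call).
    virtual std::optional<size_t> tryGetFileSize() = 0;

    /// Throwing convenience wrapper; gives callers a single throw site.
    size_t getFileSize()
    {
        if (auto maybe_size = tryGetFileSize())
            return *maybe_size;
        throw std::runtime_error("Cannot find out file size");
    }
};

/// Hypothetical buffer that learns its size lazily from a probe that may fail.
class ExampleReadBuffer : public WithFileSize
{
public:
    std::optional<size_t> tryGetFileSize() override
    {
        if (!file_size)
            file_size = 42; // stand-in for a real size lookup that could fail
        return file_size;
    }

private:
    std::optional<size_t> file_size;
};

int main()
{
    ExampleReadBuffer buf;
    return buf.getFileSize() == 42 ? 0 : 1;
}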