diff --git a/base/base/ReplxxLineReader.cpp b/base/base/ReplxxLineReader.cpp index 38867faf5d5..3c2ac1f8891 100644 --- a/base/base/ReplxxLineReader.cpp +++ b/base/base/ReplxxLineReader.cpp @@ -25,6 +25,16 @@ void trim(String & s) s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end()); } +std::string getEditor() +{ + const char * editor = std::getenv("EDITOR"); + + if (!editor || !*editor) + editor = "vim"; + + return editor; +} + /// Copied from replxx::src/util.cxx::now_ms_str() under the terms of 3-clause BSD license of Replxx. /// Copyright (c) 2017-2018, Marcin Konarski (amok at codestation.org) /// Copyright (c) 2010, Salvatore Sanfilippo (antirez at gmail dot com) @@ -123,6 +133,7 @@ ReplxxLineReader::ReplxxLineReader( Patterns delimiters_, replxx::Replxx::highlighter_callback_t highlighter_) : LineReader(history_file_path_, multiline_, std::move(extenders_), std::move(delimiters_)), highlighter(std::move(highlighter_)) + , editor(getEditor()) { using namespace std::placeholders; using Replxx = replxx::Replxx; @@ -236,14 +247,13 @@ void ReplxxLineReader::addToHistory(const String & line) rx.print("Unlock of history file failed: %s\n", errnoToString(errno).c_str()); } -int ReplxxLineReader::execute(const std::string & command) +/// See comments in ShellCommand::executeImpl() +/// (for the vfork via dlsym()) +int ReplxxLineReader::executeEditor(const std::string & path) { - std::vector argv0("sh", &("sh"[3])); - std::vector argv1("-c", &("-c"[3])); - std::vector argv2(command.data(), command.data() + command.size() + 1); - - const char * filename = "/bin/sh"; - char * const argv[] = {argv0.data(), argv1.data(), argv2.data(), nullptr}; + std::vector argv0(editor.data(), editor.data() + editor.size() + 1); + std::vector argv1(path.data(), path.data() + path.size() + 1); + char * const argv[] = {argv0.data(), argv1.data(), nullptr}; static void * real_vfork = dlsym(RTLD_DEFAULT, "vfork"); if (!real_vfork) @@ -260,6 +270,7 @@ int ReplxxLineReader::execute(const std::string & command) return -1; } + /// Child if (0 == pid) { sigset_t mask; @@ -267,16 +278,26 @@ int ReplxxLineReader::execute(const std::string & command) sigprocmask(0, nullptr, &mask); sigprocmask(SIG_UNBLOCK, &mask, nullptr); - execv(filename, argv); + execvp(editor.c_str(), argv); + rx.print("Cannot execute %s: %s\n", editor.c_str(), errnoToString(errno).c_str()); _exit(-1); } int status = 0; - if (-1 == waitpid(pid, &status, 0)) + do { - rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str()); - return -1; - } + int exited_pid = waitpid(pid, &status, 0); + if (exited_pid == -1) + { + if (errno == EINTR) + continue; + + rx.print("Cannot waitpid: %s\n", errnoToString(errno).c_str()); + return -1; + } + else + break; + } while (true); return status; } @@ -290,10 +311,6 @@ void ReplxxLineReader::openEditor() return; } - const char * editor = std::getenv("EDITOR"); - if (!editor || !*editor) - editor = "vim"; - replxx::Replxx::State state(rx.get_state()); size_t bytes_written = 0; @@ -316,7 +333,7 @@ void ReplxxLineReader::openEditor() return; } - if (0 == execute(fmt::format("{} {}", editor, filename))) + if (0 == executeEditor(filename)) { try { diff --git a/base/base/ReplxxLineReader.h b/base/base/ReplxxLineReader.h index 9aa32a1e26d..d4cc7de1e7a 100644 --- a/base/base/ReplxxLineReader.h +++ b/base/base/ReplxxLineReader.h @@ -22,7 +22,7 @@ public: private: InputStatus readOneLine(const String & prompt) override; void addToHistory(const String & line) override; - int 
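Note on the change above: executeEditor() now launches $EDITOR directly via execvp() instead of shelling out through "/bin/sh -c", and the parent retries waitpid() when it is interrupted by a signal. Below is a minimal standalone sketch of the same launch-and-wait pattern, assuming plain fork() rather than the vfork-via-dlsym trick the patch borrows from ShellCommand::executeImpl(); editFile() is a hypothetical name.

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cerrno>
#include <sys/wait.h>
#include <unistd.h>

/// Open `path` in the user's editor and return the editor's exit status,
/// or -1 on failure. Falls back to "vim" when $EDITOR is unset or empty,
/// mirroring getEditor() in the patch.
int editFile(const char * path)
{
    const char * editor = std::getenv("EDITOR");
    if (!editor || !*editor)
        editor = "vim";

    pid_t pid = fork();
    if (pid == -1)
        return -1;

    if (pid == 0)
    {
        /// Child: execvp() searches PATH, so $EDITOR may be a bare command name.
        char * const argv[] = {const_cast<char *>(editor), const_cast<char *>(path), nullptr};
        execvp(editor, argv);
        /// Only reached if exec failed.
        std::fprintf(stderr, "Cannot execute %s: %s\n", editor, std::strerror(errno));
        _exit(-1);
    }

    /// Parent: a blocking waitpid() returns early with EINTR whenever any
    /// signal handler fires, so retry — exactly what the do/while in the patch does.
    int status = 0;
    while (waitpid(pid, &status, 0) == -1)
    {
        if (errno != EINTR)
            return -1;
    }
    return status;
}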
execute(const std::string & command); + int executeEditor(const std::string & path); void openEditor(); replxx::Replxx rx; @@ -31,4 +31,6 @@ private: // used to call flock() to synchronize multiple clients using same history file int history_file_fd = -1; bool bracketed_paste_enabled = false; + + std::string editor; }; diff --git a/docker/test/stress/run.sh b/docker/test/stress/run.sh index 04845f2a4d1..ed215bd4273 100755 --- a/docker/test/stress/run.sh +++ b/docker/test/stress/run.sh @@ -37,6 +37,12 @@ function configure() # install test configs /usr/share/clickhouse-test/config/install.sh + # avoid too slow startup + sudo cat /etc/clickhouse-server/config.d/keeper_port.xml | sed "s|100000|10000|" > /etc/clickhouse-server/config.d/keeper_port.xml.tmp + sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml + sudo chown clickhouse /etc/clickhouse-server/config.d/keeper_port.xml + sudo chgrp clickhouse /etc/clickhouse-server/config.d/keeper_port.xml + # for clickhouse-server (via service) echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment # for clickhouse-client diff --git a/docs/en/introduction/adopters.md b/docs/en/introduction/adopters.md index e5d010b84ce..61e29ee1840 100644 --- a/docs/en/introduction/adopters.md +++ b/docs/en/introduction/adopters.md @@ -108,6 +108,7 @@ toc_title: Adopters | Panelbear | Analytics | Monitoring and Analytics | — | — | [Tech Stack, November 2020](https://panelbear.com/blog/tech-stack/) | | Percent 百分点 | Analytics | Main Product | — | — | [Slides in Chinese, June 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup24/4.%20ClickHouse万亿数据双中心的设计与实践%20.pdf) | | Percona | Performance analysis | Percona Monitoring and Management | — | — | [Official website, Mar 2020](https://www.percona.com/blog/2020/03/30/advanced-query-analysis-in-percona-monitoring-and-management-with-direct-clickhouse-access/) | +| Piwik PRO | Web Analytics | Main Product | — | — | [Official website, Dec 2018](https://piwik.pro/blog/piwik-pro-clickhouse-faster-efficient-reports/) | | Plausible | Analytics | Main Product | — | — | [Blog post, June 2020](https://twitter.com/PlausibleHQ/status/1273889629087969280) | | PostHog | Product Analytics | Main Product | — | — | [Release Notes, Oct 2020](https://posthog.com/blog/the-posthog-array-1-15-0) | | Postmates | Delivery | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=188) | @@ -175,5 +176,6 @@ toc_title: Adopters | ДомКлик | Real Estate | — | — | — | [Article in Russian, October 2021](https://habr.com/ru/company/domclick/blog/585936/) | | Deepl | Machine Learning | — | — | — | [Video, October 2021](https://www.youtube.com/watch?v=WIYJiPwxXdM&t=1182s) | | Vercel | Traffic and Performance Analytics | — | — | — | Direct reference, October 2021 | +| YourAnalytics | Web Analytics | — | — | — | [Tweet, Nov 2021](https://twitter.com/mikenikles/status/1460860140249235461) | [Original article](https://clickhouse.com/docs/en/introduction/adopters/) diff --git a/docs/zh/sql-reference/data-types/special-data-types/set.md b/docs/zh/sql-reference/data-types/special-data-types/set.md index f814ab04325..b66f25cef18 100644 --- a/docs/zh/sql-reference/data-types/special-data-types/set.md +++ b/docs/zh/sql-reference/data-types/special-data-types/set.md @@ -1,3 +1,3 @@ -# 设置 {#set} +# 集合 {#set} 可以用在 IN 表达式的右半部分。 diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 
51040f06c48..383b9bb5e52 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -1003,7 +1003,6 @@ void Client::addOptions(OptionsDescription & options_description) ("password", po::value()->implicit_value("\n", ""), "password") ("ask-password", "ask-password") ("quota_key", po::value(), "A string to differentiate quotas when the user have keyed quotas configured on server") - ("pager", po::value(), "pager") ("testmode,T", "enable test hints in comments") ("max_client_network_bandwidth", po::value(), "the maximum speed of data exchange over the network for the client in bytes per second.") @@ -1104,8 +1103,6 @@ void Client::processOptions(const OptionsDescription & options_description, config().setString("host", options["host"].as()); if (options.count("interleave-queries-file")) interleave_queries_files = options["interleave-queries-file"].as>(); - if (options.count("pager")) - config().setString("pager", options["pager"].as()); if (options.count("port") && !options["port"].defaulted()) config().setInt("port", options["port"].as()); if (options.count("secure")) diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index 8b6cb9a9881..141a6d4f911 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -744,8 +744,8 @@ std::shared_ptr rewriteCreateQueryStorage(const ASTPtr & create_ if (create.storage == nullptr || new_storage_ast == nullptr) throw Exception("Storage is not specified", ErrorCodes::LOGICAL_ERROR); - res->database = new_table.first; - res->table = new_table.second; + res->setDatabase(new_table.first); + res->setTable(new_table.second); res->children.clear(); res->set(res->columns_list, create.columns_list->clone()); @@ -1659,7 +1659,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl( void ClusterCopier::dropAndCreateLocalTable(const ASTPtr & create_ast) { const auto & create = create_ast->as(); - dropLocalTableIfExists({create.database, create.table}); + dropLocalTableIfExists({create.getDatabase(), create.getTable()}); auto create_context = Context::createCopy(getContext()); @@ -1671,8 +1671,8 @@ void ClusterCopier::dropLocalTableIfExists(const DatabaseAndTableName & table_na { auto drop_ast = std::make_shared(); drop_ast->if_exists = true; - drop_ast->database = table_name.first; - drop_ast->table = table_name.second; + drop_ast->setDatabase(table_name.first); + drop_ast->setTable(table_name.second); auto drop_context = Context::createCopy(getContext()); diff --git a/src/Backups/BackupUtils.cpp b/src/Backups/BackupUtils.cpp index bd5a31e0bf7..5da87cfd6f7 100644 --- a/src/Backups/BackupUtils.cpp +++ b/src/Backups/BackupUtils.cpp @@ -312,11 +312,11 @@ namespace String getDataPathInBackup(const IAST & create_query) { const auto & create = create_query.as(); - if (create.table.empty()) + if (!create.table) return {}; if (create.temporary) - return getDataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.table}); - return getDataPathInBackup({create.database, create.table}); + return getDataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.getTable()}); + return getDataPathInBackup({create.getDatabase(), create.getTable()}); } String getMetadataPathInBackup(const DatabaseAndTableName & table_name) @@ -336,11 +336,11 @@ namespace String getMetadataPathInBackup(const IAST & create_query) { const auto & create = create_query.as(); - if (create.table.empty()) - return getMetadataPathInBackup(create.database); + if (!create.table) + return 
getMetadataPathInBackup(create.getDatabase()); if (create.temporary) - return getMetadataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.table}); - return getMetadataPathInBackup({create.database, create.table}); + return getMetadataPathInBackup({DatabaseCatalog::TEMPORARY_DATABASE, create.getTable()}); + return getMetadataPathInBackup({create.getDatabase(), create.getTable()}); } void backupCreateQuery(const IAST & create_query, BackupEntries & backup_entries) @@ -419,7 +419,7 @@ namespace /// We create and execute `create` query for the database name. auto create_query = std::make_shared(); - create_query->database = database_name; + create_query->setDatabase(database_name); create_query->if_not_exists = true; InterpreterCreateQuery create_interpreter{create_query, context}; create_interpreter.execute(); @@ -460,7 +460,7 @@ namespace restore_tasks.emplace_back([table_name, new_create_query, partitions, context, backup]() -> RestoreDataTasks { - DatabaseAndTableName new_table_name{new_create_query->database, new_create_query->table}; + DatabaseAndTableName new_table_name{new_create_query->getDatabase(), new_create_query->getTable()}; if (new_create_query->temporary) new_table_name.first = DatabaseCatalog::TEMPORARY_DATABASE; @@ -536,7 +536,7 @@ namespace restore_tasks.emplace_back([database_name, new_create_query, except_list, context, backup, renaming_config]() -> RestoreDataTasks { - const String & new_database_name = new_create_query->database; + const String & new_database_name = new_create_query->getDatabase(); context->checkAccess(AccessType::SHOW_TABLES, new_database_name); if (!DatabaseCatalog::instance().isDatabaseExist(new_database_name)) diff --git a/src/Backups/renameInCreateQuery.cpp b/src/Backups/renameInCreateQuery.cpp index 4c78844d266..7a94a755f67 100644 --- a/src/Backups/renameInCreateQuery.cpp +++ b/src/Backups/renameInCreateQuery.cpp @@ -48,21 +48,23 @@ namespace { if (create.temporary) { - if (create.table.empty()) + if (!create.table) throw Exception(ErrorCodes::LOGICAL_ERROR, "Table name specified in the CREATE TEMPORARY TABLE query must not be empty"); - create.table = data.renaming_config->getNewTemporaryTableName(create.table); + create.setTable(data.renaming_config->getNewTemporaryTableName(create.getTable())); } - else if (create.table.empty()) + else if (!create.table) { - if (create.database.empty()) + if (!create.database) throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name specified in the CREATE DATABASE query must not be empty"); - create.database = data.renaming_config->getNewDatabaseName(create.database); + create.setDatabase(data.renaming_config->getNewDatabaseName(create.getDatabase())); } else { - if (create.database.empty()) + if (!create.database) throw Exception(ErrorCodes::LOGICAL_ERROR, "Database name specified in the CREATE TABLE query must not be empty"); - std::tie(create.database, create.table) = data.renaming_config->getNewTableName({create.database, create.table}); + auto table_and_database_name = data.renaming_config->getNewTableName({create.getDatabase(), create.getTable()}); + create.setDatabase(table_and_database_name.first); + create.setTable(table_and_database_name.second); } create.uuid = UUIDHelpers::Nil; diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 3998f558620..9870325ef97 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -490,7 +490,7 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa ReplaceQueryParameterVisitor 
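The bulk of this patch is a mechanical migration from the raw `database`/`table` string members of the DDL ASTs to getDatabase()/setDatabase()/getTable()/setTable() accessors. The payoff, visible in the renameInCreateQuery.cpp hunk above, is that presence becomes distinct from emptiness: `if (!create.table)` asks whether a table name was given at all, which the old `create.table.empty()` could not separate from an explicitly empty name. A rough model of the new semantics, using std::optional for brevity; the real members are AST children, so these declarations are illustrative, not ClickHouse's.

#include <optional>
#include <stdexcept>
#include <string>

/// Models the presence/value semantics the accessors give the AST:
/// `database` / `table` can be absent, and absence is distinct from "".
struct CreateQuerySketch
{
    std::optional<std::string> database;
    std::optional<std::string> table;

    std::string getDatabase() const
    {
        if (!database)
            throw std::logic_error("Database is not set");
        return *database;
    }
    std::string getTable() const
    {
        if (!table)
            throw std::logic_error("Table is not set");
        return *table;
    }
    void setDatabase(const std::string & name) { database = name; }
    void setTable(const std::string & name) { table = name; }
};

/// CREATE|ATTACH DATABASE vs CREATE TABLE dispatch now tests presence,
/// as in InterpreterCreateQuery::execute() later in this patch:
bool isCreateDatabase(const CreateQuerySketch & create)
{
    return create.database && !create.table;
}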
visitor(query_parameters); visitor.visit(parsed_query); - /// Get new query after substitutions. Note that it cannot be done for INSERT query with embedded data. + /// Get new query after substitutions. query = serializeAST(*parsed_query); } @@ -824,6 +824,17 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr parsed_query) { + auto query = query_to_execute; + if (!query_parameters.empty()) + { + /// Replace ASTQueryParameter with ASTLiteral for prepared statements. + ReplaceQueryParameterVisitor visitor(query_parameters); + visitor.visit(parsed_query); + + /// Get new query after substitutions. + query = serializeAST(*parsed_query); + } + /// Process the query that requires transferring data blocks to the server. const auto parsed_insert_query = parsed_query->as(); if ((!parsed_insert_query.data && !parsed_insert_query.infile) && (is_interactive || (!stdin_is_a_tty && std_in.eof()))) @@ -831,7 +842,7 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars connection->sendQuery( connection_parameters.timeouts, - query_to_execute, + query, global_context->getCurrentQueryId(), query_processing_stage, &global_context->getSettingsRef(), @@ -884,8 +895,7 @@ void ClientBase::sendData(Block & sample, const ColumnsDescription & columns_des /// Get name of this file (path to file) const auto & in_file_node = parsed_insert_query->infile->as(); const auto in_file = in_file_node.value.safeGet(); - /// Get name of table - const auto table_name = parsed_insert_query->table_id.getTableName(); + std::string compression_method; /// Compression method can be specified in query if (parsed_insert_query->compression) @@ -1703,6 +1713,7 @@ void ClientBase::init(int argc, char ** argv) ("profile-events-delay-ms", po::value()->default_value(profile_events.delay_ms), "Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet)") ("interactive", "Process queries-file or --query query and start interactive mode") + ("pager", po::value(), "Pipe all output into this command (less or similar)") ; addOptions(options_description); @@ -1774,6 +1785,8 @@ void ClientBase::init(int argc, char ** argv) config().setBool("verbose", true); if (options.count("interactive")) config().setBool("interactive", true); + if (options.count("pager")) + config().setString("pager", options["pager"].as()); if (options.count("log-level")) Poco::Logger::root().setLevel(options["log-level"].as()); diff --git a/src/Common/setThreadName.cpp b/src/Common/setThreadName.cpp index 958404b9ad1..727bf23b891 100644 --- a/src/Common/setThreadName.cpp +++ b/src/Common/setThreadName.cpp @@ -12,6 +12,8 @@ #include #include +#define THREAD_NAME_SIZE 16 + namespace DB { @@ -23,13 +25,13 @@ namespace ErrorCodes /// Cache thread_name to avoid prctl(PR_GET_NAME) for query_log/text_log -static thread_local std::string thread_name; +static thread_local char thread_name[THREAD_NAME_SIZE]{}; void setThreadName(const char * name) { #ifndef NDEBUG - if (strlen(name) > 15) + if (strlen(name) > THREAD_NAME_SIZE - 1) throw DB::Exception("Thread name cannot be longer than 15 bytes", DB::ErrorCodes::PTHREAD_ERROR); #endif @@ -45,28 +47,25 @@ void setThreadName(const char * name) #endif DB::throwFromErrno("Cannot set thread name with prctl(PR_SET_NAME, ...)", DB::ErrorCodes::PTHREAD_ERROR); - thread_name = name; + memcpy(thread_name, name, 1 + strlen(name)); } -const std::string & getThreadName() 
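The processInsertQuery() hunk above extends prepared-statement support to INSERT: when parameters are bound, ReplaceQueryParameterVisitor rewrites each {name:Type} placeholder in the parsed AST into a literal, and the query text sent to the server is re-serialized from that AST — which is why the old "cannot be done for INSERT query with embedded data" caveat disappears from processOrdinaryQuery(). A toy, string-level illustration of the substitution's input/output shape; substituteParams() is hypothetical, and the real visitor works on the AST and validates each value against its declared type.

#include <map>
#include <string>

/// Replace each "{name:Type}" placeholder with the bound value.
/// Purely illustrative: no escaping, no type checking.
std::string substituteParams(std::string query, const std::map<std::string, std::string> & params)
{
    for (const auto & [name, value] : params)
    {
        const std::string prefix = "{" + name + ":";
        std::string::size_type open;
        while ((open = query.find(prefix)) != std::string::npos)
        {
            auto close = query.find('}', open);
            if (close == std::string::npos)
                break;
            query.replace(open, close - open + 1, value);
        }
    }
    return query;
}

/// substituteParams("INSERT INTO t SELECT {n:UInt64}", {{"n", "42"}})
///   returns "INSERT INTO t SELECT 42"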
+const char * getThreadName() { - if (!thread_name.empty()) + if (thread_name[0]) return thread_name; - thread_name.resize(16); - #if defined(__APPLE__) || defined(OS_SUNOS) - if (pthread_getname_np(pthread_self(), thread_name.data(), thread_name.size())) + if (pthread_getname_np(pthread_self(), thread_name, THREAD_NAME_SIZE)) throw DB::Exception("Cannot get thread name with pthread_getname_np()", DB::ErrorCodes::PTHREAD_ERROR); #elif defined(__FreeBSD__) // TODO: make test. freebsd will have this function soon https://freshbsd.org/commit/freebsd/r337983 -// if (pthread_get_name_np(pthread_self(), thread_name.data(), thread_name.size())) +// if (pthread_get_name_np(pthread_self(), thread_name, THREAD_NAME_SIZE)) // throw DB::Exception("Cannot get thread name with pthread_get_name_np()", DB::ErrorCodes::PTHREAD_ERROR); #else - if (0 != prctl(PR_GET_NAME, thread_name.data(), 0, 0, 0)) + if (0 != prctl(PR_GET_NAME, thread_name, 0, 0, 0)) DB::throwFromErrno("Cannot get thread name with prctl(PR_GET_NAME)", DB::ErrorCodes::PTHREAD_ERROR); #endif - thread_name.resize(std::strlen(thread_name.data())); return thread_name; } diff --git a/src/Common/setThreadName.h b/src/Common/setThreadName.h index ea988885db2..1834ea9696f 100644 --- a/src/Common/setThreadName.h +++ b/src/Common/setThreadName.h @@ -7,4 +7,4 @@ */ void setThreadName(const char * name); -const std::string & getThreadName(); +const char * getThreadName(); diff --git a/src/Core/Defines.h b/src/Core/Defines.h index 215bf6780d9..9665a20a397 100644 --- a/src/Core/Defines.h +++ b/src/Core/Defines.h @@ -44,7 +44,7 @@ /// The boundary on which the blocks for asynchronous file operations should be aligned. #define DEFAULT_AIO_FILE_BLOCK_SIZE 4096 -#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 1800 +#define DEFAULT_HTTP_READ_BUFFER_TIMEOUT 180 #define DEFAULT_HTTP_READ_BUFFER_CONNECTION_TIMEOUT 1 /// Maximum number of http-connections between two endpoints /// the number is unmotivated diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 0ada66b806a..76e836a9b71 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -295,9 +295,9 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora try { std::unique_lock lock{mutex}; - if (query.database != database_name) + if (query.getDatabase() != database_name) throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed to `{}`, cannot create table in `{}`", - database_name, query.database); + database_name, query.getDatabase()); /// Do some checks before renaming file from .tmp to .sql not_in_use = cleanupDetachedTables(); assertDetachedTableNotInUse(query.uuid); @@ -314,8 +314,8 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora /// It throws if `table_metadata_path` already exists (it's possible if table was detached) renameNoReplace(table_metadata_tmp_path, table_metadata_path); /// Commit point (a sort of) - attachTableUnlocked(query.table, table, lock); /// Should never throw - table_name_to_path.emplace(query.table, table_data_path); + attachTableUnlocked(query.getTable(), table, lock); /// Should never throw + table_name_to_path.emplace(query.getTable(), table_data_path); } catch (...) 
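The setThreadName.cpp rewrite above replaces the thread_local std::string cache with a fixed 16-byte array: Linux caps thread names at 15 bytes plus the NUL terminator (hence THREAD_NAME_SIZE 16), so a plain buffer avoids a per-thread allocation and lets getThreadName() hand out a const char * directly. A minimal Linux-only sketch of the same caching pattern; the *Sketch names are illustrative and error handling is reduced to a fallback.

#include <cstring>
#include <sys/prctl.h>

#define THREAD_NAME_SIZE 16   /// 15 bytes of name + trailing NUL, the Linux limit

static thread_local char thread_name[THREAD_NAME_SIZE]{};

void setThreadNameSketch(const char * name)
{
    /// prctl(PR_SET_NAME) silently truncates longer names, so cache what we set.
    prctl(PR_SET_NAME, name, 0, 0, 0);
    std::strncpy(thread_name, name, THREAD_NAME_SIZE - 1);
}

const char * getThreadNameSketch()
{
    if (thread_name[0])   /// cache hit: no syscall
        return thread_name;
    /// Cache miss (name was set elsewhere, e.g. by the runtime): ask the kernel.
    /// PR_GET_NAME fills up to 16 bytes including the NUL terminator.
    if (0 != prctl(PR_GET_NAME, thread_name, 0, 0, 0))
        thread_name[0] = '\0';
    return thread_name;
}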
{ @@ -325,7 +325,7 @@ void DatabaseAtomic::commitCreateTable(const ASTCreateQuery & query, const Stora throw; } if (table->storesDataOnDisk()) - tryCreateSymlink(query.table, table_data_path); + tryCreateSymlink(query.getTable(), table_data_path); } void DatabaseAtomic::commitAlterTable(const StorageID & table_id, const String & table_metadata_tmp_path, const String & table_metadata_path, diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp index 03d91324672..74b103afbc4 100644 --- a/src/Databases/DatabaseFactory.cpp +++ b/src/Databases/DatabaseFactory.cpp @@ -103,7 +103,7 @@ static inline ValueType safeGetLiteralValue(const ASTPtr &ast, const String &eng DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String & metadata_path, ContextPtr context) { auto * engine_define = create.storage; - const String & database_name = create.database; + const String & database_name = create.getDatabase(); const String & engine_name = engine_define->engine->name; const UUID & uuid = create.uuid; diff --git a/src/Databases/DatabaseMemory.cpp b/src/Databases/DatabaseMemory.cpp index 288f34ad7d1..8128bb8378e 100644 --- a/src/Databases/DatabaseMemory.cpp +++ b/src/Databases/DatabaseMemory.cpp @@ -75,7 +75,7 @@ void DatabaseMemory::dropTable( ASTPtr DatabaseMemory::getCreateDatabaseQuery() const { auto create_query = std::make_shared(); - create_query->database = getDatabaseName(); + create_query->setDatabase(getDatabaseName()); create_query->set(create_query->storage, std::make_shared()); create_query->storage->set(create_query->storage->engine, makeASTFunction(getEngineName())); diff --git a/src/Databases/DatabaseMemory.h b/src/Databases/DatabaseMemory.h index a661d1c49b0..b854d9be1f3 100644 --- a/src/Databases/DatabaseMemory.h +++ b/src/Databases/DatabaseMemory.h @@ -42,7 +42,7 @@ public: /// TODO May be it's better to use DiskMemory for such tables. /// To save data on disk it's possible to explicitly CREATE DATABASE db ENGINE=Ordinary in clickhouse-local. 
String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; } - String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.table); } + String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); } UUID tryGetTableUUID(const String & table_name) const override; diff --git a/src/Databases/DatabaseOnDisk.cpp b/src/Databases/DatabaseOnDisk.cpp index 55e5a3071bc..a1e0a825736 100644 --- a/src/Databases/DatabaseOnDisk.cpp +++ b/src/Databases/DatabaseOnDisk.cpp @@ -51,7 +51,7 @@ std::pair createTableFromAST( bool force_restore) { ast_create_query.attach = true; - ast_create_query.database = database_name; + ast_create_query.setDatabase(database_name); if (ast_create_query.as_table_function) { @@ -60,9 +60,9 @@ std::pair createTableFromAST( ColumnsDescription columns; if (ast_create_query.columns_list && ast_create_query.columns_list->columns) columns = InterpreterCreateQuery::getColumnsDescription(*ast_create_query.columns_list->columns, context, true); - StoragePtr storage = table_function->execute(ast_create_query.as_table_function, context, ast_create_query.table, std::move(columns)); + StoragePtr storage = table_function->execute(ast_create_query.as_table_function, context, ast_create_query.getTable(), std::move(columns)); storage->renameInMemory(ast_create_query); - return {ast_create_query.table, storage}; + return {ast_create_query.getTable(), storage}; } ColumnsDescription columns; @@ -82,7 +82,7 @@ std::pair createTableFromAST( return { - ast_create_query.table, + ast_create_query.getTable(), StorageFactory::instance().get( ast_create_query, table_data_path_relative, @@ -112,7 +112,7 @@ String getObjectDefinitionFromCreateQuery(const ASTPtr & query) /// We remove everything that is not needed for ATTACH from the query. assert(!create->temporary); - create->database.clear(); + create->database.reset(); create->as_database.clear(); create->as_table.clear(); create->if_not_exists = false; @@ -129,7 +129,7 @@ String getObjectDefinitionFromCreateQuery(const ASTPtr & query) create->out_file = nullptr; if (create->uuid != UUIDHelpers::Nil) - create->table = TABLE_WITH_UUID_NAME_PLACEHOLDER; + create->setTable(TABLE_WITH_UUID_NAME_PLACEHOLDER); WriteBufferFromOwnString statement_buf; formatAST(*create, statement_buf, false); @@ -161,7 +161,7 @@ void DatabaseOnDisk::createTable( { const auto & settings = local_context->getSettingsRef(); const auto & create = query->as(); - assert(table_name == create.table); + assert(table_name == create.getTable()); /// Create a file with metadata if necessary - if the query is not ATTACH. /// Write the query of `ATTACH table` to it. @@ -251,7 +251,7 @@ void DatabaseOnDisk::commitCreateTable(const ASTCreateQuery & query, const Stora try { /// Add a table to the map of known tables. - attachTable(query.table, table, getTableDataPath(query)); + attachTable(query.getTable(), table, getTableDataPath(query)); /// If it was ATTACH query and file with table metadata already exist /// (so, ATTACH is done after DETACH), then rename atomically replaces old file with new one. 
@@ -382,8 +382,8 @@ void DatabaseOnDisk::renameTable( table_metadata_path = getObjectMetadataPath(table_name); attach_query = parseQueryFromMetadata(log, local_context, table_metadata_path); auto & create = attach_query->as(); - create.database = to_database.getDatabaseName(); - create.table = to_table_name; + create.setDatabase(to_database.getDatabaseName()); + create.setTable(to_table_name); if (from_ordinary_to_atomic) create.uuid = UUIDHelpers::generateV4(); if (from_atomic_to_ordinary) @@ -458,7 +458,7 @@ ASTPtr DatabaseOnDisk::getCreateDatabaseQuery() const ast = parseQueryFromMetadata(log, getContext(), database_metadata_path, true); auto & ast_create_query = ast->as(); ast_create_query.attach = false; - ast_create_query.database = database_name; + ast_create_query.setDatabase(database_name); } if (!ast) { @@ -642,18 +642,18 @@ ASTPtr DatabaseOnDisk::parseQueryFromMetadata( return nullptr; auto & create = ast->as(); - if (!create.table.empty() && create.uuid != UUIDHelpers::Nil) + if (create.table && create.uuid != UUIDHelpers::Nil) { String table_name = unescapeForFileName(fs::path(metadata_file_path).stem()); - if (create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER && logger) + if (create.getTable() != TABLE_WITH_UUID_NAME_PLACEHOLDER && logger) LOG_WARNING( logger, "File {} contains both UUID and table name. Will use name `{}` instead of `{}`", metadata_file_path, table_name, - create.table); - create.table = table_name; + create.getTable()); + create.setTable(table_name); } return ast; @@ -667,7 +667,7 @@ ASTPtr DatabaseOnDisk::getCreateQueryFromMetadata(const String & database_metada { auto & ast_create_query = ast->as(); ast_create_query.attach = false; - ast_create_query.database = getDatabaseName(); + ast_create_query.setDatabase(getDatabaseName()); } return ast; diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index 81d5bd5adcb..f27a332fa3e 100644 --- a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -63,7 +63,7 @@ public: String getDataPath() const override { return data_path; } String getTableDataPath(const String & table_name) const override { return data_path + escapeForFileName(table_name) + "/"; } - String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.table); } + String getTableDataPath(const ASTCreateQuery & query) const override { return getTableDataPath(query.getTable()); } String getMetadataPath() const override { return metadata_path; } static ASTPtr parseQueryFromMetadata(Poco::Logger * log, ContextPtr context, const String & metadata_file_path, bool throw_on_error = true, bool remove_empty = false); diff --git a/src/Databases/DatabaseOrdinary.cpp b/src/Databases/DatabaseOrdinary.cpp index 061529f4e12..2da17ed388f 100644 --- a/src/Databases/DatabaseOrdinary.cpp +++ b/src/Databases/DatabaseOrdinary.cpp @@ -55,7 +55,7 @@ namespace catch (Exception & e) { e.addMessage( - "Cannot attach table " + backQuote(database_name) + "." + backQuote(query.table) + " from metadata file " + metadata_path + "Cannot attach table " + backQuote(database_name) + "." 
+ backQuote(query.getTable()) + " from metadata file " + metadata_path + " from query " + serializeAST(query)); throw; } @@ -168,7 +168,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables if (ast) { auto * create_query = ast->as(); - create_query->database = database_name; + create_query->setDatabase(database_name); if (fs::exists(full_path.string() + detached_suffix)) { @@ -182,7 +182,7 @@ void DatabaseOrdinary::loadTablesMetadata(ContextPtr local_context, ParsedTables } TableNamesSet loading_dependencies = getDependenciesSetFromCreateQuery(getContext(), ast); - QualifiedTableName qualified_name{database_name, create_query->table}; + QualifiedTableName qualified_name{database_name, create_query->getTable()}; std::lock_guard lock{metadata.mutex}; metadata.parsed_tables[qualified_name] = ParsedTableMetadata{full_path.string(), ast}; diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index 3f253ed0924..89d1f96857e 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -349,9 +349,9 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_ /// Replicas will set correct name of current database in query context (database name can be different on replicas) if (auto * ddl_query = dynamic_cast(query.get())) { - if (ddl_query->database != getDatabaseName()) + if (ddl_query->getDatabase() != getDatabaseName()) throw Exception(ErrorCodes::UNKNOWN_DATABASE, "Database was renamed"); - ddl_query->database.clear(); + ddl_query->database.reset(); if (auto * create = query->as()) { @@ -391,7 +391,7 @@ void DatabaseReplicated::checkQueryValid(const ASTPtr & query, ContextPtr query_ /// NOTE: we cannot check here that substituted values will be actually different on shards and replicas. Macros::MacroExpansionInfo info; - info.table_id = {getDatabaseName(), create->table, create->uuid}; + info.table_id = {getDatabaseName(), create->getTable(), create->uuid}; query_context->getMacros()->expand(maybe_path, info); bool maybe_shard_macros = info.expanded_other; info.expanded_other = false; @@ -715,13 +715,13 @@ ASTPtr DatabaseReplicated::parseQueryFromMetadataInZooKeeper(const String & node auto ast = parseQuery(parser, query, description, 0, getContext()->getSettingsRef().max_parser_depth); auto & create = ast->as(); - if (create.uuid == UUIDHelpers::Nil || create.table != TABLE_WITH_UUID_NAME_PLACEHOLDER || !create.database.empty()) + if (create.uuid == UUIDHelpers::Nil || create.getTable() != TABLE_WITH_UUID_NAME_PLACEHOLDER || create.database) throw Exception(ErrorCodes::LOGICAL_ERROR, "Got unexpected query from {}: {}", node_name, query); bool is_materialized_view_with_inner_table = create.is_materialized_view && create.to_table_id.empty(); - create.database = getDatabaseName(); - create.table = unescapeForFileName(node_name); + create.setDatabase(getDatabaseName()); + create.setTable(unescapeForFileName(node_name)); create.attach = is_materialized_view_with_inner_table; return ast; @@ -811,7 +811,7 @@ void DatabaseReplicated::commitCreateTable(const ASTCreateQuery & query, const S assert(!ddl_worker->isCurrentlyActive() || txn); if (txn && txn->isInitialQuery()) { - String metadata_zk_path = zookeeper_path + "/metadata/" + escapeForFileName(query.table); + String metadata_zk_path = zookeeper_path + "/metadata/" + escapeForFileName(query.getTable()); String statement = getObjectDefinitionFromCreateQuery(query.clone()); /// zk::multi(...) 
will throw if `metadata_zk_path` exists txn->addOp(zkutil::makeCreateRequest(metadata_zk_path, statement, zkutil::CreateMode::Persistent)); diff --git a/src/Databases/DatabasesCommon.cpp b/src/Databases/DatabasesCommon.cpp index e5e54f287ec..c92aac44083 100644 --- a/src/Databases/DatabasesCommon.cpp +++ b/src/Databases/DatabasesCommon.cpp @@ -29,7 +29,7 @@ void applyMetadataChangesToCreateQuery(const ASTPtr & query, const StorageInMemo bool has_structure = ast_create_query.columns_list && ast_create_query.columns_list->columns; if (ast_create_query.as_table_function && !has_structure) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Cannot alter table {} because it was created AS table function" - " and doesn't have structure in metadata", backQuote(ast_create_query.table)); + " and doesn't have structure in metadata", backQuote(ast_create_query.getTable())); assert(has_structure); ASTPtr new_columns = InterpreterCreateQuery::formatColumns(metadata.columns); diff --git a/src/Databases/MySQL/DatabaseMySQL.cpp b/src/Databases/MySQL/DatabaseMySQL.cpp index feebf33f58a..275a1015c7b 100644 --- a/src/Databases/MySQL/DatabaseMySQL.cpp +++ b/src/Databases/MySQL/DatabaseMySQL.cpp @@ -129,8 +129,8 @@ static ASTPtr getCreateQueryFromStorage(const StoragePtr & storage, const ASTPtr { /// init create query. auto table_id = storage->getStorageID(); - create_table_query->table = table_id.table_name; - create_table_query->database = table_id.database_name; + create_table_query->setTable(table_id.table_name); + create_table_query->setDatabase(table_id.database_name); auto metadata_snapshot = storage->getInMemoryMetadataPtr(); for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary()) @@ -192,7 +192,7 @@ time_t DatabaseMySQL::getObjectMetadataModificationTime(const String & table_nam ASTPtr DatabaseMySQL::getCreateDatabaseQuery() const { const auto & create_query = std::make_shared(); - create_query->database = getDatabaseName(); + create_query->setDatabase(getDatabaseName()); create_query->set(create_query->storage, database_engine_define); if (const auto comment_value = getDatabaseComment(); !comment_value.empty()) diff --git a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp index 8b6161c9bd0..058708afb80 100644 --- a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp +++ b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp @@ -3,26 +3,27 @@ #if USE_MYSQL #include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include namespace DB { @@ -765,7 +766,7 @@ void MaterializedMySQLSyncThread::executeDDLAtomic(const QueryEvent & query_even bool MaterializedMySQLSyncThread::isMySQLSyncThread() { - return getThreadName() == MYSQL_BACKGROUND_THREAD_NAME; + return getThreadName() == std::string_view(MYSQL_BACKGROUND_THREAD_NAME); } void MaterializedMySQLSyncThread::setSynchronizationThreadException(const std::exception_ptr & exception) diff --git a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp index 2b0d73a02ae..14488f08811 100644 --- 
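A detail in the MaterializedMySQLSyncThread.cpp hunk above: once getThreadName() returns const char * rather than const std::string &, the comparison against MYSQL_BACKGROUND_THREAD_NAME must be wrapped in std::string_view. If both operands are plain character pointers, == compares addresses, not characters, and even two identical string literals are not guaranteed to share an address. A two-assert demonstration (demo() is illustrative):

#include <cassert>
#include <string_view>

void demo()
{
    const char * a = "mysql-sync";
    char b[] = "mysql-sync";

    assert(a != b);                      /// pointer comparison: distinct objects, distinct addresses
    assert(std::string_view(a) == b);    /// content comparison: equal
}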
a/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabaseMaterializedPostgreSQL.cpp @@ -238,7 +238,7 @@ ASTPtr DatabaseMaterializedPostgreSQL::createAlterSettingsQuery(const SettingCha auto * alter = query->as(); alter->alter_object = ASTAlterQuery::AlterObjectType::DATABASE; - alter->database = database_name; + alter->setDatabase(database_name); alter->set(alter->command_list, command_list); return query; diff --git a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp index 5d3493d0c82..e995a6d2377 100644 --- a/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp +++ b/src/Databases/PostgreSQL/DatabasePostgreSQL.cpp @@ -355,7 +355,7 @@ void DatabasePostgreSQL::shutdown() ASTPtr DatabasePostgreSQL::getCreateDatabaseQuery() const { const auto & create_query = std::make_shared(); - create_query->database = getDatabaseName(); + create_query->setDatabase(getDatabaseName()); create_query->set(create_query->storage, database_engine_define); if (const auto comment_value = getDatabaseComment(); !comment_value.empty()) @@ -388,8 +388,8 @@ ASTPtr DatabasePostgreSQL::getCreateTableQueryImpl(const String & table_name, Co /// init create query. auto table_id = storage->getStorageID(); - create_table_query->table = table_id.table_name; - create_table_query->database = table_id.database_name; + create_table_query->setTable(table_id.table_name); + create_table_query->setDatabase(table_id.database_name); auto metadata_snapshot = storage->getInMemoryMetadataPtr(); for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary()) diff --git a/src/Databases/SQLite/DatabaseSQLite.cpp b/src/Databases/SQLite/DatabaseSQLite.cpp index 05bc2f24834..ba53ab08407 100644 --- a/src/Databases/SQLite/DatabaseSQLite.cpp +++ b/src/Databases/SQLite/DatabaseSQLite.cpp @@ -160,7 +160,7 @@ StoragePtr DatabaseSQLite::fetchTable(const String & table_name, ContextPtr loca ASTPtr DatabaseSQLite::getCreateDatabaseQuery() const { const auto & create_query = std::make_shared(); - create_query->database = getDatabaseName(); + create_query->setDatabase(getDatabaseName()); create_query->set(create_query->storage, database_engine_define); if (const auto comment_value = getDatabaseComment(); !comment_value.empty()) @@ -193,8 +193,8 @@ ASTPtr DatabaseSQLite::getCreateTableQueryImpl(const String & table_name, Contex /// init create query. 
auto table_id = storage->getStorageID(); - create_table_query->table = table_id.table_name; - create_table_query->database = table_id.database_name; + create_table_query->setTable(table_id.table_name); + create_table_query->setDatabase(table_id.database_name); auto metadata_snapshot = storage->getInMemoryMetadataPtr(); for (const auto & column_type_and_name : metadata_snapshot->getColumns().getOrdinary()) diff --git a/src/Dictionaries/getDictionaryConfigurationFromAST.cpp b/src/Dictionaries/getDictionaryConfigurationFromAST.cpp index 0c6944eeccc..f031e8ff038 100644 --- a/src/Dictionaries/getDictionaryConfigurationFromAST.cpp +++ b/src/Dictionaries/getDictionaryConfigurationFromAST.cpp @@ -536,12 +536,12 @@ getDictionaryConfigurationFromAST(const ASTCreateQuery & query, ContextPtr conte AutoPtr name_element(xml_document->createElement("name")); current_dictionary->appendChild(name_element); - AutoPtr name(xml_document->createTextNode(query.table)); + AutoPtr name(xml_document->createTextNode(query.getTable())); name_element->appendChild(name); AutoPtr database_element(xml_document->createElement("database")); current_dictionary->appendChild(database_element); - AutoPtr database(xml_document->createTextNode(!database_.empty() ? database_ : query.database)); + AutoPtr database(xml_document->createTextNode(!database_.empty() ? database_ : query.getDatabase())); database_element->appendChild(database); if (query.uuid != UUIDHelpers::Nil) diff --git a/src/IO/ReadBufferFromEmptyFile.h b/src/IO/ReadBufferFromEmptyFile.h new file mode 100644 index 00000000000..311aee1559b --- /dev/null +++ b/src/IO/ReadBufferFromEmptyFile.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +namespace DB +{ + +/// In case of empty file it does not make any sense to read it. +/// +/// Plus regular readers from file has an assert that buffer is not empty, that will fail: +/// - ReadBufferFromFileDescriptor +/// - SynchronousReader +/// - ThreadPoolReader +class ReadBufferFromEmptyFile : public ReadBufferFromFileBase +{ +private: + bool nextImpl() override { return false; } + std::string getFileName() const override { return ""; } + off_t seek(off_t /*off*/, int /*whence*/) override { return 0; } + off_t getPosition() override { return 0; } +}; + +} diff --git a/src/IO/ReadBufferFromFileDescriptor.cpp b/src/IO/ReadBufferFromFileDescriptor.cpp index a710dfe33fb..ed8eba62f04 100644 --- a/src/IO/ReadBufferFromFileDescriptor.cpp +++ b/src/IO/ReadBufferFromFileDescriptor.cpp @@ -51,6 +51,9 @@ std::string ReadBufferFromFileDescriptor::getFileName() const bool ReadBufferFromFileDescriptor::nextImpl() { + /// If internal_buffer size is empty, then read() cannot be distinguished from EOF + assert(!internal_buffer.empty()); + size_t bytes_read = 0; while (!bytes_read) { diff --git a/src/IO/SynchronousReader.cpp b/src/IO/SynchronousReader.cpp index e7dfc7c91b7..599299ddad4 100644 --- a/src/IO/SynchronousReader.cpp +++ b/src/IO/SynchronousReader.cpp @@ -36,6 +36,9 @@ namespace ErrorCodes std::future SynchronousReader::submit(Request request) { + /// If size is zero, then read() cannot be distinguished from EOF + assert(request.size); + int fd = assert_cast(*request.descriptor).fd; #if defined(POSIX_FADV_WILLNEED) diff --git a/src/IO/ThreadPoolReader.cpp b/src/IO/ThreadPoolReader.cpp index 701fa759848..273778df37c 100644 --- a/src/IO/ThreadPoolReader.cpp +++ b/src/IO/ThreadPoolReader.cpp @@ -76,6 +76,9 @@ ThreadPoolReader::ThreadPoolReader(size_t pool_size, size_t queue_size_) std::future ThreadPoolReader::submit(Request request) { + 
/// If size is zero, then read() cannot be distinguished from EOF + assert(request.size); + int fd = assert_cast(*request.descriptor).fd; #if defined(__linux__) diff --git a/src/IO/createReadBufferFromFileBase.cpp b/src/IO/createReadBufferFromFileBase.cpp index 05de97c5c92..bed97d54ab0 100644 --- a/src/IO/createReadBufferFromFileBase.cpp +++ b/src/IO/createReadBufferFromFileBase.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -33,6 +34,8 @@ std::unique_ptr createReadBufferFromFileBase( char * existing_memory, size_t alignment) { + if (size.has_value() && !*size) + return std::make_unique(); size_t estimated_size = size.has_value() ? *size : 0; if (!existing_memory diff --git a/src/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h index 480a7c01b98..d5039a2f19e 100644 --- a/src/Interpreters/AddDefaultDatabaseVisitor.h +++ b/src/Interpreters/AddDefaultDatabaseVisitor.h @@ -239,8 +239,8 @@ private: if (only_replace_current_database_function) return; - if (node.database.empty()) - node.database = database_name; + if (!node.database) + node.setDatabase(database_name); } void visitDDL(ASTRenameQuery & node, ASTPtr &) const @@ -262,8 +262,8 @@ private: if (only_replace_current_database_function) return; - if (node.database.empty()) - node.database = database_name; + if (!node.database) + node.setDatabase(database_name); for (const auto & child : node.command_list->children) { diff --git a/src/Interpreters/DDLTask.cpp b/src/Interpreters/DDLTask.cpp index cfce6ac9463..b8d3d64feea 100644 --- a/src/Interpreters/DDLTask.cpp +++ b/src/Interpreters/DDLTask.cpp @@ -257,12 +257,12 @@ bool DDLTask::tryFindHostInCluster() * */ is_circular_replicated = true; auto * query_with_table = dynamic_cast(query.get()); - if (!query_with_table || query_with_table->database.empty()) + if (!query_with_table || !query_with_table->database) { throw Exception(ErrorCodes::INCONSISTENT_CLUSTER_DEFINITION, "For a distributed DDL on circular replicated cluster its table name must be qualified by database name."); } - if (default_database == query_with_table->database) + if (default_database == query_with_table->getDatabase()) return true; } } @@ -351,8 +351,8 @@ void DatabaseReplicatedTask::parseQueryFromEntry(ContextPtr context) if (auto * ddl_query = dynamic_cast(query.get())) { /// Update database name with actual name of local database - assert(ddl_query->database.empty()); - ddl_query->database = database->getDatabaseName(); + assert(!ddl_query->database); + ddl_query->setDatabase(database->getDatabaseName()); } } diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 4633b22ca49..9fefcf012a9 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -662,7 +662,7 @@ void DDLWorker::processTask(DDLTaskBase & task, const ZooKeeperPtr & zookeeper) StoragePtr storage; if (auto * query_with_table = dynamic_cast(task.query.get()); query_with_table) { - if (!query_with_table->table.empty()) + if (query_with_table->table) { /// It's not CREATE DATABASE auto table_id = context->tryResolveStorageID(*query_with_table, Context::ResolveOrdinary); diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp index fa8f3e099ff..898e169bc44 100644 --- a/src/Interpreters/DatabaseCatalog.cpp +++ b/src/Interpreters/DatabaseCatalog.cpp @@ -66,9 +66,9 @@ TemporaryTableHolder::TemporaryTableHolder(ContextPtr context_, const TemporaryT if (create->uuid == UUIDHelpers::Nil) create->uuid = 
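The asserts added to ReadBufferFromFileDescriptor::nextImpl(), SynchronousReader::submit() and ThreadPoolReader::submit(), together with the new ReadBufferFromEmptyFile short-circuit in createReadBufferFromFileBase() above, all guard one POSIX corner case: read() with a zero-length buffer returns 0, which is exactly what it returns at end of file, so a zero-sized read can never make progress or prove anything. A small demonstration of the ambiguity; demo() is illustrative and expects any readable, non-empty file.

#include <cassert>
#include <fcntl.h>
#include <unistd.h>

void demo(const char * path)
{
    int fd = open(path, O_RDONLY);
    if (fd == -1)
        return;

    char buf[16];
    /// Zero-length read: returns 0 even though the file still has data...
    assert(read(fd, buf, 0) == 0);
    /// ...which is indistinguishable from the 0 that read() returns at EOF:
    lseek(fd, 0, SEEK_END);
    assert(read(fd, buf, sizeof(buf)) == 0);

    close(fd);
}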
UUIDHelpers::generateV4(); id = create->uuid; - create->table = "_tmp_" + toString(id); - global_name = create->table; - create->database = DatabaseCatalog::TEMPORARY_DATABASE; + create->setTable("_tmp_" + toString(id)); + global_name = create->getTable(); + create->setDatabase(DatabaseCatalog::TEMPORARY_DATABASE); } else { @@ -786,8 +786,8 @@ void DatabaseCatalog::enqueueDroppedTableCleanup(StorageID table_id, StoragePtr if (create) { String data_path = "store/" + getPathForUUID(table_id.uuid); - create->database = table_id.database_name; - create->table = table_id.table_name; + create->setDatabase(table_id.database_name); + create->setTable(table_id.table_name); try { table = createTableFromAST(*create, table_id.getDatabaseName(), data_path, getContext(), false).second; diff --git a/src/Interpreters/InterpreterAlterQuery.cpp b/src/Interpreters/InterpreterAlterQuery.cpp index 354d25fbe8f..665d76625ef 100644 --- a/src/Interpreters/InterpreterAlterQuery.cpp +++ b/src/Interpreters/InterpreterAlterQuery.cpp @@ -62,7 +62,7 @@ BlockIO InterpreterAlterQuery::executeToTable(const ASTAlterQuery & alter) getContext()->checkAccess(getRequiredAccess()); auto table_id = getContext()->resolveStorageID(alter, Context::ResolveOrdinary); - query_ptr->as().database = table_id.database_name; + query_ptr->as().setDatabase(table_id.database_name); DatabasePtr database = DatabaseCatalog::instance().getDatabase(table_id.database_name); if (typeid_cast(database.get()) @@ -175,7 +175,7 @@ BlockIO InterpreterAlterQuery::executeToDatabase(const ASTAlterQuery & alter) { BlockIO res; getContext()->checkAccess(getRequiredAccess()); - DatabasePtr database = DatabaseCatalog::instance().getDatabase(alter.database); + DatabasePtr database = DatabaseCatalog::instance().getDatabase(alter.getDatabase()); AlterCommands alter_commands; for (const auto & child : alter.command_list->children) @@ -215,7 +215,7 @@ AccessRightsElements InterpreterAlterQuery::getRequiredAccess() const AccessRightsElements required_access; const auto & alter = query_ptr->as(); for (const auto & child : alter.command_list->children) - boost::range::push_back(required_access, getRequiredAccessForCommand(child->as(), alter.database, alter.table)); + boost::range::push_back(required_access, getRequiredAccessForCommand(child->as(), alter.getDatabase(), alter.getTable())); return required_access; } diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index 7e6c3f113b5..bef59907782 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -101,7 +101,7 @@ InterpreterCreateQuery::InterpreterCreateQuery(const ASTPtr & query_ptr_, Contex BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) { - String database_name = create.database; + String database_name = create.getDatabase(); auto guard = DatabaseCatalog::instance().getDDLGuard(database_name, ""); @@ -127,11 +127,11 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) /// Short syntax: try read database definition from file auto ast = DatabaseOnDisk::parseQueryFromMetadata(nullptr, getContext(), metadata_file_path); create = ast->as(); - if (!create.table.empty() || !create.storage) + if (create.table || !create.storage) throw Exception(ErrorCodes::INCORRECT_QUERY, "Metadata file {} contains incorrect CREATE DATABASE query", metadata_file_path.string()); create.attach = true; create.attach_short_syntax = true; - create.database = database_name; + 
create.setDatabase(database_name); } else if (!create.storage) { @@ -161,7 +161,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) { if (create.attach && create.uuid == UUIDHelpers::Nil) throw Exception(ErrorCodes::INCORRECT_QUERY, "UUID must be specified for ATTACH. " - "If you want to attach existing database, use just ATTACH DATABASE {};", create.database); + "If you want to attach existing database, use just ATTACH DATABASE {};", create.getDatabase()); else if (create.uuid == UUIDHelpers::Nil) create.uuid = UUIDHelpers::generateV4(); @@ -238,7 +238,7 @@ BlockIO InterpreterCreateQuery::createDatabase(ASTCreateQuery & create) DatabasePtr database = DatabaseFactory::get(create, metadata_path / "", getContext()); if (create.uuid != UUIDHelpers::Nil) - create.database = TABLE_WITH_UUID_NAME_PLACEHOLDER; + create.setDatabase(TABLE_WITH_UUID_NAME_PLACEHOLDER); bool need_write_metadata = !create.attach || !fs::exists(metadata_file_path); @@ -823,7 +823,7 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data bool has_uuid = create.uuid != UUIDHelpers::Nil || create.to_inner_uuid != UUIDHelpers::Nil; if (has_uuid && !is_on_cluster) throw Exception(ErrorCodes::INCORRECT_QUERY, - "{} UUID specified, but engine of database {} is not Atomic", kind, create.database); + "{} UUID specified, but engine of database {} is not Atomic", kind, create.getDatabase()); /// Ignore UUID if it's ON CLUSTER query create.uuid = UUIDHelpers::Nil; @@ -835,12 +835,12 @@ void InterpreterCreateQuery::assertOrSetUUID(ASTCreateQuery & create, const Data BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) { /// Temporary tables are created out of databases. - if (create.temporary && !create.database.empty()) + if (create.temporary && create.database) throw Exception("Temporary tables cannot be inside a database. You should not specify a database for a temporary table.", ErrorCodes::BAD_DATABASE_FOR_TEMPORARY_TABLE); String current_database = getContext()->getCurrentDatabase(); - auto database_name = create.database.empty() ? current_database : create.database; + auto database_name = create.database ? 
create.getDatabase() : current_database; // If this is a stub ATTACH query, read the query definition from the database if (create.attach && !create.storage && !create.columns_list) @@ -849,12 +849,12 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) if (database->getEngineName() == "Replicated") { - auto guard = DatabaseCatalog::instance().getDDLGuard(database_name, create.table); + auto guard = DatabaseCatalog::instance().getDDLGuard(database_name, create.getTable()); if (auto* ptr = typeid_cast(database.get()); ptr && !getContext()->getClientInfo().is_replicated_database_internal) { - create.database = database_name; + create.setDatabase(database_name); guard->releaseTableLock(); return ptr->tryEnqueueReplicatedDDL(query_ptr, getContext()); } @@ -863,18 +863,18 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) bool if_not_exists = create.if_not_exists; // Table SQL definition is available even if the table is detached (even permanently) - auto query = database->getCreateTableQuery(create.table, getContext()); + auto query = database->getCreateTableQuery(create.getTable(), getContext()); auto create_query = query->as(); if (!create.is_dictionary && create_query.is_dictionary) throw Exception(ErrorCodes::INCORRECT_QUERY, "Cannot ATTACH TABLE {}.{}, it is a Dictionary", - backQuoteIfNeed(database_name), backQuoteIfNeed(create.table)); + backQuoteIfNeed(database_name), backQuoteIfNeed(create.getTable())); if (create.is_dictionary && !create_query.is_dictionary) throw Exception(ErrorCodes::INCORRECT_QUERY, "Cannot ATTACH DICTIONARY {}.{}, it is a Table", - backQuoteIfNeed(database_name), backQuoteIfNeed(create.table)); + backQuoteIfNeed(database_name), backQuoteIfNeed(create.getTable())); create = create_query; // Copy the saved create query, but use ATTACH instead of CREATE @@ -917,11 +917,11 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create) "use either ATTACH TABLE {}; to attach existing table " "or CREATE TABLE {} ; to create new table " "or ATTACH TABLE {} FROM '/path/to/data/'
                        "; to create new table and attach data.",
-                        create.table, create.table, create.table);
+                        create.getTable(), create.getTable(), create.getTable());
     }
 
-    if (!create.temporary && create.database.empty())
-        create.database = current_database;
+    if (!create.temporary && !create.database)
+        create.setDatabase(current_database);
     if (create.to_table_id && create.to_table_id.database_name.empty())
         create.to_table_id.database_name = current_database;
 
@@ -949,7 +949,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
 
     if (need_add_to_database && database->getEngineName() == "Replicated")
     {
-        auto guard = DatabaseCatalog::instance().getDDLGuard(create.database, create.table);
+        auto guard = DatabaseCatalog::instance().getDDLGuard(create.getDatabase(), create.getTable());
 
         if (auto * ptr = typeid_cast<DatabaseReplicated *>(database.get());
             ptr && !getContext()->getClientInfo().is_replicated_database_internal)
@@ -972,7 +972,7 @@ BlockIO InterpreterCreateQuery::createTable(ASTCreateQuery & create)
     /// If table has dependencies - add them to the graph
     TableNamesSet loading_dependencies = getDependenciesSetFromCreateQuery(getContext()->getGlobalContext(), query_ptr);
     if (!loading_dependencies.empty())
-        DatabaseCatalog::instance().addLoadingDependencies(QualifiedTableName{database_name, create.table}, std::move(loading_dependencies));
+        DatabaseCatalog::instance().addLoadingDependencies(QualifiedTableName{database_name, create.getTable()}, std::move(loading_dependencies));
 
     return fillTableIfNeeded(create);
 }
@@ -991,16 +991,16 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
         /** If the request specifies IF NOT EXISTS, we allow concurrent CREATE queries (which do nothing).
           * If table doesn't exist, one thread is creating table, while others wait in DDLGuard.
           */
-        guard = DatabaseCatalog::instance().getDDLGuard(create.database, create.table);
+        guard = DatabaseCatalog::instance().getDDLGuard(create.getDatabase(), create.getTable());
 
-        database = DatabaseCatalog::instance().getDatabase(create.database);
+        database = DatabaseCatalog::instance().getDatabase(create.getDatabase());
         assertOrSetUUID(create, database);
 
         String storage_name = create.is_dictionary ? "Dictionary" : "Table";
         auto storage_already_exists_error_code = create.is_dictionary ? ErrorCodes::DICTIONARY_ALREADY_EXISTS : ErrorCodes::TABLE_ALREADY_EXISTS;
 
         /// Table can be created before or it can be created concurrently in another thread, while we were waiting in DDLGuard.
-        if (database->isTableExist(create.table, getContext()))
+        if (database->isTableExist(create.getTable(), getContext()))
         {
             /// TODO Check structure of table
             if (create.if_not_exists)
@@ -1009,8 +1009,8 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
             {
                 /// when executing CREATE OR REPLACE VIEW, drop current existing view
                 auto drop_ast = std::make_shared<ASTDropQuery>();
-                drop_ast->database = create.database;
-                drop_ast->table = create.table;
+                drop_ast->setDatabase(create.getDatabase());
+                drop_ast->setTable(create.getTable());
                 drop_ast->no_ddl_lock = true;
 
                 auto drop_context = Context::createCopy(context);
@@ -1019,7 +1019,7 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
             }
             else
                 throw Exception(storage_already_exists_error_code,
-                    "{} {}.{} already exists", storage_name, backQuoteIfNeed(create.database), backQuoteIfNeed(create.table));
+                    "{} {}.{} already exists", storage_name, backQuoteIfNeed(create.getDatabase()), backQuoteIfNeed(create.getTable()));
         }
 
         data_path = database->getTableDataPath(create);
@@ -1030,10 +1030,10 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
     }
     else
     {
-        if (create.if_not_exists && getContext()->tryResolveStorageID({"", create.table}, Context::ResolveExternal))
+        if (create.if_not_exists && getContext()->tryResolveStorageID({"", create.getTable()}, Context::ResolveExternal))
             return false;
 
-        String temporary_table_name = create.table;
+        String temporary_table_name = create.getTable();
         auto temporary_table = TemporaryTableHolder(getContext(), properties.columns, properties.constraints, query_ptr);
         getContext()->getSessionContext()->addExternalTable(temporary_table_name, std::move(temporary_table));
         return true;
@@ -1070,8 +1070,8 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
     {
         const auto & factory = TableFunctionFactory::instance();
         auto table_func = factory.get(create.as_table_function, getContext());
-        res = table_func->execute(create.as_table_function, getContext(), create.table, properties.columns);
-        res->renameInMemory({create.database, create.table, create.uuid});
+        res = table_func->execute(create.as_table_function, getContext(), create.getTable(), properties.columns);
+        res->renameInMemory({create.getDatabase(), create.getTable(), create.uuid});
     }
     else
     {
@@ -1089,12 +1089,12 @@ bool InterpreterCreateQuery::doCreateTable(ASTCreateQuery & create,
             "ATTACH ... FROM ... query is not supported for {} table engine, "
             "because such tables do not store any data on disk. Use CREATE instead.", res->getName());
 
-    database->createTable(getContext(), create.table, res, query_ptr);
+    database->createTable(getContext(), create.getTable(), res, query_ptr);
 
     /// Move table data to the proper place. We do not move data earlier to avoid situations
    /// when data directory moved, but table has not been created due to some error.
    if (from_path)
-        res->rename(actual_data_path, {create.database, create.table, create.uuid});
+        res->rename(actual_data_path, {create.getDatabase(), create.getTable(), create.uuid});
 
    /// We must call "startup" and "shutdown" while holding DDLGuard.
    /// Because otherwise method "shutdown" (from InterpreterDropQuery) can be called before startup
@@ -1142,30 +1142,30 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create,
     };
 
     auto ast_drop = std::make_shared<ASTDropQuery>();
-    String table_to_replace_name = create.table;
+    String table_to_replace_name = create.getTable();
 
     {
-        auto database = DatabaseCatalog::instance().getDatabase(create.database);
+        auto database = DatabaseCatalog::instance().getDatabase(create.getDatabase());
         if (database->getUUID() == UUIDHelpers::Nil)
             throw Exception(ErrorCodes::INCORRECT_QUERY,
                             "{} query is supported only for Atomic databases",
                             create.create_or_replace ? "CREATE OR REPLACE TABLE" : "REPLACE TABLE");
 
-        UInt64 name_hash = sipHash64(create.database + create.table);
+        UInt64 name_hash = sipHash64(create.getDatabase() + create.getTable());
         UInt16 random_suffix = thread_local_rng();
         if (auto txn = current_context->getZooKeeperMetadataTransaction())
         {
             /// Avoid different table name on database replicas
             random_suffix = sipHash64(txn->getTaskZooKeeperPath());
         }
-        create.table = fmt::format("_tmp_replace_{}_{}",
-                                   getHexUIntLowercase(name_hash),
-                                   getHexUIntLowercase(random_suffix));
+        create.setTable(fmt::format("_tmp_replace_{}_{}",
+                                    getHexUIntLowercase(name_hash),
+                                    getHexUIntLowercase(random_suffix)));
 
-        ast_drop->table = create.table;
+        ast_drop->setTable(create.getTable());
         ast_drop->is_dictionary = create.is_dictionary;
-        ast_drop->database = create.database;
+        ast_drop->setDatabase(create.getDatabase());
         ast_drop->kind = ASTDropQuery::Drop;
     }
 
@@ -1186,8 +1186,8 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create,
         auto ast_rename = std::make_shared<ASTRenameQuery>();
         ASTRenameQuery::Element elem
         {
-            ASTRenameQuery::Table{create.database, create.table},
-            ASTRenameQuery::Table{create.database, table_to_replace_name}
+            ASTRenameQuery::Table{create.getDatabase(), create.getTable()},
+            ASTRenameQuery::Table{create.getDatabase(), table_to_replace_name}
         };
 
         ast_rename->elements.push_back(std::move(elem));
@@ -1217,7 +1217,7 @@ BlockIO InterpreterCreateQuery::doCreateOrReplaceTable(ASTCreateQuery & create,
             InterpreterDropQuery(ast_drop, drop_context).execute();
         }
 
-        create.table = table_to_replace_name;
+        create.setTable(table_to_replace_name);
         return {};
     }
 
@@ -1240,7 +1240,7 @@ BlockIO InterpreterCreateQuery::fillTableIfNeeded(const ASTCreateQuery & create)
         && !create.is_ordinary_view && !create.is_live_view && (!create.is_materialized_view || create.is_populate))
     {
         auto insert = std::make_shared<ASTInsertQuery>();
-        insert->table_id = {create.database, create.table, create.uuid};
+        insert->table_id = {create.getDatabase(), create.getTable(), create.uuid};
         insert->select = create.select->clone();
 
         if (create.temporary && !getContext()->getSessionContext()->hasQueryContext())
@@ -1316,7 +1316,7 @@ BlockIO InterpreterCreateQuery::execute()
     ASTQueryWithOutput::resetOutputASTIfExist(create);
 
     /// CREATE|ATTACH DATABASE
-    if (!create.database.empty() && create.table.empty())
+    if (create.database && !create.table)
         return createDatabase(create);
     else
         return createTable(create);
@@ -1332,21 +1332,21 @@ AccessRightsElements InterpreterCreateQuery::getRequiredAccess() const
     AccessRightsElements required_access;
     const auto & create = query_ptr->as<ASTCreateQuery &>();
 
-    if (create.table.empty())
+    if (!create.table)
     {
-        required_access.emplace_back(AccessType::CREATE_DATABASE, create.database);
+        required_access.emplace_back(AccessType::CREATE_DATABASE, create.getDatabase());
     }
     else if (create.is_dictionary)
     {
-        required_access.emplace_back(AccessType::CREATE_DICTIONARY, create.database, create.table);
+        required_access.emplace_back(AccessType::CREATE_DICTIONARY, create.getDatabase(), create.getTable());
     }
     else if (create.isView())
     {
         assert(!create.temporary);
         if (create.replace_view)
-            required_access.emplace_back(AccessType::DROP_VIEW | AccessType::CREATE_VIEW, create.database, create.table);
+            required_access.emplace_back(AccessType::DROP_VIEW | AccessType::CREATE_VIEW, create.getDatabase(), create.getTable());
         else
-            required_access.emplace_back(AccessType::CREATE_VIEW, create.database, create.table);
+            required_access.emplace_back(AccessType::CREATE_VIEW, create.getDatabase(), create.getTable());
     }
     else
     {
@@ -1355,8 +1355,8 @@ AccessRightsElements InterpreterCreateQuery::getRequiredAccess() const
         else
         {
             if (create.replace_table)
-                required_access.emplace_back(AccessType::DROP_TABLE, create.database, create.table);
-            required_access.emplace_back(AccessType::CREATE_TABLE, create.database, create.table);
+                required_access.emplace_back(AccessType::DROP_TABLE, create.getDatabase(), create.getTable());
+            required_access.emplace_back(AccessType::CREATE_TABLE, create.getDatabase(), create.getTable());
         }
     }
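The pattern running through this whole file: `database`/`table` are now AST pointers instead of Strings, so the "is it set" test becomes a null check and all reads/writes go through accessors. A minimal standalone sketch of that contract (toy types, not the real ASTCreateQuery):

```cpp
// Toy model of the String -> ASTPtr migration (invented names).
#include <cassert>
#include <memory>
#include <string>

struct Ident { std::string name; };

struct CreateQuery
{
    std::shared_ptr<Ident> database;   // was: String database;

    std::string getDatabase() const { return database ? database->name : ""; }
    void setDatabase(const std::string & name) { database = std::make_shared<Ident>(Ident{name}); }
};

int main()
{
    CreateQuery create;
    assert(!create.database);            // was: create.database.empty()
    create.setDatabase("default");       // was: create.database = current_database
    assert(create.getDatabase() == "default");
}
```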
diff --git a/src/Interpreters/InterpreterDropQuery.cpp b/src/Interpreters/InterpreterDropQuery.cpp
index 03ec34fb6be..ce54ecc529a 100644
--- a/src/Interpreters/InterpreterDropQuery.cpp
+++ b/src/Interpreters/InterpreterDropQuery.cpp
@@ -56,9 +56,9 @@ BlockIO InterpreterDropQuery::execute()
     if (getContext()->getSettingsRef().database_atomic_wait_for_drop_and_detach_synchronously)
         drop.no_delay = true;
 
-    if (!drop.table.empty())
+    if (drop.table)
         return executeToTable(drop);
-    else if (!drop.database.empty())
+    else if (drop.database)
         return executeToDatabase(drop);
     else
         throw Exception("Nothing to drop, both names are empty", ErrorCodes::LOGICAL_ERROR);
@@ -95,7 +95,7 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP
         if (getContext()->tryResolveStorageID(table_id, Context::ResolveExternal))
             return executeToTemporaryTable(table_id.getTableName(), query.kind);
         else
-            query.database = table_id.database_name = getContext()->getCurrentDatabase();
+            query.setDatabase(table_id.database_name = getContext()->getCurrentDatabase());
     }
 
     if (query.temporary)
@@ -130,7 +130,7 @@ BlockIO InterpreterDropQuery::executeToTableImpl(ASTDropQuery & query, DatabaseP
         table_id.uuid = database->tryGetTableUUID(table_id.table_name);
 
         /// Prevents recursive drop from drop database query. The original query must specify a table.
-        bool is_drop_or_detach_database = query_ptr->as<ASTDropQuery>()->table.empty();
+        bool is_drop_or_detach_database = !query_ptr->as<ASTDropQuery>()->table;
         bool is_replicated_ddl_query = typeid_cast<DatabaseReplicated *>(database.get()) &&
                                        !getContext()->getClientInfo().is_replicated_database_internal &&
                                        !is_drop_or_detach_database;
@@ -302,7 +302,7 @@ BlockIO InterpreterDropQuery::executeToDatabase(const ASTDropQuery & query)
 
 BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query, DatabasePtr & database, std::vector<UUID> & uuids_to_wait)
 {
-    const auto & database_name = query.database;
+    const auto & database_name = query.getDatabase();
     auto ddl_guard = DatabaseCatalog::instance().getDDLGuard(database_name, "");
 
     database = tryGetDatabase(database_name, query.if_exists);
@@ -336,7 +336,7 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query,
             ASTDropQuery query_for_table;
             query_for_table.kind = query.kind;
             query_for_table.if_exists = true;
-            query_for_table.database = database_name;
+            query_for_table.setDatabase(database_name);
             query_for_table.no_delay = query.no_delay;
 
             /// Flush should not be done if shouldBeEmptyOnDetach() == false,
@@ -351,7 +351,7 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query,
             {
                 DatabasePtr db;
                 UUID table_to_wait = UUIDHelpers::Nil;
-                query_for_table.table = iterator->name();
+                query_for_table.setTable(iterator->name());
                 query_for_table.is_dictionary = iterator->table()->isDictionary();
                 executeToTableImpl(query_for_table, db, table_to_wait);
                 uuids_to_wait.push_back(table_to_wait);
@@ -385,29 +385,29 @@ AccessRightsElements InterpreterDropQuery::getRequiredAccessForDDLOnCluster() co
     AccessRightsElements required_access;
     const auto & drop = query_ptr->as<const ASTDropQuery &>();
 
-    if (drop.table.empty())
+    if (!drop.table)
     {
         if (drop.kind == ASTDropQuery::Kind::Detach)
-            required_access.emplace_back(AccessType::DROP_DATABASE, drop.database);
+            required_access.emplace_back(AccessType::DROP_DATABASE, drop.getDatabase());
         else if (drop.kind == ASTDropQuery::Kind::Drop)
-            required_access.emplace_back(AccessType::DROP_DATABASE, drop.database);
+            required_access.emplace_back(AccessType::DROP_DATABASE, drop.getDatabase());
     }
     else if (drop.is_dictionary)
     {
         if (drop.kind == ASTDropQuery::Kind::Detach)
-            required_access.emplace_back(AccessType::DROP_DICTIONARY, drop.database, drop.table);
+            required_access.emplace_back(AccessType::DROP_DICTIONARY, drop.getDatabase(), drop.getTable());
         else if (drop.kind == ASTDropQuery::Kind::Drop)
-            required_access.emplace_back(AccessType::DROP_DICTIONARY, drop.database, drop.table);
+            required_access.emplace_back(AccessType::DROP_DICTIONARY, drop.getDatabase(), drop.getTable());
     }
     else if (!drop.temporary)
    {
         /// It can be view or table.
         if (drop.kind == ASTDropQuery::Kind::Drop)
-            required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.database, drop.table);
+            required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.getDatabase(), drop.getTable());
         else if (drop.kind == ASTDropQuery::Kind::Truncate)
-            required_access.emplace_back(AccessType::TRUNCATE, drop.database, drop.table);
+            required_access.emplace_back(AccessType::TRUNCATE, drop.getDatabase(), drop.getTable());
         else if (drop.kind == ASTDropQuery::Kind::Detach)
-            required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.database, drop.table);
+            required_access.emplace_back(AccessType::DROP_TABLE | AccessType::DROP_VIEW, drop.getDatabase(), drop.getTable());
     }
 
     return required_access;
@@ -424,8 +424,8 @@ void InterpreterDropQuery::executeDropQuery(ASTDropQuery::Kind kind, ContextPtr
     {
         /// We create and execute `drop` query for internal table.
         auto drop_query = std::make_shared<ASTDropQuery>();
-        drop_query->database = target_table_id.database_name;
-        drop_query->table = target_table_id.table_name;
+        drop_query->setDatabase(target_table_id.database_name);
+        drop_query->setTable(target_table_id.table_name);
         drop_query->kind = kind;
         drop_query->no_delay = no_delay;
         drop_query->if_exists = true;
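Note that `executeToDatabaseImpl()` reuses one `query_for_table` object for every table in the database, so `setTable()` must replace the previous identifier child rather than accumulate children. A toy illustration of that invariant (simplified types, not the real AST classes):

```cpp
// Why the setter erases the old child before installing a new one.
#include <cassert>
#include <memory>
#include <string>
#include <vector>

struct Id { std::string name; };

struct DropQuery
{
    std::shared_ptr<Id> table;
    std::vector<std::shared_ptr<Id>> children;

    void setTable(const std::string & name)
    {
        if (table)
        {
            std::erase(children, table);   // drop the stale child (C++20)
            table.reset();
        }
        if (!name.empty())
        {
            table = std::make_shared<Id>(Id{name});
            children.push_back(table);
        }
    }
};

int main()
{
    DropQuery query_for_table;
    for (const std::string & name : {"t1", "t2", "t3"})
        query_for_table.setTable(name);              // reused across iterations

    assert(query_for_table.children.size() == 1);    // no leftover children
    assert(query_for_table.table->name == "t3");
}
```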
diff --git a/src/Interpreters/InterpreterExistsQuery.cpp b/src/Interpreters/InterpreterExistsQuery.cpp
index 5af51c61b29..758c6d81407 100644
--- a/src/Interpreters/InterpreterExistsQuery.cpp
+++ b/src/Interpreters/InterpreterExistsQuery.cpp
@@ -44,25 +44,25 @@ QueryPipeline InterpreterExistsQuery::executeImpl()
         if (exists_query->temporary)
         {
             result = static_cast<bool>(getContext()->tryResolveStorageID(
-                {"", exists_query->table}, Context::ResolveExternal));
+                {"", exists_query->getTable()}, Context::ResolveExternal));
         }
         else
         {
-            String database = getContext()->resolveDatabase(exists_query->database);
-            getContext()->checkAccess(AccessType::SHOW_TABLES, database, exists_query->table);
-            result = DatabaseCatalog::instance().isTableExist({database, exists_query->table}, getContext());
+            String database = getContext()->resolveDatabase(exists_query->getDatabase());
+            getContext()->checkAccess(AccessType::SHOW_TABLES, database, exists_query->getTable());
+            result = DatabaseCatalog::instance().isTableExist({database, exists_query->getTable()}, getContext());
         }
     }
     else if ((exists_query = query_ptr->as<ASTExistsViewQuery>()))
     {
-        String database = getContext()->resolveDatabase(exists_query->database);
-        getContext()->checkAccess(AccessType::SHOW_TABLES, database, exists_query->table);
-        auto table = DatabaseCatalog::instance().tryGetTable({database, exists_query->table}, getContext());
+        String database = getContext()->resolveDatabase(exists_query->getDatabase());
+        getContext()->checkAccess(AccessType::SHOW_TABLES, database, exists_query->getTable());
+        auto table = DatabaseCatalog::instance().tryGetTable({database, exists_query->getTable()}, getContext());
         result = table && table->isView();
     }
     else if ((exists_query = query_ptr->as<ASTExistsDatabaseQuery>()))
     {
-        String database = getContext()->resolveDatabase(exists_query->database);
+        String database = getContext()->resolveDatabase(exists_query->getDatabase());
         getContext()->checkAccess(AccessType::SHOW_DATABASES, database);
         result = DatabaseCatalog::instance().isDatabaseExist(database);
     }
@@ -70,9 +70,9 @@ QueryPipeline InterpreterExistsQuery::executeImpl()
     {
         if (exists_query->temporary)
             throw Exception("Temporary dictionaries are not possible.", ErrorCodes::SYNTAX_ERROR);
-        String database = getContext()->resolveDatabase(exists_query->database);
-        getContext()->checkAccess(AccessType::SHOW_DICTIONARIES, database, exists_query->table);
-        result = DatabaseCatalog::instance().isDictionaryExist({database, exists_query->table});
+        String database = getContext()->resolveDatabase(exists_query->getDatabase());
+        getContext()->checkAccess(AccessType::SHOW_DICTIONARIES, database, exists_query->getTable());
+        result = DatabaseCatalog::instance().isDictionaryExist({database, exists_query->getTable()});
     }
 
     return QueryPipeline(std::make_shared<SourceFromSingleChunk>(Block{{
diff --git a/src/Interpreters/InterpreterInsertQuery.cpp b/src/Interpreters/InterpreterInsertQuery.cpp
index 231eb15b02f..99b68897549 100644
--- a/src/Interpreters/InterpreterInsertQuery.cpp
+++ b/src/Interpreters/InterpreterInsertQuery.cpp
@@ -62,7 +62,18 @@ StoragePtr InterpreterInsertQuery::getTable(ASTInsertQuery & query)
         return table_function_ptr->execute(query.table_function, getContext(), table_function_ptr->getName());
     }
 
-    query.table_id = getContext()->resolveStorageID(query.table_id);
+    if (query.table_id)
+    {
+        query.table_id = getContext()->resolveStorageID(query.table_id);
+    }
+    else
+    {
+        /// Insert query parser does not fill table_id because table and
+        /// database can be parameters and be filled after parsing.
+        StorageID local_table_id(query.getDatabase(), query.getTable());
+        query.table_id = getContext()->resolveStorageID(local_table_id);
+    }
+
     return DatabaseCatalog::instance().getTable(query.table_id, getContext());
 }
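The INSERT hunk adds a fallback path: when the parser left `table_id` empty (the names may arrive as query parameters), the id is rebuilt from the AST parts. A toy version of the resolution step (invented names; the real `Context::resolveStorageID` does considerably more):

```cpp
// Substitute the session's current database for an empty database name.
#include <cassert>
#include <string>

struct StorageID { std::string database, table; };

StorageID resolveStorageID(StorageID id, const std::string & current_database)
{
    if (id.database.empty())
        id.database = current_database;
    return id;
}

int main()
{
    StorageID local{"", "visits"};   // INSERT INTO visits ...
    StorageID resolved = resolveStorageID(local, "default");
    assert(resolved.database == "default" && resolved.table == "visits");
}
```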
diff --git a/src/Interpreters/InterpreterOptimizeQuery.cpp b/src/Interpreters/InterpreterOptimizeQuery.cpp
index a44a49ec020..f9a701a0a77 100644
--- a/src/Interpreters/InterpreterOptimizeQuery.cpp
+++ b/src/Interpreters/InterpreterOptimizeQuery.cpp
@@ -79,7 +79,7 @@ AccessRightsElements InterpreterOptimizeQuery::getRequiredAccess() const
 {
     const auto & optimize = query_ptr->as<const ASTOptimizeQuery &>();
     AccessRightsElements required_access;
-    required_access.emplace_back(AccessType::OPTIMIZE, optimize.database, optimize.table);
+    required_access.emplace_back(AccessType::OPTIMIZE, optimize.getDatabase(), optimize.getTable());
     return required_access;
 }
diff --git a/src/Interpreters/InterpreterShowCreateQuery.cpp b/src/Interpreters/InterpreterShowCreateQuery.cpp
index 84dbae0fac5..6ae51c1ff23 100644
--- a/src/Interpreters/InterpreterShowCreateQuery.cpp
+++ b/src/Interpreters/InterpreterShowCreateQuery.cpp
@@ -64,26 +64,26 @@ QueryPipeline InterpreterShowCreateQuery::executeImpl()
         {
             if (!ast_create_query.isView())
                 throw Exception(ErrorCodes::BAD_ARGUMENTS, "{}.{} is not a VIEW",
-                    backQuote(ast_create_query.database), backQuote(ast_create_query.table));
+                    backQuote(ast_create_query.getDatabase()), backQuote(ast_create_query.getTable()));
         }
         else if (is_dictionary)
         {
             if (!ast_create_query.is_dictionary)
                 throw Exception(ErrorCodes::BAD_ARGUMENTS, "{}.{} is not a DICTIONARY",
-                    backQuote(ast_create_query.database), backQuote(ast_create_query.table));
+                    backQuote(ast_create_query.getDatabase()), backQuote(ast_create_query.getTable()));
         }
     }
     else if ((show_query = query_ptr->as<ASTShowCreateDatabaseQuery>()))
     {
         if (show_query->temporary)
             throw Exception("Temporary databases are not possible.", ErrorCodes::SYNTAX_ERROR);
-        show_query->database = getContext()->resolveDatabase(show_query->database);
-        getContext()->checkAccess(AccessType::SHOW_DATABASES, show_query->database);
-        create_query = DatabaseCatalog::instance().getDatabase(show_query->database)->getCreateDatabaseQuery();
+        show_query->setDatabase(getContext()->resolveDatabase(show_query->getDatabase()));
+        getContext()->checkAccess(AccessType::SHOW_DATABASES, show_query->getDatabase());
+        create_query = DatabaseCatalog::instance().getDatabase(show_query->getDatabase())->getCreateDatabaseQuery();
     }
 
     if (!create_query)
-        throw Exception("Unable to show the create query of " + show_query->table + ". Maybe it was created by the system.", ErrorCodes::THERE_IS_NO_QUERY);
+        throw Exception("Unable to show the create query of " + show_query->getTable() + ". Maybe it was created by the system.", ErrorCodes::THERE_IS_NO_QUERY);
 
     if (!getContext()->getSettingsRef().show_table_uuid_in_table_create_query_if_not_nil)
     {
diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp
index 69bf036ae97..1610ae56280 100644
--- a/src/Interpreters/InterpreterSystemQuery.cpp
+++ b/src/Interpreters/InterpreterSystemQuery.cpp
@@ -217,12 +217,12 @@ BlockIO InterpreterSystemQuery::execute()
     /// Make canonical query for simpler processing
     if (query.type == Type::RELOAD_DICTIONARY)
     {
-        if (!query.database.empty())
-            query.table = query.database + "." + query.table;
+        if (query.database)
+            query.setTable(query.getDatabase() + "." + query.getTable());
     }
-    else if (!query.table.empty())
+    else if (query.table)
     {
-        table_id = getContext()->resolveStorageID(StorageID(query.database, query.table), Context::ResolveOrdinary);
+        table_id = getContext()->resolveStorageID(StorageID(query.getDatabase(), query.getTable()), Context::ResolveOrdinary);
     }
 
@@ -302,7 +302,7 @@ BlockIO InterpreterSystemQuery::execute()
             getContext()->checkAccess(AccessType::SYSTEM_RELOAD_DICTIONARY);
 
             auto & external_dictionaries_loader = system_context->getExternalDictionariesLoader();
-            external_dictionaries_loader.reloadDictionary(query.table, getContext());
+            external_dictionaries_loader.reloadDictionary(query.getTable(), getContext());
 
             ExternalDictionariesLoader::resetAll();
             break;
@@ -594,10 +594,10 @@ void InterpreterSystemQuery::dropReplica(ASTSystemQuery & query)
         if (!dropReplicaImpl(query, table))
             throw Exception(ErrorCodes::BAD_ARGUMENTS, table_is_not_replicated.data(), table_id.getNameForLogs());
     }
-    else if (!query.database.empty())
+    else if (query.database)
    {
-        getContext()->checkAccess(AccessType::SYSTEM_DROP_REPLICA, query.database);
-        DatabasePtr database = DatabaseCatalog::instance().getDatabase(query.database);
+        getContext()->checkAccess(AccessType::SYSTEM_DROP_REPLICA, query.getDatabase());
+        DatabasePtr database = DatabaseCatalog::instance().getDatabase(query.getDatabase());
         for (auto iterator = database->getTablesIterator(getContext()); iterator->isValid(); iterator->next())
             dropReplicaImpl(query, iterator->table());
         LOG_TRACE(log, "Dropped replica {} from database {}", query.replica, backQuoteIfNeed(database->getDatabaseName()));
@@ -790,84 +790,84 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster()
         case Type::STOP_MERGES: [[fallthrough]];
         case Type::START_MERGES:
         {
-            if (query.table.empty())
+            if (!query.table)
                 required_access.emplace_back(AccessType::SYSTEM_MERGES);
             else
-                required_access.emplace_back(AccessType::SYSTEM_MERGES, query.database, query.table);
+                required_access.emplace_back(AccessType::SYSTEM_MERGES, query.getDatabase(), query.getTable());
             break;
         }
         case Type::STOP_TTL_MERGES: [[fallthrough]];
         case Type::START_TTL_MERGES:
         {
-            if (query.table.empty())
+            if (!query.table)
                 required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES);
             else
-                required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES, query.database, query.table);
+                required_access.emplace_back(AccessType::SYSTEM_TTL_MERGES, query.getDatabase(), query.getTable());
             break;
         }
         case Type::STOP_MOVES: [[fallthrough]];
         case Type::START_MOVES:
         {
-            if (query.table.empty())
+            if (!query.table)
                 required_access.emplace_back(AccessType::SYSTEM_MOVES);
             else
-                required_access.emplace_back(AccessType::SYSTEM_MOVES, query.database, query.table);
+                required_access.emplace_back(AccessType::SYSTEM_MOVES, query.getDatabase(), query.getTable());
             break;
         }
         case Type::STOP_FETCHES: [[fallthrough]];
         case Type::START_FETCHES:
         {
-            if (query.table.empty())
+            if (!query.table)
                 required_access.emplace_back(AccessType::SYSTEM_FETCHES);
             else
-                required_access.emplace_back(AccessType::SYSTEM_FETCHES, query.database, query.table);
+                required_access.emplace_back(AccessType::SYSTEM_FETCHES, query.getDatabase(), query.getTable());
             break;
         }
         case Type::STOP_DISTRIBUTED_SENDS: [[fallthrough]];
         case Type::START_DISTRIBUTED_SENDS:
         {
-            if (query.table.empty())
+            if (!query.table)
                 required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS);
             else
-                required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS, query.database, query.table);
+                required_access.emplace_back(AccessType::SYSTEM_DISTRIBUTED_SENDS, query.getDatabase(), query.getTable());
             break;
         }
         case Type::STOP_REPLICATED_SENDS: [[fallthrough]];
         case Type::START_REPLICATED_SENDS:
         {
-            if (query.table.empty())
+            if (!query.table)
                 required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS);
             else
-                required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS, query.database, query.table);
+                required_access.emplace_back(AccessType::SYSTEM_REPLICATED_SENDS, query.getDatabase(), query.getTable());
             break;
         }
         case Type::STOP_REPLICATION_QUEUES: [[fallthrough]];
         case Type::START_REPLICATION_QUEUES:
         {
-            if (query.table.empty())
+            if (!query.table)
                 required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES);
             else
-                required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES, query.database, query.table);
+                required_access.emplace_back(AccessType::SYSTEM_REPLICATION_QUEUES, query.getDatabase(), query.getTable());
             break;
         }
         case Type::DROP_REPLICA:
         {
-            required_access.emplace_back(AccessType::SYSTEM_DROP_REPLICA, query.database, query.table);
+            required_access.emplace_back(AccessType::SYSTEM_DROP_REPLICA, query.getDatabase(), query.getTable());
             break;
         }
         case Type::RESTORE_REPLICA:
         {
-            required_access.emplace_back(AccessType::SYSTEM_RESTORE_REPLICA, query.database, query.table);
+            required_access.emplace_back(AccessType::SYSTEM_RESTORE_REPLICA, query.getDatabase(), query.getTable());
             break;
         }
         case Type::SYNC_REPLICA:
         {
-            required_access.emplace_back(AccessType::SYSTEM_SYNC_REPLICA, query.database, query.table);
+            required_access.emplace_back(AccessType::SYSTEM_SYNC_REPLICA, query.getDatabase(), query.getTable());
             break;
         }
         case Type::RESTART_REPLICA:
         {
-            required_access.emplace_back(AccessType::SYSTEM_RESTART_REPLICA, query.database, query.table);
+            required_access.emplace_back(AccessType::SYSTEM_RESTART_REPLICA, query.getDatabase(), query.getTable());
             break;
         }
         case Type::RESTART_REPLICAS:
@@ -877,7 +877,7 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster()
         }
         case Type::FLUSH_DISTRIBUTED:
         {
-            required_access.emplace_back(AccessType::SYSTEM_FLUSH_DISTRIBUTED, query.database, query.table);
+            required_access.emplace_back(AccessType::SYSTEM_FLUSH_DISTRIBUTED, query.getDatabase(), query.getTable());
             break;
         }
         case Type::FLUSH_LOGS:
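In the SYSTEM changes above, RELOAD DICTIONARY folds the database qualifier into the table slot as a single "db.name" string, since the dictionary loader keys dictionaries by their qualified name. A tiny sketch of that canonicalization (invented helper name):

```cpp
// SYSTEM RELOAD DICTIONARY canonical name, as one string.
#include <cassert>
#include <string>

std::string canonicalDictionaryName(const std::string & database, const std::string & table)
{
    return database.empty() ? table : database + "." + table;
}

int main()
{
    assert(canonicalDictionaryName("", "dict") == "dict");
    assert(canonicalDictionaryName("db", "dict") == "db.dict");
}
```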
a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp
index 187c3dd0191..54b87e3bed6 100644
--- a/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp
+++ b/src/Interpreters/MySQL/InterpretersMySQLDDLQuery.cpp
@@ -449,8 +449,8 @@ ASTs InterpreterCreateImpl::getRewrittenQueries(
         String table_name = table_like->shortName();
         ASTPtr rewritten_create_ast = DatabaseCatalog::instance().getDatabase(mapped_to_database)->getCreateTableQuery(table_name, context);
         auto * create_ptr = rewritten_create_ast->as<ASTCreateQuery>();
-        create_ptr->database = mapped_to_database;
-        create_ptr->table = create_query.table;
+        create_ptr->setDatabase(mapped_to_database);
+        create_ptr->setTable(create_query.table);
         create_ptr->uuid = UUIDHelpers::generateV4();
         create_ptr->if_not_exists = create_query.if_not_exists;
         return ASTs{rewritten_create_ast};
@@ -513,8 +513,8 @@ ASTs InterpreterCreateImpl::getRewrittenQueries(
         storage->set(storage->engine, makeASTFunction("ReplacingMergeTree", std::make_shared<ASTIdentifier>(version_column_name)));
 
-    rewritten_query->database = mapped_to_database;
-    rewritten_query->table = create_query.table;
+    rewritten_query->setDatabase(mapped_to_database);
+    rewritten_query->setTable(create_query.table);
     rewritten_query->if_not_exists = create_query.if_not_exists;
     rewritten_query->set(rewritten_query->storage, storage);
     rewritten_query->set(rewritten_query->columns_list, columns);
@@ -529,14 +529,14 @@ void InterpreterDropImpl::validate(const InterpreterDropImpl::TQuery & /*query*/
 ASTs InterpreterDropImpl::getRewrittenQueries(
     const InterpreterDropImpl::TQuery & drop_query, ContextPtr context, const String & mapped_to_database, const String & mysql_database)
 {
-    const auto & database_name = resolveDatabase(drop_query.database, mysql_database, mapped_to_database, context);
+    const auto & database_name = resolveDatabase(drop_query.getDatabase(), mysql_database, mapped_to_database, context);
 
     /// Skip drop database|view|dictionary
-    if (database_name != mapped_to_database || drop_query.table.empty() || drop_query.is_view || drop_query.is_dictionary)
+    if (database_name != mapped_to_database || !drop_query.table || drop_query.is_view || drop_query.is_dictionary)
         return {};
 
     ASTPtr rewritten_query = drop_query.clone();
-    rewritten_query->as<ASTDropQuery>()->database = mapped_to_database;
+    rewritten_query->as<ASTDropQuery>()->setDatabase(mapped_to_database);
 
     return ASTs{rewritten_query};
 }
@@ -588,8 +588,8 @@ ASTs InterpreterAlterImpl::getRewrittenQueries(
     auto rewritten_alter_query = std::make_shared<ASTAlterQuery>();
     auto rewritten_rename_query = std::make_shared<ASTRenameQuery>();
-    rewritten_alter_query->database = mapped_to_database;
-    rewritten_alter_query->table = alter_query.table;
+    rewritten_alter_query->setDatabase(mapped_to_database);
+    rewritten_alter_query->setTable(alter_query.table);
     rewritten_alter_query->alter_object = ASTAlterQuery::AlterObjectType::TABLE;
     rewritten_alter_query->set(rewritten_alter_query->command_list, std::make_shared<ASTExpressionList>());
diff --git a/src/Interpreters/StorageID.cpp b/src/Interpreters/StorageID.cpp
index 2e76618e4c0..8811adc087b 100644
--- a/src/Interpreters/StorageID.cpp
+++ b/src/Interpreters/StorageID.cpp
@@ -18,8 +18,8 @@ namespace ErrorCodes
 
 StorageID::StorageID(const ASTQueryWithTableAndOutput & query)
 {
-    database_name = query.database;
-    table_name = query.table;
+    database_name = query.getDatabase();
+    table_name = query.getTable();
     uuid = query.uuid;
     assertNotEmpty();
 }
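The `StorageID` constructor above is the boundary where identifier ASTs are materialized back into Strings, with an empty table name rejected. A simplified model of that boundary (toy types and invented error handling, not the real `assertNotEmpty()`):

```cpp
#include <memory>
#include <stdexcept>
#include <string>

struct Ident { std::string name; };

struct Query
{
    std::shared_ptr<Ident> database, table;
    std::string getDatabase() const { return database ? database->name : ""; }
    std::string getTable() const { return table ? table->name : ""; }
};

struct StorageID
{
    std::string database_name, table_name;

    explicit StorageID(const Query & q)
        : database_name(q.getDatabase()), table_name(q.getTable())
    {
        if (table_name.empty())   // stands in for assertNotEmpty()
            throw std::logic_error("table name cannot be empty");
    }
};

int main()
{
    Query q;
    q.table = std::make_shared<Ident>(Ident{"hits"});
    StorageID id(q);
    return id.table_name == "hits" ? 0 : 1;
}
```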
--- a/src/Interpreters/SystemLog.h
+++ b/src/Interpreters/SystemLog.h
@@ -595,8 +595,8 @@ ASTPtr SystemLog<LogElement>::getCreateTableQuery()
 {
     auto create = std::make_shared<ASTCreateQuery>();
 
-    create->database = table_id.database_name;
-    create->table = table_id.table_name;
+    create->setDatabase(table_id.database_name);
+    create->setTable(table_id.table_name);
 
     auto ordinary_columns = LogElement::getNamesAndTypes();
     auto alias_columns = LogElement::getNamesAndAliases();
diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp
index f401f708ab1..f67eac10878 100644
--- a/src/Interpreters/executeQuery.cpp
+++ b/src/Interpreters/executeQuery.cpp
@@ -431,12 +431,6 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
                 InterpreterSetQuery(query_with_output->settings_ast, context).executeForCurrentContext();
         }
 
-        if (const auto * query_with_table_output = dynamic_cast<const ASTQueryWithTableAndOutput *>(ast.get()))
-        {
-            query_database = query_with_table_output->database;
-            query_table = query_with_table_output->table;
-        }
-
         if (auto * create_query = ast->as<ASTCreateQuery>())
         {
             if (create_query->select)
@@ -510,6 +504,12 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
             query = serializeAST(*ast);
         }
 
+        if (const auto * query_with_table_output = dynamic_cast<const ASTQueryWithTableAndOutput *>(ast.get()))
+        {
+            query_database = query_with_table_output->getDatabase();
+            query_table = query_with_table_output->getTable();
+        }
+
         /// MUST go before any modification (except for prepared statements,
         /// since it substitute parameters and w/o them query does not contain
         /// parameters), to keep query as-is in query_log and server log.
diff --git a/src/Interpreters/loadMetadata.cpp b/src/Interpreters/loadMetadata.cpp
index 96f94b96529..c5bef6f7205 100644
--- a/src/Interpreters/loadMetadata.cpp
+++ b/src/Interpreters/loadMetadata.cpp
@@ -39,7 +39,7 @@ static void executeCreateQuery(
         parser, query.data(), query.data() + query.size(), "in file " + file_name, 0, context->getSettingsRef().max_parser_depth);
 
     auto & ast_create_query = ast->as<ASTCreateQuery &>();
-    ast_create_query.database = database;
+    ast_create_query.setDatabase(database);
 
     InterpreterCreateQuery interpreter(ast, context);
     interpreter.setInternal(true);
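The `executeQuery.cpp` hunk moves the database/table extraction below the point where prepared-statement parameters are substituted. The apparent motivation: once identifiers can be query parameters, reading them before substitution would record empty names. A toy pipeline showing the ordering concern (all names invented):

```cpp
#include <cassert>
#include <map>
#include <string>

struct Ast { std::string table_param, table; };

void substituteParameters(Ast & ast, const std::map<std::string, std::string> & params)
{
    if (!ast.table_param.empty())
        ast.table = params.at(ast.table_param);
}

int main()
{
    Ast ast{.table_param = "tbl", .table = ""};
    // Reading ast.table here would yield "" for the query log.
    substituteParameters(ast, {{"tbl", "hits"}});
    assert(ast.table == "hits");   // extract for logging only after this point
}
```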
hilite_none : ""); - if (!table.empty()) + if (table) { - if (!database.empty()) + if (database) { - settings.ostr << indent_str << backQuoteIfNeed(database); + settings.ostr << indent_str << backQuoteIfNeed(getDatabase()); settings.ostr << "."; } - settings.ostr << indent_str << backQuoteIfNeed(table); + settings.ostr << indent_str << backQuoteIfNeed(getTable()); } - else if (alter_object == AlterObjectType::DATABASE && !database.empty()) + else if (alter_object == AlterObjectType::DATABASE && database) { - settings.ostr << indent_str << backQuoteIfNeed(database); + settings.ostr << indent_str << backQuoteIfNeed(getDatabase()); } formatOnCluster(settings); diff --git a/src/Parsers/ASTCheckQuery.h b/src/Parsers/ASTCheckQuery.h index fdd1179ec90..8b376ef8d60 100644 --- a/src/Parsers/ASTCheckQuery.h +++ b/src/Parsers/ASTCheckQuery.h @@ -12,13 +12,14 @@ struct ASTCheckQuery : public ASTQueryWithTableAndOutput ASTPtr partition; /** Get the text that identifies this element. */ - String getID(char delim) const override { return "CheckQuery" + (delim + database) + delim + table; } + String getID(char delim) const override { return "CheckQuery" + (delim + getDatabase()) + delim + getTable(); } ASTPtr clone() const override { auto res = std::make_shared(*this); res->children.clear(); cloneOutputOptions(*res); + cloneTableOptions(*res); return res; } @@ -32,14 +33,14 @@ protected: settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << "CHECK TABLE " << (settings.hilite ? hilite_none : ""); - if (!table.empty()) + if (table) { - if (!database.empty()) + if (database) { - settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(database) << (settings.hilite ? hilite_none : ""); + settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(getDatabase()) << (settings.hilite ? hilite_none : ""); settings.ostr << "."; } - settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(table) << (settings.hilite ? hilite_none : ""); + settings.ostr << (settings.hilite ? hilite_keyword : "") << indent_str << backQuoteIfNeed(getTable()) << (settings.hilite ? hilite_none : ""); } if (partition) diff --git a/src/Parsers/ASTCreateQuery.cpp b/src/Parsers/ASTCreateQuery.cpp index 2c048886247..608098b8d95 100644 --- a/src/Parsers/ASTCreateQuery.cpp +++ b/src/Parsers/ASTCreateQuery.cpp @@ -212,6 +212,7 @@ ASTPtr ASTCreateQuery::clone() const res->set(res->comment, comment->clone()); cloneOutputOptions(*res); + cloneTableOptions(*res); return res; } @@ -220,13 +221,13 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat { frame.need_parens = false; - if (!database.empty() && table.empty()) + if (database && !table) { settings.ostr << (settings.hilite ? hilite_keyword : "") << (attach ? "ATTACH DATABASE " : "CREATE DATABASE ") << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : "") - << backQuoteIfNeed(database); + << backQuoteIfNeed(getDatabase()); if (uuid != UUIDHelpers::Nil) { @@ -275,7 +276,7 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat << what << " " << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : "") - << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable()); if (uuid != UUIDHelpers::Nil) settings.ostr << (settings.hilite ? 
hilite_keyword : "") << " UUID " << (settings.hilite ? hilite_none : "") @@ -316,7 +317,7 @@ void ASTCreateQuery::formatQueryImpl(const FormatSettings & settings, FormatStat /// Always DICTIONARY settings.ostr << (settings.hilite ? hilite_keyword : "") << action << " DICTIONARY " << (if_not_exists ? "IF NOT EXISTS " : "") << (settings.hilite ? hilite_none : "") - << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable()); if (uuid != UUIDHelpers::Nil) settings.ostr << (settings.hilite ? hilite_keyword : "") << " UUID " << (settings.hilite ? hilite_none : "") << quoteString(toString(uuid)); diff --git a/src/Parsers/ASTCreateQuery.h b/src/Parsers/ASTCreateQuery.h index 51c4ac15e67..4c7768517b1 100644 --- a/src/Parsers/ASTCreateQuery.h +++ b/src/Parsers/ASTCreateQuery.h @@ -91,7 +91,7 @@ public: bool create_or_replace{false}; /** Get the text that identifies this element. */ - String getID(char delim) const override { return (attach ? "AttachQuery" : "CreateQuery") + (delim + database) + delim + table; } + String getID(char delim) const override { return (attach ? "AttachQuery" : "CreateQuery") + (delim + getDatabase()) + delim + getTable(); } ASTPtr clone() const override; diff --git a/src/Parsers/ASTDropQuery.cpp b/src/Parsers/ASTDropQuery.cpp index 6ea6c81c3be..9e815ee75de 100644 --- a/src/Parsers/ASTDropQuery.cpp +++ b/src/Parsers/ASTDropQuery.cpp @@ -15,11 +15,11 @@ namespace ErrorCodes String ASTDropQuery::getID(char delim) const { if (kind == ASTDropQuery::Kind::Drop) - return "DropQuery" + (delim + database) + delim + table; + return "DropQuery" + (delim + getDatabase()) + delim + getTable(); else if (kind == ASTDropQuery::Kind::Detach) - return "DetachQuery" + (delim + database) + delim + table; + return "DetachQuery" + (delim + getDatabase()) + delim + getTable(); else if (kind == ASTDropQuery::Kind::Truncate) - return "TruncateQuery" + (delim + database) + delim + table; + return "TruncateQuery" + (delim + getDatabase()) + delim + getTable(); else throw Exception("Not supported kind of drop query.", ErrorCodes::SYNTAX_ERROR); } @@ -28,6 +28,7 @@ ASTPtr ASTDropQuery::clone() const { auto res = std::make_shared(*this); cloneOutputOptions(*res); + cloneTableOptions(*res); return res; } @@ -46,7 +47,8 @@ void ASTDropQuery::formatQueryImpl(const FormatSettings & settings, FormatState if (temporary) settings.ostr << "TEMPORARY "; - if (table.empty() && !database.empty()) + + if (!table && database) settings.ostr << "DATABASE "; else if (is_dictionary) settings.ostr << "DICTIONARY "; @@ -60,10 +62,10 @@ void ASTDropQuery::formatQueryImpl(const FormatSettings & settings, FormatState settings.ostr << (settings.hilite ? hilite_none : ""); - if (table.empty() && !database.empty()) - settings.ostr << backQuoteIfNeed(database); + if (!table && database) + settings.ostr << backQuoteIfNeed(getDatabase()); else - settings.ostr << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + settings.ostr << (database ? backQuoteIfNeed(getDatabase()) + "." 
: "") << backQuoteIfNeed(getTable()); formatOnCluster(settings); diff --git a/src/Parsers/ASTIdentifier.cpp b/src/Parsers/ASTIdentifier.cpp index 58052b1419b..a889680c81f 100644 --- a/src/Parsers/ASTIdentifier.cpp +++ b/src/Parsers/ASTIdentifier.cpp @@ -201,6 +201,40 @@ String ASTTableIdentifier::getDatabaseName() const else return {}; } +ASTPtr ASTTableIdentifier::getTable() const +{ + if (name_parts.size() == 2) + { + if (!name_parts[1].empty()) + return std::make_shared(name_parts[1]); + + if (name_parts[0].empty()) + return std::make_shared("", children[1]->clone()); + else + return std::make_shared("", children[0]->clone()); + } + else if (name_parts.size() == 1) + { + if (name_parts[0].empty()) + return std::make_shared("", children[0]->clone()); + else + return std::make_shared(name_parts[0]); + } + else return {}; +} + +ASTPtr ASTTableIdentifier::getDatabase() const +{ + if (name_parts.size() == 2) + { + if (name_parts[0].empty()) + return std::make_shared("", children[0]->clone()); + else + return std::make_shared(name_parts[0]); + } + else return {}; +} + void ASTTableIdentifier::resetTable(const String & database_name, const String & table_name) { auto identifier = std::make_shared(database_name, table_name); diff --git a/src/Parsers/ASTIdentifier.h b/src/Parsers/ASTIdentifier.h index 323280e07bc..048b32e438d 100644 --- a/src/Parsers/ASTIdentifier.h +++ b/src/Parsers/ASTIdentifier.h @@ -61,6 +61,7 @@ protected: private: using ASTWithAlias::children; /// ASTIdentifier is child free + friend class ASTTableIdentifier; friend class ReplaceQueryParameterVisitor; friend struct IdentifierSemantic; friend void setIdentifierSpecial(ASTPtr & ast); @@ -83,6 +84,9 @@ public: StorageID getTableId() const; String getDatabaseName() const; + ASTPtr getTable() const; + ASTPtr getDatabase() const; + // FIXME: used only when it's needed to rewrite distributed table name to real remote table name. void resetTable(const String & database_name, const String & table_name); // TODO(ilezhankin): get rid of this diff --git a/src/Parsers/ASTInsertQuery.cpp b/src/Parsers/ASTInsertQuery.cpp index 3c8c31b85dd..c733398a32b 100644 --- a/src/Parsers/ASTInsertQuery.cpp +++ b/src/Parsers/ASTInsertQuery.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -15,6 +16,35 @@ namespace ErrorCodes extern const int INVALID_USAGE_OF_INPUT; } +String ASTInsertQuery::getDatabase() const +{ + String name; + tryGetIdentifierNameInto(database, name); + return name; +} + +String ASTInsertQuery::getTable() const +{ + String name; + tryGetIdentifierNameInto(table, name); + return name; +} + +void ASTInsertQuery::setDatabase(const String & name) +{ + if (name.empty()) + database.reset(); + else + database = std::make_shared(name); +} + +void ASTInsertQuery::setTable(const String & name) +{ + if (name.empty()) + table.reset(); + else + table = std::make_shared(name); +} void ASTInsertQuery::formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { @@ -31,9 +61,16 @@ void ASTInsertQuery::formatImpl(const FormatSettings & settings, FormatState & s partition_by->formatImpl(settings, state, frame); } } - else + else if (table_id) + { settings.ostr << (settings.hilite ? hilite_none : "") << (!table_id.database_name.empty() ? backQuoteIfNeed(table_id.database_name) + "." : "") << backQuoteIfNeed(table_id.table_name); + } + else + { + settings.ostr << (settings.hilite ? hilite_none : "") + << (database ? backQuoteIfNeed(getDatabase()) + "." 
: "") << backQuoteIfNeed(getTable()); + } if (columns) { diff --git a/src/Parsers/ASTInsertQuery.h b/src/Parsers/ASTInsertQuery.h index 81925ec2e66..d539ad5fdb3 100644 --- a/src/Parsers/ASTInsertQuery.h +++ b/src/Parsers/ASTInsertQuery.h @@ -13,6 +13,10 @@ class ASTInsertQuery : public IAST { public: StorageID table_id = StorageID::createEmpty(); + + ASTPtr database; + ASTPtr table; + ASTPtr columns; String format; ASTPtr table_function; @@ -31,6 +35,12 @@ public: /// Data from buffer to insert after inlined one - may be nullptr. ReadBuffer * tail = nullptr; + String getDatabase() const; + String getTable() const; + + void setDatabase(const String & name); + void setTable(const String & name); + bool hasInlinedData() const { return data || tail; } /// Try to find table function input() in SELECT part @@ -44,6 +54,8 @@ public: auto res = std::make_shared(*this); res->children.clear(); + if (database) { res->database = database->clone(); res->children.push_back(res->database); } + if (table) { res->table = table->clone(); res->children.push_back(res->table); } if (columns) { res->columns = columns->clone(); res->children.push_back(res->columns); } if (select) { res->select = select->clone(); res->children.push_back(res->select); } if (watch) { res->watch = watch->clone(); res->children.push_back(res->watch); } diff --git a/src/Parsers/ASTOptimizeQuery.cpp b/src/Parsers/ASTOptimizeQuery.cpp index 6423e247ecc..720c7699fb6 100644 --- a/src/Parsers/ASTOptimizeQuery.cpp +++ b/src/Parsers/ASTOptimizeQuery.cpp @@ -8,7 +8,7 @@ namespace DB void ASTOptimizeQuery::formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const { settings.ostr << (settings.hilite ? hilite_keyword : "") << "OPTIMIZE TABLE " << (settings.hilite ? hilite_none : "") - << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + << (database ? backQuoteIfNeed(getDatabase()) + "." : "") << backQuoteIfNeed(getTable()); formatOnCluster(settings); diff --git a/src/Parsers/ASTOptimizeQuery.h b/src/Parsers/ASTOptimizeQuery.h index f4981d156c7..cac2ef1c6d0 100644 --- a/src/Parsers/ASTOptimizeQuery.h +++ b/src/Parsers/ASTOptimizeQuery.h @@ -25,7 +25,7 @@ public: /** Get the text that identifies this element. */ String getID(char delim) const override { - return "OptimizeQuery" + (delim + database) + delim + table + (final ? "_final" : "") + (deduplicate ? "_deduplicate" : ""); + return "OptimizeQuery" + (delim + getDatabase()) + delim + getTable() + (final ? "_final" : "") + (deduplicate ? 
"_deduplicate" : ""); } ASTPtr clone() const override diff --git a/src/Parsers/ASTQueryWithOnCluster.h b/src/Parsers/ASTQueryWithOnCluster.h index e053b50ffb2..b309ae5e847 100644 --- a/src/Parsers/ASTQueryWithOnCluster.h +++ b/src/Parsers/ASTQueryWithOnCluster.h @@ -39,8 +39,8 @@ protected: T & query = static_cast(*query_ptr); query.cluster.clear(); - if (query.database.empty()) - query.database = new_database; + if (!query.database) + query.setDatabase(new_database); return query_ptr; } diff --git a/src/Parsers/ASTQueryWithTableAndOutput.cpp b/src/Parsers/ASTQueryWithTableAndOutput.cpp index d44ba988d7a..a216aeaa11f 100644 --- a/src/Parsers/ASTQueryWithTableAndOutput.cpp +++ b/src/Parsers/ASTQueryWithTableAndOutput.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -6,10 +7,67 @@ namespace DB { +String ASTQueryWithTableAndOutput::getDatabase() const +{ + String name; + tryGetIdentifierNameInto(database, name); + return name; +} + +String ASTQueryWithTableAndOutput::getTable() const +{ + String name; + tryGetIdentifierNameInto(table, name); + return name; +} + +void ASTQueryWithTableAndOutput::setDatabase(const String & name) +{ + if (database) + { + std::erase(children, database); + database.reset(); + } + + if (!name.empty()) + { + database = std::make_shared(name); + children.push_back(database); + } +} + +void ASTQueryWithTableAndOutput::setTable(const String & name) +{ + if (table) + { + std::erase(children, table); + table.reset(); + } + + if (!name.empty()) + { + table = std::make_shared(name); + children.push_back(table); + } +} + +void ASTQueryWithTableAndOutput::cloneTableOptions(ASTQueryWithTableAndOutput & cloned) const +{ + if (database) + { + cloned.database = database->clone(); + cloned.children.push_back(cloned.database); + } + if (table) + { + cloned.table = table->clone(); + cloned.children.push_back(cloned.table); + } +} void ASTQueryWithTableAndOutput::formatHelper(const FormatSettings & settings, const char * name) const { settings.ostr << (settings.hilite ? hilite_keyword : "") << name << " " << (settings.hilite ? hilite_none : ""); - settings.ostr << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + settings.ostr << (database ? backQuoteIfNeed(getDatabase()) + "." 
: "") << backQuoteIfNeed(getTable()); } } diff --git a/src/Parsers/ASTQueryWithTableAndOutput.h b/src/Parsers/ASTQueryWithTableAndOutput.h index 03f5fa7bf22..233028c5023 100644 --- a/src/Parsers/ASTQueryWithTableAndOutput.h +++ b/src/Parsers/ASTQueryWithTableAndOutput.h @@ -14,11 +14,21 @@ namespace DB class ASTQueryWithTableAndOutput : public ASTQueryWithOutput { public: - String database; - String table; + ASTPtr database; + ASTPtr table; + UUID uuid = UUIDHelpers::Nil; bool temporary{false}; + String getDatabase() const; + String getTable() const; + + // Once database or table are set they cannot be assigned with empty value + void setDatabase(const String & name); + void setTable(const String & name); + + void cloneTableOptions(ASTQueryWithTableAndOutput & cloned) const; + protected: void formatHelper(const FormatSettings & settings, const char * name) const; }; @@ -28,13 +38,14 @@ template class ASTQueryWithTableAndOutputImpl : public ASTQueryWithTableAndOutput { public: - String getID(char delim) const override { return AstIDAndQueryNames::ID + (delim + database) + delim + table; } + String getID(char delim) const override { return AstIDAndQueryNames::ID + (delim + getDatabase()) + delim + getTable(); } ASTPtr clone() const override { auto res = std::make_shared>(*this); res->children.clear(); cloneOutputOptions(*res); + cloneTableOptions(*res); return res; } diff --git a/src/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp index ba8e49b98ca..b8056862bfc 100644 --- a/src/Parsers/ASTSystemQuery.cpp +++ b/src/Parsers/ASTSystemQuery.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -39,6 +40,50 @@ const char * ASTSystemQuery::typeToString(Type type) return type_name.data(); } +String ASTSystemQuery::getDatabase() const +{ + String name; + tryGetIdentifierNameInto(database, name); + return name; +} + +String ASTSystemQuery::getTable() const +{ + String name; + tryGetIdentifierNameInto(table, name); + return name; +} + +void ASTSystemQuery::setDatabase(const String & name) +{ + if (database) + { + std::erase(children, database); + database.reset(); + } + + if (!name.empty()) + { + database = std::make_shared(name); + children.push_back(database); + } +} + +void ASTSystemQuery::setTable(const String & name) +{ + if (table) + { + std::erase(children, table); + table.reset(); + } + + if (!name.empty()) + { + table = std::make_shared(name); + children.push_back(table); + } +} + void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const { settings.ostr << (settings.hilite ? hilite_keyword : "") << "SYSTEM "; @@ -47,19 +92,19 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, auto print_database_table = [&] { settings.ostr << " "; - if (!database.empty()) + if (database) { - settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(database) + settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(getDatabase()) << (settings.hilite ? hilite_none : "") << "."; } - settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(table) + settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(getTable()) << (settings.hilite ? hilite_none : ""); }; auto print_drop_replica = [&] { settings.ostr << " " << quoteString(replica); - if (!table.empty()) + if (table) { settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM TABLE" << (settings.hilite ? 
hilite_none : ""); @@ -70,11 +115,11 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM ZKPATH " << (settings.hilite ? hilite_none : "") << quoteString(replica_zk_path); } - else if (!database.empty()) + else if (database) { settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM DATABASE " << (settings.hilite ? hilite_none : ""); - settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(database) + settings.ostr << (settings.hilite ? hilite_identifier : "") << backQuoteIfNeed(getDatabase()) << (settings.hilite ? hilite_none : ""); } }; @@ -107,7 +152,7 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState &, || type == Type::STOP_DISTRIBUTED_SENDS || type == Type::START_DISTRIBUTED_SENDS) { - if (!table.empty()) + if (table) print_database_table(); else if (!volume.empty()) print_on_volume(); diff --git a/src/Parsers/ASTSystemQuery.h b/src/Parsers/ASTSystemQuery.h index 419c22dd0d5..16f8a3c118a 100644 --- a/src/Parsers/ASTSystemQuery.h +++ b/src/Parsers/ASTSystemQuery.h @@ -70,10 +70,17 @@ public: Type type = Type::UNKNOWN; + ASTPtr database; + ASTPtr table; + + String getDatabase() const; + String getTable() const; + + void setDatabase(const String & name); + void setTable(const String & name); + String target_model; String target_function; - String database; - String table; String replica; String replica_zk_path; bool is_drop_whole_replica{}; @@ -84,7 +91,16 @@ public: String getID(char) const override { return "SYSTEM query"; } - ASTPtr clone() const override { return std::make_shared(*this); } + ASTPtr clone() const override + { + auto res = std::make_shared(*this); + res->children.clear(); + + if (database) { res->database = database->clone(); res->children.push_back(res->database); } + if (table) { res->table = table->clone(); res->children.push_back(res->table); } + + return res; + } ASTPtr getRewrittenASTWithoutOnCluster(const std::string & new_database) const override { diff --git a/src/Parsers/ASTWatchQuery.h b/src/Parsers/ASTWatchQuery.h index 7db6443e024..ea02c18a51d 100644 --- a/src/Parsers/ASTWatchQuery.h +++ b/src/Parsers/ASTWatchQuery.h @@ -26,13 +26,14 @@ public: bool is_watch_events; ASTWatchQuery() = default; - String getID(char) const override { return "WatchQuery_" + database + "_" + table; } + String getID(char) const override { return "WatchQuery_" + getDatabase() + "_" + getTable(); } ASTPtr clone() const override { std::shared_ptr res = std::make_shared(*this); res->children.clear(); cloneOutputOptions(*res); + cloneTableOptions(*res); return res; } @@ -42,7 +43,7 @@ protected: std::string indent_str = s.one_line ? "" : std::string(4 * frame.indent, ' '); s.ostr << (s.hilite ? hilite_keyword : "") << "WATCH " << (s.hilite ? hilite_none : "") - << (!database.empty() ? backQuoteIfNeed(database) + "." : "") << backQuoteIfNeed(table); + << (database ? backQuoteIfNeed(getDatabase()) + "." 
: "") << backQuoteIfNeed(getTable()); if (is_watch_events) { diff --git a/src/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp index 1ea64d94fe7..66645ccaf0e 100644 --- a/src/Parsers/ParserAlterQuery.cpp +++ b/src/Parsers/ParserAlterQuery.cpp @@ -861,12 +861,12 @@ bool ParserAlterQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (alter_object_type == ASTAlterQuery::AlterObjectType::DATABASE) { - if (!parseDatabase(pos, expected, query->database)) + if (!parseDatabaseAsAST(pos, expected, query->database)) return false; } else { - if (!parseDatabaseAndTableName(pos, expected, query->database, query->table)) + if (!parseDatabaseAndTableAsAST(pos, expected, query->database, query->table)) return false; String cluster_str; @@ -886,6 +886,12 @@ bool ParserAlterQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) query->set(query->command_list, command_list); query->alter_object = alter_object_type; + if (query->database) + query->children.push_back(query->database); + + if (query->table) + query->children.push_back(query->table); + return true; } diff --git a/src/Parsers/ParserCheckQuery.cpp b/src/Parsers/ParserCheckQuery.cpp index c397e1c33c5..a89416ab253 100644 --- a/src/Parsers/ParserCheckQuery.cpp +++ b/src/Parsers/ParserCheckQuery.cpp @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB @@ -15,31 +16,15 @@ bool ParserCheckQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ParserKeyword s_partition("PARTITION"); ParserToken s_dot(TokenType::Dot); - ParserIdentifier table_parser; ParserPartition partition_parser; - ASTPtr table; - ASTPtr database; - if (!s_check_table.ignore(pos, expected)) return false; - if (!table_parser.parse(pos, database, expected)) - return false; auto query = std::make_shared(); - if (s_dot.ignore(pos)) - { - if (!table_parser.parse(pos, table, expected)) - return false; - tryGetIdentifierNameInto(database, query->database); - tryGetIdentifierNameInto(table, query->table); - } - else - { - table = database; - tryGetIdentifierNameInto(table, query->table); - } + if (!parseDatabaseAndTableAsAST(pos, expected, query->database, query->table)) + return false; if (s_partition.ignore(pos, expected)) { @@ -47,6 +32,12 @@ bool ParserCheckQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) return false; } + if (query->database) + query->children.push_back(query->database); + + if (query->table) + query->children.push_back(query->table); + node = query; return true; } diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index af2966948fb..bc08d420684 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -426,7 +426,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe ParserKeyword s_temporary("TEMPORARY"); ParserKeyword s_table("TABLE"); ParserKeyword s_if_not_exists("IF NOT EXISTS"); - ParserCompoundIdentifier table_name_p(true); + ParserCompoundIdentifier table_name_p(true, true); ParserKeyword s_from("FROM"); ParserKeyword s_on("ON"); ParserKeyword s_as("AS"); @@ -495,7 +495,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe return false; } - auto table_id = table->as()->getTableId(); + auto * table_id = table->as(); // Shortcut for ATTACH a previously detached table bool short_attach = attach && !from_path; @@ -508,9 +508,14 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe query->if_not_exists = if_not_exists; query->cluster = 
cluster_str; - query->database = table_id.database_name; - query->table = table_id.table_name; - query->uuid = table_id.uuid; + query->database = table_id->getDatabase(); + query->table = table_id->getTable(); + query->uuid = table_id->uuid; + + if (query->database) + query->children.push_back(query->database); + if (query->table) + query->children.push_back(query->table); return true; } @@ -585,11 +590,16 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe query->if_not_exists = if_not_exists; query->temporary = is_temporary; - query->database = table_id.database_name; - query->table = table_id.table_name; - query->uuid = table_id.uuid; + query->database = table_id->getDatabase(); + query->table = table_id->getTable(); + query->uuid = table_id->uuid; query->cluster = cluster_str; + if (query->database) + query->children.push_back(query->database); + if (query->table) + query->children.push_back(query->table); + query->set(query->columns_list, columns_list); query->set(query->storage, storage); @@ -620,7 +630,7 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e ParserKeyword s_create("CREATE"); ParserKeyword s_attach("ATTACH"); ParserKeyword s_if_not_exists("IF NOT EXISTS"); - ParserCompoundIdentifier table_name_p(true); + ParserCompoundIdentifier table_name_p(true, true); ParserKeyword s_as("AS"); ParserKeyword s_view("VIEW"); ParserKeyword s_live("LIVE"); @@ -735,12 +745,17 @@ bool ParserCreateLiveViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e query->if_not_exists = if_not_exists; query->is_live_view = true; - auto table_id = table->as()->getTableId(); - query->database = table_id.database_name; - query->table = table_id.table_name; - query->uuid = table_id.uuid; + auto * table_id = table->as(); + query->database = table_id->getDatabase(); + query->table = table_id->getTable(); + query->uuid = table_id->uuid; query->cluster = cluster_str; + if (query->database) + query->children.push_back(query->database); + if (query->table) + query->children.push_back(query->table); + if (to_table) query->to_table_id = to_table->as()->getTableId(); @@ -766,7 +781,7 @@ bool ParserCreateDatabaseQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e ParserKeyword s_database("DATABASE"); ParserKeyword s_if_not_exists("IF NOT EXISTS"); ParserStorage storage_p; - ParserIdentifier name_p; + ParserIdentifier name_p(true); ASTPtr database; ASTPtr storage; @@ -817,9 +832,12 @@ bool ParserCreateDatabaseQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e query->attach = attach; query->if_not_exists = if_not_exists; - tryGetIdentifierNameInto(database, query->database); query->uuid = uuid; query->cluster = cluster_str; + query->database = database; + + if (database) + query->children.push_back(database); query->set(query->storage, storage); if (comment) @@ -833,7 +851,7 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec ParserKeyword s_create("CREATE"); ParserKeyword s_attach("ATTACH"); ParserKeyword s_if_not_exists("IF NOT EXISTS"); - ParserCompoundIdentifier table_name_p(true); + ParserCompoundIdentifier table_name_p(true, true); ParserKeyword s_as("AS"); ParserKeyword s_view("VIEW"); ParserKeyword s_materialized("MATERIALIZED"); @@ -954,12 +972,17 @@ bool ParserCreateViewQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec query->is_populate = is_populate; query->replace_view = replace_view; - auto table_id = table->as()->getTableId(); - query->database = table_id.database_name; - 
query->table = table_id.table_name; - query->uuid = table_id.uuid; + auto * table_id = table->as<ASTTableIdentifier>(); + query->database = table_id->getDatabase(); + query->table = table_id->getTable(); + query->uuid = table_id->uuid; query->cluster = cluster_str; + if (query->database) + query->children.push_back(query->database); + if (query->table) + query->children.push_back(query->table); + if (to_table) query->to_table_id = to_table->as<ASTTableIdentifier>()->getTableId(); if (to_inner_uuid) @@ -987,7 +1010,7 @@ bool ParserCreateDictionaryQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, E ParserKeyword s_dictionary("DICTIONARY"); ParserKeyword s_if_not_exists("IF NOT EXISTS"); ParserKeyword s_on("ON"); - ParserCompoundIdentifier dict_name_p(true); + ParserCompoundIdentifier dict_name_p(true, true); ParserToken s_left_paren(TokenType::OpeningRoundBracket); ParserToken s_right_paren(TokenType::ClosingRoundBracket); ParserToken s_dot(TokenType::Dot); @@ -1059,10 +1082,15 @@ bool ParserCreateDictionaryQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, E query->create_or_replace = or_replace; query->replace_table = replace; - auto dict_id = name->as<ASTTableIdentifier>()->getTableId(); - query->database = dict_id.database_name; - query->table = dict_id.table_name; - query->uuid = dict_id.uuid; + auto * dict_id = name->as<ASTTableIdentifier>(); + query->database = dict_id->getDatabase(); + query->table = dict_id->getTable(); + query->uuid = dict_id->uuid; + + if (query->database) + query->children.push_back(query->database); + if (query->table) + query->children.push_back(query->table); query->if_not_exists = if_not_exists; query->set(query->dictionary_attributes_list, attributes); diff --git a/src/Parsers/ParserDropQuery.cpp b/src/Parsers/ParserDropQuery.cpp index 5400f33fbd9..211c7699723 100644 --- a/src/Parsers/ParserDropQuery.cpp +++ b/src/Parsers/ParserDropQuery.cpp @@ -20,7 +20,7 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, cons ParserKeyword s_database("DATABASE"); ParserToken s_dot(TokenType::Dot); ParserKeyword s_if_exists("IF EXISTS"); - ParserIdentifier name_p; + ParserIdentifier name_p(true); ParserKeyword s_permanently("PERMANENTLY"); ParserKeyword s_no_delay("NO DELAY"); ParserKeyword s_sync("SYNC"); @@ -96,9 +96,14 @@ bool parseDropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected, cons query->is_view = is_view; query->no_delay = no_delay; query->permanently = permanently; + query->database = database; + query->table = table; - tryGetIdentifierNameInto(database, query->database); - tryGetIdentifierNameInto(table, query->table); + if (database) + query->children.push_back(database); + + if (table) + query->children.push_back(table); query->cluster = cluster_str; diff --git a/src/Parsers/ParserInsertQuery.cpp b/src/Parsers/ParserInsertQuery.cpp index 1bc552af384..8925de4cf55 100644 --- a/src/Parsers/ParserInsertQuery.cpp +++ b/src/Parsers/ParserInsertQuery.cpp @@ -41,7 +41,7 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) ParserKeyword s_with("WITH"); ParserToken s_lparen(TokenType::OpeningRoundBracket); ParserToken s_rparen(TokenType::ClosingRoundBracket); - ParserIdentifier name_p; + ParserIdentifier name_p(true); ParserList columns_p(std::make_unique<ParserInsertElement>(), std::make_unique<ParserToken>(TokenType::Comma), false); ParserFunction table_function_p{false}; ParserStringLiteral infile_name_p; @@ -244,8 +244,13 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) } else { - tryGetIdentifierNameInto(database, query->table_id.database_name); - 
tryGetIdentifierNameInto(table, query->table_id.table_name); + query->database = database; + query->table = table; + + if (database) + query->children.push_back(database); + if (table) + query->children.push_back(table); } query->columns = columns; diff --git a/src/Parsers/ParserOptimizeQuery.cpp b/src/Parsers/ParserOptimizeQuery.cpp index 441cec1465e..b1bfd43936b 100644 --- a/src/Parsers/ParserOptimizeQuery.cpp +++ b/src/Parsers/ParserOptimizeQuery.cpp @@ -31,7 +31,7 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte ParserKeyword s_deduplicate("DEDUPLICATE"); ParserKeyword s_by("BY"); ParserToken s_dot(TokenType::Dot); - ParserIdentifier name_p; + ParserIdentifier name_p(true); ParserPartition partition_p; ASTPtr database; @@ -80,15 +80,20 @@ bool ParserOptimizeQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte auto query = std::make_shared<ASTOptimizeQuery>(); node = query; - tryGetIdentifierNameInto(database, query->database); - tryGetIdentifierNameInto(table, query->table); - query->cluster = cluster_str; if ((query->partition = partition)) query->children.push_back(partition); query->final = final; query->deduplicate = deduplicate; query->deduplicate_by_columns = deduplicate_by_columns; + query->database = database; + query->table = table; + + if (database) + query->children.push_back(database); + + if (table) + query->children.push_back(table); return true; } diff --git a/src/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp index b861cbd2a5a..5a61929bdb3 100644 --- a/src/Parsers/ParserSystemQuery.cpp +++ b/src/Parsers/ParserSystemQuery.cpp @@ -39,14 +39,13 @@ static bool parseQueryWithOnClusterAndMaybeTable(std::shared_ptr<ASTSystemQuery> ASTPtr ast; if (ParserStringLiteral{}.parse(pos, ast, expected)) { - res->database = {}; - res->table = ast->as<ASTLiteral &>().value.safeGet<String>(); + res->setTable(ast->as<ASTLiteral &>().value.safeGet<String>()); parsed_table = true; } } if (!parsed_table) - parsed_table = parseDatabaseAndTableName(pos, expected, res->database, res->table); + parsed_table = parseDatabaseAndTableAsAST(pos, expected, res->database, res->table); if (!parsed_table && require_table) return false; @@ -56,6 +55,12 @@ static bool parseQueryWithOnClusterAndMaybeTable(std::shared_ptr return false; res->cluster = cluster; + + if (res->database) + res->children.push_back(res->database); + if (res->table) + res->children.push_back(res->table); + return true; } @@ -163,14 +168,12 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & if (ParserKeyword{"DATABASE"}.ignore(pos, expected)) { ParserIdentifier database_parser; - ASTPtr database; - if (!database_parser.parse(pos, database, expected)) + if (!database_parser.parse(pos, res->database, expected)) return false; - tryGetIdentifierNameInto(database, res->database); } else if (ParserKeyword{"TABLE"}.ignore(pos, expected)) { - parseDatabaseAndTableName(pos, expected, res->database, res->table); + parseDatabaseAndTableAsAST(pos, expected, res->database, res->table); } else if (ParserKeyword{"ZKPATH"}.ignore(pos, expected)) { @@ -193,7 +196,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & case Type::RESTART_REPLICA: case Type::SYNC_REPLICA: - if (!parseDatabaseAndTableName(pos, expected, res->database, res->table)) + if (!parseDatabaseAndTableAsAST(pos, expected, res->database, res->table)) return false; break; @@ -251,7 +254,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & res->storage_policy = storage_policy_str; res->volume = volume_str; 
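(Editorial aside, not part of the patch.) The hunks around this point all repeat one pattern, which is easier to read in isolation. Below is a minimal hedged sketch of how parseDatabaseAndTableAsAST() is meant to be used by a caller such as ParserSystemQuery; the local names are illustrative, and only parseDatabaseAndTableAsAST, ASTPtr, and children come from the patch itself:

    ASTPtr database;
    ASTPtr table;

    /// For "db.tbl" this yields two identifier nodes; for a bare "tbl"
    /// the database pointer stays null.
    if (!parseDatabaseAndTableAsAST(pos, expected, database, table))
        return false;

    /// Registering the identifiers as children appears to be the point of
    /// the refactoring: generic AST passes (formatting, query-parameter
    /// substitution) walk ast->children, so plain String members were
    /// invisible to them.
    if (database)
        res->children.push_back(database);
    if (table)
        res->children.push_back(table);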
if (res->volume.empty() && res->storage_policy.empty()) - parseDatabaseAndTableName(pos, expected, res->database, res->table); + parseDatabaseAndTableAsAST(pos, expected, res->database, res->table); break; } @@ -265,7 +268,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & case Type::START_REPLICATED_SENDS: case Type::STOP_REPLICATION_QUEUES: case Type::START_REPLICATION_QUEUES: - parseDatabaseAndTableName(pos, expected, res->database, res->table); + parseDatabaseAndTableAsAST(pos, expected, res->database, res->table); break; case Type::SUSPEND: @@ -287,6 +290,11 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & break; } + if (res->database) + res->children.push_back(res->database); + if (res->table) + res->children.push_back(res->table); + node = std::move(res); return true; } diff --git a/src/Parsers/ParserTablePropertiesQuery.cpp b/src/Parsers/ParserTablePropertiesQuery.cpp index 30be37bc4a1..bf3ff399f61 100644 --- a/src/Parsers/ParserTablePropertiesQuery.cpp +++ b/src/Parsers/ParserTablePropertiesQuery.cpp @@ -24,7 +24,7 @@ bool ParserTablePropertiesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & ParserKeyword s_view("VIEW"); ParserKeyword s_dictionary("DICTIONARY"); ParserToken s_dot(TokenType::Dot); - ParserIdentifier name_p; + ParserIdentifier name_p(true); ASTPtr database; ASTPtr table; @@ -110,8 +110,14 @@ bool ParserTablePropertiesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & } } - tryGetIdentifierNameInto(database, query->database); - tryGetIdentifierNameInto(table, query->table); + query->database = database; + query->table = table; + + if (database) + query->children.push_back(database); + + if (table) + query->children.push_back(table); node = query; diff --git a/src/Parsers/ParserWatchQuery.cpp b/src/Parsers/ParserWatchQuery.cpp index 5171f4f2536..40d2c47bd4f 100644 --- a/src/Parsers/ParserWatchQuery.cpp +++ b/src/Parsers/ParserWatchQuery.cpp @@ -24,7 +24,7 @@ bool ParserWatchQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { ParserKeyword s_watch("WATCH"); ParserToken s_dot(TokenType::Dot); - ParserIdentifier name_p; + ParserIdentifier name_p(true); ParserKeyword s_events("EVENTS"); ParserKeyword s_limit("LIMIT"); @@ -62,11 +62,14 @@ bool ParserWatchQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) return false; } + query->database = database; + query->table = table; + if (database) - query->database = getIdentifierName(database); + query->children.push_back(database); if (table) - query->table = getIdentifierName(table); + query->children.push_back(table); node = query; diff --git a/src/Parsers/TablePropertiesQueriesASTs.h b/src/Parsers/TablePropertiesQueriesASTs.h index edb040d72d9..b5baddcf268 100644 --- a/src/Parsers/TablePropertiesQueriesASTs.h +++ b/src/Parsers/TablePropertiesQueriesASTs.h @@ -89,7 +89,7 @@ protected: void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override { settings.ostr << (settings.hilite ? hilite_keyword : "") << ASTExistsDatabaseQueryIDAndQueryNames::Query - << " " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(database); + << " " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(getDatabase()); } }; @@ -99,7 +99,7 @@ protected: void formatQueryImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const override { settings.ostr << (settings.hilite ? hilite_keyword : "") << ASTShowCreateDatabaseQueryIDAndQueryNames::Query - << " " << (settings.hilite ? 
hilite_none : "") << backQuoteIfNeed(database); + << " " << (settings.hilite ? hilite_none : "") << backQuoteIfNeed(getDatabase()); } }; diff --git a/src/Parsers/parseDatabaseAndTableName.cpp b/src/Parsers/parseDatabaseAndTableName.cpp index c071f1b6eb4..48f47dbb68c 100644 --- a/src/Parsers/parseDatabaseAndTableName.cpp +++ b/src/Parsers/parseDatabaseAndTableName.cpp @@ -41,6 +41,24 @@ bool parseDatabaseAndTableName(IParser::Pos & pos, Expected & expected, String & return true; } +bool parseDatabaseAndTableAsAST(IParser::Pos & pos, Expected & expected, ASTPtr & database, ASTPtr & table) +{ + ParserToken s_dot(TokenType::Dot); + ParserIdentifier table_parser(true); + + if (!table_parser.parse(pos, table, expected)) + return false; + + if (s_dot.ignore(pos)) + { + database = table; + if (!table_parser.parse(pos, table, expected)) + return false; + } + + return true; +} + bool parseDatabase(IParser::Pos & pos, Expected & expected, String & database_str) { @@ -57,6 +75,12 @@ bool parseDatabase(IParser::Pos & pos, Expected & expected, String & database_st return true; } +bool parseDatabaseAsAST(IParser::Pos & pos, Expected & expected, ASTPtr & database) +{ + ParserIdentifier identifier_parser(/* allow_query_parameter */true); + return identifier_parser.parse(pos, database, expected); +} + bool parseDatabaseAndTableNameOrAsterisks(IParser::Pos & pos, Expected & expected, String & database, bool & any_database, String & table, bool & any_table) { diff --git a/src/Parsers/parseDatabaseAndTableName.h b/src/Parsers/parseDatabaseAndTableName.h index dc435ca047e..6ec354d68b4 100644 --- a/src/Parsers/parseDatabaseAndTableName.h +++ b/src/Parsers/parseDatabaseAndTableName.h @@ -7,9 +7,13 @@ namespace DB /// Parses [db.]name bool parseDatabaseAndTableName(IParser::Pos & pos, Expected & expected, String & database_str, String & table_str); +bool parseDatabaseAndTableAsAST(IParser::Pos & pos, Expected & expected, ASTPtr & database, ASTPtr & table); + /// Parses [db.]name or [db.]* or [*.]* bool parseDatabaseAndTableNameOrAsterisks(IParser::Pos & pos, Expected & expected, String & database, bool & any_database, String & table, bool & any_table); bool parseDatabase(IParser::Pos & pos, Expected & expected, String & database_str); +bool parseDatabaseAsAST(IParser::Pos & pos, Expected & expected, ASTPtr & database); + } diff --git a/src/Parsers/tests/gtest_dictionary_parser.cpp b/src/Parsers/tests/gtest_dictionary_parser.cpp index 967f6848404..25f48fc27c8 100644 --- a/src/Parsers/tests/gtest_dictionary_parser.cpp +++ b/src/Parsers/tests/gtest_dictionary_parser.cpp @@ -42,8 +42,8 @@ TEST(ParserDictionaryDDL, SimpleDictionary) ParserCreateDictionaryQuery parser; ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTCreateQuery * create = ast->as<ASTCreateQuery>(); - EXPECT_EQ(create->table, "dict1"); - EXPECT_EQ(create->database, "test"); + EXPECT_EQ(create->getTable(), "dict1"); + EXPECT_EQ(create->getDatabase(), "test"); EXPECT_EQ(create->is_dictionary, true); EXPECT_NE(create->dictionary, nullptr); EXPECT_NE(create->dictionary->lifetime, nullptr); @@ -138,8 +138,8 @@ TEST(ParserDictionaryDDL, AttributesWithMultipleProperties) ParserCreateDictionaryQuery parser; ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTCreateQuery * create = ast->as<ASTCreateQuery>(); - EXPECT_EQ(create->table, "dict2"); - EXPECT_EQ(create->database, ""); + EXPECT_EQ(create->getTable(), "dict2"); + EXPECT_EQ(create->getDatabase(), ""); /// test attributes 
EXPECT_NE(create->dictionary_attributes_list, nullptr); @@ -240,8 +240,8 @@ TEST(ParserDictionaryDDL, NestedSource) ParserCreateDictionaryQuery parser; ASTPtr ast = parseQuery(parser, input.data(), input.data() + input.size(), "", 0, 0); ASTCreateQuery * create = ast->as<ASTCreateQuery>(); - EXPECT_EQ(create->table, "dict4"); - EXPECT_EQ(create->database, ""); + EXPECT_EQ(create->getTable(), "dict4"); + EXPECT_EQ(create->getDatabase(), ""); /// source test EXPECT_EQ(create->dictionary->source->name, "mysql"); @@ -301,8 +301,8 @@ TEST(ParserDictionaryDDL, ParseDropQuery) ASTDropQuery * drop1 = ast1->as<ASTDropQuery>(); EXPECT_TRUE(drop1->is_dictionary); - EXPECT_EQ(drop1->database, "test"); - EXPECT_EQ(drop1->table, "dict1"); + EXPECT_EQ(drop1->getDatabase(), "test"); + EXPECT_EQ(drop1->getTable(), "dict1"); auto str1 = serializeAST(*drop1, true); EXPECT_EQ(input1, str1); @@ -312,8 +312,8 @@ TEST(ParserDictionaryDDL, ParseDropQuery) ASTDropQuery * drop2 = ast2->as<ASTDropQuery>(); EXPECT_TRUE(drop2->is_dictionary); - EXPECT_EQ(drop2->database, ""); - EXPECT_EQ(drop2->table, "dict2"); + EXPECT_EQ(drop2->getDatabase(), ""); + EXPECT_EQ(drop2->getTable(), "dict2"); auto str2 = serializeAST(*drop2, true); EXPECT_EQ(input2, str2); } @@ -326,8 +326,8 @@ TEST(ParserDictionaryDDL, ParsePropertiesQueries) ASTPtr ast1 = parseQuery(parser, input1.data(), input1.data() + input1.size(), "", 0, 0); ASTShowCreateDictionaryQuery * show1 = ast1->as<ASTShowCreateDictionaryQuery>(); - EXPECT_EQ(show1->table, "dict1"); - EXPECT_EQ(show1->database, "test"); + EXPECT_EQ(show1->getTable(), "dict1"); + EXPECT_EQ(show1->getDatabase(), "test"); EXPECT_EQ(serializeAST(*show1), input1); String input2 = "EXISTS DICTIONARY dict2"; @@ -335,7 +335,7 @@ TEST(ParserDictionaryDDL, ParsePropertiesQueries) ASTPtr ast2 = parseQuery(parser, input2.data(), input2.data() + input2.size(), "", 0, 0); ASTExistsDictionaryQuery * show2 = ast2->as<ASTExistsDictionaryQuery>(); - EXPECT_EQ(show2->table, "dict2"); - EXPECT_EQ(show2->database, ""); + EXPECT_EQ(show2->getTable(), "dict2"); + EXPECT_EQ(show2->getDatabase(), ""); EXPECT_EQ(serializeAST(*show2), input2); } diff --git a/src/Server/GRPCServer.cpp b/src/Server/GRPCServer.cpp index 676d9a61926..bac88e350e2 100644 --- a/src/Server/GRPCServer.cpp +++ b/src/Server/GRPCServer.cpp @@ -958,7 +958,18 @@ namespace { if (insert_query) { - auto table_id = query_context->resolveStorageID(insert_query->table_id, Context::ResolveOrdinary); + auto table_id = StorageID::createEmpty(); + + if (insert_query->table_id) + { + table_id = query_context->resolveStorageID(insert_query->table_id, Context::ResolveOrdinary); + } + else + { + StorageID local_table_id(insert_query->getDatabase(), insert_query->getTable()); + table_id = query_context->resolveStorageID(local_table_id, Context::ResolveOrdinary); + } + if (query_context->getSettingsRef().input_format_defaults_for_omitted_fields && table_id) { StoragePtr storage = DatabaseCatalog::instance().getTable(table_id, query_context); diff --git a/src/Storages/LiveView/TemporaryLiveViewCleaner.cpp b/src/Storages/LiveView/TemporaryLiveViewCleaner.cpp index 69369cbc1a3..12af472247d 100644 --- a/src/Storages/LiveView/TemporaryLiveViewCleaner.cpp +++ b/src/Storages/LiveView/TemporaryLiveViewCleaner.cpp @@ -24,8 +24,8 @@ namespace { /// We create and execute `drop` query for this table auto drop_query = std::make_shared<ASTDropQuery>(); - drop_query->database = storage_id.database_name; - drop_query->table = storage_id.table_name; + drop_query->setDatabase(storage_id.database_name); + drop_query->setTable(storage_id.table_name); drop_query->kind = ASTDropQuery::Kind::Drop; 
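(Editorial aside, not part of the patch.) Call sites that build queries programmatically, like TemporaryLiveViewCleaner above, now go through setDatabase()/setTable() instead of assigning String members. A hedged sketch of what these setters and the matching getters are assumed to do now that the members are ASTPtr; the helper names below are made up for illustration:

    /// Wrap a plain name into an identifier node so the AST-based member can
    /// be formatted and visited like a parsed identifier.
    void setTableSketch(ASTPtr & table_member, const String & name)
    {
        table_member = std::make_shared<ASTIdentifier>(name);
    }

    /// Unwrap the identifier back into a String; empty when the member is unset.
    String getTableSketch(const ASTPtr & table_member)
    {
        String name;
        if (table_member)
            tryGetIdentifierNameInto(table_member, name);
        return name;
    }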
ASTPtr ast_drop_query = drop_query; InterpreterDropQuery drop_interpreter(ast_drop_query, context); diff --git a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp index f000b43f61a..1f8642db886 100644 --- a/src/Storages/MergeTree/MergeTreeReaderCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeReaderCompact.cpp @@ -71,6 +71,9 @@ MergeTreeReaderCompact::MergeTreeReaderCompact( if (buffer_size) settings.read_settings = settings.read_settings.adjustBufferSize(buffer_size); + if (!settings.read_settings.local_fs_buffer_size || !settings.read_settings.remote_fs_buffer_size) + throw Exception(ErrorCodes::CANNOT_READ_ALL_DATA, "Cannot read to empty buffer."); + const String full_data_path = data_part->getFullRelativePath() + MergeTreeDataPartCompact::DATA_FILE_NAME_WITH_EXTENSION; if (uncompressed_cache) { diff --git a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp index f771b2239ef..84afe740091 100644 --- a/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp +++ b/src/Storages/PostgreSQL/StorageMaterializedPostgreSQL.cpp @@ -369,8 +369,8 @@ ASTPtr StorageMaterializedPostgreSQL::getCreateNestedTableQuery(PostgreSQLTableS auto create_table_query = std::make_shared<ASTCreateQuery>(); auto table_id = getStorageID(); - create_table_query->table = getNestedTableName(); - create_table_query->database = table_id.database_name; + create_table_query->setTable(getNestedTableName()); + create_table_query->setDatabase(table_id.database_name); if (is_materialized_postgresql_database) create_table_query->uuid = table_id.uuid; diff --git a/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp index ba2ac3f72a3..1aa6ae98e7a 100644 --- a/src/Storages/StorageFactory.cpp +++ b/src/Storages/StorageFactory.cpp @@ -201,7 +201,7 @@ StoragePtr StorageFactory::get( .storage_def = storage_def, .query = query, .relative_data_path = relative_data_path, - .table_id = StorageID(query.database, query.table, query.uuid), + .table_id = StorageID(query.getDatabase(), query.getTable(), query.uuid), .local_context = local_context, .context = context, .columns = columns, diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index e7de3010c33..210e6548465 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -105,8 +105,8 @@ StorageMaterializedView::StorageMaterializedView( /// We will create a query to create an internal table. 
auto create_context = Context::createCopy(local_context); auto manual_create_query = std::make_shared<ASTCreateQuery>(); - manual_create_query->database = getStorageID().database_name; - manual_create_query->table = generateInnerTableName(getStorageID()); + manual_create_query->setDatabase(getStorageID().database_name); + manual_create_query->setTable(generateInnerTableName(getStorageID())); manual_create_query->uuid = query.to_inner_uuid; auto new_columns_list = std::make_shared<ASTColumns>(); @@ -119,7 +119,7 @@ StorageMaterializedView::StorageMaterializedView( create_interpreter.setInternal(true); create_interpreter.execute(); - target_table_id = DatabaseCatalog::instance().getTable({manual_create_query->database, manual_create_query->table}, getContext())->getStorageID(); + target_table_id = DatabaseCatalog::instance().getTable({manual_create_query->getDatabase(), manual_create_query->getTable()}, getContext())->getStorageID(); } if (!select.select_table_id.empty()) diff --git a/src/Storages/System/attachInformationSchemaTables.cpp b/src/Storages/System/attachInformationSchemaTables.cpp index cfe5de1dc41..803e9d55dac 100644 --- a/src/Storages/System/attachInformationSchemaTables.cpp +++ b/src/Storages/System/attachInformationSchemaTables.cpp @@ -31,14 +31,14 @@ static void createInformationSchemaView(ContextMutablePtr context, IDatabase & d DBMS_DEFAULT_MAX_QUERY_SIZE, DBMS_DEFAULT_MAX_PARSER_DEPTH); auto & ast_create = ast->as<ASTCreateQuery &>(); - assert(view_name == ast_create.table); + assert(view_name == ast_create.getTable()); if (is_uppercase) - ast_create.table = Poco::toUpper(view_name); + ast_create.setTable(Poco::toUpper(view_name)); StoragePtr view = createTableFromAST(ast_create, database.getDatabaseName(), database.getTableDataPath(ast_create), context, true).second; - database.createTable(context, ast_create.table, view, ast); + database.createTable(context, ast_create.getTable(), view, ast); } catch (...) 
{ diff --git a/tests/config/config.d/keeper_port.xml b/tests/config/config.d/keeper_port.xml index fc6f8240f1e..85d4bc0f04f 100644 --- a/tests/config/config.d/keeper_port.xml +++ b/tests/config/config.d/keeper_port.xml @@ -10,7 +10,7 @@ 240000 1000000000000000 - 10000 + 100000 0 diff --git a/tests/integration/helpers/0_common_instance_config.xml b/tests/integration/helpers/0_common_instance_config.xml index f4a9a0b6296..71a2f8f4b13 100644 --- a/tests/integration/helpers/0_common_instance_config.xml +++ b/tests/integration/helpers/0_common_instance_config.xml @@ -16,5 +16,6 @@ 10 /var/log/clickhouse-server/stderr.log /var/log/clickhouse-server/stdout.log + true diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 1d5d2d004f9..6ab8b29895b 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -31,8 +31,9 @@ from kazoo.exceptions import KazooException from minio import Minio from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT -from helpers.test_tools import assert_eq_with_retry +from helpers.test_tools import assert_eq_with_retry, exec_query_with_retry from helpers import pytest_xdist_logging_to_separate_files +from helpers.client import QueryRuntimeException import docker @@ -225,6 +226,8 @@ class ClickHouseCluster: self.docker_logs_path = p.join(self.instances_dir, 'docker.log') self.env_file = p.join(self.instances_dir, DEFAULT_ENV_NAME) self.env_variables = {} + self.env_variables["TSAN_OPTIONS"] = "second_deadlock_stack=1" + self.env_variables["CLICKHOUSE_WATCHDOG_ENABLE"] = "0" self.up_called = False custom_dockerd_host = custom_dockerd_host or os.environ.get('CLICKHOUSE_TESTS_DOCKERD_HOST') @@ -413,6 +416,10 @@ class ClickHouseCluster: logging.debug(f"CLUSTER INIT base_config_dir:{self.base_config_dir}") def cleanup(self): + if os.environ and 'DISABLE_CLEANUP' in os.environ and os.environ['DISABLE_CLEANUP'] == "1": + logging.warning("Cleanup is disabled") + return + # Just in case kill unstopped containers from previous launch try: # docker-compose names containers using the following formula: @@ -422,13 +429,13 @@ class ClickHouseCluster: filter_name = f'^/{self.project_name}_.*_1$' if int(run_and_check(f'docker container list --all --filter name={filter_name} | wc -l', shell=True)) > 1: logging.debug(f"Trying to kill unstopped containers for project {self.project_name}:") - unstopped_containers = run_and_check(f'docker container list --all --filter name={filter_name}', shell=True) - unstopped_containers_ids = [line.split()[0] for line in unstopped_containers.splitlines()[1:]] - for id in unstopped_containers_ids: + unstopped_containers = run_and_check(f'docker container list --all --filter name={filter_name}', shell=True).splitlines() + logging.debug(f"Unstopped containers {unstopped_containers}") + for id in unstopped_containers: run_and_check(f'docker kill {id}', shell=True, nothrow=True) run_and_check(f'docker rm {id}', shell=True, nothrow=True) - logging.debug("Unstopped containers killed") - run_and_check(f'docker container list --all --filter name={filter_name}', shell=True) + left_ids = run_and_check(f'docker container list --all --filter name={filter_name}', shell=True) + logging.debug(f"Unstopped containers killed. 
Left {left_ids}") else: logging.debug(f"No running containers for project: {self.project_name}") except: @@ -962,6 +969,9 @@ class ClickHouseCluster: logging.info("Restart node with ip change") # In builds with sanitizer the server can take a long time to start node.wait_for_start(start_timeout=180.0, connection_timeout=600.0) # seconds + res = node.client.query("SELECT 30") + logging.debug(f"Read '{res}'") + assert "30\n" == res logging.info("Restarted") return node @@ -1414,7 +1424,7 @@ class ClickHouseCluster: # retry_exception(10, 5, subprocess_check_call, Exception, clickhouse_pull_cmd) if destroy_dirs and p.exists(self.instances_dir): - logging.debug(("Removing instances dir %s", self.instances_dir)) + logging.debug(f"Removing instances dir {self.instances_dir}") shutil.rmtree(self.instances_dir) for instance in list(self.instances.values()): @@ -1424,7 +1434,7 @@ class ClickHouseCluster: _create_env_file(os.path.join(self.env_file), self.env_variables) self.docker_client = docker.DockerClient(base_url='unix:///var/run/docker.sock', version=self.docker_api_version, timeout=600) - common_opts = ['up', '-d'] + common_opts = ['--verbose', 'up', '-d'] if self.with_zookeeper_secure and self.base_zookeeper_cmd: logging.debug('Setup ZooKeeper Secure') @@ -1644,7 +1654,7 @@ class ClickHouseCluster: self.shutdown() raise - def shutdown(self, kill=True): + def shutdown(self, kill=True, ignore_fatal=True): sanitizer_assert_instance = None fatal_log = None @@ -1672,10 +1682,10 @@ class ClickHouseCluster: # NOTE: we cannot do this via docker since in case of Fatal message container may already die. for name, instance in self.instances.items(): if instance.contains_in_log(SANITIZER_SIGN, from_host=True): - sanitizer_assert_instance = instance.grep_in_log(SANITIZER_SIGN, from_host=True) + sanitizer_assert_instance = instance.grep_in_log(SANITIZER_SIGN, from_host=True, filename='stderr.log') logging.error("Sanitizer in instance %s log %s", name, sanitizer_assert_instance) - if instance.contains_in_log("Fatal", from_host=True): + if not ignore_fatal and instance.contains_in_log("Fatal", from_host=True): fatal_log = instance.grep_in_log("Fatal", from_host=True) if 'Child process was terminated by signal 9 (KILL)' in fatal_log: fatal_log = None @@ -1685,7 +1695,7 @@ class ClickHouseCluster: try: subprocess_check_call(self.base_cmd + ['down', '--volumes']) except Exception as e: - logging.debug("Down + remove orphans failed durung shutdown. {}".format(repr(e))) + logging.debug("Down + remove orphans failed during shutdown. {}".format(repr(e))) else: logging.warning("docker-compose up was not called. 
Trying to export docker.log for running containers") @@ -1768,7 +1778,7 @@ CLICKHOUSE_START_COMMAND = "clickhouse server --config-file=/etc/clickhouse-serv " --log-file=/var/log/clickhouse-server/clickhouse-server.log " \ " --errorlog-file=/var/log/clickhouse-server/clickhouse-server.err.log" -CLICKHOUSE_STAY_ALIVE_COMMAND = 'bash -c "trap \'killall tail\' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!"'.format(CLICKHOUSE_START_COMMAND) +CLICKHOUSE_STAY_ALIVE_COMMAND = 'bash -c "trap \'pkill tail\' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!"'.format(CLICKHOUSE_START_COMMAND) # /run/xtables.lock passed inside for correct iptables --wait DOCKER_COMPOSE_TEMPLATE = ''' @@ -2034,84 +2044,122 @@ class ClickHouseInstance: if not self.stay_alive: raise Exception("clickhouse can be stopped only with stay_alive=True instance") try: - ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], user='root') + ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], nothrow=True, user='root') if ps_clickhouse == " PID TTY STAT TIME COMMAND" : logging.warning("ClickHouse process already stopped") return self.exec_in_container(["bash", "-c", "pkill {} clickhouse".format("-9" if kill else "")], user='root') - sleep_time = 0.1 - num_steps = int(stop_wait_sec / sleep_time) + start_time = time.time() stopped = False - for step in range(num_steps): - time.sleep(sleep_time) - ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], user='root') - if ps_clickhouse == " PID TTY STAT TIME COMMAND": + while time.time() <= start_time + stop_wait_sec: + pid = self.get_process_pid("clickhouse") + if pid is None: stopped = True break + else: + time.sleep(1) if not stopped: - logging.warning(f"Force kill clickhouse in stop_clickhouse. ps:{ps_clickhouse}") - self.stop_clickhouse(kill=True) + pid = self.get_process_pid("clickhouse") + if pid is not None: + logging.warning(f"Force kill clickhouse in stop_clickhouse. ps:{pid}") + self.exec_in_container(["bash", "-c", f"gdb -batch -ex 'thread apply all bt full' -p {pid} > {os.path.join(self.path, 'logs/stdout.log')}"], user='root') + self.stop_clickhouse(kill=True) + else: + ps_all = self.exec_in_container(["bash", "-c", "ps aux"], nothrow=True, user='root') + logging.warning(f"We want to force stop clickhouse, but no clickhouse-server is running\n{ps_all}") + return except Exception as e: logging.warning(f"Stop ClickHouse raised an error {e}") - def start_clickhouse(self, start_wait_sec=30): + def start_clickhouse(self, start_wait_sec=60): if not self.stay_alive: raise Exception("ClickHouse can be started again only with stay_alive=True instance") - + start_time = time.time() time_to_sleep = 0.5 - start_tries = 5 - total_tries = int(start_wait_sec / time_to_sleep) - query_tries = int(total_tries / start_tries) - - for i in range(start_tries): + while start_time + start_wait_sec >= time.time(): # sometimes after SIGKILL (hard reset) server may refuse to start for some time # for different reasons. - self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) - started = False - for _ in range(query_tries): + pid = self.get_process_pid("clickhouse") + if pid is None: + logging.debug("No clickhouse process running. Start new one.") + self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) + time.sleep(1) + continue + else: + logging.debug("Clickhouse process running.") try: - self.query("select 1") - started = True - break - except: - time.sleep(time_to_sleep) - if started: - break - else: - raise Exception("Cannot start ClickHouse, see additional info in logs") + self.wait_start(start_wait_sec + start_time - time.time()) + return + except Exception as e: + logging.warning(f"Current start attempt failed. Will kill {pid} just in case.") + self.exec_in_container(["bash", "-c", f"kill -9 {pid}"], user='root', nothrow=True) + time.sleep(time_to_sleep) + + raise Exception("Cannot start ClickHouse, see additional info in logs") - def restart_clickhouse(self, stop_start_wait_sec=30, kill=False): + def wait_start(self, start_wait_sec): + start_time = time.time() + last_err = None + while time.time() <= start_time + start_wait_sec: + try: + pid = self.get_process_pid("clickhouse") + if pid is None: + raise Exception("ClickHouse server is not running. Check logs.") + exec_query_with_retry(self, 'select 20', retry_count = 10, silent=True) + return + except QueryRuntimeException as err: + last_err = err + pid = self.get_process_pid("clickhouse") + if pid is not None: + logging.warning(f"ERROR {err}") + else: + raise Exception("ClickHouse server is not running. Check logs.") + logging.error(f"No time left to start. But process is still running. Will dump threads.") + ps_clickhouse = self.exec_in_container(["bash", "-c", "ps -C clickhouse"], nothrow=True, user='root') + logging.info(f"PS RESULT:\n{ps_clickhouse}") + pid = self.get_process_pid("clickhouse") + if pid is not None: + self.exec_in_container(["bash", "-c", f"gdb -batch -ex 'thread apply all bt full' -p {pid}"], user='root') + if last_err is not None: + raise last_err + + def restart_clickhouse(self, stop_start_wait_sec=60, kill=False): + self.stop_clickhouse(stop_start_wait_sec, kill) + self.start_clickhouse(stop_start_wait_sec) def exec_in_container(self, cmd, detach=False, nothrow=False, **kwargs): return self.cluster.exec_in_container(self.docker_id, cmd, detach, nothrow, **kwargs) - def contains_in_log(self, substring, from_host=False): + def rotate_logs(self): + self.exec_in_container(["bash", "-c", f"kill -HUP {self.get_process_pid('clickhouse server')}"], user='root') + + def contains_in_log(self, substring, from_host=False, filename='clickhouse-server.log'): if from_host: + # We check first that the file exists, but want to look through all rotated logs as well result = subprocess_check_call(["bash", "-c", - f'[ -f {self.logs_dir}/clickhouse-server.log ] && grep -a "{substring}" {self.logs_dir}/clickhouse-server.log || true' + f'[ -f {self.logs_dir}/{filename} ] && zgrep -aH "{substring}" {self.logs_dir}/{filename}* || true' ]) else: result = self.exec_in_container(["bash", "-c", - f'[ -f /var/log/clickhouse-server/clickhouse-server.log ] && grep -a "{substring}" /var/log/clickhouse-server/clickhouse-server.log || true' + f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -aH "{substring}" /var/log/clickhouse-server/{filename} || true' ]) return len(result) > 0 - def grep_in_log(self, substring, from_host=False): + def grep_in_log(self, substring, from_host=False, filename='clickhouse-server.log'): logging.debug(f"grep in log called %s", substring) if from_host: + # We check first that the file exists, but want to look through all rotated logs as well result = subprocess_check_call(["bash", "-c", - f'grep -a 
"{substring}" {self.logs_dir}/clickhouse-server.log || true' + f'[ -f {self.logs_dir}/{filename} ] && zgrep -a "{substring}" {self.logs_dir}/{filename}* || true' ]) else: result = self.exec_in_container(["bash", "-c", - f'grep -a "{substring}" /var/log/clickhouse-server/clickhouse-server.log || true' + f'[ -f /var/log/clickhouse-server/{filename} ] && zgrep -a "{substring}" /var/log/clickhouse-server/{filename}* || true' ]) logging.debug("grep result %s", result) return result @@ -2146,7 +2194,7 @@ class ClickHouseInstance: def get_process_pid(self, process_name): output = self.exec_in_container(["bash", "-c", - "ps ax | grep '{}' | grep -v 'grep' | grep -v 'bash -c' | awk '{{print $1}}'".format( + "ps ax | grep '{}' | grep -v 'grep' | grep -v 'coproc' | grep -v 'bash -c' | awk '{{print $1}}'".format( process_name)]) if output: try: @@ -2157,6 +2205,7 @@ class ClickHouseInstance: return None def restart_with_original_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15): + begin_time = time.time() if not self.stay_alive: raise Exception("Cannot restart not stay alive container") self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(signal)], user='root') @@ -2176,6 +2225,7 @@ class ClickHouseInstance: if callback_onstop: callback_onstop(self) + self.exec_in_container(["bash", "-c", "echo 'restart_with_original_version: From version' && /usr/bin/clickhouse server --version && echo 'To version' && /usr/share/clickhouse_original server --version"]) self.exec_in_container( ["bash", "-c", "cp /usr/share/clickhouse_original /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"], user='root') @@ -2185,9 +2235,14 @@ class ClickHouseInstance: self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) # wait start - assert_eq_with_retry(self, "select 1", "1", retry_count=retries) + time_left = begin_time + stop_start_wait_sec - time.time() + if time_left <= 0: + raise Exception(f"No time left during restart") + else: + self.wait_start(time_left) def restart_with_latest_version(self, stop_start_wait_sec=300, callback_onstop=None, signal=15): + begin_time = time.time() if not self.stay_alive: raise Exception("Cannot restart not stay alive container") self.exec_in_container(["bash", "-c", "pkill -{} clickhouse".format(signal)], user='root') @@ -2213,13 +2268,18 @@ class ClickHouseInstance: self.exec_in_container( ["bash", "-c", "cp /usr/share/clickhouse_fresh /usr/bin/clickhouse && chmod 777 /usr/bin/clickhouse"], user='root') + self.exec_in_container(["bash", "-c", "echo 'restart_with_latest_version: From version' && /usr/share/clickhouse_original server --version && echo 'To version' /usr/share/clickhouse_fresh server --version"]) self.exec_in_container(["bash", "-c", "cp /usr/share/clickhouse-odbc-bridge_fresh /usr/bin/clickhouse-odbc-bridge && chmod 777 /usr/bin/clickhouse"], user='root') self.exec_in_container(["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], user=str(os.getuid())) # wait start - assert_eq_with_retry(self, "select 1", "1", retry_count=retries) + time_left = begin_time + stop_start_wait_sec - time.time() + if time_left <= 0: + raise Exception(f"No time left during restart") + else: + self.wait_start(time_left) def get_docker_handle(self): return self.cluster.get_docker_handle(self.docker_id) diff --git a/tests/integration/helpers/test_tools.py b/tests/integration/helpers/test_tools.py index b5d40659629..3577553be34 100644 --- a/tests/integration/helpers/test_tools.py +++ 
b/tests/integration/helpers/test_tools.py @@ -85,15 +85,18 @@ def assert_logs_contain_with_retry(instance, substring, retry_count=20, sleep_ti else: raise AssertionError("'{}' not found in logs".format(substring)) -def exec_query_with_retry(instance, query, retry_count=40, sleep_time=0.5, settings={}): +def exec_query_with_retry(instance, query, retry_count=40, sleep_time=0.5, silent=False, settings={}): exception = None - for _ in range(retry_count): + for cnt in range(retry_count): try: - instance.query(query, timeout=30, settings=settings) + res = instance.query(query, timeout=30, settings=settings) + if not silent: + logging.debug(f"Result of {query} on {cnt} try is {res}") break except Exception as ex: exception = ex - logging.exception(f"Failed to execute query '{query}' on instance '{instance.name}' will retry") + if not silent: + logging.exception(f"Failed to execute query '{query}' on {cnt} try on instance '{instance.name}' will retry") time.sleep(sleep_time) else: raise exception diff --git a/tests/integration/test_broken_part_during_merge/test.py b/tests/integration/test_broken_part_during_merge/test.py index 910dbc1d1a9..1c03add49db 100644 --- a/tests/integration/test_broken_part_during_merge/test.py +++ b/tests/integration/test_broken_part_during_merge/test.py @@ -15,12 +15,6 @@ node1 = cluster.add_instance('node1', with_zookeeper=True) def started_cluster(): try: cluster.start() - - node1.query(''' - CREATE TABLE replicated_mt(date Date, id UInt32, value Int32) - ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') ORDER BY id; - '''.format(replica=node1.name)) - yield cluster finally: @@ -28,6 +22,12 @@ def started_cluster(): def test_merge_and_part_corruption(started_cluster): + node1.query(''' + CREATE TABLE replicated_mt(date Date, id UInt32, value Int32) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/replicated_mt', '{replica}') ORDER BY id; + '''.format(replica=node1.name)) + + node1.query("SYSTEM STOP REPLICATION QUEUES replicated_mt") for i in range(4): node1.query("INSERT INTO replicated_mt SELECT toDate('2019-10-01'), number, number * number FROM numbers ({f}, 100000)".format(f=i*100000)) @@ -53,3 +53,5 @@ def test_merge_and_part_corruption(started_cluster): # will hung if checked bug not fixed node1.query("ALTER TABLE replicated_mt UPDATE value = 7 WHERE 1", settings={"mutations_sync": 2}, timeout=30) assert node1.query("SELECT sum(value) FROM replicated_mt") == "2100000\n" + + node1.query('DROP TABLE replicated_mt SYNC') diff --git a/tests/integration/test_cleanup_dir_after_bad_zk_conn/test.py b/tests/integration/test_cleanup_dir_after_bad_zk_conn/test.py index 4db020d0226..58a8a609b10 100644 --- a/tests/integration/test_cleanup_dir_after_bad_zk_conn/test.py +++ b/tests/integration/test_cleanup_dir_after_bad_zk_conn/test.py @@ -48,7 +48,8 @@ def test_cleanup_dir_after_bad_zk_conn(start_cluster): node1.query_with_retry(query_create) node1.query_with_retry('''INSERT INTO replica.test VALUES (1, now())''') assert "1\n" in node1.query('''SELECT count() from replica.test FORMAT TSV''') - + node1.query("DROP TABLE replica.test SYNC") + node1.query("DROP DATABASE replica") def test_cleanup_dir_after_wrong_replica_name(start_cluster): node1.query_with_retry( @@ -68,7 +69,8 @@ def test_cleanup_dir_after_wrong_zk_path(start_cluster): assert "Cannot create" in error node1.query( "CREATE TABLE test3_r2 (n UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test3/', 'r2') ORDER BY n") - + node1.query("DROP TABLE test3_r1 SYNC") + 
node1.query("DROP TABLE test3_r2 SYNC") def test_attach_without_zk(start_cluster): node1.query_with_retry( @@ -82,3 +84,4 @@ def test_attach_without_zk(start_cluster): pass node1.query("ATTACH TABLE IF NOT EXISTS test4_r1") node1.query("SELECT * FROM test4_r1") + node1.query("DROP TABLE test4_r1 SYNC") diff --git a/tests/integration/test_concurrent_ttl_merges/test.py b/tests/integration/test_concurrent_ttl_merges/test.py index 3e25398d633..8c3c490d055 100644 --- a/tests/integration/test_concurrent_ttl_merges/test.py +++ b/tests/integration/test_concurrent_ttl_merges/test.py @@ -1,4 +1,5 @@ import time +import logging import pytest from helpers.cluster import ClickHouseCluster @@ -13,7 +14,6 @@ node2 = cluster.add_instance('node2', main_configs=['configs/fast_background_poo def started_cluster(): try: cluster.start() - yield cluster finally: @@ -22,7 +22,7 @@ def started_cluster(): def count_ttl_merges_in_queue(node, table): result = node.query( - "SELECT count() FROM system.replication_queue WHERE merge_type = 'TTL_DELETE' and table = '{}'".format(table)) + f"SELECT count() FROM system.replication_queue WHERE merge_type = 'TTL_DELETE' and table = '{table}'") if not result: return 0 return int(result.strip()) @@ -30,22 +30,22 @@ def count_ttl_merges_in_queue(node, table): def count_ttl_merges_in_background_pool(node, table, level): result = TSV(node.query( - "SELECT * FROM system.merges WHERE merge_type = 'TTL_DELETE' and table = '{}'".format(table))) + f"SELECT * FROM system.merges WHERE merge_type = 'TTL_DELETE' and table = '{table}'")) count = len(result) if count >= level: - print("count_ttl_merges_in_background_pool: merges more than warn level:\n{}".format(result)) + logging.debug(f"count_ttl_merges_in_background_pool: merges more than warn level:\n{result}") return count def count_regular_merges_in_background_pool(node, table): - result = node.query("SELECT count() FROM system.merges WHERE merge_type = 'REGULAR' and table = '{}'".format(table)) + result = node.query(f"SELECT count() FROM system.merges WHERE merge_type = 'REGULAR' and table = '{table}'") if not result: return 0 return int(result.strip()) def count_running_mutations(node, table): - result = node.query("SELECT count() FROM system.merges WHERE table = '{}' and is_mutation=1".format(table)) + result = node.query(f"SELECT count() FROM system.merges WHERE table = '{table}' and is_mutation=1") if not result: return 0 return int(result.strip()) @@ -55,7 +55,6 @@ def count_running_mutations(node, table): # but it revealed a bug when we assign different merges to the same part # on the borders of partitions. 
def test_no_ttl_merges_in_busy_pool(started_cluster): - node1.query("DROP TABLE IF EXISTS test_ttl") node1.query( "CREATE TABLE test_ttl (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0, number_of_free_entries_in_pool_to_execute_mutation = 0") @@ -63,12 +62,12 @@ def test_no_ttl_merges_in_busy_pool(started_cluster): for i in range(1, 7): node1.query( - "INSERT INTO test_ttl SELECT now() - INTERVAL 1 MONTH + number - 1, {}, number FROM numbers(5)".format(i)) + f"INSERT INTO test_ttl SELECT now() - INTERVAL 1 MONTH + number - 1, {i}, number FROM numbers(5)") node1.query("ALTER TABLE test_ttl UPDATE data = data + 1 WHERE sleepEachRow(1) = 0") while count_running_mutations(node1, "test_ttl") < 6: - print("Mutations count", count_running_mutations(node1, "test_ttl")) + logging.debug(f"Mutations count {count_running_mutations(node1, 'test_ttl')}") assert count_ttl_merges_in_background_pool(node1, "test_ttl", 1) == 0 time.sleep(0.5) @@ -76,7 +75,7 @@ rows_count = [] while count_running_mutations(node1, "test_ttl") == 6: - print("Mutations count after start TTL", count_running_mutations(node1, "test_ttl")) + logging.debug(f"Mutations count after start TTL {count_running_mutations(node1, 'test_ttl')}") rows_count.append(int(node1.query("SELECT count() FROM test_ttl").strip())) time.sleep(0.5) @@ -85,17 +84,17 @@ assert sum([1 for count in rows_count if count == 30]) > 4 assert_eq_with_retry(node1, "SELECT COUNT() FROM test_ttl", "0") + node1.query("DROP TABLE test_ttl SYNC") def test_limited_ttl_merges_in_empty_pool(started_cluster): - node1.query("DROP TABLE IF EXISTS test_ttl_v2") node1.query( "CREATE TABLE test_ttl_v2 (d DateTime, key UInt64, data UInt64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0") node1.query("SYSTEM STOP TTL MERGES") for i in range(100): - node1.query("INSERT INTO test_ttl_v2 SELECT now() - INTERVAL 1 MONTH, {}, number FROM numbers(1)".format(i)) + node1.query(f"INSERT INTO test_ttl_v2 SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(1)") assert node1.query("SELECT COUNT() FROM test_ttl_v2") == "100\n" @@ -109,17 +108,17 @@ break assert max(merges_with_ttl_count) <= 2 + node1.query("DROP TABLE test_ttl_v2 SYNC") def test_limited_ttl_merges_in_empty_pool_replicated(started_cluster): - node1.query("DROP TABLE IF EXISTS replicated_ttl") node1.query( "CREATE TABLE replicated_ttl (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t', '1') ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0") node1.query("SYSTEM STOP TTL MERGES") for i in range(100): - node1.query_with_retry("INSERT INTO replicated_ttl SELECT now() - INTERVAL 1 MONTH, {}, number FROM numbers(1)".format(i)) + node1.query_with_retry(f"INSERT INTO replicated_ttl SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(1)") assert node1.query("SELECT COUNT() FROM replicated_ttl") == "100\n" @@ -137,12 +136,11 @@ assert max(merges_with_ttl_count) <= 2 assert max(entries_with_ttl_count) <= 1 + node1.query("DROP TABLE replicated_ttl SYNC") + def test_limited_ttl_merges_two_replicas(started_cluster): # Actually this test is quite fast and 
often we cannot catch any merges. - node1.query("DROP TABLE IF EXISTS replicated_ttl_2") - node2.query("DROP TABLE IF EXISTS replicated_ttl_2") - node1.query( "CREATE TABLE replicated_ttl_2 (d DateTime, key UInt64, data UInt64) ENGINE = ReplicatedMergeTree('/test/t2', '1') ORDER BY tuple() PARTITION BY key TTL d + INTERVAL 1 MONTH SETTINGS merge_with_ttl_timeout = 0") node2.query( @@ -153,7 +151,7 @@ def test_limited_ttl_merges_two_replicas(started_cluster): for i in range(100): node1.query_with_retry( - "INSERT INTO replicated_ttl_2 SELECT now() - INTERVAL 1 MONTH, {}, number FROM numbers(10000)".format(i)) + f"INSERT INTO replicated_ttl_2 SELECT now() - INTERVAL 1 MONTH, {i}, number FROM numbers(10000)") node2.query("SYSTEM SYNC REPLICA replicated_ttl_2", timeout=10) assert node1.query("SELECT COUNT() FROM replicated_ttl_2") == "1000000\n" @@ -176,3 +174,6 @@ def test_limited_ttl_merges_two_replicas(started_cluster): # check them assert max(merges_with_ttl_count_node1) <= 2 assert max(merges_with_ttl_count_node2) <= 2 + + node1.query("DROP TABLE replicated_ttl_2 SYNC") + node2.query("DROP TABLE replicated_ttl_2 SYNC") diff --git a/tests/integration/test_default_compression_codec/test.py b/tests/integration/test_default_compression_codec/test.py index 4d111db59ab..d114954d739 100644 --- a/tests/integration/test_default_compression_codec/test.py +++ b/tests/integration/test_default_compression_codec/test.py @@ -1,7 +1,8 @@ import random import string - +import logging import pytest +import time from helpers.cluster import ClickHouseCluster cluster = ClickHouseCluster(__file__) @@ -130,6 +131,9 @@ def test_default_codec_single(start_cluster): assert node1.query("SELECT COUNT() FROM compression_table") == "3\n" assert node2.query("SELECT COUNT() FROM compression_table") == "3\n" + node1.query("DROP TABLE compression_table SYNC") + node2.query("DROP TABLE compression_table SYNC") + def test_default_codec_multiple(start_cluster): for i, node in enumerate([node1, node2]): @@ -199,6 +203,9 @@ def test_default_codec_multiple(start_cluster): assert node1.query("SELECT COUNT() FROM compression_table_multiple") == "3\n" assert node2.query("SELECT COUNT() FROM compression_table_multiple") == "3\n" + node1.query("DROP TABLE compression_table_multiple SYNC") + node2.query("DROP TABLE compression_table_multiple SYNC") + def test_default_codec_version_update(start_cluster): node3.query(""" @@ -212,8 +219,10 @@ def test_default_codec_version_update(start_cluster): node3.query("INSERT INTO compression_table VALUES (2, '{}')".format(get_random_string(2048))) node3.query("INSERT INTO compression_table VALUES (3, '{}')".format(get_random_string(22048))) + old_version = node3.query("SELECT version()") node3.restart_with_latest_version() - + new_version = node3.query("SELECT version()") + logging.debug(f"Updated from {old_version} to {new_version}") assert node3.query( "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '1_1_1_0'") == "ZSTD(1)\n" assert node3.query( @@ -230,6 +239,16 @@ def test_default_codec_version_update(start_cluster): assert node3.query( "SELECT default_compression_codec FROM system.parts WHERE table = 'compression_table' and name = '3_3_3_1'") == "LZ4\n" + node3.query("DROP TABLE compression_table SYNC") + + def callback(n): + n.exec_in_container(['bash', '-c', 'rm -rf /var/lib/clickhouse/metadata/system /var/lib/clickhouse/data/system '], user='root') + node3.restart_with_original_version(callback_onstop=callback) + + cur_version = 
node3.query("SELECT version()") + logging.debug(f"End with {cur_version}") + + def test_default_codec_for_compact_parts(start_cluster): node4.query(""" CREATE TABLE compact_parts_table ( @@ -254,3 +273,4 @@ def test_default_codec_for_compact_parts(start_cluster): node4.query("ATTACH TABLE compact_parts_table") assert node4.query("SELECT COUNT() FROM compact_parts_table") == "1\n" + node4.query("DROP TABLE compact_parts_table SYNC") diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/.gitignore b/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/.gitignore new file mode 100644 index 00000000000..12657916b22 --- /dev/null +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/.gitignore @@ -0,0 +1,2 @@ +dictionaries/* +!.gitignore diff --git a/tests/integration/test_dictionaries_dependency/test.py b/tests/integration/test_dictionaries_dependency/test.py index 9b1019822e3..7dc7f84d50b 100644 --- a/tests/integration/test_dictionaries_dependency/test.py +++ b/tests/integration/test_dictionaries_dependency/test.py @@ -90,7 +90,10 @@ def test_dependency_via_explicit_table(node): # Restart must not break anything. node.restart_clickhouse() check() - + for dct in d_names: + node.query(f"DROP DICTIONARY {dct}") + for tbl in tbl_names: + node.query(f"DROP TABLE {tbl}") @pytest.mark.parametrize("node", nodes) def test_dependency_via_dictionary_database(node): @@ -114,3 +117,9 @@ def test_dependency_via_dictionary_database(node): node.restart_clickhouse() for d_name in d_names: assert node.query_with_retry("SELECT dictGet({}, 'y', toUInt64(5))".format(d_name)) == "6\n" + + # cleanup + for d_name in d_names: + node.query(f"DROP DICTIONARY IF EXISTS {d_name} SYNC") + node.query("DROP DATABASE dict_db SYNC") + node.restart_clickhouse() diff --git a/tests/integration/test_dictionaries_update_field/test.py b/tests/integration/test_dictionaries_update_field/test.py index c52c836b4f7..2e46403c63b 100644 --- a/tests/integration/test_dictionaries_update_field/test.py +++ b/tests/integration/test_dictionaries_update_field/test.py @@ -4,7 +4,6 @@ import time import pytest from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseKiller -from helpers.network import PartitionManager cluster = ClickHouseCluster(__file__) diff --git a/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py index 9ab790c0fd6..cfd5f4d5607 100644 --- a/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py +++ b/tests/integration/test_dictionary_allow_read_expired_keys/test_default_reading.py @@ -51,7 +51,7 @@ def test_default_reading(started_cluster): test_helper() with PartitionManager() as pm, ClickHouseKiller(dictionary_node): - assert None == dictionary_node.get_process_pid("clickhouse"), "CLickHouse must be alive" + assert None == dictionary_node.get_process_pid("clickhouse"), "ClickHouse must be alive" # Remove connection between main_node and dictionary for sure pm.heal_all() diff --git a/tests/integration/test_grpc_protocol/test.py b/tests/integration/test_grpc_protocol/test.py index 7b2cdee8d76..c892fc94712 100644 --- a/tests/integration/test_grpc_protocol/test.py +++ b/tests/integration/test_grpc_protocol/test.py @@ -213,6 +213,7 @@ def test_errors_handling(): def test_authentication(): query("CREATE USER OR REPLACE john IDENTIFIED BY 'qwe123'") assert query("SELECT currentUser()", 
user_name="john", password="qwe123") == "john\n" + query("DROP USER john") def test_logs(): logs = query_and_get_logs("SELECT 1", settings={'send_logs_level':'debug'}) diff --git a/tests/integration/test_insert_into_distributed_sync_async/test.py b/tests/integration/test_insert_into_distributed_sync_async/test.py index 372ed04cd2c..1f479003b99 100755 --- a/tests/integration/test_insert_into_distributed_sync_async/test.py +++ b/tests/integration/test_insert_into_distributed_sync_async/test.py @@ -68,6 +68,8 @@ def test_insertion_sync(started_cluster): 2000-01-01 100500''') assert TSV(node2.query('SELECT date, val FROM local_table WHERE val = 100500 ORDER BY date')) == expected + node1.query("TRUNCATE TABLE local_table SYNC") + node2.query("TRUNCATE TABLE local_table SYNC") """ def test_insertion_sync_fails_on_error(started_cluster): diff --git a/tests/integration/test_keeper_three_nodes_two_alive/test.py b/tests/integration/test_keeper_three_nodes_two_alive/test.py index eb63d28b3e2..f7cfb3c354e 100644 --- a/tests/integration/test_keeper_three_nodes_two_alive/test.py +++ b/tests/integration/test_keeper_three_nodes_two_alive/test.py @@ -53,9 +53,11 @@ def test_start_offline(started_cluster): assert node3.contains_in_log("Cannot connect to ZooKeeper (or Keeper) before internal Keeper start") node2_zk = get_fake_zk("node2") - node2_zk.create("/test_dead", b"data") + node2_zk.create("/c", b"data") + finally: p.map(start, [node1, node2, node3]) + get_fake_zk("node1").delete("/test_alive") def test_start_non_existing(started_cluster): @@ -80,6 +82,8 @@ def test_start_non_existing(started_cluster): node1.replace_in_config('/etc/clickhouse-server/config.d/enable_keeper1.xml', 'non_existing_node', 'node3') node2.replace_in_config('/etc/clickhouse-server/config.d/enable_keeper2.xml', 'non_existing_node', 'node3') p.map(start, [node1, node2, node3]) + node2_zk.delete("/test_non_exising") + def test_restart_third_node(started_cluster): node1_zk = get_fake_zk("node1") @@ -88,3 +92,4 @@ def test_restart_third_node(started_cluster): node3.restart_clickhouse() assert node3.contains_in_log("Connected to ZooKeeper (or Keeper) before internal Keeper start") + node1_zk.delete("/test_restart") diff --git a/tests/integration/test_match_process_uid_against_data_owner/test.py b/tests/integration/test_match_process_uid_against_data_owner/test.py index 754ebc731c1..cf8a4bc711b 100644 --- a/tests/integration/test_match_process_uid_against_data_owner/test.py +++ b/tests/integration/test_match_process_uid_against_data_owner/test.py @@ -1,40 +1,38 @@ import os import pwd import re - -import docker import pytest -from helpers.cluster import ClickHouseCluster, CLICKHOUSE_START_COMMAND +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance('node', stay_alive=True) +other_user_id = pwd.getpwnam('nobody').pw_uid +current_user_id = os.getuid() + +@pytest.fixture(scope="module", autouse=True) +def started_cluster(): + try: + if current_user_id != 0: + return + + cluster.start() + yield cluster + + finally: + cluster.shutdown(ignore_fatal=True) -def test_different_user(): - current_user_id = os.getuid() +def test_different_user(started_cluster): + with pytest.raises(Exception): + node.stop_clickhouse() + node.exec_in_container(["bash", "-c", f"chown {other_user_id} /var/lib/clickhouse"], privileged=True) + node.start_clickhouse(start_wait_sec=3) - if current_user_id != 0: - return - - other_user_id = pwd.getpwnam('nobody').pw_uid - - cluster = 
diff --git a/tests/integration/test_match_process_uid_against_data_owner/test.py b/tests/integration/test_match_process_uid_against_data_owner/test.py
index 754ebc731c1..cf8a4bc711b 100644
--- a/tests/integration/test_match_process_uid_against_data_owner/test.py
+++ b/tests/integration/test_match_process_uid_against_data_owner/test.py
@@ -1,40 +1,38 @@
 import os
 import pwd
 import re
-
-import docker
 import pytest
-from helpers.cluster import ClickHouseCluster, CLICKHOUSE_START_COMMAND
+from helpers.cluster import ClickHouseCluster
+
+cluster = ClickHouseCluster(__file__)
+node = cluster.add_instance('node', stay_alive=True)
+other_user_id = pwd.getpwnam('nobody').pw_uid
+current_user_id = os.getuid()
+
+
+@pytest.fixture(scope="module", autouse=True)
+def started_cluster():
+    try:
+        if current_user_id != 0:
+            return
+
+        cluster.start()
+        yield cluster
+
+    finally:
+        cluster.shutdown(ignore_fatal=True)
 
 
-def test_different_user():
-    current_user_id = os.getuid()
+def test_different_user(started_cluster):
+    with pytest.raises(Exception):
+        node.stop_clickhouse()
+        node.exec_in_container(["bash", "-c", f"chown {other_user_id} /var/lib/clickhouse"], privileged=True)
+        node.start_clickhouse(start_wait_sec=3)
 
-    if current_user_id != 0:
-        return
-
-    other_user_id = pwd.getpwnam('nobody').pw_uid
-
-    cluster = ClickHouseCluster(__file__)
-    node = cluster.add_instance('node')
-
-    cluster.start()
-
-    docker_api = cluster.docker_client.api
-    container = node.get_docker_handle()
-    container.stop()
-    container.start()
-    container.exec_run('chown {} /var/lib/clickhouse'.format(other_user_id), privileged=True)
-    container.exec_run(CLICKHOUSE_START_COMMAND)
-
-    cluster.shutdown()  # cleanup
-
-    with open(os.path.join(node.path, 'logs/clickhouse-server.err.log')) as log:
-        expected_message = "Effective user of the process \(.*\) does not match the owner of the data \(.*\)\. Run under 'sudo -u .*'\."
-
-        last_message = [row for row in log.readlines() if "Effective" in row][-1]
-
-        if re.search(expected_message, last_message) is None:
-            pytest.fail(
-                'Expected the server to fail with a message "{}", but the last message is "{}"'.format(expected_message, last_message))
+    log = node.grep_in_log("Effective")
+    expected_message = "Effective user of the process \(.*\) does not match the owner of the data \(.*\)\. Run under 'sudo -u .*'\."
+    if re.search(expected_message, log) is None:
+        pytest.fail(
+            'Expected the server to fail with a message "{}", but the last message is "{}"'.format(expected_message, log))
+    node.exec_in_container(["bash", "-c", f"chown {current_user_id} /var/lib/clickhouse"], privileged=True)
+    node.start_clickhouse()
+    node.rotate_logs()
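The rewrite above moves this test onto the standard integration-test skeleton: the cluster is built once at module scope and torn down by an autouse fixture, instead of being started and stopped inside the test body. The skeleton looks roughly like this (assuming the usual helpers.cluster API):

import pytest
from helpers.cluster import ClickHouseCluster

cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node', stay_alive=True)

@pytest.fixture(scope="module", autouse=True)
def started_cluster():
    try:
        cluster.start()
        yield cluster
    finally:
        # ignore_fatal=True because the test makes the server fail on
        # purpose; fatal log entries must not fail the teardown.
        cluster.shutdown(ignore_fatal=True)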
main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") # test_simple_alter_table name = "testdb.alter_test_{}".format(engine) main_node.query("CREATE TABLE {} " @@ -100,12 +106,13 @@ def test_simple_alter_table(started_cluster, engine): "SETTINGS index_granularity = 8192".format(name, full_engine) assert_create_query([main_node, dummy_node, competing_node], name, expected) - + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") + competing_node.query("DROP DATABASE testdb SYNC") def get_table_uuid(database, name): return main_node.query(f"SELECT uuid FROM system.tables WHERE database = '{database}' and name = '{name}'").strip() - @pytest.fixture(scope="module", name="attachable_part") def fixture_attachable_part(started_cluster): main_node.query(f"CREATE DATABASE testdb_attach_atomic ENGINE = Atomic") @@ -115,10 +122,11 @@ def fixture_attachable_part(started_cluster): table_uuid = get_table_uuid("testdb_attach_atomic", "test") return os.path.join(main_node.path, f"database/shadow/test_attach/store/{table_uuid[:3]}/{table_uuid}/all_1_1_0") - - @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_alter_attach(started_cluster, attachable_part, engine): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + name = "alter_attach_test_{}".format(engine) main_node.query(f"CREATE TABLE testdb.{name} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)") table_uuid = get_table_uuid("testdb", name) @@ -134,10 +142,14 @@ def test_alter_attach(started_cluster, attachable_part, engine): assert dummy_node.query(f"SELECT CounterID FROM testdb.{name}") == "123\n" else: assert dummy_node.query(f"SELECT CounterID FROM testdb.{name}") == "" - + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_alter_drop_part(started_cluster, engine): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + table = f"alter_drop_{engine}" part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0" main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)") @@ -151,10 +163,14 @@ def test_alter_drop_part(started_cluster, engine): assert dummy_node.query(f"SELECT CounterID FROM testdb.{table}") == "" else: assert dummy_node.query(f"SELECT CounterID FROM testdb.{table}") == "456\n" - + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_alter_detach_part(started_cluster, engine): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + table = f"alter_detach_{engine}" part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else 
"all_1_1_0" main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)") @@ -169,10 +185,14 @@ def test_alter_detach_part(started_cluster, engine): assert dummy_node.query(detached_parts_query) == f"{part_name}\n" else: assert dummy_node.query(detached_parts_query) == "" - + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") @pytest.mark.parametrize("engine", ["MergeTree", "ReplicatedMergeTree"]) def test_alter_drop_detached_part(started_cluster, engine): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + table = f"alter_drop_detached_{engine}" part_name = "all_0_0_0" if engine == "ReplicatedMergeTree" else "all_1_1_0" main_node.query(f"CREATE TABLE testdb.{table} (CounterID UInt32) ENGINE = {engine} ORDER BY (CounterID)") @@ -186,8 +206,14 @@ def test_alter_drop_detached_part(started_cluster, engine): assert main_node.query(detached_parts_query) == "" assert dummy_node.query(detached_parts_query) == f"{part_name}\n" + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") + def test_alter_fetch(started_cluster): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + main_node.query("CREATE TABLE testdb.fetch_source (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)") main_node.query("CREATE TABLE testdb.fetch_target (CounterID UInt32) ENGINE = ReplicatedMergeTree ORDER BY (CounterID)") main_node.query("INSERT INTO testdb.fetch_source VALUES (123)") @@ -197,10 +223,15 @@ def test_alter_fetch(started_cluster): assert main_node.query(detached_parts_query) == "all_0_0_0\n" assert dummy_node.query(detached_parts_query) == "" + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") def test_alters_from_different_replicas(started_cluster): + main_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica1');") + dummy_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica2');") + # test_alters_from_different_replicas - competing_node.query("CREATE DATABASE IF NOT EXISTS testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');") + competing_node.query("CREATE DATABASE testdb ENGINE = Replicated('/clickhouse/databases/test1', 'shard1', 'replica3');") main_node.query("CREATE TABLE testdb.concurrent_test " "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) " @@ -234,7 +265,7 @@ def test_alters_from_different_replicas(started_cluster): assert_create_query([main_node, competing_node], "testdb.concurrent_test", expected) # test_create_replica_after_delay - main_node.query("DROP TABLE testdb.concurrent_test") + main_node.query("DROP TABLE testdb.concurrent_test SYNC") main_node.query("CREATE TABLE testdb.concurrent_test " "(CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) " "ENGINE = ReplicatedMergeTree ORDER BY CounterID;") @@ -294,6 +325,11 @@ def test_alters_from_different_replicas(started_cluster): 
"9\t2021-02-11\t1241149650\n" assert_eq_with_retry(dummy_node, "SELECT CounterID, StartDate, UserID FROM testdb.dist ORDER BY CounterID", expected) + main_node.query("DROP DATABASE testdb SYNC") + dummy_node.query("DROP DATABASE testdb SYNC") + competing_node.query("DROP DATABASE testdb SYNC") + snapshotting_node.query("DROP DATABASE testdb SYNC") + snapshot_recovering_node.query("DROP DATABASE testdb SYNC") def test_recover_staled_replica(started_cluster): main_node.query("CREATE DATABASE recover ENGINE = Replicated('/clickhouse/databases/recover', 'shard1', 'replica1');") @@ -365,11 +401,12 @@ def test_recover_staled_replica(started_cluster): assert dummy_node.query("SELECT (*,).1 FROM recover.{}".format(table)) == "42\n" for table in ['m1', 'mt1']: assert dummy_node.query("SELECT count() FROM recover.{}".format(table)) == "0\n" - - assert dummy_node.query("SELECT count() FROM system.tables WHERE database='recover_broken_tables'") == "2\n" - table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'mt1_29_%'").strip() + global test_recover_staled_replica_run + assert dummy_node.query("SELECT count() FROM system.tables WHERE database='recover_broken_tables'") == f"{2*test_recover_staled_replica_run}\n" + test_recover_staled_replica_run += 1 + table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'mt1_29_%' LIMIT 1").strip() assert dummy_node.query("SELECT (*,).1 FROM recover_broken_tables.{}".format(table)) == "42\n" - table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'rmt5_29_%'").strip() + table = dummy_node.query("SHOW TABLES FROM recover_broken_tables LIKE 'rmt5_29_%' LIMIT 1").strip() assert dummy_node.query("SELECT (*,).1 FROM recover_broken_tables.{}".format(table)) == "42\n" expected = "Cleaned 6 outdated objects: dropped 1 dictionaries and 3 tables, moved 2 tables" @@ -377,10 +414,10 @@ def test_recover_staled_replica(started_cluster): dummy_node.query("DROP TABLE recover.tmp") assert_eq_with_retry(main_node, "SELECT count() FROM system.tables WHERE database='recover' AND name='tmp'", "0\n") + main_node.query("DROP DATABASE recover SYNC") + dummy_node.query("DROP DATABASE recover SYNC") def test_startup_without_zk(started_cluster): - main_node.query("DROP DATABASE IF EXISTS testdb SYNC") - main_node.query("DROP DATABASE IF EXISTS recover SYNC") with PartitionManager() as pm: pm.drop_instance_zk_connections(main_node) err = main_node.query_and_get_error("CREATE DATABASE startup ENGINE = Replicated('/clickhouse/databases/startup', 'shard1', 'replica1');") @@ -403,7 +440,7 @@ def test_startup_without_zk(started_cluster): main_node.query("EXCHANGE TABLES startup.rmt AND startup.m") assert main_node.query("SELECT (*,).1 FROM startup.m") == "42\n" - + main_node.query("DROP DATABASE startup SYNC") def test_server_uuid(started_cluster): uuid1 = main_node.query("select serverUUID()") diff --git a/tests/integration/test_replicated_fetches_bandwidth/test.py b/tests/integration/test_replicated_fetches_bandwidth/test.py index 5b8332cda16..f39baea064c 100644 --- a/tests/integration/test_replicated_fetches_bandwidth/test.py +++ b/tests/integration/test_replicated_fetches_bandwidth/test.py @@ -34,7 +34,7 @@ def test_limited_fetch_single_table(start_cluster): node2.query("SYSTEM STOP FETCHES limited_fetch_table") for i in range(5): - node1.query("INSERT INTO limited_fetch_table SELECT {}, '{}' FROM numbers(300)".format(i, get_random_string(104857))) + node1.query("INSERT INTO limited_fetch_table SELECT {}, (select randomPrintableASCII(104857)) FROM 
numbers(300)".format(i)) n1_net = NetThroughput(node1) n2_net = NetThroughput(node2) @@ -66,7 +66,7 @@ def test_limited_send_single_table(start_cluster): node2.query("SYSTEM STOP FETCHES limited_send_table") for i in range(5): - node1.query("INSERT INTO limited_send_table SELECT {}, '{}' FROM numbers(150)".format(i, get_random_string(104857))) + node1.query("INSERT INTO limited_send_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(150)".format(i)) n1_net = NetThroughput(node1) n2_net = NetThroughput(node2) @@ -100,7 +100,7 @@ def test_limited_fetches_for_server(start_cluster): for j in range(5): node3.query(f"SYSTEM STOP FETCHES limited_fetches{j}") for i in range(5): - node1.query("INSERT INTO limited_fetches{} SELECT {}, '{}' FROM numbers(50)".format(j, i, get_random_string(104857))) + node1.query("INSERT INTO limited_fetches{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(50)".format(j, i)) n1_net = NetThroughput(node1) n3_net = NetThroughput(node3) @@ -137,7 +137,7 @@ def test_limited_sends_for_server(start_cluster): for j in range(5): node1.query(f"SYSTEM STOP FETCHES limited_sends{j}") for i in range(5): - node3.query("INSERT INTO limited_sends{} SELECT {}, '{}' FROM numbers(50)".format(j, i, get_random_string(104857))) + node3.query("INSERT INTO limited_sends{} SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(50)".format(j, i)) n1_net = NetThroughput(node1) n3_net = NetThroughput(node3) @@ -173,7 +173,7 @@ def test_should_execute_fetch(start_cluster): node2.query("SYSTEM STOP FETCHES should_execute_table") for i in range(3): - node1.query("INSERT INTO should_execute_table SELECT {}, '{}' FROM numbers(200)".format(i, get_random_string(104857))) + node1.query("INSERT INTO should_execute_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(200)".format(i)) n1_net = NetThroughput(node1) n2_net = NetThroughput(node2) @@ -181,7 +181,7 @@ def test_should_execute_fetch(start_cluster): node2.query("SYSTEM START FETCHES should_execute_table") for i in range(10): - node1.query("INSERT INTO should_execute_table SELECT {}, '{}' FROM numbers(3)".format(i, get_random_string(104857))) + node1.query("INSERT INTO should_execute_table SELECT {}, (select randomPrintableASCII(104857)) FROM numbers(3)".format(i)) n2_fetch_speed = [] replication_queue_data = [] diff --git a/tests/integration/test_restore_replica/test.py b/tests/integration/test_restore_replica/test.py index 4197c064243..4013b5b474c 100644 --- a/tests/integration/test_restore_replica/test.py +++ b/tests/integration/test_restore_replica/test.py @@ -4,7 +4,6 @@ import pytest from helpers.cluster import ClickHouseCluster from helpers.cluster import ClickHouseKiller from helpers.test_tools import assert_eq_with_retry -from helpers.network import PartitionManager def fill_nodes(nodes): for node in nodes: diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index bf9ab4f9e27..c9e53b67de3 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -1779,9 +1779,13 @@ def test_kafka_virtual_columns2(kafka_cluster): assert TSV(result) == TSV(expected) + instance.query(''' + DROP TABLE test.kafka; + DROP TABLE test.view; + ''') kafka_delete_topic(admin_client, "virt2_0") kafka_delete_topic(admin_client, "virt2_1") - + instance.rotate_logs() def test_kafka_produce_key_timestamp(kafka_cluster): diff --git a/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference 
diff --git a/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference b/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference
index 3c92ffb0a8c..887c701a5e4 100644
--- a/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference
+++ b/tests/queries/0_stateless/01604_explain_ast_of_nonselect_query.reference
@@ -1,4 +1,4 @@
-AlterQuery t1 (children 1)
+AlterQuery t1 (children 2)
  ExpressionList (children 1)
   AlterCommand 33 (children 1)
    Function equals (children 1)
@@ -6,3 +6,4 @@ AlterQuery t1 (children 1)
      Identifier date
      Function today (children 1)
       ExpressionList
+ Identifier t1
diff --git a/tests/queries/0_stateless/01610_client_spawn_editor.sh b/tests/queries/0_stateless/01610_client_spawn_editor.sh
index 873b45e12a9..4df34911a3b 100755
--- a/tests/queries/0_stateless/01610_client_spawn_editor.sh
+++ b/tests/queries/0_stateless/01610_client_spawn_editor.sh
@@ -7,7 +7,7 @@ match_max 100000
 
 if ![info exists env(CLICKHOUSE_PORT_TCP)] {set env(CLICKHOUSE_PORT_TCP) 9000}
 
-set env(EDITOR) [file dirname [file normalize [info script]]]"/01610_client_spawn_editor_open.editor"
+set env(EDITOR) [file dirname [file normalize [info script]]]/01610_client_spawn_editor_open.editor
 
 spawn clickhouse-client --disable_suggestion
 expect ":) "
diff --git a/tests/queries/0_stateless/02014_query_parameters.reference b/tests/queries/0_stateless/02014_query_parameters.reference
new file mode 100644
index 00000000000..d126b1e773e
--- /dev/null
+++ b/tests/queries/0_stateless/02014_query_parameters.reference
@@ -0,0 +1,4 @@
+1 2
+1
+4
+2
diff --git a/tests/queries/0_stateless/02014_query_parameters.sh b/tests/queries/0_stateless/02014_query_parameters.sh
new file mode 100755
index 00000000000..b3d718fa578
--- /dev/null
+++ b/tests/queries/0_stateless/02014_query_parameters.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Tags: no-parallel
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS test_db";
+
+${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "CREATE DATABASE {db:Identifier}";
+${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "CREATE TABLE {db:Identifier}.{tbl:Identifier} (id UInt64, col1 UInt64) ENGINE = MergeTree() ORDER BY id";
+${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "INSERT INTO {db:Identifier}.{tbl:Identifier} VALUES (1,2)";
+${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "SELECT * FROM {db:Identifier}.{tbl:Identifier}";
+${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "OPTIMIZE TABLE {db:Identifier}.{tbl:Identifier}";
+${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "ALTER TABLE {db:Identifier}.{tbl:Identifier} RENAME COLUMN col1 to col2";
+${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "EXISTS TABLE {db:Identifier}.{tbl:Identifier}";
+${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "INSERT INTO {db:Identifier}.{tbl:Identifier} VALUES (3,4)";
+${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "SELECT col2 FROM {db:Identifier}.{tbl:Identifier} ORDER BY col2 DESC";
+${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "DROP TABLE {db:Identifier}.{tbl:Identifier}";
+${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "DROP DATABASE {db:Identifier}";
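The new 02014 test exercises typed query parameters end to end: --param_<name> on the client binds a {name:Type} placeholder, and the Identifier type lets database and table names, not just values, be substituted safely. A rough Python equivalent of what the shell test does (assuming clickhouse-client is on PATH):

import subprocess

def ch(query, **params):
    args = ["clickhouse-client"]
    for name, value in params.items():
        args.append("--param_{}={}".format(name, value))
    args += ["--query", query]
    return subprocess.run(args, check=True, capture_output=True, text=True).stdout

ch("CREATE DATABASE {db:Identifier}", db="test_db")
print(ch("SELECT * FROM {db:Identifier}.{tbl:Identifier}", db="test_db", tbl="test_t"))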
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} --query "DROP DATABASE IF EXISTS test_db"; + +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "CREATE DATABASE {db:Identifier}"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "CREATE TABLE {db:Identifier}.{tbl:Identifier} (id UInt64, col1 UInt64) ENGINE = MergeTree() ORDER BY id"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "INSERT INTO {db:Identifier}.{tbl:Identifier} VALUES (1,2)"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "SELECT * FROM {db:Identifier}.{tbl:Identifier}"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "OPTIMIZE TABLE {db:Identifier}.{tbl:Identifier}"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "ALTER TABLE {db:Identifier}.{tbl:Identifier} RENAME COLUMN col1 to col2"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "EXISTS TABLE {db:Identifier}.{tbl:Identifier}"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "INSERT INTO {db:Identifier}.{tbl:Identifier} VALUES (3,4)"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "SELECT col2 FROM {db:Identifier}.{tbl:Identifier} ORDER BY col2 DESC"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "DROP TABLE {db:Identifier}.{tbl:Identifier}"; +${CLICKHOUSE_CLIENT} --param_db="test_db" --param_tbl="test_t" --query "DROP DATABASE {db:Identifier}"; diff --git a/tests/queries/0_stateless/02121_pager.reference b/tests/queries/0_stateless/02121_pager.reference new file mode 100644 index 00000000000..7290ba859f4 --- /dev/null +++ b/tests/queries/0_stateless/02121_pager.reference @@ -0,0 +1,2 @@ +4 +4 diff --git a/tests/queries/0_stateless/02121_pager.sh b/tests/queries/0_stateless/02121_pager.sh new file mode 100755 index 00000000000..9ca9637116b --- /dev/null +++ b/tests/queries/0_stateless/02121_pager.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --pager 'wc -c' --query 'select 123' +$CLICKHOUSE_LOCAL --pager 'wc -c' --query 'select 123'