diff --git a/.clang-tidy b/.clang-tidy index 3c222fbf8da..b0971418e0e 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -9,7 +9,7 @@ Checks: '-*, misc-unused-alias-decls, misc-unused-parameters, misc-unused-using-decls, - + modernize-avoid-bind, modernize-loop-convert, modernize-make-shared, @@ -33,7 +33,7 @@ Checks: '-*, performance-no-automatic-move, performance-trivially-destructible, performance-unnecessary-copy-initialization, - + readability-avoid-const-params-in-decls, readability-const-return-type, readability-container-size-empty, @@ -58,7 +58,7 @@ Checks: '-*, readability-simplify-boolean-expr, readability-inconsistent-declaration-parameter-name, readability-identifier-naming, - + bugprone-undelegated-constructor, bugprone-argument-comment, bugprone-bad-signal-to-kill-thread, @@ -102,7 +102,7 @@ Checks: '-*, bugprone-unused-return-value, bugprone-use-after-move, bugprone-virtual-near-miss, - + cert-dcl21-cpp, cert-dcl50-cpp, cert-env33-c, @@ -112,7 +112,7 @@ Checks: '-*, cert-mem57-cpp, cert-msc50-cpp, cert-oop58-cpp, - + google-build-explicit-make-pair, google-build-namespaces, google-default-arguments, @@ -121,9 +121,9 @@ Checks: '-*, google-readability-avoid-underscore-in-googletest-name, google-runtime-int, google-runtime-operator, - + hicpp-exception-baseclass, - + clang-analyzer-core.CallAndMessage, clang-analyzer-core.DivideZero, clang-analyzer-core.NonNullParamChecker, diff --git a/docker/packager/deb/Dockerfile b/docker/packager/deb/Dockerfile index 78f396f5c75..ae70bc8c594 100644 --- a/docker/packager/deb/Dockerfile +++ b/docker/packager/deb/Dockerfile @@ -5,6 +5,7 @@ RUN apt-get --allow-unauthenticated update -y && apt-get install --yes wget gnup RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - RUN echo "deb [trusted=yes] http://apt.llvm.org/eoan/ llvm-toolchain-eoan-10 main" >> /etc/apt/sources.list + RUN apt-get --allow-unauthenticated update -y \ && env DEBIAN_FRONTEND=noninteractive \ apt-get --allow-unauthenticated install --yes --no-install-recommends \ @@ -17,6 +18,14 @@ RUN apt-get --allow-unauthenticated update -y \ apt-transport-https \ ca-certificates +# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able +# to compress files using pigz (https://zlib.net/pigz/) instead of gzip. +# Significantly increase deb packaging speed and compatible with old systems +RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/dpkg-deb +RUN chmod +x dpkg-deb +RUN cp dpkg-deb /usr/bin + + # Libraries from OS are only needed to test the "unbundled" build (that is not used in production). RUN apt-get --allow-unauthenticated update -y \ && env DEBIAN_FRONTEND=noninteractive \ @@ -74,12 +83,6 @@ RUN apt-get --allow-unauthenticated update -y \ libldap2-dev -# Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able -# to compress files using pigz (https://zlib.net/pigz/) instead of gzip. 
-# Significantly increase deb packaging speed and compatible with old systems -RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/dpkg-deb -RUN chmod +x dpkg-deb -RUN cp dpkg-deb /usr/bin # This symlink required by gcc to find lld compiler RUN ln -s /usr/bin/lld-10 /usr/bin/ld.lld diff --git a/docker/test/performance-comparison/perf.py b/docker/test/performance-comparison/perf.py index be8155cdddb..fd16673bd89 100755 --- a/docker/test/performance-comparison/perf.py +++ b/docker/test/performance-comparison/perf.py @@ -151,7 +151,7 @@ for query_index, q in enumerate(test_queries): # use the test name + the test-wide query index. query_display_name = q if len(query_display_name) > 1000: - query_display_name = f'{query_display_name[:1000]}...({i})' + query_display_name = f'{query_display_name[:1000]}...({query_index})' print(f'display-name\t{query_index}\t{tsv_escape(query_display_name)}') diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index 4b29baed70d..d7824443c1d 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -72,7 +72,7 @@ Examples: kafka_format = 'JSONEachRow', kafka_num_consumers = 4; - CREATE TABLE queue2 ( + CREATE TABLE queue3 ( timestamp UInt64, level String, message String diff --git a/docs/ru/whats-new/extended-roadmap.md b/docs/ru/whats-new/extended-roadmap.md index 9c640504aec..b7756f7fb79 100644 --- a/docs/ru/whats-new/extended-roadmap.md +++ b/docs/ru/whats-new/extended-roadmap.md @@ -174,7 +174,7 @@ Upd. Всё ещё ждём удаление старого кода, котор ### 2.3. Перенос столбцового ser/de из DataType в Column {#perenos-stolbtsovogo-serde-iz-datatype-v-column} -В очереди. +В очереди. Антон Попов. ### 2.4. Перевод LowCardinality из DataType в Column. Добавление ColumnSparse {#perevod-lowcardinality-iz-datatype-v-column-dobavlenie-columnsparse} @@ -977,10 +977,10 @@ Q2. [Виталий Баранов](https://github.com/vitlibar) и Денис Глазачев, Altinity. Требует 12.1. -### 12.6. Информация о пользователях и квотах в системной таблице {#informatsiia-o-polzovateliakh-i-kvotakh-v-sistemnoi-tablitse} +### 12.6. + Информация о пользователях и квотах в системной таблице {#informatsiia-o-polzovateliakh-i-kvotakh-v-sistemnoi-tablitse} [Виталий Баранов](https://github.com/vitlibar). Требует 12.1. -Есть pull request. Q2. +Есть pull request. Q2. Готово. ## 13. 
Разделение ресурсов, multi-tenancy {#razdelenie-resursov-multi-tenancy} diff --git a/docs/tools/build.py b/docs/tools/build.py index 406f5689bc4..95e887f046f 100755 --- a/docs/tools/build.py +++ b/docs/tools/build.py @@ -58,17 +58,6 @@ def build_for_lang(lang, args): 'custom_dir': os.path.join(os.path.dirname(__file__), '..', args.theme_dir), 'language': lang, 'direction': 'rtl' if lang == 'fa' else 'ltr', - # TODO: cleanup - 'feature': { - 'tabs': False - }, - 'palette': { - 'primary': 'white', - 'accent': 'white' - }, - 'font': False, - 'logo': 'images/logo.svg', - 'favicon': 'assets/images/favicon.ico', 'static_templates': ['404.html'], 'extra': { 'now': int(time.mktime(datetime.datetime.now().timetuple())) # TODO better way to avoid caching diff --git a/docs/zh/getting-started/install.md b/docs/zh/getting-started/install.md index 9f06317ebd0..32eb7fa0a82 100644 --- a/docs/zh/getting-started/install.md +++ b/docs/zh/getting-started/install.md @@ -46,7 +46,7 @@ sudo yum-config-manager --add-repo https://repo.yandex.ru/clickhouse/rpm/stable/ sudo yum install clickhouse-server clickhouse-client ``` -您也可以从此处手动下载和安装软件包:https://repo.yandex.ru/clickhouse/rpm/stable/x86\_64。 +您也可以从此处手动下载和安装软件包:https://repo.yandex.ru/clickhouse/rpm/stable/x86_64。 ### 来自Docker {#from-docker-image} diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index 7bc31452aa4..65742697333 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -1,3 +1,7 @@ +if (USE_CLANG_TIDY) + set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") +endif () + # 'clickhouse' binary is a multi purpose tool, # that contain multiple execution modes (client, server, etc.) # each of them is built and linked as a separate library, defined below. diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index ce59f5cac7f..91c43160e0f 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -289,7 +289,7 @@ private: connection_entries.emplace_back(std::make_shared( connection->get(ConnectionTimeouts::getTCPTimeoutsWithoutFailover(settings)))); - pool.scheduleOrThrowOnError(std::bind(&Benchmark::thread, this, connection_entries)); + pool.scheduleOrThrowOnError([this, connection_entries]() mutable { thread(connection_entries); }); } } catch (...) 
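The `Benchmark.cpp` change above is the `modernize-avoid-bind` pattern applied throughout this PR: a capturing lambda replaces `std::bind`, making the copy of `connection_entries` and its later mutation explicit. A minimal standalone sketch of the same rewrite, with hypothetical stand-in types (not the real `ThreadPool`/`ConnectionPool` API):

```cpp
#include <functional>
#include <iostream>
#include <vector>

struct Benchmark
{
    /// Stand-in for Benchmark::thread(ConnectionPool::Entries &): mutates its argument.
    void thread(std::vector<int> & entries)
    {
        for (int & e : entries)
            e += 1;
        std::cout << "processed " << entries.size() << " entries\n";
    }

    void schedule()
    {
        std::vector<int> connection_entries{1, 2, 3};

        /// Before: std::bind stores a copy of connection_entries inside an opaque callable.
        auto bound = std::bind(&Benchmark::thread, this, connection_entries);

        /// After: the lambda spells out the copy; `mutable` is needed because
        /// thread() takes a non-const reference to the captured copy.
        auto lambda = [this, connection_entries]() mutable { thread(connection_entries); };

        bound();   /// both calls mutate their own private copy
        lambda();
    }
};

int main()
{
    Benchmark{}.schedule();
}
```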
diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 53947283faf..d6cac7a7b02 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -485,7 +485,7 @@ private: history_file = config().getString("history_file"); else { - auto history_file_from_env = getenv("CLICKHOUSE_HISTORY_FILE"); + auto * history_file_from_env = getenv("CLICKHOUSE_HISTORY_FILE"); if (history_file_from_env) history_file = history_file_from_env; else if (!home_path.empty()) @@ -1480,7 +1480,7 @@ private: "\033[1m↗\033[0m", }; - auto indicator = indicators[increment % 8]; + const char * indicator = indicators[increment % 8]; if (!send_logs && written_progress_chars) message << '\r'; diff --git a/programs/client/ConnectionParameters.cpp b/programs/client/ConnectionParameters.cpp index 50cac3b7800..f0ef3ae5694 100644 --- a/programs/client/ConnectionParameters.cpp +++ b/programs/client/ConnectionParameters.cpp @@ -51,7 +51,7 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati { std::string prompt{"Password for user (" + user + "): "}; char buf[1000] = {}; - if (auto result = readpassphrase(prompt.c_str(), buf, sizeof(buf), 0)) + if (auto * result = readpassphrase(prompt.c_str(), buf, sizeof(buf), 0)) password = result; } diff --git a/programs/client/Suggest.h b/programs/client/Suggest.h index 6c81a388ea7..b13289ac322 100644 --- a/programs/client/Suggest.h +++ b/programs/client/Suggest.h @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB diff --git a/programs/copier/ClusterCopier.cpp b/programs/copier/ClusterCopier.cpp index 8df55b63407..45cfb4963a3 100644 --- a/programs/copier/ClusterCopier.cpp +++ b/programs/copier/ClusterCopier.cpp @@ -442,7 +442,7 @@ bool ClusterCopier::checkPartitionPieceIsDone(const TaskTable & task_table, cons /// Collect all shards that contain partition piece number piece_number. 
Strings piece_status_paths; - for (auto & shard : shards_with_partition) + for (const auto & shard : shards_with_partition) { ShardPartition & task_shard_partition = shard->partition_tasks.find(partition_name)->second; ShardPartitionPiece & shard_partition_piece = task_shard_partition.pieces[piece_number]; @@ -702,7 +702,7 @@ ASTPtr ClusterCopier::removeAliasColumnsFromCreateQuery(const ASTPtr & query_ast auto new_columns_list = std::make_shared(); new_columns_list->set(new_columns_list->columns, new_columns); - if (auto indices = query_ast->as()->columns_list->indices) + if (const auto * indices = query_ast->as()->columns_list->indices) new_columns_list->set(new_columns_list->indices, indices->clone()); new_query.replace(new_query.columns_list, new_columns_list); diff --git a/programs/copier/ClusterCopierApp.cpp b/programs/copier/ClusterCopierApp.cpp index 52a37c75c72..5ab6064f0f3 100644 --- a/programs/copier/ClusterCopierApp.cpp +++ b/programs/copier/ClusterCopierApp.cpp @@ -94,7 +94,7 @@ void ClusterCopierApp::mainImpl() StatusFile status_file(process_path + "/status"); ThreadStatus thread_status; - auto log = &logger(); + auto * log = &logger(); LOG_INFO(log, "Starting clickhouse-copier (" << "id " << process_id << ", " << "host_id " << host_id << ", " diff --git a/programs/copier/Internals.cpp b/programs/copier/Internals.cpp index 545df2e779c..0613381a763 100644 --- a/programs/copier/Internals.cpp +++ b/programs/copier/Internals.cpp @@ -260,7 +260,7 @@ ShardPriority getReplicasPriority(const Cluster::Addresses & replicas, const std return res; res.is_remote = 1; - for (auto & replica : replicas) + for (const auto & replica : replicas) { if (isLocalAddress(DNSResolver::instance().resolveHost(replica.host_name))) { @@ -270,7 +270,7 @@ ShardPriority getReplicasPriority(const Cluster::Addresses & replicas, const std } res.hostname_difference = std::numeric_limits::max(); - for (auto & replica : replicas) + for (const auto & replica : replicas) { size_t difference = getHostNameDifference(local_hostname, replica.host_name); res.hostname_difference = std::min(difference, res.hostname_difference); diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 06790038a5e..eae30fddfee 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include diff --git a/programs/local/LocalServer.h b/programs/local/LocalServer.h index 77d0b9ff5dc..5733bbc1a7c 100644 --- a/programs/local/LocalServer.h +++ b/programs/local/LocalServer.h @@ -4,13 +4,12 @@ #include #include #include +#include namespace DB { -class Context; - /// Lightweight Application for clickhouse-local /// No networking, no extra configs and working directories, no pid and status files, no dictionaries, no logging. 
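The `auto *` changes in `Client.cpp`, `ConnectionParameters.cpp`, `ClusterCopierApp.cpp` and the `const auto &`/`const auto *` changes in `ClusterCopier.cpp` above all come from clang-tidy's `readability-qualified-auto`: when the initializer yields a pointer, writing `auto *` (or `const auto *`) keeps the pointer-ness and the intended constness visible at the declaration instead of hiding them behind plain `auto`. A compilable toy example (identifiers are illustrative only); the surrounding hunks continue below:

```cpp
#include <cstdlib>
#include <iostream>

int main()
{
    /// Both declarations deduce char *, but plain `auto` hides that a pointer is involved.
    auto history_file_implicit = std::getenv("CLICKHOUSE_HISTORY_FILE");   /// flagged by readability-qualified-auto
    auto * history_file_from_env = std::getenv("CLICKHOUSE_HISTORY_FILE"); /// pointer-ness is explicit

    static const char * indicators[] = {"|", "/", "-", "\\"};

    /// `const auto *` additionally documents that the pointee is never written through.
    const auto * indicator = indicators[0];

    std::cout << (history_file_from_env ? history_file_from_env : "(unset)") << ' ' << indicator << '\n';
    (void) history_file_implicit;
}
```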
/// Quiet mode by default diff --git a/programs/obfuscator/Obfuscator.cpp b/programs/obfuscator/Obfuscator.cpp index 8b5a8c73ca4..f3ac0549573 100644 --- a/programs/obfuscator/Obfuscator.cpp +++ b/programs/obfuscator/Obfuscator.cpp @@ -937,10 +937,10 @@ public: if (typeid_cast(&data_type)) return std::make_unique(seed); - if (auto type = typeid_cast(&data_type)) + if (const auto * type = typeid_cast(&data_type)) return std::make_unique(get(*type->getNestedType(), seed, markov_model_params)); - if (auto type = typeid_cast(&data_type)) + if (const auto * type = typeid_cast(&data_type)) return std::make_unique(get(*type->getNestedType(), seed, markov_model_params)); throw Exception("Unsupported data type", ErrorCodes::NOT_IMPLEMENTED); diff --git a/programs/odbc-bridge/ODBCBlockOutputStream.cpp b/programs/odbc-bridge/ODBCBlockOutputStream.cpp index c2597805230..ab24c008e40 100644 --- a/programs/odbc-bridge/ODBCBlockOutputStream.cpp +++ b/programs/odbc-bridge/ODBCBlockOutputStream.cpp @@ -24,8 +24,8 @@ namespace query.table_id.table_name = table_name; query.columns = std::make_shared(','); query.children.push_back(query.columns); - for (size_t i = 0; i < columns.size(); ++i) - query.columns->children.emplace_back(std::make_shared(columns[i].name)); + for (const auto & column : columns) + query.columns->children.emplace_back(std::make_shared(column.name)); std::stringstream ss; IAST::FormatSettings settings(ss, true); diff --git a/programs/server/HTTPHandler.cpp b/programs/server/HTTPHandler.cpp index bceeec306cf..701b5f7d735 100644 --- a/programs/server/HTTPHandler.cpp +++ b/programs/server/HTTPHandler.cpp @@ -195,7 +195,7 @@ void HTTPHandler::pushDelayedResults(Output & used_output) std::vector read_buffers; std::vector read_buffers_raw_ptr; - auto cascade_buffer = typeid_cast(used_output.out_maybe_delayed_and_compressed.get()); + auto * cascade_buffer = typeid_cast(used_output.out_maybe_delayed_and_compressed.get()); if (!cascade_buffer) throw Exception("Expected CascadeWriteBuffer", ErrorCodes::LOGICAL_ERROR); @@ -383,7 +383,7 @@ void HTTPHandler::processQuery( { auto push_memory_buffer_and_continue = [next_buffer = used_output.out_maybe_compressed] (const WriteBufferPtr & prev_buf) { - auto prev_memory_buffer = typeid_cast(prev_buf.get()); + auto * prev_memory_buffer = typeid_cast(prev_buf.get()); if (!prev_memory_buffer) throw Exception("Expected MemoryWriteBuffer", ErrorCodes::LOGICAL_ERROR); diff --git a/programs/server/HTTPHandlerFactory.cpp b/programs/server/HTTPHandlerFactory.cpp index 91cf9ddf25b..955ff6b8834 100644 --- a/programs/server/HTTPHandlerFactory.cpp +++ b/programs/server/HTTPHandlerFactory.cpp @@ -28,7 +28,7 @@ HTTPRequestHandlerFactoryMain::HTTPRequestHandlerFactoryMain(const std::string & { } -Poco::Net::HTTPRequestHandler * HTTPRequestHandlerFactoryMain::createRequestHandler(const Poco::Net::HTTPServerRequest & request) // override +Poco::Net::HTTPRequestHandler * HTTPRequestHandlerFactoryMain::createRequestHandler(const Poco::Net::HTTPServerRequest & request) { LOG_TRACE(log, "HTTP Request for " << name << ". 
" << "Method: " << request.getMethod() @@ -40,7 +40,7 @@ Poco::Net::HTTPRequestHandler * HTTPRequestHandlerFactoryMain::createRequestHand for (auto & handler_factory : child_factories) { - auto handler = handler_factory->createRequestHandler(request); + auto * handler = handler_factory->createRequestHandler(request); if (handler != nullptr) return handler; } @@ -72,80 +72,98 @@ HTTPRequestHandlerFactoryMain::TThis * HTTPRequestHandlerFactoryMain::addHandler static inline auto createHandlersFactoryFromConfig(IServer & server, const std::string & name, const String & prefix) { - auto main_handler_factory = new HTTPRequestHandlerFactoryMain(name); + auto main_handler_factory = std::make_unique(name); - try + Poco::Util::AbstractConfiguration::Keys keys; + server.config().keys(prefix, keys); + + for (const auto & key : keys) { - Poco::Util::AbstractConfiguration::Keys keys; - server.config().keys(prefix, keys); + if (!startsWith(key, "rule")) + throw Exception("Unknown element in config: " + prefix + "." + key + ", must be 'rule'", ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG); - for (const auto & key : keys) - { - if (!startsWith(key, "rule")) - throw Exception("Unknown element in config: " + prefix + "." + key + ", must be 'rule'", ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG); + const auto & handler_type = server.config().getString(prefix + "." + key + ".handler.type", ""); - const auto & handler_type = server.config().getString(prefix + "." + key + ".handler.type", ""); - - if (handler_type == "static") - main_handler_factory->addHandler(createStaticHandlerFactory(server, prefix + "." + key)); - else if (handler_type == "dynamic_query_handler") - main_handler_factory->addHandler(createDynamicHandlerFactory(server, prefix + "." + key)); - else if (handler_type == "predefined_query_handler") - main_handler_factory->addHandler(createPredefinedHandlerFactory(server, prefix + "." + key)); - else if (handler_type.empty()) - throw Exception("Handler type in config is not specified here: " + - prefix + "." + key + ".handler.type", ErrorCodes::INVALID_CONFIG_PARAMETER); - else - throw Exception("Unknown handler type '" + handler_type +"' in config here: " + - prefix + "." + key + ".handler.type",ErrorCodes::INVALID_CONFIG_PARAMETER); - } - - return main_handler_factory; - } - catch (...) - { - delete main_handler_factory; - throw; + if (handler_type == "static") + main_handler_factory->addHandler(createStaticHandlerFactory(server, prefix + "." + key)); + else if (handler_type == "dynamic_query_handler") + main_handler_factory->addHandler(createDynamicHandlerFactory(server, prefix + "." + key)); + else if (handler_type == "predefined_query_handler") + main_handler_factory->addHandler(createPredefinedHandlerFactory(server, prefix + "." + key)); + else if (handler_type.empty()) + throw Exception("Handler type in config is not specified here: " + + prefix + "." + key + ".handler.type", ErrorCodes::INVALID_CONFIG_PARAMETER); + else + throw Exception("Unknown handler type '" + handler_type +"' in config here: " + + prefix + "." 
+ key + ".handler.type",ErrorCodes::INVALID_CONFIG_PARAMETER); } + + return main_handler_factory.release(); } static const auto ping_response_expression = "Ok.\n"; static const auto root_response_expression = "config://http_server_default_response"; -static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory(IServer & server, const std::string & name, AsynchronousMetrics & async_metrics) +static inline Poco::Net::HTTPRequestHandlerFactory * createHTTPHandlerFactory( + IServer & server, const std::string & name, AsynchronousMetrics & async_metrics) { if (server.config().has("http_handlers")) return createHandlersFactoryFromConfig(server, name, "http_handlers"); else { - auto factory = (new HTTPRequestHandlerFactoryMain(name)) - ->addHandler((new HandlingRuleHTTPHandlerFactory(server, root_response_expression)) - ->attachStrictPath("/")->allowGetAndHeadRequest()) - ->addHandler((new HandlingRuleHTTPHandlerFactory(server, ping_response_expression)) - ->attachStrictPath("/ping")->allowGetAndHeadRequest()) - ->addHandler((new HandlingRuleHTTPHandlerFactory(server)) - ->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest()) - ->addHandler((new HandlingRuleHTTPHandlerFactory(server, "query"))->allowPostAndGetParamsRequest()); + auto factory = std::make_unique(name); + auto root_handler = std::make_unique>(server, root_response_expression); + root_handler->attachStrictPath("/")->allowGetAndHeadRequest(); + factory->addHandler(root_handler.release()); + + auto ping_handler = std::make_unique>(server, ping_response_expression); + ping_handler->attachStrictPath("/ping")->allowGetAndHeadRequest(); + factory->addHandler(ping_handler.release()); + + auto replicas_status_handler = std::make_unique>(server); + replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest(); + factory->addHandler(replicas_status_handler.release()); + + auto query_handler = std::make_unique>(server, "query"); + query_handler->allowPostAndGetParamsRequest(); + factory->addHandler(query_handler.release()); + + /// We check that prometheus handler will be served on current (default) port. + /// Otherwise it will be created separately, see below. 
if (server.config().has("prometheus") && server.config().getInt("prometheus.port", 0) == 0) - factory->addHandler((new HandlingRuleHTTPHandlerFactory( - server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics))) - ->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest()); + { + auto prometheus_handler = std::make_unique>( + server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics)); + prometheus_handler->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest(); + factory->addHandler(prometheus_handler.release()); + } - return factory; + return factory.release(); } } static inline Poco::Net::HTTPRequestHandlerFactory * createInterserverHTTPHandlerFactory(IServer & server, const std::string & name) { - return (new HTTPRequestHandlerFactoryMain(name)) - ->addHandler((new HandlingRuleHTTPHandlerFactory(server, root_response_expression)) - ->attachStrictPath("/")->allowGetAndHeadRequest()) - ->addHandler((new HandlingRuleHTTPHandlerFactory(server, ping_response_expression)) - ->attachStrictPath("/ping")->allowGetAndHeadRequest()) - ->addHandler((new HandlingRuleHTTPHandlerFactory(server)) - ->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest()) - ->addHandler((new HandlingRuleHTTPHandlerFactory(server))->allowPostAndGetParamsRequest()); + auto factory = std::make_unique(name); + + auto root_handler = std::make_unique>(server, root_response_expression); + root_handler->attachStrictPath("/")->allowGetAndHeadRequest(); + factory->addHandler(root_handler.release()); + + auto ping_handler = std::make_unique>(server, ping_response_expression); + ping_handler->attachStrictPath("/ping")->allowGetAndHeadRequest(); + factory->addHandler(ping_handler.release()); + + auto replicas_status_handler = std::make_unique>(server); + replicas_status_handler->attachNonStrictPath("/replicas_status")->allowGetAndHeadRequest(); + factory->addHandler(replicas_status_handler.release()); + + auto main_handler = std::make_unique>(server); + main_handler->allowPostAndGetParamsRequest(); + factory->addHandler(main_handler.release()); + + return factory.release(); } Poco::Net::HTTPRequestHandlerFactory * createHandlerFactory(IServer & server, AsynchronousMetrics & async_metrics, const std::string & name) @@ -155,9 +173,14 @@ Poco::Net::HTTPRequestHandlerFactory * createHandlerFactory(IServer & server, As else if (name == "InterserverIOHTTPHandler-factory" || name == "InterserverIOHTTPSHandler-factory") return createInterserverHTTPHandlerFactory(server, name); else if (name == "PrometheusHandler-factory") - return (new HTTPRequestHandlerFactoryMain(name))->addHandler((new HandlingRuleHTTPHandlerFactory( - server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics))) - ->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest()); + { + auto factory = std::make_unique(name); + auto handler = std::make_unique>( + server, PrometheusMetricsWriter(server.config(), "prometheus", async_metrics)); + handler->attachStrictPath(server.config().getString("prometheus.endpoint", "/metrics"))->allowGetAndHeadRequest(); + factory->addHandler(handler.release()); + return factory.release(); + } throw Exception("LOGICAL ERROR: Unknown HTTP handler factory name.", ErrorCodes::LOGICAL_ERROR); } diff --git a/programs/server/ReplicasStatusHandler.cpp b/programs/server/ReplicasStatusHandler.cpp index 2f2aa5953b6..f2d1ffe2ee5 100644 --- 
a/programs/server/ReplicasStatusHandler.cpp +++ b/programs/server/ReplicasStatusHandler.cpp @@ -46,7 +46,7 @@ void ReplicasStatusHandler::handleRequest(Poco::Net::HTTPServerRequest & request for (auto iterator = db.second->getTablesIterator(); iterator->isValid(); iterator->next()) { - auto & table = iterator->table(); + const auto & table = iterator->table(); StorageReplicatedMergeTree * table_replicated = dynamic_cast(table.get()); if (!table_replicated) diff --git a/programs/server/TCPHandler.cpp b/programs/server/TCPHandler.cpp index a792af30cf2..3ae87a57725 100644 --- a/programs/server/TCPHandler.cpp +++ b/programs/server/TCPHandler.cpp @@ -278,8 +278,11 @@ void TCPHandler::runImpl() sendLogs(); sendEndOfStream(); - query_scope.reset(); + /// QueryState should be cleared before QueryScope, since otherwise + /// the MemoryTracker will be wrong for possible deallocations. + /// (i.e. deallocations from the Aggregator with two-level aggregation) state.reset(); + query_scope.reset(); } catch (const Exception & e) { @@ -359,8 +362,11 @@ void TCPHandler::runImpl() try { - query_scope.reset(); + /// QueryState should be cleared before QueryScope, since otherwise + /// the MemoryTracker will be wrong for possible deallocations. + /// (i.e. deallocations from the Aggregator with two-level aggregation) state.reset(); + query_scope.reset(); } catch (...) { diff --git a/src/AggregateFunctions/AggregateFunctionNull.h b/src/AggregateFunctions/AggregateFunctionNull.h index e5309e1300a..55d610207f1 100644 --- a/src/AggregateFunctions/AggregateFunctionNull.h +++ b/src/AggregateFunctions/AggregateFunctionNull.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -53,13 +54,13 @@ protected: static void initFlag(AggregateDataPtr place) noexcept { - if (result_is_nullable) + if constexpr (result_is_nullable) place[0] = 0; } static void setFlag(AggregateDataPtr place) noexcept { - if (result_is_nullable) + if constexpr (result_is_nullable) place[0] = 1; } @@ -72,7 +73,7 @@ public: AggregateFunctionNullBase(AggregateFunctionPtr nested_function_, const DataTypes & arguments, const Array & params) : IAggregateFunctionHelper(arguments, params), nested_function{nested_function_} { - if (result_is_nullable) + if constexpr (result_is_nullable) prefix_size = nested_function->alignOfData(); else prefix_size = 0; @@ -128,7 +129,7 @@ public: void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override { bool flag = getFlag(place); - if (result_is_nullable) + if constexpr (result_is_nullable) writeBinary(flag, buf); if (flag) nested_function->serialize(nestedPlace(place), buf); @@ -137,7 +138,7 @@ public: void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena * arena) const override { bool flag = 1; - if (result_is_nullable) + if constexpr (result_is_nullable) readBinary(flag, buf); if (flag) { @@ -148,7 +149,7 @@ public: void insertResultInto(AggregateDataPtr place, IColumn & to) const override { - if (result_is_nullable) + if constexpr (result_is_nullable) { ColumnNullable & to_concrete = assert_cast(to); if (getFlag(place)) @@ -194,13 +195,26 @@ public: void add(AggregateDataPtr place, const IColumn ** columns, size_t row_num, Arena * arena) const override { const ColumnNullable * column = assert_cast(columns[0]); + const IColumn * nested_column = &column->getNestedColumn(); if (!column->isNullAt(row_num)) { this->setFlag(place); - const IColumn * nested_column = &column->getNestedColumn(); this->nested_function->add(this->nestedPlace(place), &nested_column, 
row_num, arena); } } + + void addBatchSinglePlace(size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena) const override + { + const ColumnNullable * column = assert_cast(columns[0]); + const IColumn * nested_column = &column->getNestedColumn(); + const UInt8 * null_map = column->getNullMapData().data(); + + this->nested_function->addBatchSinglePlaceNotNull(batch_size, this->nestedPlace(place), &nested_column, null_map, arena); + + if constexpr (result_is_nullable) + if (!memoryIsByte(null_map, batch_size, 1)) + this->setFlag(place); + } }; diff --git a/src/AggregateFunctions/AggregateFunctionSum.h b/src/AggregateFunctions/AggregateFunctionSum.h index e9a6e50d9ef..9d3d559ecee 100644 --- a/src/AggregateFunctions/AggregateFunctionSum.h +++ b/src/AggregateFunctions/AggregateFunctionSum.h @@ -20,11 +20,72 @@ struct AggregateFunctionSumData { T sum{}; - void add(T value) + void ALWAYS_INLINE add(T value) { sum += value; } + /// Vectorized version + template + void NO_INLINE addMany(const Value * __restrict ptr, size_t count) + { + /// Compiler cannot unroll this loop, do it manually. + /// (at least for floats, most likely due to the lack of -fassociative-math) + + /// Something around the number of SSE registers * the number of elements fit in register. + constexpr size_t unroll_count = 128 / sizeof(T); + T partial_sums[unroll_count]{}; + + const auto * end = ptr + count; + const auto * unrolled_end = ptr + (count / unroll_count * unroll_count); + + while (ptr < unrolled_end) + { + for (size_t i = 0; i < unroll_count; ++i) + partial_sums[i] += ptr[i]; + ptr += unroll_count; + } + + for (size_t i = 0; i < unroll_count; ++i) + sum += partial_sums[i]; + + while (ptr < end) + { + sum += *ptr; + ++ptr; + } + } + + template + void NO_INLINE addManyNotNull(const Value * __restrict ptr, const UInt8 * __restrict null_map, size_t count) + { + constexpr size_t unroll_count = 128 / sizeof(T); + T partial_sums[unroll_count]{}; + + const auto * end = ptr + count; + const auto * unrolled_end = ptr + (count / unroll_count * unroll_count); + + while (ptr < unrolled_end) + { + for (size_t i = 0; i < unroll_count; ++i) + if (!null_map[i]) + partial_sums[i] += ptr[i]; + ptr += unroll_count; + null_map += unroll_count; + } + + for (size_t i = 0; i < unroll_count; ++i) + sum += partial_sums[i]; + + while (ptr < end) + { + if (!*null_map) + sum += *ptr; + ++ptr; + ++null_map; + } + } + void merge(const AggregateFunctionSumData & rhs) { sum += rhs.sum; @@ -55,21 +116,95 @@ struct AggregateFunctionSumKahanData T sum{}; T compensation{}; - void add(T value) + template + void ALWAYS_INLINE addImpl(Value value, T & out_sum, T & out_compensation) { - auto compensated_value = value - compensation; - auto new_sum = sum + compensated_value; - compensation = (new_sum - sum) - compensated_value; - sum = new_sum; + auto compensated_value = value - out_compensation; + auto new_sum = out_sum + compensated_value; + out_compensation = (new_sum - out_sum) - compensated_value; + out_sum = new_sum; + } + + void ALWAYS_INLINE add(T value) + { + addImpl(value, sum, compensation); + } + + /// Vectorized version + template + void NO_INLINE addMany(const Value * __restrict ptr, size_t count) + { + /// Less than in ordinary sum, because the algorithm is more complicated and too large loop unrolling is questionable. + /// But this is just a guess. 
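For readers unfamiliar with why `AggregateFunctionSumKahanData` keeps a `compensation` member at all: in plain floating-point accumulation the low-order bits of each addend are discarded once the running sum grows large, and compensated (Kahan) summation recovers them at the cost of a few extra operations per element. A freestanding demonstration of the effect, independent of the ClickHouse classes (the unrolled Kahan `addMany` continues below):

```cpp
#include <cstdio>

int main()
{
    /// Sum 0.1f ten million times; the mathematically expected result is 1000000.
    float naive_sum = 0.0f;
    float kahan_sum = 0.0f;
    float compensation = 0.0f;

    for (int i = 0; i < 10000000; ++i)
    {
        naive_sum += 0.1f; /// low bits of the addend are lost once the sum is large

        /// Compensated addition, same scheme as addImpl() above.
        float compensated_value = 0.1f - compensation;
        float new_sum = kahan_sum + compensated_value;
        compensation = (new_sum - kahan_sum) - compensated_value;
        kahan_sum = new_sum;
    }

    /// The naive sum drifts visibly away from 1000000, while the Kahan sum
    /// stays at the correctly rounded result. Build without -ffast-math,
    /// which would license the compiler to simplify the compensation away.
    std::printf("naive: %f\nkahan: %f\n", naive_sum, kahan_sum);
}
```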
+ constexpr size_t unroll_count = 4; + T partial_sums[unroll_count]{}; + T partial_compensations[unroll_count]{}; + + const auto * end = ptr + count; + const auto * unrolled_end = ptr + (count / unroll_count * unroll_count); + + while (ptr < unrolled_end) + { + for (size_t i = 0; i < unroll_count; ++i) + addImpl(ptr[i], partial_sums[i], partial_compensations[i]); + ptr += unroll_count; + } + + for (size_t i = 0; i < unroll_count; ++i) + mergeImpl(sum, compensation, partial_sums[i], partial_compensations[i]); + + while (ptr < end) + { + addImpl(*ptr, sum, compensation); + ++ptr; + } + } + + template + void NO_INLINE addManyNotNull(const Value * __restrict ptr, const UInt8 * __restrict null_map, size_t count) + { + constexpr size_t unroll_count = 4; + T partial_sums[unroll_count]{}; + T partial_compensations[unroll_count]{}; + + const auto * end = ptr + count; + const auto * unrolled_end = ptr + (count / unroll_count * unroll_count); + + while (ptr < unrolled_end) + { + for (size_t i = 0; i < unroll_count; ++i) + if (!null_map[i]) + addImpl(ptr[i], partial_sums[i], partial_compensations[i]); + ptr += unroll_count; + null_map += unroll_count; + } + + for (size_t i = 0; i < unroll_count; ++i) + mergeImpl(sum, compensation, partial_sums[i], partial_compensations[i]); + + while (ptr < end) + { + if (!*null_map) + addImpl(*ptr, sum, compensation); + ++ptr; + ++null_map; + } + } + + void ALWAYS_INLINE mergeImpl(T & to_sum, T & to_compensation, T from_sum, T from_compensation) + { + auto raw_sum = to_sum + from_sum; + auto rhs_compensated = raw_sum - to_sum; + /// Kahan summation is tricky because it depends on non-associativity of float arithmetic. + /// Do not simplify this expression if you are not sure. + auto compensations = ((from_sum - rhs_compensated) + (to_sum - (raw_sum - rhs_compensated))) + compensation + from_compensation; + to_sum = raw_sum + compensations; + to_compensation = compensations - (to_sum - raw_sum); } void merge(const AggregateFunctionSumKahanData & rhs) { - auto raw_sum = sum + rhs.sum; - auto rhs_compensated = raw_sum - sum; - auto compensations = ((rhs.sum - rhs_compensated) + (sum - (raw_sum - rhs_compensated))) + compensation + rhs.compensation; - sum = raw_sum + compensations; - compensation = compensations - (sum - raw_sum); + mergeImpl(sum, compensation, rhs.sum, rhs.compensation); } void write(WriteBuffer & buf) const @@ -141,6 +276,20 @@ public: this->data(place).add(column.getData()[row_num]); } + /// Vectorized version when there is no GROUP BY keys. 
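Why the aggregation dispatches through `addBatchSinglePlace` at all: the per-row `add()` is a virtual call, so aggregating a block row by row pays one indirect call per row and keeps the compiler from vectorizing. The batch methods move the loop inside a single virtual call, which is what lets the `addMany`/`addManyNotNull` loops above be written in a SIMD-friendly way. A deliberately simplified model of the interface, with `double` standing in for `AggregateDataPtr` and `IColumn` (the real `addBatchSinglePlace` override follows right after this note):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

struct IAggregateFunction
{
    virtual ~IAggregateFunction() = default;
    virtual void add(double & place, const double * column, size_t row) const = 0;

    /// Default: still row-by-row, but behind a single virtual dispatch.
    virtual void addBatchSinglePlace(size_t batch_size, double & place, const double * column) const
    {
        for (size_t i = 0; i < batch_size; ++i)
            add(place, column, i);
    }
};

struct SumFunction : IAggregateFunction
{
    void add(double & place, const double * column, size_t row) const override { place += column[row]; }

    /// Specialized batch path: a plain loop the compiler can vectorize.
    void addBatchSinglePlace(size_t batch_size, double & place, const double * column) const override
    {
        for (size_t i = 0; i < batch_size; ++i)
            place += column[i];
    }
};

int main()
{
    std::vector<double> column(1000, 1.5);
    double place = 0;
    SumFunction sum;
    sum.addBatchSinglePlace(column.size(), place, column.data());
    std::cout << place << '\n'; /// 1500
}
```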
+ void addBatchSinglePlace(size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena *) const override + { + const auto & column = static_cast(*columns[0]); + this->data(place).addMany(column.getData().data(), batch_size); + } + + void addBatchSinglePlaceNotNull( + size_t batch_size, AggregateDataPtr place, const IColumn ** columns, const UInt8 * null_map, Arena *) const override + { + const auto & column = static_cast(*columns[0]); + this->data(place).addManyNotNull(column.getData().data(), null_map, batch_size); + } + void merge(AggregateDataPtr place, ConstAggregateDataPtr rhs, Arena *) const override { this->data(place).merge(this->data(rhs)); diff --git a/src/AggregateFunctions/IAggregateFunction.h b/src/AggregateFunctions/IAggregateFunction.h index ad074feffc5..1870eee07b8 100644 --- a/src/AggregateFunctions/IAggregateFunction.h +++ b/src/AggregateFunctions/IAggregateFunction.h @@ -145,6 +145,11 @@ public: */ virtual void addBatchSinglePlace(size_t batch_size, AggregateDataPtr place, const IColumn ** columns, Arena * arena) const = 0; + /** The same for single place when need to aggregate only filtered data. + */ + virtual void addBatchSinglePlaceNotNull( + size_t batch_size, AggregateDataPtr place, const IColumn ** columns, const UInt8 * null_map, Arena * arena) const = 0; + /** In addition to addBatch, this method collects multiple rows of arguments into array "places" * as long as they are between offsets[i-1] and offsets[i]. This is used for arrayReduce and * -Array combinator. It might also be used generally to break data dependency when array @@ -201,6 +206,14 @@ public: static_cast(this)->add(place, columns, i, arena); } + void addBatchSinglePlaceNotNull( + size_t batch_size, AggregateDataPtr place, const IColumn ** columns, const UInt8 * null_map, Arena * arena) const override + { + for (size_t i = 0; i < batch_size; ++i) + if (!null_map[i]) + static_cast(this)->add(place, columns, i, arena); + } + void addBatchArray( size_t batch_size, AggregateDataPtr * places, size_t place_offset, const IColumn ** columns, const UInt64 * offsets, Arena * arena) const override diff --git a/src/Client/Connection.h b/src/Client/Connection.h index de04e3f0ef4..e056a4323df 100644 --- a/src/Client/Connection.h +++ b/src/Client/Connection.h @@ -50,6 +50,8 @@ class Connection; using ConnectionPtr = std::shared_ptr; using Connections = std::vector; +using Scalars = std::map; + /// Packet that could be received from server. 
struct Packet diff --git a/src/Common/tests/cow_columns.cpp b/src/Common/tests/cow_columns.cpp index fa84fc9ebc2..404b478f5a0 100644 --- a/src/Common/tests/cow_columns.cpp +++ b/src/Common/tests/cow_columns.cpp @@ -56,8 +56,8 @@ int main(int, char **) MutableColumnPtr mut = IColumn::mutate(std::move(y)); mut->set(2); - std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << ", " << mut->use_count() << "\n"; - std::cerr << "addresses: " << x.get() << ", " << y.get() << ", " << mut.get() << "\n"; + std::cerr << "refcounts: " << x->use_count() << ", " << mut->use_count() << "\n"; + std::cerr << "addresses: " << x.get() << ", " << mut.get() << "\n"; y = std::move(mut); } @@ -75,8 +75,8 @@ int main(int, char **) MutableColumnPtr mut = IColumn::mutate(std::move(y)); mut->set(3); - std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << ", " << mut->use_count() << "\n"; - std::cerr << "addresses: " << x.get() << ", " << y.get() << ", " << mut.get() << "\n"; + std::cerr << "refcounts: " << x->use_count() << ", " << mut->use_count() << "\n"; + std::cerr << "addresses: " << x.get() << ", " << mut.get() << "\n"; y = std::move(mut); } diff --git a/src/Common/tests/cow_compositions.cpp b/src/Common/tests/cow_compositions.cpp index be33f392497..74369e86300 100644 --- a/src/Common/tests/cow_compositions.cpp +++ b/src/Common/tests/cow_compositions.cpp @@ -75,8 +75,8 @@ int main(int, char **) MutableColumnPtr mut = IColumn::mutate(std::move(y)); mut->set(2); - std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << ", " << mut->use_count() << "\n"; - std::cerr << "addresses: " << x.get() << ", " << y.get() << ", " << mut.get() << "\n"; + std::cerr << "refcounts: " << x->use_count() << ", " << mut->use_count() << "\n"; + std::cerr << "addresses: " << x.get() << ", " << mut.get() << "\n"; y = std::move(mut); } @@ -94,8 +94,8 @@ int main(int, char **) MutableColumnPtr mut = IColumn::mutate(std::move(y)); mut->set(3); - std::cerr << "refcounts: " << x->use_count() << ", " << y->use_count() << ", " << mut->use_count() << "\n"; - std::cerr << "addresses: " << x.get() << ", " << y.get() << ", " << mut.get() << "\n"; + std::cerr << "refcounts: " << x->use_count() << ", " << mut->use_count() << "\n"; + std::cerr << "addresses: " << x.get() << ", " << mut.get() << "\n"; y = std::move(mut); } diff --git a/src/Core/Settings.h b/src/Core/Settings.h index eda76584f0b..28b068339ce 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -52,6 +52,8 @@ struct Settings : public SettingsCollection M(SettingUInt64, max_insert_block_size, DEFAULT_INSERT_BLOCK_SIZE, "The maximum block size for insertion, if we control the creation of blocks for insertion.", 0) \ M(SettingUInt64, min_insert_block_size_rows, DEFAULT_INSERT_BLOCK_SIZE, "Squash blocks passed to INSERT query to specified size in rows, if blocks are not big enough.", 0) \ M(SettingUInt64, min_insert_block_size_bytes, (DEFAULT_INSERT_BLOCK_SIZE * 256), "Squash blocks passed to INSERT query to specified size in bytes, if blocks are not big enough.", 0) \ + M(SettingUInt64, min_insert_block_size_rows_for_materialized_views, 0, "Like min_insert_block_size_rows, but applied only during pushing to MATERIALIZED VIEW (default: min_insert_block_size_rows)", 0) \ + M(SettingUInt64, min_insert_block_size_bytes_for_materialized_views, 0, "Like min_insert_block_size_bytes, but applied only during pushing to MATERIALIZED VIEW (default: min_insert_block_size_bytes)", 0) \ M(SettingUInt64,
max_joined_block_size_rows, DEFAULT_BLOCK_SIZE, "Maximum block size for JOIN result (if join algorithm supports it). 0 means unlimited.", 0) \ M(SettingUInt64, max_insert_threads, 0, "The maximum number of threads to execute the INSERT SELECT query. Values 0 or 1 means that INSERT SELECT is not run in parallel. Higher values will lead to higher memory usage. Parallel INSERT SELECT has effect only if the SELECT part is run on parallel, see 'max_threads' setting.", 0) \ M(SettingUInt64, max_final_threads, 16, "The maximum number of threads to read from table with FINAL.", 0) \ diff --git a/src/DataStreams/AddingDefaultBlockOutputStream.h b/src/DataStreams/AddingDefaultBlockOutputStream.h index e04fdc0faeb..89134920730 100644 --- a/src/DataStreams/AddingDefaultBlockOutputStream.h +++ b/src/DataStreams/AddingDefaultBlockOutputStream.h @@ -3,12 +3,12 @@ #include #include #include -#include namespace DB { +class Context; /** This stream adds three types of columns into block * 1. Columns, that are missed inside request, but present in table without defaults (missed columns) diff --git a/src/DataStreams/AddingDefaultsBlockInputStream.h b/src/DataStreams/AddingDefaultsBlockInputStream.h index cf16ec3fb75..10fa20f7ad1 100644 --- a/src/DataStreams/AddingDefaultsBlockInputStream.h +++ b/src/DataStreams/AddingDefaultsBlockInputStream.h @@ -2,12 +2,13 @@ #include #include -#include namespace DB { +class Context; + /// Adds defaults to columns using BlockDelayedDefaults bitmask attached to Block by child InputStream. class AddingDefaultsBlockInputStream : public IBlockInputStream { diff --git a/src/DataStreams/CreatingSetsBlockInputStream.cpp b/src/DataStreams/CreatingSetsBlockInputStream.cpp index 1a67031df5d..be89e0e87b0 100644 --- a/src/DataStreams/CreatingSetsBlockInputStream.cpp +++ b/src/DataStreams/CreatingSetsBlockInputStream.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include diff --git a/src/DataStreams/ParallelParsingBlockInputStream.h b/src/DataStreams/ParallelParsingBlockInputStream.h index 03f0d508227..a904c686e47 100644 --- a/src/DataStreams/ParallelParsingBlockInputStream.h +++ b/src/DataStreams/ParallelParsingBlockInputStream.h @@ -8,7 +8,6 @@ #include #include #include -#include namespace DB { diff --git a/src/DataStreams/PushingToViewsBlockOutputStream.cpp b/src/DataStreams/PushingToViewsBlockOutputStream.cpp index ce0922bf282..7f730b5fd3f 100644 --- a/src/DataStreams/PushingToViewsBlockOutputStream.cpp +++ b/src/DataStreams/PushingToViewsBlockOutputStream.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -40,10 +41,20 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream( /// We need special context for materialized views insertions if (!dependencies.empty()) { - views_context = std::make_unique(context); + select_context = std::make_unique(context); + insert_context = std::make_unique(context); + + const auto & insert_settings = insert_context->getSettingsRef(); + // Do not deduplicate insertions into MV if the main insertion is Ok if (disable_deduplication_for_children) - views_context->setSetting("insert_deduplicate", false); + insert_context->setSetting("insert_deduplicate", false); + + // Separate min_insert_block_size_rows/min_insert_block_size_bytes for children + if (insert_settings.min_insert_block_size_rows_for_materialized_views.changed) + insert_context->setSetting("min_insert_block_size_rows", insert_settings.min_insert_block_size_rows_for_materialized_views.value); + if 
(insert_settings.min_insert_block_size_bytes_for_materialized_views.changed) + insert_context->setSetting("min_insert_block_size_bytes", insert_settings.min_insert_block_size_bytes_for_materialized_views.value); } for (const auto & database_table : dependencies) @@ -67,7 +78,7 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream( insert->table_id = inner_table_id; /// Get list of columns we get from select query. - auto header = InterpreterSelectQuery(query, *views_context, SelectQueryOptions().analyze()) + auto header = InterpreterSelectQuery(query, *select_context, SelectQueryOptions().analyze()) .getSampleBlock(); /// Insert only columns returned by select. @@ -81,14 +92,14 @@ PushingToViewsBlockOutputStream::PushingToViewsBlockOutputStream( insert->columns = std::move(list); ASTPtr insert_query_ptr(insert.release()); - InterpreterInsertQuery interpreter(insert_query_ptr, *views_context); + InterpreterInsertQuery interpreter(insert_query_ptr, *insert_context); BlockIO io = interpreter.execute(); out = io.out; } else if (dynamic_cast(dependent_table.get())) - out = std::make_shared(dependent_table, *views_context, ASTPtr(), true); + out = std::make_shared(dependent_table, *insert_context, ASTPtr(), true); else - out = std::make_shared(dependent_table, *views_context, ASTPtr()); + out = std::make_shared(dependent_table, *insert_context, ASTPtr()); views.emplace_back(ViewInfo{std::move(query), database_table, std::move(out), nullptr}); } @@ -258,7 +269,7 @@ void PushingToViewsBlockOutputStream::process(const Block & block, size_t view_n /// but it will contain single block (that is INSERT-ed into main table). /// InterpreterSelectQuery will do processing of alias columns. - Context local_context = *views_context; + Context local_context = *select_context; local_context.addViewSource( StorageValues::create( storage->getStorageID(), storage->getColumns(), block, storage->getVirtuals())); diff --git a/src/DataStreams/PushingToViewsBlockOutputStream.h b/src/DataStreams/PushingToViewsBlockOutputStream.h index a2a1ca5caf5..c5fef413a23 100644 --- a/src/DataStreams/PushingToViewsBlockOutputStream.h +++ b/src/DataStreams/PushingToViewsBlockOutputStream.h @@ -44,7 +44,8 @@ private: }; std::vector views; - std::unique_ptr views_context; + std::unique_ptr select_context; + std::unique_ptr insert_context; void process(const Block & block, size_t view_num); }; diff --git a/src/DataStreams/RemoteBlockOutputStream.h b/src/DataStreams/RemoteBlockOutputStream.h index 40387180997..2c89a7358ad 100644 --- a/src/DataStreams/RemoteBlockOutputStream.h +++ b/src/DataStreams/RemoteBlockOutputStream.h @@ -4,6 +4,7 @@ #include #include #include +#include namespace DB diff --git a/src/DataTypes/DataTypeString.cpp b/src/DataTypes/DataTypeString.cpp index a7bfb2b635d..4a9a6e9ab78 100644 --- a/src/DataTypes/DataTypeString.cpp +++ b/src/DataTypes/DataTypeString.cpp @@ -376,8 +376,10 @@ void registerDataTypeString(DataTypeFactory & factory) /// These synonyms are added for compatibility. 
factory.registerAlias("CHAR", "String", DataTypeFactory::CaseInsensitive); + factory.registerAlias("NCHAR", "String", DataTypeFactory::CaseInsensitive); factory.registerAlias("CHARACTER", "String", DataTypeFactory::CaseInsensitive); factory.registerAlias("VARCHAR", "String", DataTypeFactory::CaseInsensitive); + factory.registerAlias("NVARCHAR", "String", DataTypeFactory::CaseInsensitive); factory.registerAlias("VARCHAR2", "String", DataTypeFactory::CaseInsensitive); /// Oracle factory.registerAlias("TEXT", "String", DataTypeFactory::CaseInsensitive); factory.registerAlias("TINYTEXT", "String", DataTypeFactory::CaseInsensitive); diff --git a/src/Databases/DatabaseAtomic.cpp b/src/Databases/DatabaseAtomic.cpp index 2894b65274f..27a2441cec6 100644 --- a/src/Databases/DatabaseAtomic.cpp +++ b/src/Databases/DatabaseAtomic.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include diff --git a/src/Databases/DatabaseLazy.h b/src/Databases/DatabaseLazy.h index 77ad1a3ab20..4306e61b37b 100644 --- a/src/Databases/DatabaseLazy.h +++ b/src/Databases/DatabaseLazy.h @@ -1,7 +1,6 @@ #pragma once #include -#include #include @@ -10,6 +9,7 @@ namespace DB class DatabaseLazyIterator; +class Context; /** Lazy engine of databases. * Works like DatabaseOrdinary, but stores in memory only cache. diff --git a/src/Databases/DatabaseMemory.cpp b/src/Databases/DatabaseMemory.cpp index 417761f81e7..84fec6bcc22 100644 --- a/src/Databases/DatabaseMemory.cpp +++ b/src/Databases/DatabaseMemory.cpp @@ -5,6 +5,7 @@ #include #include #include +#include namespace DB @@ -84,4 +85,10 @@ UUID DatabaseMemory::tryGetTableUUID(const String & table_name) const return UUIDHelpers::Nil; } +void DatabaseMemory::drop(const Context & context) +{ + /// Remove data on explicit DROP DATABASE + std::filesystem::remove_all(context.getPath() + data_path); +} + } diff --git a/src/Databases/DatabaseMemory.h b/src/Databases/DatabaseMemory.h index 29a9abc9d75..ad34c4d9097 100644 --- a/src/Databases/DatabaseMemory.h +++ b/src/Databases/DatabaseMemory.h @@ -46,6 +46,8 @@ public: UUID tryGetTableUUID(const String & table_name) const override; + void drop(const Context & context) override; + private: String data_path; using NameToASTCreate = std::unordered_map; diff --git a/src/Databases/DatabaseMySQL.h b/src/Databases/DatabaseMySQL.h index d729c62a8bb..a43da5d1762 100644 --- a/src/Databases/DatabaseMySQL.h +++ b/src/Databases/DatabaseMySQL.h @@ -5,14 +5,16 @@ #include #include -#include #include #include +#include namespace DB { +class Context; + /** Real-time access to table list and table structure from remote MySQL * It doesn't make any manipulations with filesystem. 
* All tables are created by calling code after real-time pull-out structure from remote MySQL diff --git a/src/Databases/DatabaseOnDisk.h b/src/Databases/DatabaseOnDisk.h index a07a143ea92..75609e231af 100644 --- a/src/Databases/DatabaseOnDisk.h +++ b/src/Databases/DatabaseOnDisk.h @@ -3,7 +3,6 @@ #include #include #include -#include #include #include @@ -11,6 +10,8 @@ namespace DB { +class Context; + std::pair createTableFromAST( ASTCreateQuery ast_create_query, const String & database_name, diff --git a/src/Databases/DatabaseWithDictionaries.h b/src/Databases/DatabaseWithDictionaries.h index a02b68a56da..eb9e105e31d 100644 --- a/src/Databases/DatabaseWithDictionaries.h +++ b/src/Databases/DatabaseWithDictionaries.h @@ -5,6 +5,9 @@ namespace DB { +class Context; +class ExternalDictionariesLoader; + class DatabaseWithDictionaries : public DatabaseOnDisk { diff --git a/src/Disks/DiskSelector.cpp b/src/Disks/DiskSelector.cpp index 0ae8763eef3..69549e4520d 100644 --- a/src/Disks/DiskSelector.cpp +++ b/src/Disks/DiskSelector.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include diff --git a/src/Disks/DiskSelector.h b/src/Disks/DiskSelector.h index 85b67f55d0a..8ae8de3be61 100644 --- a/src/Disks/DiskSelector.h +++ b/src/Disks/DiskSelector.h @@ -1,6 +1,5 @@ #pragma once -#include #include #include @@ -10,6 +9,7 @@ namespace DB { +class Context; class DiskSelector; using DiskSelectorPtr = std::shared_ptr; diff --git a/src/Disks/S3/ProxyResolverConfiguration.cpp b/src/Disks/S3/ProxyResolverConfiguration.cpp index a574809596f..c36432d933e 100644 --- a/src/Disks/S3/ProxyResolverConfiguration.cpp +++ b/src/Disks/S3/ProxyResolverConfiguration.cpp @@ -7,6 +7,11 @@ #include #include +namespace DB::ErrorCodes +{ + extern const int BAD_ARGUMENTS; +} + namespace DB::S3 { ProxyResolverConfiguration::ProxyResolverConfiguration(const Poco::URI & endpoint_, String proxy_scheme_, unsigned proxy_port_) @@ -30,13 +35,16 @@ Aws::Client::ClientConfigurationPerRequest ProxyResolverConfiguration::getConfig Aws::Client::ClientConfigurationPerRequest cfg; try { - /// It should be just empty GET / request. - Poco::Net::HTTPRequest request(Poco::Net::HTTPRequest::HTTP_1_1); + /// It should be just empty GET request. + Poco::Net::HTTPRequest request(Poco::Net::HTTPRequest::HTTP_GET, endpoint.getPath(), Poco::Net::HTTPRequest::HTTP_1_1); session->sendRequest(request); Poco::Net::HTTPResponse response; auto & response_body_stream = session->receiveResponse(response); + if (response.getStatus() != Poco::Net::HTTPResponse::HTTP_OK) + throw Exception("Proxy resolver returned not OK status: " + response.getReason(), ErrorCodes::BAD_ARGUMENTS); + String proxy_host; /// Read proxy host as string from response body. Poco::StreamCopier::copyToString(response_body_stream, proxy_host); diff --git a/src/Disks/S3/ProxyResolverConfiguration.h b/src/Disks/S3/ProxyResolverConfiguration.h index 0b23ae77c4a..113ee6ea035 100644 --- a/src/Disks/S3/ProxyResolverConfiguration.h +++ b/src/Disks/S3/ProxyResolverConfiguration.h @@ -6,7 +6,7 @@ namespace DB::S3 { /** * Proxy configuration where proxy host is obtained each time from specified endpoint. - * For each request to S3 it makes GET request to specified endpoint and reads proxy host from a response body. + * For each request to S3 it makes GET request to specified endpoint URL and reads proxy host from a response body. * Specified scheme and port added to obtained proxy host to form completed proxy URL. 
*/ class ProxyResolverConfiguration : public ProxyConfiguration diff --git a/src/Disks/S3/registerDiskS3.cpp b/src/Disks/S3/registerDiskS3.cpp index 2b72f872dd2..81879800b7f 100644 --- a/src/Disks/S3/registerDiskS3.cpp +++ b/src/Disks/S3/registerDiskS3.cpp @@ -37,13 +37,14 @@ namespace void checkRemoveAccess(IDisk & disk) { disk.remove("test_acl"); } - std::shared_ptr getProxyResolverConfiguration(const Poco::Util::AbstractConfiguration * proxy_resolver_config) + std::shared_ptr getProxyResolverConfiguration( + const String & prefix, const Poco::Util::AbstractConfiguration & proxy_resolver_config) { - auto endpoint = Poco::URI(proxy_resolver_config->getString("endpoint")); - auto proxy_scheme = proxy_resolver_config->getString("proxy_scheme"); + auto endpoint = Poco::URI(proxy_resolver_config.getString(prefix + ".endpoint")); + auto proxy_scheme = proxy_resolver_config.getString(prefix + ".proxy_scheme"); if (proxy_scheme != "http" && proxy_scheme != "https") throw Exception("Only HTTP/HTTPS schemas allowed in proxy resolver config: " + proxy_scheme, ErrorCodes::BAD_ARGUMENTS); - auto proxy_port = proxy_resolver_config->getUInt("proxy_port"); + auto proxy_port = proxy_resolver_config.getUInt(prefix + ".proxy_port"); LOG_DEBUG( &Logger::get("DiskS3"), "Configured proxy resolver: " << endpoint.toString() << ", Scheme: " << proxy_scheme << ", Port: " << proxy_port); @@ -51,16 +52,17 @@ namespace return std::make_shared(endpoint, proxy_scheme, proxy_port); } - std::shared_ptr getProxyListConfiguration(const Poco::Util::AbstractConfiguration * proxy_config) + std::shared_ptr getProxyListConfiguration( + const String & prefix, const Poco::Util::AbstractConfiguration & proxy_config) { std::vector keys; - proxy_config->keys(keys); + proxy_config.keys(prefix, keys); std::vector proxies; for (const auto & key : keys) if (startsWith(key, "uri")) { - Poco::URI proxy_uri(proxy_config->getString(key)); + Poco::URI proxy_uri(proxy_config.getString(prefix + "." 
+ key)); if (proxy_uri.getScheme() != "http" && proxy_uri.getScheme() != "https") throw Exception("Only HTTP/HTTPS schemas allowed in proxy uri: " + proxy_uri.toString(), ErrorCodes::BAD_ARGUMENTS); @@ -78,25 +80,23 @@ namespace return nullptr; } - std::shared_ptr getProxyConfiguration(const Poco::Util::AbstractConfiguration * config) + std::shared_ptr getProxyConfiguration(const String & prefix, const Poco::Util::AbstractConfiguration & config) { - if (!config->has("proxy")) + if (!config.has(prefix + ".proxy")) return nullptr; - const auto * proxy_config = config->createView("proxy"); - std::vector config_keys; - proxy_config->keys(config_keys); + config.keys(prefix + ".proxy", config_keys); if (auto resolver_configs = std::count(config_keys.begin(), config_keys.end(), "resolver")) { if (resolver_configs > 1) throw Exception("Multiple proxy resolver configurations aren't allowed", ErrorCodes::BAD_ARGUMENTS); - return getProxyResolverConfiguration(proxy_config->createView("resolver")); + return getProxyResolverConfiguration(prefix + ".proxy.resolver", config); } - return getProxyListConfiguration(proxy_config); + return getProxyListConfiguration(prefix + ".proxy", config); } } @@ -107,27 +107,25 @@ void registerDiskS3(DiskFactory & factory) const Poco::Util::AbstractConfiguration & config, const String & config_prefix, const Context & context) -> DiskPtr { - const auto * disk_config = config.createView(config_prefix); - Poco::File disk{context.getPath() + "disks/" + name}; disk.createDirectories(); Aws::Client::ClientConfiguration cfg; - S3::URI uri(Poco::URI(disk_config->getString("endpoint"))); + S3::URI uri(Poco::URI(config.getString(config_prefix + ".endpoint"))); if (uri.key.back() != '/') throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS); cfg.endpointOverride = uri.endpoint; - auto proxy_config = getProxyConfiguration(disk_config); + auto proxy_config = getProxyConfiguration(config_prefix, config); if (proxy_config) cfg.perRequestConfiguration = [proxy_config](const auto & request) { return proxy_config->getConfiguration(request); }; auto client = S3::ClientFactory::instance().create( cfg, - disk_config->getString("access_key_id", ""), - disk_config->getString("secret_access_key", "")); + config.getString(config_prefix + ".access_key_id", ""), + config.getString(config_prefix + ".secret_access_key", "")); String metadata_path = context.getPath() + "disks/" + name + "/"; diff --git a/src/Functions/pointInPolygon.cpp b/src/Functions/pointInPolygon.cpp index 460c60d6e4c..51b96f41f1c 100644 --- a/src/Functions/pointInPolygon.cpp +++ b/src/Functions/pointInPolygon.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -44,7 +45,7 @@ namespace { template -ColumnPtr callPointInPolygonImplWithPool(const IColumn & x, const IColumn & y, Polygon & polygon) +UInt8 callPointInPolygonImplWithPool(Float64 x, Float64 y, Polygon & polygon) { using Pool = ObjectPoolMap; /// C++11 has thread-safe function-local statics on most modern compilers. 
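About the pool referenced in the comment above: building a point-in-polygon index is expensive, so `callPointInPolygonImplWithPool` caches one prepared implementation per distinct polygon in a function-local static `ObjectPoolMap`, keyed by the serialized polygon, relying on C++11's guarantee that initialization of function-local statics is thread-safe. A reduced sketch of that caching shape, with a plain map and mutex standing in for `ObjectPoolMap` (stand-in names throughout):

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <string>

/// Stand-in for a preprocessed polygon index that is costly to construct.
struct PreparedPolygon
{
    explicit PreparedPolygon(const std::string & wkt) { std::cout << "building index for " << wkt << '\n'; }
    bool contains(double, double) const { return true; } /// stub
};

std::shared_ptr<PreparedPolygon> getPrepared(const std::string & serialized_polygon)
{
    /// C++11 "magic statics": initialization of pool and mutex is thread-safe.
    static std::map<std::string, std::shared_ptr<PreparedPolygon>> pool;
    static std::mutex mutex;

    std::lock_guard<std::mutex> lock(mutex);
    auto & entry = pool[serialized_polygon];
    if (!entry)
        entry = std::make_shared<PreparedPolygon>(serialized_polygon); /// built once per distinct polygon
    return entry;
}

int main()
{
    getPrepared("POLYGON((0 0,1 0,1 1))"); /// builds the index
    getPrepared("POLYGON((0 0,1 0,1 1))"); /// cache hit, no rebuild
}
```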
@@ -63,19 +64,19 @@ ColumnPtr callPointInPolygonImplWithPool(const IColumn & x, const IColumn & y, P std::string serialized_polygon = serialize(polygon); auto impl = known_polygons.get(serialized_polygon, factory); - return pointInPolygon(x, y, *impl); + return impl->contains(x, y); } template -ColumnPtr callPointInPolygonImpl(const IColumn & x, const IColumn & y, Polygon & polygon) +UInt8 callPointInPolygonImpl(Float64 x, Float64 y, Polygon & polygon) { PointInPolygonImpl impl(polygon); - return pointInPolygon(x, y, impl); + return impl.contains(x, y); } } -template +template class FunctionPointInPolygon : public IFunction { public: @@ -91,7 +92,8 @@ public: static FunctionPtr create(const Context & context) { - return std::make_shared>(context.getSettingsRef().validate_polygons); + return std::make_shared>( + context.getSettingsRef().validate_polygons); } String getName() const override @@ -116,74 +118,192 @@ public: throw Exception("Too few arguments", ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION); } - auto get_message_prefix = [this](size_t i) { return "Argument " + toString(i + 1) + " for function " + getName(); }; - - for (size_t i = 1; i < arguments.size(); ++i) + auto validate_tuple = [this](size_t i, const DataTypeTuple * tuple) { - const auto * array = checkAndGetDataType(arguments[i].get()); - if (array == nullptr && i != 1) - throw Exception(get_message_prefix(i) + " must be array of tuples.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); - - const auto * tuple = checkAndGetDataType(array ? array->getNestedType().get() : arguments[i].get()); if (tuple == nullptr) - throw Exception(get_message_prefix(i) + " must contains tuple.", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + throw Exception(getMessagePrefix(i) + " must contain a tuple", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); const DataTypes & elements = tuple->getElements(); if (elements.size() != 2) - throw Exception(get_message_prefix(i) + " must have exactly two elements.", ErrorCodes::BAD_ARGUMENTS); + throw Exception(getMessagePrefix(i) + " must have exactly two elements", ErrorCodes::BAD_ARGUMENTS); for (auto j : ext::range(0, elements.size())) { if (!isNativeNumber(elements[j])) { - throw Exception(get_message_prefix(i) + " must contains numeric tuple at position " + toString(j + 1), + throw Exception(getMessagePrefix(i) + " must contain numeric tuple at position " + toString(j + 1), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); } } + }; + + validate_tuple(0, checkAndGetDataType(arguments[0].get())); + + if (arguments.size() == 2) + { + const auto * array = checkAndGetDataType(arguments[1].get()); + if (array == nullptr) + throw Exception(getMessagePrefix(1) + " must contain an array of tuples or an array of arrays of tuples.", + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + const auto * nested_array = checkAndGetDataType(array->getNestedType().get()); + if (nested_array != nullptr) + { + array = nested_array; + } + + validate_tuple(1, checkAndGetDataType(array->getNestedType().get())); + } + else + { + for (size_t i = 1; i < arguments.size(); i++) + { + const auto * array = checkAndGetDataType(arguments[i].get()); + if (array == nullptr) + throw Exception(getMessagePrefix(i) + " must contain an array of tuples", + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT); + + validate_tuple(i, checkAndGetDataType(array->getNestedType().get())); + } } return std::make_shared(); } - void executeImpl(Block & block, const ColumnNumbers & arguments, size_t result, size_t /*input_rows_count*/) override + void executeImpl(Block & block, const ColumnNumbers & arguments, 
size_t result, size_t input_rows_count) override { const IColumn * point_col = block.getByPosition(arguments[0]).column.get(); const auto * const_tuple_col = checkAndGetColumn(point_col); if (const_tuple_col) point_col = &const_tuple_col->getDataColumn(); - const auto * tuple_col = checkAndGetColumn(point_col); + const auto * tuple_col = checkAndGetColumn(point_col); if (!tuple_col) throw Exception("First argument for function " + getName() + " must be constant array of tuples.", ErrorCodes::ILLEGAL_COLUMN); - auto & result_column = block.safeGetByPosition(result).column; - const auto & tuple_columns = tuple_col->getColumns(); - result_column = executeForType(*tuple_columns[0], *tuple_columns[1], block, arguments); - if (const_tuple_col) + const IColumn * poly_col = block.getByPosition(arguments[1]).column.get(); + const auto * const_poly_col = checkAndGetColumn(poly_col); + + bool point_is_const = const_tuple_col != nullptr; + bool poly_is_const = const_poly_col != nullptr; + + auto call_impl = poly_is_const + ? callPointInPolygonImplWithPool + : callPointInPolygonImpl; + + size_t size = point_is_const && poly_is_const ? 1 : input_rows_count; + auto execution_result = ColumnVector::create(size); + auto & data = execution_result->getData(); + + Polygon polygon; + for (auto i : ext::range(0, size)) + { + if (!poly_is_const || i == 0) + { + polygon = parsePolygon(block, arguments, i); + } + + size_t point_index = point_is_const ? 0 : i; + data[i] = call_impl(tuple_columns[0]->getFloat64(point_index), tuple_columns[1]->getFloat64(point_index), polygon); + } + + auto & result_column = block.safeGetByPosition(result).column; + result_column = std::move(execution_result); + if (point_is_const && poly_is_const) result_column = ColumnConst::create(result_column, const_tuple_col->size()); } + private: bool validate; - ColumnPtr executeForType(const IColumn & x, const IColumn & y, Block & block, const ColumnNumbers & arguments) + std::string getMessagePrefix(size_t i) const + { + return "Argument " + toString(i + 1) + " for function " + getName(); + } + + Polygon parsePolygonFromSingleColumn(Block & block, const ColumnNumbers & arguments, size_t i) const + { + const auto & poly = block.getByPosition(arguments[1]).column.get(); + const auto * column_const = checkAndGetColumn(poly); + const auto * array_col = + column_const ? checkAndGetColumn(column_const->getDataColumn()) : checkAndGetColumn(poly); + + if (!array_col) + throw Exception(getMessagePrefix(1) + " must contain an array of tuples or an array of arrays of tuples", + ErrorCodes::ILLEGAL_COLUMN); + + const auto * nested_array_col = checkAndGetColumn(array_col->getData()); + const auto & tuple_data = nested_array_col ? 
nested_array_col->getData() : array_col->getData(); + const auto & tuple_col = checkAndGetColumn(tuple_data); + if (!tuple_col) + throw Exception(getMessagePrefix(1) + " must contain an array of tuples or an array of arrays of tuples", + ErrorCodes::ILLEGAL_COLUMN); + + const auto & tuple_columns = tuple_col->getColumns(); + const auto & x_column = tuple_columns[0]; + const auto & y_column = tuple_columns[1]; + + auto parse_polygon_part = [&x_column, &y_column](auto & container, size_t l, size_t r) + { + for (auto j : ext::range(l, r)) + { + CoordinateType x_coord = x_column->getFloat64(j); + CoordinateType y_coord = y_column->getFloat64(j); + + container.push_back(Point(x_coord, y_coord)); + } + }; + + Polygon polygon; + if (nested_array_col) + { + for (auto j : ext::range(array_col->getOffsets()[i - 1], array_col->getOffsets()[i])) + { + size_t l = nested_array_col->getOffsets()[j - 1]; + size_t r = nested_array_col->getOffsets()[j]; + if (polygon.outer().empty()) + { + parse_polygon_part(polygon.outer(), l, r); + } + else + { + polygon.inners().emplace_back(); + parse_polygon_part(polygon.inners().back(), l, r); + } + } + } + else + { + size_t l = array_col->getOffsets()[i - 1]; + size_t r = array_col->getOffsets()[i]; + + parse_polygon_part(polygon.outer(), l, r); + } + + return polygon; + } + + Polygon parsePolygonFromMultipleColumns(Block & block, const ColumnNumbers & arguments, size_t) const { Polygon polygon; - auto get_message_prefix = [this](size_t i) { return "Argument " + toString(i + 1) + " for function " + getName(); }; - for (size_t i = 1; i < arguments.size(); ++i) { const auto * const_col = checkAndGetColumn(block.getByPosition(arguments[i]).column.get()); - const auto * array_col = const_col ? checkAndGetColumn(&const_col->getDataColumn()) : nullptr; + if (!const_col) + throw Exception("Multi-argument version of function " + getName() + " works only with const polygon", + ErrorCodes::BAD_ARGUMENTS); + + const auto * array_col = checkAndGetColumn(&const_col->getDataColumn()); const auto * tuple_col = array_col ? checkAndGetColumn(&array_col->getData()) : nullptr; if (!tuple_col) - throw Exception(get_message_prefix(i) + " must be constant array of tuples.", ErrorCodes::ILLEGAL_COLUMN); + throw Exception(getMessagePrefix(i) + " must be constant array of tuples", ErrorCodes::ILLEGAL_COLUMN); const auto & tuple_columns = tuple_col->getColumns(); const auto & column_x = tuple_columns[0]; @@ -197,7 +317,7 @@ private: auto size = column_x->size(); if (size == 0) - throw Exception(get_message_prefix(i) + " shouldn't be empty.", ErrorCodes::ILLEGAL_COLUMN); + throw Exception(getMessagePrefix(i) + " shouldn't be empty.", ErrorCodes::ILLEGAL_COLUMN); for (auto j : ext::range(0, size)) { @@ -207,6 +327,21 @@ private: } } + return polygon; + } + + Polygon parsePolygon(Block & block, const ColumnNumbers & arguments, size_t i) const + { + Polygon polygon; + if (arguments.size() == 2) + { + polygon = parsePolygonFromSingleColumn(block, arguments, i); + } + else + { + polygon = parsePolygonFromMultipleColumns(block, arguments, i); + } + boost::geometry::correct(polygon); #if !defined(__clang_analyzer__) /// It does not like boost. @@ -218,19 +353,14 @@ private: throw Exception("Polygon is not valid: " + failure_message, ErrorCodes::BAD_ARGUMENTS); } #endif - - auto call_impl = use_object_pool - ? 
callPointInPolygonImplWithPool - : callPointInPolygonImpl; - - return call_impl(x, y, polygon); + return polygon; } }; void registerFunctionPointInPolygon(FunctionFactory & factory) { - factory.registerFunction, true>>(); + factory.registerFunction, PointInPolygonTrivial>>(); } } diff --git a/src/IO/ReadWriteBufferFromHTTP.h b/src/IO/ReadWriteBufferFromHTTP.h index 2d4e3f0472e..4b4e4453360 100644 --- a/src/IO/ReadWriteBufferFromHTTP.h +++ b/src/IO/ReadWriteBufferFromHTTP.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index 3fa53e3e694..7c2133e629f 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -32,6 +32,7 @@ #include #include +#include #include #include #include diff --git a/src/Interpreters/AsynchronousMetrics.cpp b/src/Interpreters/AsynchronousMetrics.cpp index 7ba3886afb3..59ff01bf972 100644 --- a/src/Interpreters/AsynchronousMetrics.cpp +++ b/src/Interpreters/AsynchronousMetrics.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.h b/src/Interpreters/ClusterProxy/SelectStreamFactory.h index daf4dd48b4d..da1d18b6dd9 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.h +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.h @@ -2,11 +2,14 @@ #include #include +#include #include namespace DB { +using Scalars = std::map; + namespace ClusterProxy { diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index fd8ca66e85c..cb499577272 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h index 62eba97032e..2f63d9dadee 100644 --- a/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -1,5 +1,5 @@ #pragma once -#include + #include #include #include @@ -13,9 +13,15 @@ #include #include +namespace zkutil +{ + class ZooKeeper; +} + namespace DB { +class Context; class ASTAlterQuery; class AccessRightsElements; struct DDLLogEntry; diff --git a/src/Interpreters/ExpressionActions.cpp b/src/Interpreters/ExpressionActions.cpp index cb65ec35b9e..363d4765019 100644 --- a/src/Interpreters/ExpressionActions.cpp +++ b/src/Interpreters/ExpressionActions.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -509,6 +510,33 @@ std::string ExpressionAction::toString() const return ss.str(); } +ExpressionActions::ExpressionActions(const NamesAndTypesList & input_columns_, const Context & context_) + : input_columns(input_columns_), settings(context_.getSettingsRef()) +{ + for (const auto & input_elem : input_columns) + sample_block.insert(ColumnWithTypeAndName(nullptr, input_elem.type, input_elem.name)); + +#if USE_EMBEDDED_COMPILER +compilation_cache = context_.getCompiledExpressionCache(); +#endif +} + +/// For constant columns the columns themselves can be contained in `input_columns_`. 
+ExpressionActions::ExpressionActions(const ColumnsWithTypeAndName & input_columns_, const Context & context_) + : settings(context_.getSettingsRef()) +{ + for (const auto & input_elem : input_columns_) + { + input_columns.emplace_back(input_elem.name, input_elem.type); + sample_block.insert(input_elem); + } +#if USE_EMBEDDED_COMPILER + compilation_cache = context_.getCompiledExpressionCache(); +#endif +} + +ExpressionActions::~ExpressionActions() = default; + void ExpressionActions::checkLimits(Block & block) const { if (settings.max_temporary_columns && block.columns() > settings.max_temporary_columns) diff --git a/src/Interpreters/ExpressionActions.h b/src/Interpreters/ExpressionActions.h index 5a29eaaab9e..080e8f8a10f 100644 --- a/src/Interpreters/ExpressionActions.h +++ b/src/Interpreters/ExpressionActions.h @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include @@ -25,6 +24,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } +class Context; class TableJoin; class IJoin; using JoinPtr = std::shared_ptr; @@ -42,6 +42,7 @@ class IDataType; using DataTypePtr = std::shared_ptr; class ExpressionActions; +class CompiledExpressionCache; /** Action on the block. */ @@ -155,30 +156,12 @@ class ExpressionActions public: using Actions = std::vector; - ExpressionActions(const NamesAndTypesList & input_columns_, const Context & context_) - : input_columns(input_columns_), settings(context_.getSettingsRef()) - { - for (const auto & input_elem : input_columns) - sample_block.insert(ColumnWithTypeAndName(nullptr, input_elem.type, input_elem.name)); - -#if USE_EMBEDDED_COMPILER - compilation_cache = context_.getCompiledExpressionCache(); -#endif - } + ExpressionActions(const NamesAndTypesList & input_columns_, const Context & context_); /// For constant columns the columns themselves can be contained in `input_columns_`. - ExpressionActions(const ColumnsWithTypeAndName & input_columns_, const Context & context_) - : settings(context_.getSettingsRef()) - { - for (const auto & input_elem : input_columns_) - { - input_columns.emplace_back(input_elem.name, input_elem.type); - sample_block.insert(input_elem); - } -#if USE_EMBEDDED_COMPILER - compilation_cache = context_.getCompiledExpressionCache(); -#endif - } + ExpressionActions(const ColumnsWithTypeAndName & input_columns_, const Context & context_); + + ~ExpressionActions(); /// Add the input column. /// The name of the column must not match the names of the intermediate columns that occur when evaluating the expression. 
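Moving these constructor bodies out of ExpressionActions.h is one instance of the pattern applied throughout this diff: headers keep only forward declarations (`class Context;`, `class CompiledExpressionCache;`) and declare the destructor, whose `= default` definition lands in the .cpp. A minimal sketch with hypothetical names:

```cpp
// holder.h -- stays free of Context.h.
#pragma once
#include <memory>

class Context;  /// forward declaration instead of an #include

class Holder
{
public:
    explicit Holder(const Context & context);  /// defined in holder.cpp
    ~Holder();  /// declared here, defined where Context is complete

private:
    std::unique_ptr<Context> owned;  /// an incomplete type is fine for the member itself
};

// holder.cpp -- the only translation unit that needs the full Context:
//
//     #include "holder.h"
//     #include <Interpreters/Context.h>
//     Holder::Holder(const Context & context) : owned(std::make_unique<Context>(context)) {}
//     Holder::~Holder() = default;
```

The out-of-line destructor matters: `std::unique_ptr<T>` requires a complete `T` at the point where its destructor is instantiated, so an implicitly inline `~Holder()` would fail to compile in every file that sees only the forward declaration. The same mechanism is presumably behind `ExpressionActions::~ExpressionActions() = default;` above, and it is what lets StorageDistributed, later in this diff, hold a `std::unique_ptr<Context>` member behind a forward declaration.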
diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 8bff2816df1..3341855b8c6 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -32,6 +32,7 @@ #include #include #include +#include #include #include diff --git a/src/Interpreters/GlobalSubqueriesVisitor.h b/src/Interpreters/GlobalSubqueriesVisitor.h index 78d98805814..37a358c3d28 100644 --- a/src/Interpreters/GlobalSubqueriesVisitor.h +++ b/src/Interpreters/GlobalSubqueriesVisitor.h @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include @@ -17,6 +16,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp index 841635b8d01..1c1e21fc32c 100644 --- a/src/Interpreters/InterpreterExplainQuery.cpp +++ b/src/Interpreters/InterpreterExplainQuery.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterExplainQuery.h b/src/Interpreters/InterpreterExplainQuery.h index 058c51737b0..fbc8a998f2c 100644 --- a/src/Interpreters/InterpreterExplainQuery.h +++ b/src/Interpreters/InterpreterExplainQuery.h @@ -1,6 +1,5 @@ #pragma once -#include #include #include @@ -8,6 +7,8 @@ namespace DB { +class Context; + /// Returns single row with explain results class InterpreterExplainQuery : public IInterpreter { diff --git a/src/Interpreters/InterpreterFactory.cpp b/src/Interpreters/InterpreterFactory.cpp index 60302848367..ccaa8fa4067 100644 --- a/src/Interpreters/InterpreterFactory.cpp +++ b/src/Interpreters/InterpreterFactory.cpp @@ -61,6 +61,7 @@ #include #include #include +#include #include diff --git a/src/Interpreters/InterpreterInsertQuery.h b/src/Interpreters/InterpreterInsertQuery.h index 476e86898d7..fef962d24a3 100644 --- a/src/Interpreters/InterpreterInsertQuery.h +++ b/src/Interpreters/InterpreterInsertQuery.h @@ -2,13 +2,14 @@ #include #include -#include #include #include namespace DB { +class Context; + /** Interprets the INSERT query. 
*/ diff --git a/src/Interpreters/InterpreterSelectQuery.h b/src/Interpreters/InterpreterSelectQuery.h index 87a95c5b836..b97ff65e988 100644 --- a/src/Interpreters/InterpreterSelectQuery.h +++ b/src/Interpreters/InterpreterSelectQuery.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include @@ -25,6 +24,7 @@ namespace DB struct SubqueryForSet; class InterpreterSelectWithUnionQuery; +class Context; struct SyntaxAnalyzerResult; using SyntaxAnalyzerResultPtr = std::shared_ptr; diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp index 9cdb19b1934..378cb943c04 100644 --- a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -1,5 +1,6 @@ #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.h b/src/Interpreters/InterpreterSelectWithUnionQuery.h index ad78572ab77..c7a8e09578b 100644 --- a/src/Interpreters/InterpreterSelectWithUnionQuery.h +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.h @@ -1,15 +1,16 @@ #pragma once #include -#include #include #include +#include #include namespace DB { +class Context; class InterpreterSelectQuery; diff --git a/src/Interpreters/InterpreterSetRoleQuery.h b/src/Interpreters/InterpreterSetRoleQuery.h index afb53014c87..91cf5fc1b2e 100644 --- a/src/Interpreters/InterpreterSetRoleQuery.h +++ b/src/Interpreters/InterpreterSetRoleQuery.h @@ -6,6 +6,8 @@ namespace DB { + +class Context; class ASTSetRoleQuery; struct ExtendedRoleSet; struct User; diff --git a/src/Interpreters/InterpreterWatchQuery.cpp b/src/Interpreters/InterpreterWatchQuery.cpp index 91e7bfac2a3..dc1ae6a7cad 100644 --- a/src/Interpreters/InterpreterWatchQuery.cpp +++ b/src/Interpreters/InterpreterWatchQuery.cpp @@ -13,6 +13,7 @@ limitations under the License. */ #include #include #include +#include #include #include #include diff --git a/src/Interpreters/InterpreterWatchQuery.h b/src/Interpreters/InterpreterWatchQuery.h index 7929b86b1c8..a0dacd08ea8 100644 --- a/src/Interpreters/InterpreterWatchQuery.h +++ b/src/Interpreters/InterpreterWatchQuery.h @@ -18,11 +18,11 @@ limitations under the License. 
*/ #include #include #include -#include namespace DB { +class Context; class IAST; using ASTPtr = std::shared_ptr; using StoragePtr = std::shared_ptr; diff --git a/src/Interpreters/JoinedTables.cpp b/src/Interpreters/JoinedTables.cpp index b2591d94310..67363737670 100644 --- a/src/Interpreters/JoinedTables.cpp +++ b/src/Interpreters/JoinedTables.cpp @@ -1,6 +1,5 @@ #include #include -#include #include #include #include diff --git a/src/Interpreters/JoinedTables.h b/src/Interpreters/JoinedTables.h index 399acdc0768..55244e1225c 100644 --- a/src/Interpreters/JoinedTables.h +++ b/src/Interpreters/JoinedTables.h @@ -3,13 +3,14 @@ #include #include #include +#include +#include #include namespace DB { class ASTSelectQuery; -class Context; class TableJoin; struct SelectQueryOptions; diff --git a/src/Interpreters/MutationsInterpreter.h b/src/Interpreters/MutationsInterpreter.h index 9b12ce79e91..35c4f8ece0a 100644 --- a/src/Interpreters/MutationsInterpreter.h +++ b/src/Interpreters/MutationsInterpreter.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include diff --git a/src/Interpreters/PartLog.cpp b/src/Interpreters/PartLog.cpp index 7e04c5f6abb..ab22e73a0ca 100644 --- a/src/Interpreters/PartLog.cpp +++ b/src/Interpreters/PartLog.cpp @@ -10,6 +10,7 @@ #include #include #include +#include namespace DB diff --git a/src/Interpreters/QueryLog.h b/src/Interpreters/QueryLog.h index ec14f5e97fb..e503b417638 100644 --- a/src/Interpreters/QueryLog.h +++ b/src/Interpreters/QueryLog.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include diff --git a/src/Interpreters/QueryThreadLog.h b/src/Interpreters/QueryThreadLog.h index c50daa7bc88..e2c082eb54c 100644 --- a/src/Interpreters/QueryThreadLog.h +++ b/src/Interpreters/QueryThreadLog.h @@ -1,6 +1,7 @@ #pragma once #include +#include namespace ProfileEvents diff --git a/src/Interpreters/Set.cpp b/src/Interpreters/Set.cpp index afc95e11fd8..6f3031d5e7d 100644 --- a/src/Interpreters/Set.cpp +++ b/src/Interpreters/Set.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include diff --git a/src/Interpreters/Set.h b/src/Interpreters/Set.h index 0f84c2f5da8..848abc9aa8a 100644 --- a/src/Interpreters/Set.h +++ b/src/Interpreters/Set.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include @@ -17,6 +16,7 @@ namespace DB struct Range; +class Context; class IFunctionBase; using FunctionBasePtr = std::shared_ptr; diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h index f9b1154f442..a3b47a584d9 100644 --- a/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -65,6 +64,12 @@ namespace ErrorCodes class Context; +class QueryLog; +class QueryThreadLog; +class PartLog; +class TextLog; +class TraceLog; +class MetricLog; class ISystemLog diff --git a/src/Interpreters/executeQuery.cpp b/src/Interpreters/executeQuery.cpp index 4d609395c3a..8b3dc709ab2 100644 --- a/src/Interpreters/executeQuery.cpp +++ b/src/Interpreters/executeQuery.cpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include diff --git a/src/Interpreters/getTableExpressions.cpp b/src/Interpreters/getTableExpressions.cpp index 68f18c1397a..b5444f73b35 100644 --- a/src/Interpreters/getTableExpressions.cpp +++ b/src/Interpreters/getTableExpressions.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include diff --git a/src/Interpreters/interpretSubquery.cpp b/src/Interpreters/interpretSubquery.cpp index 0521ab7fd8b..e108db1af30 
100644 --- a/src/Interpreters/interpretSubquery.cpp +++ b/src/Interpreters/interpretSubquery.cpp @@ -12,6 +12,7 @@ #include #include +#include namespace DB { diff --git a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp index 7254cf91245..24fff4203bb 100644 --- a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp +++ b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include #include #include @@ -17,7 +19,6 @@ #include #include #include -#include #include diff --git a/src/Storages/Distributed/DirectoryMonitor.cpp b/src/Storages/Distributed/DirectoryMonitor.cpp index d90a7974968..2947b47eefa 100644 --- a/src/Storages/Distributed/DirectoryMonitor.cpp +++ b/src/Storages/Distributed/DirectoryMonitor.cpp @@ -81,13 +81,13 @@ StorageDistributedDirectoryMonitor::StorageDistributedDirectoryMonitor( : storage(storage_) , pool(std::move(pool_)) , path{path_ + '/'} - , should_batch_inserts(storage.global_context.getSettingsRef().distributed_directory_monitor_batch_inserts) - , min_batched_block_size_rows(storage.global_context.getSettingsRef().min_insert_block_size_rows) - , min_batched_block_size_bytes(storage.global_context.getSettingsRef().min_insert_block_size_bytes) + , should_batch_inserts(storage.global_context->getSettingsRef().distributed_directory_monitor_batch_inserts) + , min_batched_block_size_rows(storage.global_context->getSettingsRef().min_insert_block_size_rows) + , min_batched_block_size_bytes(storage.global_context->getSettingsRef().min_insert_block_size_bytes) , current_batch_file_path{path + "current_batch.txt"} - , default_sleep_time{storage.global_context.getSettingsRef().distributed_directory_monitor_sleep_time_ms.totalMilliseconds()} + , default_sleep_time{storage.global_context->getSettingsRef().distributed_directory_monitor_sleep_time_ms.totalMilliseconds()} , sleep_time{default_sleep_time} - , max_sleep_time{storage.global_context.getSettingsRef().distributed_directory_monitor_max_sleep_time_ms.totalMilliseconds()} + , max_sleep_time{storage.global_context->getSettingsRef().distributed_directory_monitor_max_sleep_time_ms.totalMilliseconds()} , log{&Logger::get(getLoggerName())} , monitor_blocker(monitor_blocker_) , bg_pool(bg_pool_) @@ -214,7 +214,7 @@ ConnectionPoolPtr StorageDistributedDirectoryMonitor::createPool(const std::stri auto pools = createPoolsForAddresses(name, pool_factory); - const auto settings = storage.global_context.getSettings(); + const auto settings = storage.global_context->getSettings(); return pools.size() == 1 ? 
pools.front() : std::make_shared(pools, settings.load_balancing, settings.distributed_replica_error_half_life.totalSeconds(), @@ -262,7 +262,7 @@ bool StorageDistributedDirectoryMonitor::processFiles() void StorageDistributedDirectoryMonitor::processFile(const std::string & file_path) { LOG_TRACE(log, "Started processing `" << file_path << '`'); - auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(storage.global_context.getSettingsRef()); + auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(storage.global_context->getSettingsRef()); auto connection = pool->get(timeouts); try @@ -437,7 +437,7 @@ struct StorageDistributedDirectoryMonitor::Batch Poco::File{tmp_file}.renameTo(parent.current_batch_file_path); } - auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(parent.storage.global_context.getSettingsRef()); + auto timeouts = ConnectionTimeouts::getTCPTimeoutsWithFailover(parent.storage.global_context->getSettingsRef()); auto connection = parent.pool->get(timeouts); bool batch_broken = false; diff --git a/src/Storages/Distributed/DistributedBlockOutputStream.cpp b/src/Storages/Distributed/DistributedBlockOutputStream.cpp index c7b0436a9a3..e08c4b7fd34 100644 --- a/src/Storages/Distributed/DistributedBlockOutputStream.cpp +++ b/src/Storages/Distributed/DistributedBlockOutputStream.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/Distributed/DistributedBlockOutputStream.h b/src/Storages/Distributed/DistributedBlockOutputStream.h index 319664e1723..17db955431c 100644 --- a/src/Storages/Distributed/DistributedBlockOutputStream.h +++ b/src/Storages/Distributed/DistributedBlockOutputStream.h @@ -11,7 +11,6 @@ #include #include #include -#include namespace Poco @@ -22,6 +21,7 @@ namespace Poco namespace DB { +class Context; class StorageDistributed; /** If insert_sync_ is true, the write is synchronous. Uses insert_timeout_ if it is not zero. diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.h b/src/Storages/MergeTree/IMergeTreeDataPart.h index f0ae8b40c5b..d98457f7f4b 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.h +++ b/src/Storages/MergeTree/IMergeTreeDataPart.h @@ -36,6 +36,7 @@ using VolumePtr = std::shared_ptr; class IMergeTreeReader; class IMergeTreeDataPartWriter; +class MarkCache; namespace ErrorCodes { @@ -319,7 +320,7 @@ protected: /// checksums.txt and columns.txt. 0 - if not counted; UInt64 bytes_on_disk{0}; - /// Columns description. Cannot be changed, after part initialiation. + /// Columns description. Cannot be changed, after part initialization. NamesAndTypesList columns; const Type part_type; @@ -352,7 +353,7 @@ private: /// For the older format version calculates rows count from the size of a column with a fixed size. void loadRowsCount(); - /// Loads ttl infos in json format from file ttl.txt. If file doesn`t exists assigns ttl infos with all zeros + /// Loads ttl infos in json format from file ttl.txt. 
If file doesn't exists assigns ttl infos with all zeros void loadTTLInfos(); void loadPartitionAndMinMaxIndex(); diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h index ffdba570544..149aeaa2f0d 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h @@ -136,7 +136,7 @@ protected: size_t next_mark = 0; size_t next_index_offset = 0; - /// Number of marsk in data from which skip indices have to start + /// Number of marks in data from which skip indices have to start /// aggregation. I.e. it's data mark number, not skip indices mark. size_t skip_index_data_mark = 0; diff --git a/src/Storages/MergeTree/KeyCondition.h b/src/Storages/MergeTree/KeyCondition.h index 3a3768f0e4c..f12acdbf7bf 100644 --- a/src/Storages/MergeTree/KeyCondition.h +++ b/src/Storages/MergeTree/KeyCondition.h @@ -3,7 +3,6 @@ #include #include -#include #include #include #include @@ -15,10 +14,9 @@ namespace DB { - +class Context; class IFunction; using FunctionBasePtr = std::shared_ptr; - class ExpressionActions; using ExpressionActionsPtr = std::shared_ptr; diff --git a/src/Storages/MergeTree/MergeList.h b/src/Storages/MergeTree/MergeList.h index 107b03a018a..4ee8a75a868 100644 --- a/src/Storages/MergeTree/MergeList.h +++ b/src/Storages/MergeTree/MergeList.h @@ -3,7 +3,6 @@ #include #include #include -#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 64c95b8b8a5..88a396ce763 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 394260096a7..9cf72cbe8bb 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include #include #include #include @@ -34,6 +34,7 @@ class MergeListEntry; class AlterCommands; class MergeTreePartsMover; class MutationCommands; +class Context; class ExpressionActions; using ExpressionActionsPtr = std::shared_ptr; @@ -70,7 +71,7 @@ namespace ErrorCodes /// [Column].mrk - marks, pointing to seek positions allowing to skip n * k rows. /// /// File structure of tables with custom partitioning (format_version >= 1): -/// Part directory - / partiiton-id _ min-id _ max-id _ level / +/// Part directory - / partition-id _ min-id _ max-id _ level / /// Inside the part directory: /// The same files as for month-partitioned tables, plus /// count.txt - contains total number of rows in this part. 
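The MergeTreeData.h comment corrected above describes the on-disk part directory layout for custom partitioning: `<partition-id>_<min-block>_<max-block>_<level>`. A toy parser for that naming scheme is shown below; ClickHouse's real parsing lives in MergeTreePartInfo, and this sketch assumes the partition id contains no underscore and ignores the optional mutation suffix:

```cpp
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

struct PartName
{
    std::string partition_id;
    int64_t min_block = 0;
    int64_t max_block = 0;
    uint32_t level = 0;
};

/// Splits e.g. "202004_1_5_2" into {"202004", 1, 5, 2}; returns false on malformed input.
bool parsePartName(const std::string & dir_name, PartName & out)
{
    std::istringstream ss(dir_name);
    std::string min_s, max_s, level_s;
    if (!std::getline(ss, out.partition_id, '_') || !std::getline(ss, min_s, '_')
        || !std::getline(ss, max_s, '_') || !std::getline(ss, level_s))
        return false;
    try
    {
        out.min_block = std::stoll(min_s);
        out.max_block = std::stoll(max_s);
        out.level = static_cast<uint32_t>(std::stoul(level_s));
    }
    catch (...)
    {
        return false;
    }
    return true;
}

int main()
{
    PartName p;
    if (parsePartName("202004_1_5_2", p))
        std::cout << p.partition_id << ": blocks [" << p.min_block << ", " << p.max_block
                  << "], level " << p.level << '\n';
}
```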
diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 625dc9f367a..00b474a7792 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h index 431d059ba60..84a3fcf1be2 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.h +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.h @@ -160,7 +160,7 @@ private: NamesAndTypesList storage_columns, const MutationCommands & commands_for_removes); - /// Get skip indcies, that should exists in the resulting data part. + /// Get skip indices, that should exists in the resulting data part. static MergeTreeIndices getIndicesForNewDataPart( const MergeTreeIndices & all_indices, const MutationCommands & commands_for_removes); diff --git a/src/Storages/MergeTree/MergeTreeDataPartCompact.h b/src/Storages/MergeTree/MergeTreeDataPartCompact.h index 3ce1fd830d5..b4a2b5fa797 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartCompact.h @@ -6,13 +6,13 @@ namespace DB { /** In compact format all columns are stored in one file (`data.bin`). - * Data is splitted in granules and columns are serialized sequentially in one granule. + * Data is split in granules and columns are serialized sequentially in one granule. * Granules are written one by one in data file. * Marks are also stored in single file (`data.mrk3`). * In compact format one mark is an array of marks for every column and a number of rows in granule. * Format of other data part files is not changed. * It's considered to store only small parts in compact format (up to 10M). - * NOTE: Compact parts aren't supported for tables with non-adaptive granularty. + * NOTE: Compact parts aren't supported for tables with non-adaptive granularity. * NOTE: In compact part compressed and uncompressed size of single column is unknown. */ class MergeTreeDataPartCompact : public IMergeTreeDataPart diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 4cdf57a4700..1ab10b55409 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -1,4 +1,5 @@ #include +#include namespace DB { diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 662fb067e48..92f4f6107b8 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -18,6 +18,7 @@ #include #include #include +#include /// Allow to use __uint128_t as a template parameter for boost::rational. 
// https://stackoverflow.com/questions/41198673/uint128-t-not-working-with-clang-and-libstdc diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index a78e2e5ae32..75d6ebe7cb7 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.h b/src/Storages/MergeTree/MergeTreeDataWriter.h index c2878145a50..e199aa3b43a 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.h +++ b/src/Storages/MergeTree/MergeTreeDataWriter.h @@ -9,7 +9,6 @@ #include #include -#include #include diff --git a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp index 265dba0e6fe..cc1f713a57e 100644 --- a/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeReverseSelectProcessor.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace DB diff --git a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp index def01b192d5..9af72e60123 100644 --- a/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeSelectProcessor.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace DB diff --git a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp index cfda7fdf562..755ff87eab5 100644 --- a/src/Storages/MergeTree/MergeTreeSequentialSource.cpp +++ b/src/Storages/MergeTree/MergeTreeSequentialSource.cpp @@ -1,5 +1,6 @@ #include #include +#include namespace DB { diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index dfe3643e60c..f29be3d692f 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -28,7 +28,7 @@ struct MergeTreeSettings : public SettingsCollection #define LIST_OF_MERGE_TREE_SETTINGS(M) \ M(SettingUInt64, index_granularity, 8192, "How many rows correspond to one primary key value.", 0) \ \ - /** Data storing format settigns. */ \ + /** Data storing format settings. 
*/ \ M(SettingUInt64, min_bytes_for_wide_part, 0, "Minimal uncompressed size in bytes to create part in wide format instead of compact", 0) \ M(SettingUInt64, min_rows_for_wide_part, 0, "Minimal number of rows to create part in wide format instead of compact", 0) \ \ diff --git a/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp index aa8c550839d..0b09fad91d1 100644 --- a/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp +++ b/src/Storages/MergeTree/MergeTreeThreadSelectBlockInputProcessor.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace DB diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index e0760e87d00..bd1312f0c59 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -1,4 +1,5 @@ #include +#include #include diff --git a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp index 892b4eccfbc..1a99636534b 100644 --- a/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp +++ b/src/Storages/MergeTree/MergedColumnOnlyOutputStream.cpp @@ -1,4 +1,5 @@ #include +#include namespace DB { diff --git a/src/Storages/MergeTree/RPNBuilder.h b/src/Storages/MergeTree/RPNBuilder.h index 2e457147cf4..ed25252642b 100644 --- a/src/Storages/MergeTree/RPNBuilder.h +++ b/src/Storages/MergeTree/RPNBuilder.h @@ -3,7 +3,6 @@ #include #include #include -#include #include #include #include @@ -13,6 +12,7 @@ namespace DB { +class Context; /// Builds reverse polish notation template diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp index b1164f6621c..c36879750a1 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeCleanupThread.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp index 98b30498839..9d055545457 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreePartCheckThread.cpp @@ -2,6 +2,7 @@ #include #include #include +#include namespace ProfileEvents diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp index 80e7e033525..94497b2a850 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeQueue.cpp @@ -331,18 +331,13 @@ void ReplicatedMergeTreeQueue::updateTimesInZooKeeper( void ReplicatedMergeTreeQueue::removeProcessedEntry(zkutil::ZooKeeperPtr zookeeper, LogEntryPtr & entry) { - auto code = zookeeper->tryRemove(replica_path + "/queue/" + entry->znode_name); - - if (code) - LOG_ERROR(log, "Couldn't remove " << replica_path << "/queue/" << entry->znode_name << ": " - << zkutil::ZooKeeper::error2string(code) << ". 
This shouldn't happen often."); - std::optional min_unprocessed_insert_time_changed; std::optional max_processed_insert_time_changed; bool found = false; size_t queue_size = 0; + /// First remove from memory then from ZooKeeper { std::unique_lock lock(state_mutex); @@ -372,6 +367,11 @@ void ReplicatedMergeTreeQueue::removeProcessedEntry(zkutil::ZooKeeperPtr zookeep notifySubscribers(queue_size); + auto code = zookeeper->tryRemove(replica_path + "/queue/" + entry->znode_name); + if (code) + LOG_ERROR(log, "Couldn't remove " << replica_path << "/queue/" << entry->znode_name << ": " + << zkutil::ZooKeeper::error2string(code) << ". This shouldn't happen often."); + updateTimesInZooKeeper(zookeeper, min_unprocessed_insert_time_changed, max_processed_insert_time_changed); } @@ -1405,6 +1405,8 @@ bool ReplicatedMergeTreeQueue::tryFinalizeMutations(zkutil::ZooKeeperPtr zookeep if (candidates.empty()) return false; + else + LOG_DEBUG(log, "Trying to finalize " << candidates.size() << " mutations"); auto merge_pred = getMergePredicate(zookeeper); diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp index cd82a865827..75f15556edf 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeRestartingThread.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index 721d58539ef..40cc8edca74 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -17,6 +17,8 @@ #include #include +#include + namespace DB { diff --git a/src/Storages/StorageBuffer.h b/src/Storages/StorageBuffer.h index f8fd1d2eaaf..6d6c1f66569 100644 --- a/src/Storages/StorageBuffer.h +++ b/src/Storages/StorageBuffer.h @@ -8,6 +8,7 @@ #include #include #include +#include namespace Poco { class Logger; } @@ -16,8 +17,6 @@ namespace Poco { class Logger; } namespace DB { -class Context; - /** During insertion, buffers the data in the RAM until certain thresholds are exceeded. * When thresholds are exceeded, flushes the data to another table. diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index 8d85b5a1db2..bf2d0b98870 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -276,8 +277,8 @@ StorageDistributed::StorageDistributed( : IStorage(id_) , remote_database(remote_database_) , remote_table(remote_table_) - , global_context(context_) - , cluster_name(global_context.getMacros()->expand(cluster_name_)) + , global_context(std::make_unique(context_)) + , cluster_name(global_context->getMacros()->expand(cluster_name_)) , has_sharding_key(sharding_key_) , storage_policy(storage_policy_) , relative_data_path(relative_data_path_) @@ -287,7 +288,7 @@ StorageDistributed::StorageDistributed( if (sharding_key_) { - sharding_key_expr = buildShardingKeyExpression(sharding_key_, global_context, getColumns().getAllPhysical(), false); + sharding_key_expr = buildShardingKeyExpression(sharding_key_, *global_context, getColumns().getAllPhysical(), false); sharding_key_column_name = sharding_key_->getColumnName(); } @@ -297,7 +298,7 @@ StorageDistributed::StorageDistributed( /// Sanity check. 
Skip check if the table is already created to allow the server to start. if (!attach_ && !cluster_name.empty()) { - size_t num_local_shards = global_context.getCluster(cluster_name)->getLocalShardCount(); + size_t num_local_shards = global_context->getCluster(cluster_name)->getLocalShardCount(); if (num_local_shards && remote_database == id_.database_name && remote_table == id_.table_name) throw Exception("Distributed table " + id_.table_name + " looks at itself", ErrorCodes::INFINITE_LOOP); } @@ -325,7 +326,7 @@ void StorageDistributed::createStorage() /// Create default policy with the relative_data_path_ if (storage_policy.empty()) { - std::string path(global_context.getPath()); + std::string path(global_context->getPath()); /// Disk must ends with '/' if (!path.ends_with('/')) path += '/'; @@ -334,7 +335,7 @@ void StorageDistributed::createStorage() } else { - auto policy = global_context.getStoragePolicy(storage_policy); + auto policy = global_context->getStoragePolicy(storage_policy); if (policy->getVolumes().size() != 1) throw Exception("Policy for Distributed table, should have exactly one volume", ErrorCodes::BAD_ARGUMENTS); volume = policy->getVolume(0); @@ -628,7 +629,7 @@ StoragePolicyPtr StorageDistributed::getStoragePolicy() const { if (storage_policy.empty()) return {}; - return global_context.getStoragePolicy(storage_policy); + return global_context->getStoragePolicy(storage_policy); } void StorageDistributed::createDirectoryMonitors(const std::string & disk) @@ -655,7 +656,7 @@ StorageDistributedDirectoryMonitor& StorageDistributed::requireDirectoryMonitor( { node_data.conneciton_pool = StorageDistributedDirectoryMonitor::createPool(name, *this); node_data.directory_monitor = std::make_unique( - *this, path, node_data.conneciton_pool, monitors_blocker, global_context.getDistributedSchedulePool()); + *this, path, node_data.conneciton_pool, monitors_blocker, global_context->getDistributedSchedulePool()); } return *node_data.directory_monitor; } @@ -672,7 +673,7 @@ std::pair StorageDistributed::getPath( ClusterPtr StorageDistributed::getCluster() const { - return owned_cluster ? owned_cluster : global_context.getCluster(cluster_name); + return owned_cluster ? owned_cluster : global_context->getCluster(cluster_name); } ClusterPtr StorageDistributed::getOptimizedCluster(const Context & context, const ASTPtr & query_ptr) const diff --git a/src/Storages/StorageDistributed.h b/src/Storages/StorageDistributed.h index 125e1dee1e6..c934b3870f4 100644 --- a/src/Storages/StorageDistributed.h +++ b/src/Storages/StorageDistributed.h @@ -127,7 +127,7 @@ public: String remote_table; ASTPtr remote_table_function_ptr; - Context global_context; + std::unique_ptr global_context; Logger * log = &Logger::get("StorageDistributed"); /// Used to implement TableFunctionRemote. 
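StorageDistributed now owns a copy of the creating context behind a `std::unique_ptr`, so StorageDistributed.h needs only `class Context;` and every use site dereferences, as in the DirectoryMonitor hunks earlier. The shape of the change, with a stub standing in for the real Context:

```cpp
#include <memory>
#include <string>

/// Stub playing the role of DB::Context in the hunks above; illustrative only.
struct Context
{
    std::string getPath() const { return "/var/lib/clickhouse/"; }
};

class StorageLike
{
public:
    /// Was: a Context member initialized by copy. The copy now lives on the
    /// heap, which lets the header forward-declare Context instead of
    /// including it.
    explicit StorageLike(const Context & context_)
        : global_context(std::make_unique<Context>(context_))
    {
    }

    /// Use sites change from global_context.getPath() to global_context->getPath().
    std::string disksPath() const { return global_context->getPath() + "disks/"; }

private:
    std::unique_ptr<Context> global_context;  /// was: Context global_context;
};
```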
diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index e22e81d5041..a25b4d3beba 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include #include @@ -21,9 +23,8 @@ #include #include #include -#include -#include #include +#include namespace DB diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index e6fc32ecbf9..1366d0fa62a 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -44,6 +44,7 @@ #include #include +#include #include #include @@ -2272,7 +2273,6 @@ void StorageReplicatedMergeTree::mergeSelectingTask() void StorageReplicatedMergeTree::mutationsFinalizingTask() { - LOG_DEBUG(log, "Trying to finalize mutations"); bool needs_reschedule = false; try diff --git a/src/Storages/StorageSet.cpp b/src/Storages/StorageSet.cpp index 613f5b6a2ed..9a3a2b0ac94 100644 --- a/src/Storages/StorageSet.cpp +++ b/src/Storages/StorageSet.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 287e93fc99a..636c7f9d64d 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index 283c7f2c4a5..ab824fc8bdc 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -12,6 +12,7 @@ #include #include #include +#include namespace DB diff --git a/src/Storages/System/StorageSystemDisks.cpp b/src/Storages/System/StorageSystemDisks.cpp index cadaaff6cbc..b5a5026b2e7 100644 --- a/src/Storages/System/StorageSystemDisks.cpp +++ b/src/Storages/System/StorageSystemDisks.cpp @@ -1,6 +1,7 @@ #include #include #include +#include namespace DB { diff --git a/src/Storages/System/StorageSystemMutations.cpp b/src/Storages/System/StorageSystemMutations.cpp index 968c67bc14a..d4a262860dc 100644 --- a/src/Storages/System/StorageSystemMutations.cpp +++ b/src/Storages/System/StorageSystemMutations.cpp @@ -9,6 +9,7 @@ #include #include #include +#include namespace DB diff --git a/src/Storages/System/StorageSystemParts.cpp b/src/Storages/System/StorageSystemParts.cpp index 2418594899e..83e3c34bfb8 100644 --- a/src/Storages/System/StorageSystemParts.cpp +++ b/src/Storages/System/StorageSystemParts.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -17,41 +18,48 @@ namespace DB StorageSystemParts::StorageSystemParts(const std::string & name_) : StorageSystemPartsBase(name_, { - {"partition", std::make_shared()}, - {"name", std::make_shared()}, - {"part_type", std::make_shared()}, - {"active", std::make_shared()}, - {"marks", std::make_shared()}, - {"rows", std::make_shared()}, - {"bytes_on_disk", std::make_shared()}, - {"data_compressed_bytes", std::make_shared()}, - {"data_uncompressed_bytes", std::make_shared()}, - {"marks_bytes", std::make_shared()}, - {"modification_time", std::make_shared()}, - {"remove_time", std::make_shared()}, - {"refcount", std::make_shared()}, - {"min_date", std::make_shared()}, - {"max_date", std::make_shared()}, - {"min_time", std::make_shared()}, - {"max_time", std::make_shared()}, - {"partition_id", std::make_shared()}, - {"min_block_number", std::make_shared()}, - {"max_block_number", 
std::make_shared()}, - {"level", std::make_shared()}, - {"data_version", std::make_shared()}, - {"primary_key_bytes_in_memory", std::make_shared()}, - {"primary_key_bytes_in_memory_allocated", std::make_shared()}, - {"is_frozen", std::make_shared()}, + {"partition", std::make_shared()}, + {"name", std::make_shared()}, + {"part_type", std::make_shared()}, + {"active", std::make_shared()}, + {"marks", std::make_shared()}, + {"rows", std::make_shared()}, + {"bytes_on_disk", std::make_shared()}, + {"data_compressed_bytes", std::make_shared()}, + {"data_uncompressed_bytes", std::make_shared()}, + {"marks_bytes", std::make_shared()}, + {"modification_time", std::make_shared()}, + {"remove_time", std::make_shared()}, + {"refcount", std::make_shared()}, + {"min_date", std::make_shared()}, + {"max_date", std::make_shared()}, + {"min_time", std::make_shared()}, + {"max_time", std::make_shared()}, + {"partition_id", std::make_shared()}, + {"min_block_number", std::make_shared()}, + {"max_block_number", std::make_shared()}, + {"level", std::make_shared()}, + {"data_version", std::make_shared()}, + {"primary_key_bytes_in_memory", std::make_shared()}, + {"primary_key_bytes_in_memory_allocated", std::make_shared()}, + {"is_frozen", std::make_shared()}, - {"database", std::make_shared()}, - {"table", std::make_shared()}, - {"engine", std::make_shared()}, - {"disk_name", std::make_shared()}, - {"path", std::make_shared()}, + {"database", std::make_shared()}, + {"table", std::make_shared()}, + {"engine", std::make_shared()}, + {"disk_name", std::make_shared()}, + {"path", std::make_shared()}, - {"hash_of_all_files", std::make_shared()}, - {"hash_of_uncompressed_files", std::make_shared()}, - {"uncompressed_hash_of_compressed_files", std::make_shared()} + {"hash_of_all_files", std::make_shared()}, + {"hash_of_uncompressed_files", std::make_shared()}, + {"uncompressed_hash_of_compressed_files", std::make_shared()}, + + {"delete_ttl_info_min", std::make_shared()}, + {"delete_ttl_info_max", std::make_shared()}, + + {"move_ttl_info.expression", std::make_shared(std::make_shared())}, + {"move_ttl_info.min", std::make_shared(std::make_shared())}, + {"move_ttl_info.max", std::make_shared(std::make_shared())}, } ) { @@ -128,6 +136,31 @@ void StorageSystemParts::processNextStorage(MutableColumns & columns_, const Sto checksum = helper.uncompressed_hash_of_compressed_files; columns_[i++]->insert(getHexUIntLowercase(checksum.first) + getHexUIntLowercase(checksum.second)); + + /// delete_ttl_info + { + columns_[i++]->insert(static_cast(part->ttl_infos.table_ttl.min)); + columns_[i++]->insert(static_cast(part->ttl_infos.table_ttl.max)); + } + + /// move_ttl_info + { + Array expression_array; + Array min_array; + Array max_array; + expression_array.reserve(part->ttl_infos.moves_ttl.size()); + min_array.reserve(part->ttl_infos.moves_ttl.size()); + max_array.reserve(part->ttl_infos.moves_ttl.size()); + for (const auto & [expression, move_ttl_info] : part->ttl_infos.moves_ttl) + { + expression_array.emplace_back(expression); + min_array.push_back(static_cast(move_ttl_info.min)); + max_array.push_back(static_cast(move_ttl_info.max)); + } + columns_[i++]->insert(expression_array); + columns_[i++]->insert(min_array); + columns_[i++]->insert(max_array); + } } } diff --git a/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp index 0fce41d8398..6356e6d699e 100644 --- a/src/Storages/System/StorageSystemPartsBase.cpp +++ b/src/Storages/System/StorageSystemPartsBase.cpp @@ -14,6 +14,7 @@ 
#include #include #include +#include namespace DB diff --git a/src/Storages/System/StorageSystemStoragePolicies.cpp b/src/Storages/System/StorageSystemStoragePolicies.cpp index 474d2b5bcc4..c4f43b0a5bf 100644 --- a/src/Storages/System/StorageSystemStoragePolicies.cpp +++ b/src/Storages/System/StorageSystemStoragePolicies.cpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB diff --git a/src/TableFunctions/ITableFunctionFileLike.h b/src/TableFunctions/ITableFunctionFileLike.h index a18ca8ea4c8..55df9a7484d 100644 --- a/src/TableFunctions/ITableFunctionFileLike.h +++ b/src/TableFunctions/ITableFunctionFileLike.h @@ -1,11 +1,11 @@ #pragma once #include -#include namespace DB { class ColumnsDescription; +class Context; /* * function(source, format, structure) - creates a temporary storage from formated source diff --git a/src/TableFunctions/TableFunctionFile.cpp b/src/TableFunctions/TableFunctionFile.cpp index 0a68ed59aa2..4f67f4cfd10 100644 --- a/src/TableFunctions/TableFunctionFile.cpp +++ b/src/TableFunctions/TableFunctionFile.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include "registerTableFunctions.h" namespace DB diff --git a/src/TableFunctions/TableFunctionFile.h b/src/TableFunctions/TableFunctionFile.h index ead924f6828..e0d8c20ac61 100644 --- a/src/TableFunctions/TableFunctionFile.h +++ b/src/TableFunctions/TableFunctionFile.h @@ -1,11 +1,11 @@ #pragma once #include -#include namespace DB { +class Context; /* file(path, format, structure) - creates a temporary storage from file * * diff --git a/src/TableFunctions/TableFunctionHDFS.h b/src/TableFunctions/TableFunctionHDFS.h index 443ce0aa93b..5e8503b318e 100644 --- a/src/TableFunctions/TableFunctionHDFS.h +++ b/src/TableFunctions/TableFunctionHDFS.h @@ -5,11 +5,13 @@ #if USE_HDFS #include -#include namespace DB { + +class Context; + /* hdfs(name_node_ip:name_node_port, format, structure) - creates a temporary storage from hdfs file * */ @@ -27,6 +29,7 @@ private: const String & source, const String & format, const ColumnsDescription & columns, Context & global_context, const std::string & table_name, const String & compression_method) const override; const char * getStorageTypeName() const override { return "HDFS"; } }; + } #endif diff --git a/src/TableFunctions/TableFunctionInput.cpp b/src/TableFunctions/TableFunctionInput.cpp index e8f3453da06..5b4a3d97ee4 100644 --- a/src/TableFunctions/TableFunctionInput.cpp +++ b/src/TableFunctions/TableFunctionInput.cpp @@ -8,7 +8,6 @@ #include #include #include -#include #include #include #include "registerTableFunctions.h" diff --git a/src/TableFunctions/TableFunctionInput.h b/src/TableFunctions/TableFunctionInput.h index 92c2e3a6e54..a2791533c5d 100644 --- a/src/TableFunctions/TableFunctionInput.h +++ b/src/TableFunctions/TableFunctionInput.h @@ -1,11 +1,13 @@ #pragma once #include -#include namespace DB { + +class Context; + /* input(structure) - allows to make INSERT SELECT from incoming stream of data */ class TableFunctionInput : public ITableFunction @@ -18,4 +20,5 @@ private: StoragePtr executeImpl(const ASTPtr & ast_function, const Context & context, const std::string & table_name) const override; const char * getStorageTypeName() const override { return "Input"; } }; + } diff --git a/src/TableFunctions/TableFunctionURL.h b/src/TableFunctions/TableFunctionURL.h index 61dca561f0c..366d573bcf4 100644 --- a/src/TableFunctions/TableFunctionURL.h +++ b/src/TableFunctions/TableFunctionURL.h @@ -1,11 +1,13 @@ #pragma once #include -#include namespace DB { + 
diff --git a/src/TableFunctions/TableFunctionURL.h b/src/TableFunctions/TableFunctionURL.h
index 61dca561f0c..366d573bcf4 100644
--- a/src/TableFunctions/TableFunctionURL.h
+++ b/src/TableFunctions/TableFunctionURL.h
@@ -1,11 +1,13 @@
 #pragma once

 #include <TableFunctions/ITableFunctionFileLike.h>
-#include <Interpreters/Context.h>

 namespace DB
 {

+
+class Context;
+
 /* url(source, format, structure) - creates a temporary storage from url
  */
 class TableFunctionURL : public ITableFunctionFileLike
@@ -22,4 +24,5 @@ private:
 const String & source, const String & format, const ColumnsDescription & columns, Context & global_context, const std::string & table_name, const String & compression_method) const override;
 const char * getStorageTypeName() const override { return "URL"; }
 };
+
 }
diff --git a/tests/clickhouse-test b/tests/clickhouse-test
index 1a2735296e8..324fd13aac2 100755
--- a/tests/clickhouse-test
+++ b/tests/clickhouse-test
@@ -234,6 +234,14 @@ def run_tests_array(all_tests_with_params):
 clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
 clickhouse_proc.communicate("SELECT 'Running test {suite}/{case} from pid={pid}';".format(pid = os.getpid(), case = case, suite = suite))

+ if not args.no_system_log_cleanup:
+     clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
+     clickhouse_proc.communicate("SYSTEM FLUSH LOGS")
+
+     for table in ['query_log', 'query_thread_log', 'trace_log', 'metric_log']:
+         clickhouse_proc = Popen(shlex.split(args.client), stdin=PIPE, stdout=PIPE, stderr=PIPE)
+         clickhouse_proc.communicate("TRUNCATE TABLE IF EXISTS system.{}".format(table))
+
 reference_file = os.path.join(suite_dir, name) + '.reference'
 stdout_file = os.path.join(suite_tmp_dir, name) + '.stdout'
 stderr_file = os.path.join(suite_tmp_dir, name) + '.stderr'
@@ -564,6 +572,7 @@ if __name__ == '__main__':
 parser.add_argument('--stop', action='store_true', default=None, dest='stop', help='Stop on network errors')
 parser.add_argument('--order', default='desc', choices=['asc', 'desc', 'random'], help='Run order')
 parser.add_argument('--testname', action='store_true', default=None, dest='testname', help='Make query with test name before test run')
+parser.add_argument('--no-system-log-cleanup', action='store_true', default=None, help='Do not cleanup system.*_log tables')
 parser.add_argument('--hung-check', action='store_true', default=False)
 parser.add_argument('--force-color', action='store_true', default=False)
 parser.add_argument('--database', help='Database for tests (random name test_XXXXXX by default)')
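-- Aside: the per-test cleanup the runner now performs, spelled out as the SQL it
-- sends (one TRUNCATE per system log table), matching the Python loop above:
SYSTEM FLUSH LOGS;
TRUNCATE TABLE IF EXISTS system.query_log;
TRUNCATE TABLE IF EXISTS system.query_thread_log;
TRUNCATE TABLE IF EXISTS system.trace_log;
TRUNCATE TABLE IF EXISTS system.metric_log;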
diff --git a/tests/integration/test_s3_with_proxy/configs/config.d/storage_conf.xml b/tests/integration/test_s3_with_proxy/configs/config.d/storage_conf.xml
index a83c875b134..ec543d64cdb 100644
--- a/tests/integration/test_s3_with_proxy/configs/config.d/storage_conf.xml
+++ b/tests/integration/test_s3_with_proxy/configs/config.d/storage_conf.xml
@@ -18,12 +18,12 @@
 <secret_access_key>minio123</secret_access_key>
 <proxy>
 <resolver>
- <endpoint>http://resolver:8080</endpoint>
+ <endpoint>http://resolver:8080/hostname</endpoint>
 <proxy_scheme>http</proxy_scheme>
 <proxy_port>8888</proxy_port>
 </resolver>
 </proxy>
diff --git a/tests/integration/test_s3_with_proxy/proxy-resolver/resolver.py b/tests/integration/test_s3_with_proxy/proxy-resolver/resolver.py
index ecafe92cb83..520c018cbad 100644
--- a/tests/integration/test_s3_with_proxy/proxy-resolver/resolver.py
+++ b/tests/integration/test_s3_with_proxy/proxy-resolver/resolver.py
@@ -2,7 +2,7 @@ import bottle
 import random


-@bottle.route('/')
+@bottle.route('/hostname')
 def index():
     if random.randrange(2) == 0:
         return 'proxy1'
diff --git a/tests/performance/point_in_polygon.xml b/tests/performance/point_in_polygon.xml
new file mode 100644
index 00000000000..d854fb6952b
--- /dev/null
+++ b/tests/performance/point_in_polygon.xml
@@ -0,0 +1,6 @@
+<test>
+    <create_query>CREATE TABLE point_in_polygon(`polygon` Array(Array(Tuple(Float64, Float64)))) ENGINE = Log()</create_query>
+    <fill_query>insert into point_in_polygon SELECT arrayJoin(arrayMap(y -> [arrayMap(x -> (cos(x / 90. * pi()) * y, sin(x / 90. * pi()) * y), range(180))], arraySlice(range(35000), 2, 35000)))</fill_query>
+    <query>SELECT pointInPolygon((100, 100), `polygon`) from point_in_polygon</query>
+    <drop_query>DROP TABLE IF EXISTS point_in_polygon</drop_query>
+</test>
diff --git a/tests/performance/sum.xml b/tests/performance/sum.xml
new file mode 100644
index 00000000000..9bee2a580c3
--- /dev/null
+++ b/tests/performance/sum.xml
@@ -0,0 +1,19 @@
+<test>
+    <query>SELECT sum(number) FROM numbers(100000000)</query>
+    <query>SELECT sum(toUInt32(number)) FROM numbers(100000000)</query>
+    <query>SELECT sum(toUInt16(number)) FROM numbers(100000000)</query>
+    <query>SELECT sum(toUInt8(number)) FROM numbers(100000000)</query>
+    <query>SELECT sum(toFloat32(number)) FROM numbers(100000000)</query>
+    <query>SELECT sum(toFloat64(number)) FROM numbers(100000000)</query>
+    <query>SELECT sumKahan(toFloat32(number)) FROM numbers(100000000)</query>
+    <query>SELECT sumKahan(toFloat64(number)) FROM numbers(100000000)</query>
+
+    <query>SELECT sum(toNullable(number)) FROM numbers(100000000)</query>
+    <query>SELECT sum(toNullable(toUInt32(number))) FROM numbers(100000000)</query>
+    <query>SELECT sum(toNullable(toUInt16(number))) FROM numbers(100000000)</query>
+    <query>SELECT sum(toNullable(toUInt8(number))) FROM numbers(100000000)</query>
+    <query>SELECT sum(toNullable(toFloat32(number))) FROM numbers(100000000)</query>
+    <query>SELECT sum(toNullable(toFloat64(number))) FROM numbers(100000000)</query>
+    <query>SELECT sumKahan(toNullable(toFloat32(number))) FROM numbers(100000000)</query>
+    <query>SELECT sumKahan(toNullable(toFloat64(number))) FROM numbers(100000000)</query>
+</test>
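-- Aside: sumKahan, exercised by the queries above, is compensated (Kahan) summation;
-- it bounds the floating-point rounding error that a plain sum can accumulate over
-- many terms. A sketch; the two columns may differ in the low-order digits.
SELECT sum(toFloat32(number / 3)) AS plain,
       sumKahan(toFloat32(number / 3)) AS compensated
FROM numbers(1000000);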
diff --git a/tests/queries/0_stateless/00500_point_in_polygon_non_const_poly.reference b/tests/queries/0_stateless/00500_point_in_polygon_non_const_poly.reference
new file mode 100644
index 00000000000..083a7ac7236
--- /dev/null
+++ b/tests/queries/0_stateless/00500_point_in_polygon_non_const_poly.reference
@@ -0,0 +1,68 @@
+Const point; No holes
+0
+0
+0
+0
+0
+1
+1
+1
+1
+1
+1
+0
+1
+0
+1
+0
+Non-const point; No holes
+0
+0
+0
+0
+0
+1
+1
+1
+1
+1
+1
+0
+1
+0
+1
+0
+Const point; With holes
+0
+0
+0
+0
+0
+1
+1
+0
+0
+1
+1
+0
+1
+0
+1
+0
+Non-const point; With holes
+0
+0
+0
+0
+0
+1
+1
+0
+0
+1
+1
+0
+1
+0
+1
+0
diff --git a/tests/queries/0_stateless/00500_point_in_polygon_non_const_poly.sql b/tests/queries/0_stateless/00500_point_in_polygon_non_const_poly.sql
new file mode 100644
index 00000000000..f38066debbf
--- /dev/null
+++ b/tests/queries/0_stateless/00500_point_in_polygon_non_const_poly.sql
@@ -0,0 +1,86 @@
+DROP TABLE IF EXISTS polygons;
+
+SELECT 'Const point; No holes';
+create table polygons ( id Int32, poly Array(Tuple(Int32, Int32))) engine = Log();
+
+INSERT INTO polygons VALUES (1, [(0, 0), (10, 0), (10, 10), (0, 10)]);
+INSERT INTO polygons VALUES (2, [(-5, -5), (5, -5), (5, 5), (-5, 5)]);
+
+SELECT pointInPolygon((-10, 0), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((0, -10), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((-5, -5), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((0, 0), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((5, 5), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((10, 10), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((10, 5), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((5, 10), poly) FROM polygons ORDER BY id;
+
+DROP TABLE polygons;
+
+SELECT 'Non-const point; No holes';
+
+create table polygons ( id Int32, pt Tuple(Int32, Int32), poly Array(Tuple(Int32, Int32))) engine = Log();
+
+INSERT INTO polygons VALUES (1, (-10, 0), [(0, 0), (10, 0), (10, 10), (0, 10)]);
+INSERT INTO polygons VALUES (2, (-10, 0), [(-5, -5), (5, -5), (5, 5), (-5, 5)]);
+INSERT INTO polygons VALUES (3, (0, -10), [(0, 0), (10, 0), (10, 10), (0, 10)]);
+INSERT INTO polygons VALUES (4, (0, -10), [(-5, -5), (5, -5), (5, 5), (-5, 5)]);
+INSERT INTO polygons VALUES (5, (-5, -5), [(0, 0), (10, 0), (10, 10), (0, 10)]);
+INSERT INTO polygons VALUES (6, (-5, -5), [(-5, -5), (5, -5), (5, 5), (-5, 5)]);
+INSERT INTO polygons VALUES (7, (0, 0), [(0, 0), (10, 0), (10, 10), (0, 10)]);
+INSERT INTO polygons VALUES (8, (0, 0), [(-5, -5), (5, -5), (5, 5), (-5, 5)]);
+INSERT INTO polygons VALUES (9, (5, 5), [(0, 0), (10, 0), (10, 10), (0, 10)]);
+INSERT INTO polygons VALUES (10, (5, 5), [(-5, -5), (5, -5), (5, 5), (-5, 5)]);
+INSERT INTO polygons VALUES (11, (10, 10), [(0, 0), (10, 0), (10, 10), (0, 10)]);
+INSERT INTO polygons VALUES (12, (10, 10), [(-5, -5), (5, -5), (5, 5), (-5, 5)]);
+INSERT INTO polygons VALUES (13, (10, 5), [(0, 0), (10, 0), (10, 10), (0, 10)]);
+INSERT INTO polygons VALUES (14, (10, 5), [(-5, -5), (5, -5), (5, 5), (-5, 5)]);
+INSERT INTO polygons VALUES (15, (5, 10), [(0, 0), (10, 0), (10, 10), (0, 10)]);
+INSERT INTO polygons VALUES (16, (5, 10), [(-5, -5), (5, -5), (5, 5), (-5, 5)]);
+
+SELECT pointInPolygon(pt, poly) FROM polygons ORDER BY id;
+
+DROP TABLE polygons;
+
+SELECT 'Const point; With holes';
+
+create table polygons ( id Int32, poly Array(Array(Tuple(Int32, Int32)))) engine = Log();
+
+INSERT INTO polygons VALUES (1, [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]);
+INSERT INTO polygons VALUES (2, [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]);
+
+SELECT pointInPolygon((-10, 0), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((0, -10), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((-5, -5), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((0, 0), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((5, 5), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((10, 10), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((10, 5), poly) FROM polygons ORDER BY id;
+SELECT pointInPolygon((5, 10), poly) FROM polygons ORDER BY id;
+
+DROP TABLE polygons;
+
+SELECT 'Non-const point; With holes';
+
+create table polygons ( id Int32, pt Tuple(Int32, Int32), poly Array(Array(Tuple(Int32, Int32)))) engine = Log();
+
+INSERT INTO polygons VALUES (1, (-10, 0), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]);
+INSERT INTO polygons VALUES (2, (-10, 0), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]);
+INSERT INTO polygons VALUES (3, (0, -10), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]);
+INSERT INTO polygons VALUES (4, (0, -10), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]);
+INSERT INTO polygons VALUES (5, (-5, -5), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]);
+INSERT INTO polygons VALUES (6, (-5, -5), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]);
+INSERT INTO polygons VALUES (7, (0, 0), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]);
+INSERT INTO polygons VALUES (8, (0, 0), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]);
+INSERT INTO polygons VALUES (9, (5, 5), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]);
+INSERT INTO polygons VALUES (10, (5, 5), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]);
+INSERT INTO polygons VALUES (11, (10, 10), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]);
+INSERT INTO
polygons VALUES (12, (10, 10), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]); +INSERT INTO polygons VALUES (13, (10, 5), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]); +INSERT INTO polygons VALUES (14, (10, 5), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]); +INSERT INTO polygons VALUES (15, (5, 10), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]); +INSERT INTO polygons VALUES (16, (5, 10), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]); + +SELECT pointInPolygon(pt, poly) FROM polygons ORDER BY id; + +DROP TABLE polygons; \ No newline at end of file diff --git a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh index 5173b5f5772..efb4b3569fb 100755 --- a/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh +++ b/tests/queries/0_stateless/00634_performance_introspection_and_logging.sh @@ -47,7 +47,7 @@ SELECT threads_realtime >= threads_time_user_system_io, any(length(thread_ids)) >= 1 FROM - (SELECT * FROM system.query_log PREWHERE query='$heavy_cpu_query' WHERE event_date >= today()-1 AND type=2 ORDER BY event_time DESC LIMIT 1) + (SELECT * FROM system.query_log PREWHERE query='$heavy_cpu_query' WHERE type='QueryFinish' ORDER BY event_time DESC LIMIT 1) ARRAY JOIN ProfileEvents.Names AS PN, ProfileEvents.Values AS PV" # Check per-thread and per-query ProfileEvents consistency @@ -58,7 +58,7 @@ SELECT PN, PVq, PVt FROM SELECT PN, sum(PV) AS PVt FROM system.query_thread_log ARRAY JOIN ProfileEvents.Names AS PN, ProfileEvents.Values AS PV - WHERE event_date >= today()-1 AND query_id='$query_id' + WHERE query_id='$query_id' GROUP BY PN ) js1 ANY INNER JOIN @@ -66,7 +66,7 @@ ANY INNER JOIN SELECT PN, PV AS PVq FROM system.query_log ARRAY JOIN ProfileEvents.Names AS PN, ProfileEvents.Values AS PV - WHERE event_date >= today()-1 AND query_id='$query_id' + WHERE query_id='$query_id' ) js2 USING PN WHERE diff --git a/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh index 1f7571a2404..9e32c30ce20 100755 --- a/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh +++ b/tests/queries/0_stateless/00933_test_fix_extra_seek_on_compressed_cache.sh @@ -19,7 +19,7 @@ $CLICKHOUSE_CLIENT --use_uncompressed_cache=1 --query_id="test-query-uncompresse sleep 1 $CLICKHOUSE_CLIENT --query="SYSTEM FLUSH LOGS" -$CLICKHOUSE_CLIENT --query="SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'Seek')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'ReadCompressedBytes')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'UncompressedCacheHits')] AS hit FROM system.query_log WHERE (query_id = 'test-query-uncompressed-cache') AND (type = 2) AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1" +$CLICKHOUSE_CLIENT --query="SELECT ProfileEvents.Values[indexOf(ProfileEvents.Names, 'Seek')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'ReadCompressedBytes')], ProfileEvents.Values[indexOf(ProfileEvents.Names, 'UncompressedCacheHits')] AS hit FROM system.query_log WHERE (query_id = 'test-query-uncompressed-cache') AND (type = 'QueryFinish') ORDER BY event_time DESC LIMIT 1" $CLICKHOUSE_CLIENT --query="DROP TABLE IF EXISTS small_table" diff --git a/tests/queries/0_stateless/00956_sensitive_data_masking.sh 
b/tests/queries/0_stateless/00956_sensitive_data_masking.sh index 0f76c34eaff..c492fd35b89 100755 --- a/tests/queries/0_stateless/00956_sensitive_data_masking.sh +++ b/tests/queries/0_stateless/00956_sensitive_data_masking.sh @@ -95,7 +95,7 @@ echo 7 # and finally querylog $CLICKHOUSE_CLIENT \ --server_logs_file=/dev/null \ - --query="select * from system.query_log where event_time>now() - 10 and query like '%TOPSECRET%';" + --query="select * from system.query_log where query like '%TOPSECRET%';" rm -f $tmp_file >/dev/null 2>&1 @@ -117,8 +117,8 @@ sleep 0.1; echo 9 $CLICKHOUSE_CLIENT \ --server_logs_file=/dev/null \ - --query="SELECT if( count() > 0, 'text_log non empty', 'text_log empty') FROM system.text_log WHERE event_time>now() - 60 and message like '%find_me%'; - select * from system.text_log where event_time>now() - 60 and message like '%TOPSECRET=TOPSECRET%';" --ignore-error --multiquery + --query="SELECT if( count() > 0, 'text_log non empty', 'text_log empty') FROM system.text_log WHERE message like '%find_me%'; + select * from system.text_log where message like '%TOPSECRET=TOPSECRET%';" --ignore-error --multiquery echo 'finish' rm -f $tmp_file >/dev/null 2>&1 diff --git a/tests/queries/0_stateless/00974_query_profiler.sql b/tests/queries/0_stateless/00974_query_profiler.sql index 4d0b46dd51f..9e2723c67d8 100644 --- a/tests/queries/0_stateless/00974_query_profiler.sql +++ b/tests/queries/0_stateless/00974_query_profiler.sql @@ -5,7 +5,7 @@ SET log_queries = 1; SELECT sleep(0.5), ignore('test real time query profiler'); SET log_queries = 0; SYSTEM FLUSH LOGS; -WITH addressToSymbol(arrayJoin(trace)) AS symbol SELECT count() > 0 FROM system.trace_log t WHERE event_date >= yesterday() AND query_id = (SELECT query_id FROM system.query_log WHERE event_date >= yesterday() AND query LIKE '%test real time query profiler%' AND query NOT LIKE '%system%' ORDER BY event_time DESC LIMIT 1) AND symbol LIKE '%FunctionSleep%'; +WITH addressToSymbol(arrayJoin(trace)) AS symbol SELECT count() > 0 FROM system.trace_log t WHERE query_id = (SELECT query_id FROM system.query_log WHERE query LIKE '%test real time query profiler%' AND query NOT LIKE '%system%' ORDER BY event_time DESC LIMIT 1) AND symbol LIKE '%FunctionSleep%'; SET query_profiler_real_time_period_ns = 0; SET query_profiler_cpu_time_period_ns = 1000000; @@ -13,4 +13,4 @@ SET log_queries = 1; SELECT count(), ignore('test cpu time query profiler') FROM numbers(1000000000); SET log_queries = 0; SYSTEM FLUSH LOGS; -WITH addressToSymbol(arrayJoin(trace)) AS symbol SELECT count() > 0 FROM system.trace_log t WHERE event_date >= yesterday() AND query_id = (SELECT query_id FROM system.query_log WHERE event_date >= yesterday() AND query LIKE '%test cpu time query profiler%' AND query NOT LIKE '%system%' ORDER BY event_time DESC LIMIT 1) AND symbol LIKE '%Source%'; +WITH addressToSymbol(arrayJoin(trace)) AS symbol SELECT count() > 0 FROM system.trace_log t WHERE query_id = (SELECT query_id FROM system.query_log WHERE query LIKE '%test cpu time query profiler%' AND query NOT LIKE '%system%' ORDER BY event_time DESC LIMIT 1) AND symbol LIKE '%Source%'; diff --git a/tests/queries/0_stateless/00974_text_log_table_not_empty.sh b/tests/queries/0_stateless/00974_text_log_table_not_empty.sh index c3cde4c08bb..149f0668bd1 100755 --- a/tests/queries/0_stateless/00974_text_log_table_not_empty.sh +++ b/tests/queries/0_stateless/00974_text_log_table_not_empty.sh @@ -10,7 +10,7 @@ do ${CLICKHOUSE_CLIENT} --query="SYSTEM FLUSH LOGS" sleep 0.1; -if [[ 
$($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() > 0 FROM system.text_log WHERE position(system.text_log.message, 'SELECT 6103') > 0 AND event_date >= yesterday()") == 1 ]]; then echo 1; exit; fi; +if [[ $($CLICKHOUSE_CURL -sS "$CLICKHOUSE_URL" -d "SELECT count() > 0 FROM system.text_log WHERE position(system.text_log.message, 'SELECT 6103') > 0") == 1 ]]; then echo 1; exit; fi; done; diff --git a/tests/queries/0_stateless/01070_exception_code_in_query_log_table.sql b/tests/queries/0_stateless/01070_exception_code_in_query_log_table.sql index 2c99ba54112..e1e81614ab7 100644 --- a/tests/queries/0_stateless/01070_exception_code_in_query_log_table.sql +++ b/tests/queries/0_stateless/01070_exception_code_in_query_log_table.sql @@ -3,5 +3,5 @@ SELECT * FROM test_table_for_01070_exception_code_in_query_log_table; -- { serve CREATE TABLE test_table_for_01070_exception_code_in_query_log_table (value UInt64) ENGINE=Memory(); SELECT * FROM test_table_for_01070_exception_code_in_query_log_table; SYSTEM FLUSH LOGS; -SELECT exception_code FROM system.query_log WHERE query = 'SELECT * FROM test_table_for_01070_exception_code_in_query_log_table' AND event_date >= yesterday() AND event_time > now() - INTERVAL 5 MINUTE ORDER BY exception_code; +SELECT exception_code FROM system.query_log WHERE query = 'SELECT * FROM test_table_for_01070_exception_code_in_query_log_table' ORDER BY exception_code; DROP TABLE IF EXISTS test_table_for_01070_exception_code_in_query_log_table; diff --git a/tests/queries/0_stateless/01091_num_threads.sql b/tests/queries/0_stateless/01091_num_threads.sql index 876a2d15d1a..a93568fcee5 100644 --- a/tests/queries/0_stateless/01091_num_threads.sql +++ b/tests/queries/0_stateless/01091_num_threads.sql @@ -8,13 +8,13 @@ WITH ( SELECT query_id FROM system.query_log - WHERE (query = 'SELECT 1') AND (event_date >= (today() - 1)) + WHERE (query = 'SELECT 1') ORDER BY event_time DESC LIMIT 1 ) AS id SELECT uniqExact(thread_id) FROM system.query_thread_log -WHERE (event_date >= (today() - 1)) AND (query_id = id) AND (thread_id != master_thread_id); +WHERE (query_id = id) AND (thread_id != master_thread_id); select sum(number) from numbers(1000000); SYSTEM FLUSH LOGS; @@ -23,13 +23,13 @@ WITH ( SELECT query_id FROM system.query_log - WHERE (query = 'SELECT sum(number) FROM numbers(1000000)') AND (event_date >= (today() - 1)) + WHERE (query = 'SELECT sum(number) FROM numbers(1000000)') ORDER BY event_time DESC LIMIT 1 ) AS id SELECT uniqExact(thread_id) FROM system.query_thread_log -WHERE (event_date >= (today() - 1)) AND (query_id = id) AND (thread_id != master_thread_id); +WHERE (query_id = id) AND (thread_id != master_thread_id); select sum(number) from numbers_mt(1000000); SYSTEM FLUSH LOGS; @@ -38,10 +38,10 @@ WITH ( SELECT query_id FROM system.query_log - WHERE (query = 'SELECT sum(number) FROM numbers_mt(1000000)') AND (event_date >= (today() - 1)) + WHERE (query = 'SELECT sum(number) FROM numbers_mt(1000000)') ORDER BY event_time DESC LIMIT 1 ) AS id SELECT uniqExact(thread_id) > 2 FROM system.query_thread_log -WHERE (event_date >= (today() - 1)) AND (query_id = id) AND (thread_id != master_thread_id); +WHERE (query_id = id) AND (thread_id != master_thread_id); diff --git a/tests/queries/0_stateless/01092_memory_profiler.sql b/tests/queries/0_stateless/01092_memory_profiler.sql index c20b5c79cdb..980f7f73d5d 100644 --- a/tests/queries/0_stateless/01092_memory_profiler.sql +++ b/tests/queries/0_stateless/01092_memory_profiler.sql @@ -3,4 +3,4 @@ SET 
allow_introspection_functions = 1; SET memory_profiler_step = 1000000; SELECT ignore(groupArray(number), 'test memory profiler') FROM numbers(10000000); SYSTEM FLUSH LOGS; -WITH addressToSymbol(arrayJoin(trace)) AS symbol SELECT count() > 0 FROM system.trace_log t WHERE event_date >= yesterday() AND trace_type = 'Memory' AND query_id = (SELECT query_id FROM system.query_log WHERE event_date >= yesterday() AND query LIKE '%test memory profiler%' ORDER BY event_time DESC LIMIT 1); +WITH addressToSymbol(arrayJoin(trace)) AS symbol SELECT count() > 0 FROM system.trace_log t WHERE trace_type = 'Memory' AND query_id = (SELECT query_id FROM system.query_log WHERE query LIKE '%test memory profiler%' ORDER BY event_time DESC LIMIT 1); diff --git a/tests/queries/0_stateless/01198_client_quota_key.sh b/tests/queries/0_stateless/01198_client_quota_key.sh index f4b66aea6ac..b3bc845cd06 100755 --- a/tests/queries/0_stateless/01198_client_quota_key.sh +++ b/tests/queries/0_stateless/01198_client_quota_key.sh @@ -3,4 +3,4 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . $CURDIR/../shell_config.sh -$CLICKHOUSE_CLIENT --quota_key Hello --query_id test_quota_key --log_queries 1 --multiquery --query "SELECT 1; SYSTEM FLUSH LOGS; SELECT DISTINCT quota_key FROM system.query_log WHERE event_date >= yesterday() AND event_time >= now() - 300 AND query_id = 'test_quota_key'" +$CLICKHOUSE_CLIENT --quota_key Hello --query_id test_quota_key --log_queries 1 --multiquery --query "SELECT 1; SYSTEM FLUSH LOGS; SELECT DISTINCT quota_key FROM system.query_log WHERE query_id = 'test_quota_key'" diff --git a/tests/queries/0_stateless/01231_log_queries_min_type.sql b/tests/queries/0_stateless/01231_log_queries_min_type.sql index f2229c94a8a..565a5880b84 100644 --- a/tests/queries/0_stateless/01231_log_queries_min_type.sql +++ b/tests/queries/0_stateless/01231_log_queries_min_type.sql @@ -2,14 +2,14 @@ set log_queries=1; select '01231_log_queries_min_type/QUERY_START'; system flush logs; -select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute; +select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%'; set log_queries_min_type='EXCEPTION_BEFORE_START'; select '01231_log_queries_min_type/EXCEPTION_BEFORE_START'; system flush logs; -select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute; +select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%'; set log_queries_min_type='EXCEPTION_WHILE_PROCESSING'; select '01231_log_queries_min_type/', max(number) from system.numbers limit 1e6 settings max_rows_to_read='100K'; -- { serverError 158; } system flush logs; -select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%' and event_date = today() and event_time >= now() - interval 1 minute; +select count() from system.query_log where query like '%01231_log_queries_min_type/%' and query not like '%system.query_log%'; diff --git a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.reference b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.reference new file mode 100644 index 
00000000000..ed22b7e1e35 --- /dev/null +++ b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.reference @@ -0,0 +1,4 @@ +0 +0 +100000 +200000 diff --git a/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh new file mode 100755 index 00000000000..7e08c930f67 --- /dev/null +++ b/tests/queries/0_stateless/01278_min_insert_block_size_rows_for_materialized_views.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +. $CURDIR/../shell_config.sh + +# just in case +set -o pipefail + +function execute() +{ + ${CLICKHOUSE_CLIENT} -n "$@" +} + +# +# TEST SETTINGS +# +TEST_01278_PARTS=9 +TEST_01278_MEMORY=$((100<<20)) + +function cleanup() +{ + for i in $(seq 1 $TEST_01278_PARTS); do + echo "drop table if exists part_01278_$i;" + echo "drop table if exists mv_01278_$i;" + done | execute + echo 'drop table if exists data_01278;' | execute + echo 'drop table if exists out_01278;' | execute + echo 'drop table if exists null_01278;' | execute +} + +cleanup +trap cleanup EXIT + +# +# CREATE +# +{ +cat < 1, 'too many queries'), + throwIf(alloc < 100e6, 'minimal allocation had not been done'), + throwIf((alloc+free)/alloc > 0.05, 'memory accounting leaked more than 5%') + FROM ( + SELECT + count() cnt, + uniq(query_id) queries, + sumIf(size, size > 0) alloc, + sumIf(size, size < 0) free + FROM trace_log_01281 + ); + +-- +-- Basic +-- NOTE: 0 (and even 1e6) is too small, will make SYSTEM FLUSH LOGS too slow +-- (in debug build at least) +-- +SET max_untracked_memory=4e6; + +TRUNCATE TABLE trace_log_01281; +-- single { +SET max_threads=1; +SET memory_profiler_sample_probability=1; +SELECT uniqExactState(number) FROM numbers(toUInt64(2e6)) GROUP BY number % 2e5 FORMAT Null; +SET memory_profiler_sample_probability=0; +SYSTEM FLUSH LOGS; +-- } +SELECT * FROM trace_log_01281_assert FORMAT Null; + +SYSTEM FLUSH LOGS; +TRUNCATE TABLE trace_log_01281; +-- single limit { +SET max_threads=1; +SET memory_profiler_sample_probability=1; +SELECT uniqExactState(number) FROM numbers(toUInt64(2e6)) GROUP BY number % 2e5 LIMIT 10 FORMAT Null; +SET memory_profiler_sample_probability=0; +SYSTEM FLUSH LOGS; +-- } +SELECT * FROM trace_log_01281_assert FORMAT Null; + +SYSTEM FLUSH LOGS; +TRUNCATE TABLE trace_log_01281; +-- two-level { +-- need to have multiple threads for two-level aggregation +SET max_threads=2; +SET memory_profiler_sample_probability=1; +SELECT uniqExactState(number) FROM numbers_mt(toUInt64(2e6)) GROUP BY number % 2e5 FORMAT Null; +SET memory_profiler_sample_probability=0; +SYSTEM FLUSH LOGS; +-- } +SELECT * FROM trace_log_01281_assert FORMAT Null; + +SYSTEM FLUSH LOGS; +TRUNCATE TABLE trace_log_01281; +-- two-level limit { +-- need to have multiple threads for two-level aggregation +SET max_threads=2; +SET memory_profiler_sample_probability=1; +SELECT uniqExactState(number) FROM numbers_mt(toUInt64(2e6)) GROUP BY number % 2e5 LIMIT 10 FORMAT Null; +SET memory_profiler_sample_probability=0; +SYSTEM FLUSH LOGS; +-- } +SELECT * FROM trace_log_01281_assert FORMAT Null; + +SYSTEM FLUSH LOGS; +TRUNCATE TABLE trace_log_01281; +-- two-level MEMORY_LIMIT_EXCEEDED { +-- need to have multiple threads for two-level aggregation +SET max_threads=2; +SET memory_profiler_sample_probability=1; +SET max_memory_usage='150M'; +SELECT uniqExactState(number) FROM numbers_mt(toUInt64(10e6)) GROUP BY number % 1e6 FORMAT Null; -- { 
serverError 241; } +SET memory_profiler_sample_probability=0; +SET max_memory_usage=0; +SYSTEM FLUSH LOGS; +-- } +SELECT * FROM trace_log_01281_assert FORMAT Null; diff --git a/tests/queries/0_stateless/01281_sum_nullable.reference b/tests/queries/0_stateless/01281_sum_nullable.reference new file mode 100644 index 00000000000..be8b67fd296 --- /dev/null +++ b/tests/queries/0_stateless/01281_sum_nullable.reference @@ -0,0 +1,6 @@ +45 +45 +45 +1 +45 +\N diff --git a/tests/queries/0_stateless/01281_sum_nullable.sql b/tests/queries/0_stateless/01281_sum_nullable.sql new file mode 100644 index 00000000000..35d593da75d --- /dev/null +++ b/tests/queries/0_stateless/01281_sum_nullable.sql @@ -0,0 +1,6 @@ +SELECT sumKahan(toFloat64(number)) FROM numbers(10); +SELECT sumKahan(toNullable(toFloat64(number))) FROM numbers(10); +SELECT sum(toNullable(number)) FROM numbers(10); +SELECT sum(x) FROM (SELECT 1 AS x UNION ALL SELECT NULL); +SELECT sum(number) FROM numbers(10); +SELECT sum(number < 1000 ? NULL : number) FROM numbers(10); diff --git a/tests/queries/0_stateless/01282_system_parts_ttl_info.reference b/tests/queries/0_stateless/01282_system_parts_ttl_info.reference new file mode 100644 index 00000000000..1d9fe9eeb36 --- /dev/null +++ b/tests/queries/0_stateless/01282_system_parts_ttl_info.reference @@ -0,0 +1,2 @@ +2 2000-01-11 01:02:03 2000-02-13 04:05:06 [] [] [] +0 0000-00-00 00:00:00 0000-00-00 00:00:00 [] [] [] diff --git a/tests/queries/0_stateless/01282_system_parts_ttl_info.sql b/tests/queries/0_stateless/01282_system_parts_ttl_info.sql new file mode 100644 index 00000000000..3a1b1cc79ce --- /dev/null +++ b/tests/queries/0_stateless/01282_system_parts_ttl_info.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS ttl; +CREATE TABLE ttl (d DateTime) ENGINE = MergeTree ORDER BY tuple() TTL d + INTERVAL 10 DAY; +SYSTEM STOP MERGES; +INSERT INTO ttl VALUES ('2000-01-01 01:02:03'), ('2000-02-03 04:05:06'); +SELECT rows, delete_ttl_info_min, delete_ttl_info_max, move_ttl_info.expression, move_ttl_info.min, move_ttl_info.max FROM system.parts WHERE database = currentDatabase() AND table = 'ttl'; +SYSTEM START MERGES; +OPTIMIZE TABLE ttl FINAL; +SELECT rows, delete_ttl_info_min, delete_ttl_info_max, move_ttl_info.expression, move_ttl_info.min, move_ttl_info.max FROM system.parts WHERE database = currentDatabase() AND table = 'ttl' AND active; +DROP TABLE ttl; diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index c8531bd63a0..94042ea4090 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -1,3 +1,7 @@ +if (USE_CLANG_TIDY) + set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") +endif () + if(MAKE_STATIC_LIBRARIES) set(MAX_LINKER_MEMORY 3500) else() diff --git a/utils/convert-month-partitioned-parts/main.cpp b/utils/convert-month-partitioned-parts/main.cpp index 51ea87d35b9..af8e221a10b 100644 --- a/utils/convert-month-partitioned-parts/main.cpp +++ b/utils/convert-month-partitioned-parts/main.cpp @@ -30,7 +30,7 @@ void run(String part_path, String date_column, String dest_path) { std::shared_ptr disk = std::make_shared("local", "/", 0); auto old_part_path = Poco::Path::forDirectory(part_path); - String old_part_name = old_part_path.directory(old_part_path.depth() - 1); + const String & old_part_name = old_part_path.directory(old_part_path.depth() - 1); String old_part_path_str = old_part_path.toString(); auto part_info = MergeTreePartInfo::fromPartName(old_part_name, MergeTreeDataFormatVersion(0)); diff --git a/utils/iotest/iotest.cpp b/utils/iotest/iotest.cpp index ed846e3d46f..e578a539bcd 
100644
--- a/utils/iotest/iotest.cpp
+++ b/utils/iotest/iotest.cpp
@@ -59,9 +59,9 @@ void thread(int fd, int mode, size_t min_offset, size_t max_offset, size_t block
 for (size_t i = 0; i < count; ++i)
 {
- long rand_result1 = rng();
- long rand_result2 = rng();
- long rand_result3 = rng();
+ uint64_t rand_result1 = rng();
+ uint64_t rand_result2 = rng();
+ uint64_t rand_result3 = rng();
 size_t rand_result = rand_result1 ^ (rand_result2 << 22) ^ (rand_result3 << 43);
 size_t offset;
@@ -152,7 +152,7 @@ int mainImpl(int argc, char ** argv)
 Stopwatch watch;
 for (size_t i = 0; i < threads; ++i)
- pool.scheduleOrThrowOnError(std::bind(thread, fd, mode, min_offset, max_offset, block_size, count));
+ pool.scheduleOrThrowOnError([=]{ thread(fd, mode, min_offset, max_offset, block_size, count); });
 pool.wait();
 fsync(fd);
diff --git a/utils/iotest/iotest_aio.cpp b/utils/iotest/iotest_aio.cpp
index c0945fbe1e1..24508c1dd9f 100644
--- a/utils/iotest/iotest_aio.cpp
+++ b/utils/iotest/iotest_aio.cpp
@@ -13,6 +13,8 @@ int main(int, char **) { return 0; }
 #include
 #include
 #include
+#include <pcg_random.hpp>
+#include <Common/randomSeed.h>
 #include
 #include
 #include
@@ -52,10 +54,7 @@ void thread(int fd, int mode, size_t min_offset, size_t max_offset, size_t block
 for (size_t i = 0; i < buffers_count; ++i)
 buffers[i] = Memory<>(block_size, sysconf(_SC_PAGESIZE));
- drand48_data rand_data;
- timespec times;
- clock_gettime(CLOCK_THREAD_CPUTIME_ID, &times);
- srand48_r(times.tv_nsec, &rand_data);
+ pcg64_fast rng(randomSeed());
 size_t in_progress = 0;
 size_t blocks_sent = 0;
@@ -82,12 +81,9 @@ void thread(int fd, int mode, size_t min_offset, size_t max_offset, size_t block
 char * buf = buffers[i].data();
- long rand_result1 = 0;
- long rand_result2 = 0;
- long rand_result3 = 0;
- lrand48_r(&rand_data, &rand_result1);
- lrand48_r(&rand_data, &rand_result2);
- lrand48_r(&rand_data, &rand_result3);
+ uint64_t rand_result1 = rng();
+ uint64_t rand_result2 = rng();
+ uint64_t rand_result3 = rng();
 size_t rand_result = rand_result1 ^ (rand_result2 << 22) ^ (rand_result3 << 43);
 size_t offset = min_offset + rand_result % ((max_offset - min_offset) / block_size) * block_size;
@@ -172,7 +168,7 @@ int mainImpl(int argc, char ** argv)
 Stopwatch watch;
 for (size_t i = 0; i < threads_count; ++i)
- pool.scheduleOrThrowOnError(std::bind(thread, fd, mode, min_offset, max_offset, block_size, buffers_count, count));
+ pool.scheduleOrThrowOnError([=]{ thread(fd, mode, min_offset, max_offset, block_size, buffers_count, count); });
 pool.wait();
 watch.stop();
diff --git a/utils/iotest/iotest_nonblock.cpp b/utils/iotest/iotest_nonblock.cpp
index 9317e7ed47f..524d6298da5 100644
--- a/utils/iotest/iotest_nonblock.cpp
+++ b/utils/iotest/iotest_nonblock.cpp
@@ -113,9 +113,9 @@ int mainImpl(int argc, char ** argv)
 polls[i].revents = 0;
 ++ops;
- long rand_result1 = rng();
- long rand_result2 = rng();
- long rand_result3 = rng();
+ uint64_t rand_result1 = rng();
+ uint64_t rand_result2 = rng();
+ uint64_t rand_result3 = rng();
 size_t rand_result = rand_result1 ^ (rand_result2 << 22) ^ (rand_result3 << 43);
 size_t offset;
diff --git a/utils/test-data-generator/CMakeLists.txt b/utils/test-data-generator/CMakeLists.txt
index d8a2111cf07..3a94358e86d 100644
--- a/utils/test-data-generator/CMakeLists.txt
+++ b/utils/test-data-generator/CMakeLists.txt
@@ -1,3 +1,6 @@
+# Disable clang-tidy for protobuf generated files
+set (CMAKE_CXX_CLANG_TIDY "")
+
 add_compile_options(-Wno-zero-as-null-pointer-constant -Wno-array-bounds) # Protobuf generated files
 if (USE_PROTOBUF)
diff --git
a/utils/zookeeper-adjust-block-numbers-to-parts/main.cpp b/utils/zookeeper-adjust-block-numbers-to-parts/main.cpp
index 91431c01648..8550675cb9e 100644
--- a/utils/zookeeper-adjust-block-numbers-to-parts/main.cpp
+++ b/utils/zookeeper-adjust-block-numbers-to-parts/main.cpp
@@ -102,7 +102,7 @@ std::unordered_map getPartitionsNeedAdjustingBlockNumbers(
 std::cout << "Shard: " << shard << std::endl;
 std::vector<std::string> use_tables = tables.empty() ? getAllTables(zk, root, shard) : removeNotExistingTables(zk, root, shard, tables);
- for (auto table : use_tables)
+ for (const auto & table : use_tables)
 {
 std::cout << "\tTable: " << table << std::endl;
 std::string table_path = root + "/" + shard + "/" + table;
@@ -121,7 +121,7 @@ std::unordered_map getPartitionsNeedAdjustingBlockNumbers(
 continue;
 }
- for (auto partition : partitions)
+ for (const auto & partition : partitions)
 {
 try
 {
@@ -199,7 +199,7 @@ void setCurrentBlockNumber(zkutil::ZooKeeper & zk, const std::string & path, Int
 create_ephemeral_nodes(1); /// Firstly try to create just a single node.
 /// Create other nodes in batches of 50 nodes.
- while (current_block_number + 50 <= new_current_block_number)
+ while (current_block_number + 50 <= new_current_block_number) // NOLINT: clang-tidy thinks that the loop is infinite
 create_ephemeral_nodes(50);
 create_ephemeral_nodes(new_current_block_number - current_block_number);
diff --git a/utils/zookeeper-cli/zookeeper-cli.cpp b/utils/zookeeper-cli/zookeeper-cli.cpp
index 40755fc0160..0a503e77250 100644
--- a/utils/zookeeper-cli/zookeeper-cli.cpp
+++ b/utils/zookeeper-cli/zookeeper-cli.cpp
@@ -97,10 +97,8 @@ int main(int argc, char ** argv)
 bool watch = w == "w";
 zkutil::EventPtr event = watch ? std::make_shared<Poco::Event>() : nullptr;
 std::vector<std::string> v = zk.getChildren(path, nullptr, event);
- for (size_t i = 0; i < v.size(); ++i)
- {
- std::cout << v[i] << std::endl;
- }
+ for (const auto & child : v)
+ std::cout << child << std::endl;
 if (watch)
 waitForWatch(event);
 }
@@ -193,7 +191,7 @@ int main(int argc, char ** argv)
 zk.set(path, data, version, &stat);
 printStat(stat);
 }
- else if (cmd != "")
+ else if (!cmd.empty())
 {
 std::cout << "commands:\n";
 std::cout << " q\n";
diff --git a/website/robots.txt b/website/robots.txt
index f9970836f18..fa3a68b6d69 100644
--- a/website/robots.txt
+++ b/website/robots.txt
@@ -12,4 +12,4 @@ Disallow: /docs/v3*
 Disallow: /cdn-cgi/
 Allow: /
 Host: https://clickhouse.tech
-Sitemap: https://clickhouse.tech/sitemap.xml
+Sitemap: https://clickhouse.tech/sitemap-index.xml
diff --git a/website/sitemap.xml b/website/sitemap-index.xml
similarity index 92%
rename from website/sitemap.xml
rename to website/sitemap-index.xml
index a147404ec6f..e53d6c29c54 100644
--- a/website/sitemap.xml
+++ b/website/sitemap-index.xml
@@ -22,6 +22,6 @@
 <loc>https://clickhouse.tech/docs/fa/sitemap.xml</loc>
 </sitemap>
 <sitemap>
- <loc>https://clickhouse.tech/sitemap_static.xml</loc>
+ <loc>https://clickhouse.tech/sitemap-static.xml</loc>
 </sitemap>
</sitemapindex>
diff --git a/website/sitemap_static.xml b/website/sitemap-static.xml
similarity index 100%
rename from website/sitemap_static.xml
rename to website/sitemap-static.xml