From 48f6f7e490754880ad179c3568d2c118454d0db9 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 10 Feb 2021 19:26:49 +0300 Subject: [PATCH 01/86] Split filter for predicate push down. --- src/Interpreters/ActionsDAG.cpp | 194 +++++++++++++++++++++++++++++++- src/Interpreters/ActionsDAG.h | 9 +- 2 files changed, 201 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 176745c707d..223b4341f46 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -338,7 +339,7 @@ void ActionsDAG::removeUnusedActions(const std::vector & required_nodes) removeUnusedActions(); } -void ActionsDAG::removeUnusedActions() +void ActionsDAG::removeUnusedActions(bool allow_remove_inputs) { std::unordered_set visited_nodes; std::stack stack; @@ -357,6 +358,9 @@ void ActionsDAG::removeUnusedActions() visited_nodes.insert(&node); stack.push(&node); } + + if (node.type == ActionType::INPUT && !allow_remove_inputs) + visited_nodes.insert(&node); } while (!stack.empty()) @@ -1153,4 +1157,192 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsForFilter(const std::string & co return split(split_nodes); } +ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, const Names & available_inputs) +{ + std::unordered_map> inputs_map; + for (const auto & input : inputs) + inputs_map[input->result_name].emplace_back(input); + + std::unordered_set allowed_nodes; + for (const auto & name : available_inputs) + { + auto & inputs_list = inputs_map[name]; + if (inputs_list.empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot find input {} in ActionsDAG. DAG:\n{}", name, dumpDAG()); + + allowed_nodes.emplace(inputs_list.front()); + inputs_list.pop_front(); + } + + auto it = index.begin(); + for (; it != index.end(); ++it) + if ((*it)->result_name == filter_name) + break; + + if (it == index.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Index for ActionsDAG does not contain filter column name {}. DAG:\n{}", + filter_name, dumpDAG()); + + std::unordered_set selected_predicates; + + { + struct Frame + { + const Node * node; + bool is_predicate = false; + size_t next_child_to_visit = 0; + size_t num_allowed_children = 0; + }; + + std::stack stack; + std::unordered_set visited_nodes; + + stack.push(Frame{.node = *it, .is_predicate = true}); + visited_nodes.insert(*it); + while (!stack.empty()) + { + auto & cur = stack.top(); + bool is_conjunction = cur.is_predicate + && cur.node->type == ActionType::FUNCTION + && cur.node->function_base->getName() == "and"; + + /// At first, visit all children. 
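                /// Children are examined one at a time: an unvisited child is
                /// pushed onto the stack and resolved first, while children
                /// already known to be computable from the allowed inputs are
                /// counted, so that once every child has been seen the parent
                /// can decide below whether it is computable as well.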
+ while (cur.next_child_to_visit < cur.node->children.size()) + { + auto * child = cur.node->children[cur.next_child_to_visit]; + + if (visited_nodes.count(child) == 0) + { + visited_nodes.insert(child); + stack.push({.node = child, .is_predicate = is_conjunction}); + break; + } + + if (allowed_nodes.contains(child)) + ++cur.num_allowed_children; + ++cur.next_child_to_visit; + } + + if (cur.next_child_to_visit == cur.node->children.size()) + { + if (cur.num_allowed_children == cur.node->children.size()) + { + if (cur.node->type != ActionType::ARRAY_JOIN && cur.node->type != ActionType::INPUT) + allowed_nodes.emplace(cur.node); + } + else if (is_conjunction) + { + for (auto * child : cur.node->children) + if (allowed_nodes.count(child)) + selected_predicates.insert(child); + } + + stack.pop(); + } + } + } + + if (selected_predicates.empty()) + { + if (allowed_nodes.count(*it)) + selected_predicates.insert(*it); + else + return nullptr; + } + + auto actions = cloneEmpty(); + actions->settings.project_input = false; + + std::unordered_map nodes_mapping; + + { + struct Frame + { + const Node * node; + size_t next_child_to_visit = 0; + }; + + std::stack stack; + + for (const auto * predicate : selected_predicates) + { + if (nodes_mapping.count(predicate)) + continue; + + stack.push({.node = predicate}); + while (!stack.empty()) + { + auto & cur = stack.top(); + /// At first, visit all children. + while (cur.next_child_to_visit < cur.node->children.size()) + { + auto * child = cur.node->children[cur.next_child_to_visit]; + + if (nodes_mapping.count(child) == 0) + { + stack.push({.node = child}); + break; + } + + ++cur.next_child_to_visit; + } + + if (cur.next_child_to_visit == cur.node->children.size()) + { + auto & node = actions->nodes.emplace_back(*cur.node); + nodes_mapping[cur.node] = &node; + + for (auto & child : node.children) + child = nodes_mapping[child]; + + if (node.type == ActionType::INPUT) + { + actions->inputs.emplace_back(&node); + actions->index.insert(&node); + } + } + } + } + + Node * result_predicate = nodes_mapping[*selected_predicates.begin()]; + + if (selected_predicates.size() > 1) + { + FunctionOverloadResolverPtr func_builder_and = + std::make_shared( + std::make_unique( + std::make_shared())); + + std::vector args; + args.reserve(selected_predicates.size()); + for (const auto * predicate : selected_predicates) + args.emplace_back(nodes_mapping[predicate]); + + result_predicate = &actions->addFunction(func_builder_and, args, {}, true); + } + + actions->index.insert(result_predicate); + } + + + + /// Replace all predicates which are copied to constants. + /// Note: This also keeps valid const propagation. AND is constant only if all elements are. + /// But if all elements are constant, AND should is moved to split actions and replaced itself. + for (const auto & predicate : selected_predicates) + { + Node node; + node.type = ActionType::COLUMN; + node.result_name = std::move(predicate->result_name); + node.result_type = std::move(predicate->result_type); + node.column = node.result_type->createColumnConst(0, 1); + *predicate = std::move(node); + } + + removeUnusedActions(false); + + return actions; +} + } diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index e13a9bd62b3..6fd4e14568a 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -274,6 +274,13 @@ public: /// Index of initial actions must contain column_name. 
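    /// A sketch with illustrative names: for a DAG that computes `equals(x, 0)`
    /// and `sipHash64(s)`, splitting by `equals(x, 0)` yields a first DAG that
    /// evaluates the predicate from `x` and a second DAG that still computes
    /// `sipHash64(s)`, so the cheap filter can run before the expensive hash.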
SplitResult splitActionsForFilter(const std::string & column_name) const; + /// Create actions which may calculate part of filter using only available_inputs. + /// If nothing may be calculated, returns nullptr. + /// Otherwise, return actions which inputs are from available_inputs. + /// Returned actions add single column which may be used for filter. + /// Also, replace some nodes of current inputs to constant 1 in case they are filtered. + ActionsDAGPtr splitActionsForFilter(const std::string & filter_name, const Names & available_inputs); + private: Node & addNode(Node node, bool can_replace = false); Node & getNode(const std::string & name); @@ -297,7 +304,7 @@ private: } void removeUnusedActions(const std::vector & required_nodes); - void removeUnusedActions(); + void removeUnusedActions(bool allow_remove_inputs = true); void addAliases(const NamesWithAliases & aliases, std::vector & result_nodes); void compileFunctions(); From a83885392e8233a9b9faa462eea371c71df2c745 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 10 Feb 2021 20:47:48 +0300 Subject: [PATCH 02/86] Split filter for predicate push down. --- src/Interpreters/ActionsDAG.cpp | 117 ++++++++++++++++++++++++++------ src/Interpreters/ActionsDAG.h | 2 +- 2 files changed, 98 insertions(+), 21 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 223b4341f46..eb1ff9ad998 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1157,7 +1157,7 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsForFilter(const std::string & co return split(split_nodes); } -ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, const Names & available_inputs) +ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, bool can_remove_filter, const Names & available_inputs) { std::unordered_map> inputs_map; for (const auto & input : inputs) @@ -1185,6 +1185,7 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, filter_name, dumpDAG()); std::unordered_set selected_predicates; + std::unordered_set other_predicates; { struct Frame @@ -1234,8 +1235,12 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, else if (is_conjunction) { for (auto * child : cur.node->children) + { if (allowed_nodes.count(child)) selected_predicates.insert(child); + else + other_predicates.insert(child); + } } stack.pop(); @@ -1254,6 +1259,11 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, auto actions = cloneEmpty(); actions->settings.project_input = false; + FunctionOverloadResolverPtr func_builder_and = + std::make_shared( + std::make_unique( + std::make_shared())); + std::unordered_map nodes_mapping; { @@ -1309,11 +1319,6 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, if (selected_predicates.size() > 1) { - FunctionOverloadResolverPtr func_builder_and = - std::make_shared( - std::make_unique( - std::make_shared())); - std::vector args; args.reserve(selected_predicates.size()); for (const auto * predicate : selected_predicates) @@ -1325,22 +1330,94 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, actions->index.insert(result_predicate); } - - - /// Replace all predicates which are copied to constants. - /// Note: This also keeps valid const propagation. AND is constant only if all elements are. 
- /// But if all elements are constant, AND should is moved to split actions and replaced itself. - for (const auto & predicate : selected_predicates) + if (selected_predicates.count(*it)) { - Node node; - node.type = ActionType::COLUMN; - node.result_name = std::move(predicate->result_name); - node.result_type = std::move(predicate->result_type); - node.column = node.result_type->createColumnConst(0, 1); - *predicate = std::move(node); - } + /// The whole predicate was split. + if (can_remove_filter) + { + for (auto i = index.begin(); i != index.end(); ++i) + { + if (*i == *it) + { + index.remove(i); + break; + } + } + } + else + { + Node node; + node.type = ActionType::COLUMN; + node.result_name = std::move((*it)->result_name); + node.result_type = std::move((*it)->result_type); + node.column = node.result_type->createColumnConst(0, 1); + *(*it) = std::move(node); + } - removeUnusedActions(false); + removeUnusedActions(false); + } + else if ((*it)->type == ActionType::FUNCTION && (*it)->function_base->getName() == "and") + { + std::vector new_children(other_predicates.begin(), other_predicates.end()); + + if (new_children.size() == 1) + { + if (new_children.front()->result_type->equals(*((*it)->result_type))) + { + Node node; + node.type = ActionType::ALIAS; + node.result_name = (*it)->result_name; + node.result_type = (*it)->result_type; + node.children.swap(new_children); + *(*it) = std::move(node); + } + else + { + (*it)->children.swap(new_children); + ColumnsWithTypeAndName arguments; + arguments.reserve((*it)->children.size()); + + for (const auto * child : (*it)->children) + { + ColumnWithTypeAndName argument; + argument.column = child->column; + argument.type = child->result_type; + argument.name = child->result_name; + + arguments.emplace_back(std::move(argument)); + } + + FunctionOverloadResolverPtr func_builder_cast = + std::make_shared( + CastOverloadResolver::createImpl(false)); + + (*it)->function_builder = func_builder_cast; + (*it)->function_base = (*it)->function_builder->build(arguments); + (*it)->function = (*it)->function_base->prepare(arguments); + } + } + else + { + (*it)->children.swap(new_children); + ColumnsWithTypeAndName arguments; + arguments.reserve((*it)->children.size()); + + for (const auto * child : (*it)->children) + { + ColumnWithTypeAndName argument; + argument.column = child->column; + argument.type = child->result_type; + argument.name = child->result_name; + + arguments.emplace_back(std::move(argument)); + } + + (*it)->function_base = (*it)->function_builder->build(arguments); + (*it)->function = (*it)->function_base->prepare(arguments); + } + + removeUnusedActions(false); + } return actions; } diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 6fd4e14568a..112c507e79f 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -279,7 +279,7 @@ public: /// Otherwise, return actions which inputs are from available_inputs. /// Returned actions add single column which may be used for filter. /// Also, replace some nodes of current inputs to constant 1 in case they are filtered. 
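    /// A sketch of the intended behaviour, with illustrative names: given the
    /// filter `and(notEquals(y, 0), notEquals(s, 4))` and available_inputs = {y},
    /// the returned DAG computes `notEquals(y, 0)` from `y` alone, while in this
    /// DAG that conjunct is replaced by the constant 1, so the remaining filter
    /// effectively reduces to `notEquals(s, 4)`.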
- ActionsDAGPtr splitActionsForFilter(const std::string & filter_name, const Names & available_inputs); + ActionsDAGPtr splitActionsForFilter(const std::string & filter_name, bool can_remove_filter, const Names & available_inputs); private: Node & addNode(Node node, bool can_replace = false); From 3a020d2dd5c4ffda10fb4dd79509f5e04f45e692 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 11 Feb 2021 11:49:12 +0300 Subject: [PATCH 03/86] filter push down for Aggregating --- src/Processors/QueryPlan/AggregatingStep.h | 2 + .../QueryPlan/Optimizations/Optimizations.h | 7 +- .../Optimizations/filterPushDown.cpp | 77 +++++++++++++++++++ 3 files changed, 85 insertions(+), 1 deletion(-) create mode 100644 src/Processors/QueryPlan/Optimizations/filterPushDown.cpp diff --git a/src/Processors/QueryPlan/AggregatingStep.h b/src/Processors/QueryPlan/AggregatingStep.h index 853173895b3..6be92394fab 100644 --- a/src/Processors/QueryPlan/AggregatingStep.h +++ b/src/Processors/QueryPlan/AggregatingStep.h @@ -32,6 +32,8 @@ public: void describeActions(FormatSettings &) const override; void describePipeline(FormatSettings & settings) const override; + const Aggregator::Params & getParams() const { return params; } + private: Aggregator::Params params; bool final; diff --git a/src/Processors/QueryPlan/Optimizations/Optimizations.h b/src/Processors/QueryPlan/Optimizations/Optimizations.h index 454eab9649a..be7f81e5db0 100644 --- a/src/Processors/QueryPlan/Optimizations/Optimizations.h +++ b/src/Processors/QueryPlan/Optimizations/Optimizations.h @@ -38,14 +38,19 @@ size_t trySplitFilter(QueryPlan::Node * node, QueryPlan::Nodes & nodes); /// Replace chain `FilterStep -> ExpressionStep` to single FilterStep size_t tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Nodes &); +/// Move FilterStep down if possible. +/// May split FilterStep and push down only part of it. 
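/// A sketch of the resulting plan rewrite (step names as EXPLAIN prints them):
///     Filter (y != 0 AND s != 4) -> Aggregating (keys: y) -> ...
/// becomes
///     Filter (s != 4) -> Aggregating (keys: y) -> Filter (y != 0) -> ...
/// because `y != 0` depends only on the aggregation keys.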
+size_t tryPushDownLimit(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes);
+
 inline const auto & getOptimizations()
 {
-    static const std::array<Optimization, 4> optimizations =
+    static const std::array<Optimization, 5> optimizations =
     {{
         {tryLiftUpArrayJoin, "liftUpArrayJoin"},
         {tryPushDownLimit, "pushDownLimit"},
         {trySplitFilter, "splitFilter"},
         {tryMergeExpressions, "mergeExpressions"},
+        {tryPushDownLimit, "pushDownFilter"},
     }};

     return optimizations;
diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
new file mode 100644
index 00000000000..82704bcbce9
--- /dev/null
+++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
@@ -0,0 +1,77 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace DB::ErrorCodes
+{
+    extern const int LOGICAL_ERROR;
+}
+
+namespace DB::QueryPlanOptimizations
+{
+
+size_t tryPushDownLimit(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes)
+{
+    if (parent_node->children.size() != 1)
+        return 0;
+
+    QueryPlan::Node * child_node = parent_node->children.front();
+
+    auto & parent = parent_node->step;
+    auto & child = child_node->step;
+    auto * filter = typeid_cast<FilterStep *>(parent.get());
+
+    if (!filter)
+        return 0;
+
+    const auto & expression = filter->getExpression();
+    const auto & filter_column_name = filter->getFilterColumnName();
+    bool removes_filter = filter->removesFilterColumn();
+
+    if (auto * aggregating = typeid_cast<AggregatingStep *>(child.get()))
+    {
+        const auto & params = aggregating->getParams();
+
+        Names keys;
+        keys.reserve(params.keys.size());
+        for (auto pos : params.keys)
+            keys.push_back(params.src_header.getByPosition(pos).name);
+
+        if (auto split_filter = expression->splitActionsForFilter(filter_column_name, removes_filter, keys))
+        {
+            auto it = expression->getIndex().find(filter_column_name);
+            if (it == expression->getIndex().end())
+            {
+                if (!removes_filter)
+                    throw Exception(ErrorCodes::LOGICAL_ERROR,
+                        "Filter column {} was removed from ActionsDAG but it is needed in result. DAG:\n{}",
+                        filter_column_name, expression->dumpDAG());
+
+                parent = std::make_unique<ExpressionStep>(child->getOutputStream(), expression);
+            }
+
+            /// Add new Filter step before Aggregating.
+            /// Expression/Filter -> Aggregating -> Something
+            auto & node = nodes.emplace_back();
+            node.children.swap(child_node->children);
+            child_node->children.emplace_back(&node);
+            /// Expression/Filter -> Aggregating -> Filter -> Something

+            /// New filter column is added to the end.
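            /// (splitActionsForFilter inserts the combined predicate into its
            /// index last, so rbegin() names the column the new FilterStep
            /// should filter by.)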
            auto split_filter_column_name = (*split_filter->getIndex().rbegin())->result_name;
+            node.step = std::make_unique<FilterStep>(
+                    node.children.at(0)->step->getOutputStream(),
+                    std::move(split_filter), std::move(split_filter_column_name), true);
+
+            return 3;
+        }
+    }
+
+    return 0;
+}
+
+}

From e24b8e8a13ecea65e9d35e53cbe1a7fa44917680 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov
Date: Thu, 11 Feb 2021 15:06:28 +0300
Subject: [PATCH 04/86] Fix ActionsDAG::splitActionsForFilter

---
 src/Interpreters/ActionsDAG.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp
index eb1ff9ad998..cd3a2853687 100644
--- a/src/Interpreters/ActionsDAG.cpp
+++ b/src/Interpreters/ActionsDAG.cpp
@@ -1311,6 +1311,8 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name,
                         actions->inputs.emplace_back(&node);
                         actions->index.insert(&node);
                     }
+
+                    stack.pop();
                 }
             }
         }

From 2deff0d9d09bab61a149b62acfc49a34e6d4011f Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov
Date: Thu, 11 Feb 2021 18:44:10 +0300
Subject: [PATCH 05/86] Fix ActionsDAG::splitActionsForFilter

---
 src/Interpreters/ActionsDAG.cpp | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp
index cd3a2853687..78254e5139a 100644
--- a/src/Interpreters/ActionsDAG.cpp
+++ b/src/Interpreters/ActionsDAG.cpp
@@ -1375,7 +1375,17 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name,
         }
         else
         {
-            (*it)->children.swap(new_children);
+            Node node;
+            node.type = ActionType::COLUMN;
+            node.result_name = (*it)->result_type->getName();
+            node.column = DataTypeString().createColumnConst(0, node.result_name);
+            node.result_type = std::make_shared<DataTypeString>();
+
+            auto * right_arg = &nodes.emplace_back(std::move(node));
+            auto * left_arg = new_children.front();
+
+
+            (*it)->children = {left_arg, right_arg};
             ColumnsWithTypeAndName arguments;
             arguments.reserve((*it)->children.size());

From a26c8d9eee365d72d151e55416137377e2ea56bb Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov
Date: Thu, 11 Feb 2021 19:08:54 +0300
Subject: [PATCH 06/86] Fix const filter result for filter push down.
---
 .../QueryPlan/Optimizations/filterPushDown.cpp | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
index 82704bcbce9..2a42b08af73 100644
--- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
+++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include

 namespace DB::ErrorCodes
 {
     extern const int LOGICAL_ERROR;
@@ -41,8 +42,15 @@ size_t tryPushDownLimit(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes)
         for (auto pos : params.keys)
             keys.push_back(params.src_header.getByPosition(pos).name);

+        std::cerr << "Filter: \n" << expression->dumpDAG() << std::endl;
         if (auto split_filter = expression->splitActionsForFilter(filter_column_name, removes_filter, keys))
         {
+            std::cerr << "===============\n" << expression->dumpDAG() << std::endl;
+            std::cerr << "---------------\n" << split_filter->dumpDAG() << std::endl;
+
+            //if (split_filter)
+            //    throw Exception("!!!!", 0);
+
             auto it = expression->getIndex().find(filter_column_name);
             if (it == expression->getIndex().end())
             {
@@ -53,6 +61,10 @@ size_t tryPushDownLimit(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes)
                 parent = std::make_unique<ExpressionStep>(child->getOutputStream(), expression);
             }
+            else if ((*it)->column && isColumnConst(*(*it)->column))
+            {
+                parent = std::make_unique<ExpressionStep>(child->getOutputStream(), expression);
+            }

             /// Add new Filter step before Aggregating.
             /// Expression/Filter -> Aggregating -> Something

From fc6587319c97c6cccb8b5dc1b108a7b56afaed9e Mon Sep 17 00:00:00 2001
From: George
Date: Thu, 11 Feb 2021 19:29:46 +0300
Subject: [PATCH 07/86] Edit and translated encryption-functions

---
 .../functions/encryption-functions.md | 14 +-
 .../functions/encryption-functions.md | 305 ++++++++----------
 2 files changed, 150 insertions(+), 169 deletions(-)

diff --git a/docs/en/sql-reference/functions/encryption-functions.md b/docs/en/sql-reference/functions/encryption-functions.md
index 9e360abfe26..f3e851db29b 100644
--- a/docs/en/sql-reference/functions/encryption-functions.md
+++ b/docs/en/sql-reference/functions/encryption-functions.md
@@ -55,7 +55,7 @@ CREATE TABLE encryption_test
 `comment` String,
 `secret` String
 )
-ENGINE = Memory
+ENGINE = Memory;
 ```

 Insert some data (please avoid storing the keys/ivs in the database as this undermines the whole concept of encryption), also storing 'hints' is unsafe too and used only for illustrative purposes:

@@ -110,7 +110,7 @@ Result:

 Compatible with mysql encryption and resulting ciphertext can be decrypted with [AES_DECRYPT](https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-decrypt) function.

 Will produce the same ciphertext as `encrypt` on equal inputs. But when `key` or `iv` are longer than they should normally be, `aes_encrypt_mysql` will stick to what MySQL's `aes_encrypt` does: 'fold' `key` and ignore excess bits of `iv`.

 Supported encryption modes:

@@ -138,7 +138,6 @@ aes_encrypt_mysql('mode', 'plaintext', 'key' [, iv])

 - Ciphertext binary string. [String](../../sql-reference/data-types/string.md#string).
- **Examples** Given equal input `encrypt` and `aes_encrypt_mysql` produce the same ciphertext: @@ -157,7 +156,6 @@ Result: └───────────────────┘ ``` - But `encrypt` fails when `key` or `iv` is longer than expected: Query: @@ -252,7 +250,7 @@ decrypt('mode', 'ciphertext', 'key' [, iv, aad]) **Examples** -Re-using table from [encrypt](./encryption-functions.md#encrypt). +Re-using table from [encrypt](#encrypt). Query: @@ -284,6 +282,7 @@ SELECT comment, decrypt('aes-256-cfb128', secret, '12345678910121314151617181920 ``` Result: + ``` text ┌─comment─────────────────────────────┬─plaintext─┐ │ aes-256-cfb128 no IV │ Secret │ @@ -294,7 +293,7 @@ Result: └─────────────────────────────────────┴───────────┘ ``` -Notice how only portion of the data was properly decrypted, and the rest is gibberish since either `mode`, `key`, or `iv` were different upon encryption. +Notice how only a portion of the data was properly decrypted, and the rest is gibberish since either `mode`, `key`, or `iv` were different upon encryption. ## aes_decrypt_mysql {#aes_decrypt_mysql} @@ -331,6 +330,7 @@ aes_decrypt_mysql('mode', 'ciphertext', 'key' [, iv]) **Examples** Let's decrypt data we've previously encrypted with MySQL: + ``` sql mysql> SET block_encryption_mode='aes-256-cfb128'; Query OK, 0 rows affected (0.00 sec) @@ -345,11 +345,13 @@ mysql> SELECT aes_encrypt('Secret', '123456789101213141516171819202122', 'iviviv ``` Query: + ``` sql SELECT aes_decrypt_mysql('aes-256-cfb128', unhex('24E9E4966469'), '123456789101213141516171819202122', 'iviviviviviviviv123456') AS plaintext ``` Result: + ``` text ┌─plaintext─┐ │ Secret │ diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index f1f6516d453..14ce97f5513 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -11,7 +11,7 @@ toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438 \u0434\u043b\u044f \u0448 Длина инициализирующего вектора всегда 16 байт (лишнии байты игнорируются). -Обратите внимание, что эти функции работают медленно. +Обратите внимание, что до версии Clickhouse 21.1 эти функции работают медленно. ## encrypt {#encrypt} @@ -41,7 +41,7 @@ encrypt('mode', 'plaintext', 'key' [, iv, aad]) **Возвращаемое значение** -- Зашифрованная строка. [String](../../sql-reference/data-types/string.md#string). +- Двоичная зашифрованная строка. [String](../../sql-reference/data-types/string.md#string). 
**Примеры** @@ -52,57 +52,38 @@ encrypt('mode', 'plaintext', 'key' [, iv, aad]) ``` sql CREATE TABLE encryption_test ( - input String, - key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'), - iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'), - key32 String DEFAULT substring(key, 1, 32), - key24 String DEFAULT substring(key, 1, 24), - key16 String DEFAULT substring(key, 1, 16) -) Engine = Memory; + `comment` String, + `secret` String +) +ENGINE = Memory; ``` -Вставим эти данные: +Вставим некоторые данные (замечание: не храните ключи или инициализирующие векторы в базе данных, так как это компрометирует всю концепцию шифрования), также хранение "подсказок" небезопасно и используется только для наглядности: Запрос: ``` sql -INSERT INTO encryption_test (input) VALUES (''), ('text'), ('What Is ClickHouse?'); +INSERT INTO encryption_test VALUES('aes-256-cfb128 no IV', encrypt('aes-256-cfb128', 'Secret', '12345678910121314151617181920212')),\ +('aes-256-cfb128 no IV, different key', encrypt('aes-256-cfb128', 'Secret', 'keykeykeykeykeykeykeykeykeykeyke')),\ +('aes-256-cfb128 with IV', encrypt('aes-256-cfb128', 'Secret', '12345678910121314151617181920212', 'iviviviviviviviv')),\ +('aes-256-cbc no IV', encrypt('aes-256-cbc', 'Secret', '12345678910121314151617181920212')); ``` -Пример без `iv`: - Запрос: ``` sql -SELECT 'aes-128-ecb' AS mode, hex(encrypt(mode, input, key16)) FROM encryption_test; +SELECT comment, hex(secret) FROM encryption_test; ``` Результат: ``` text -┌─mode────────┬─hex(encrypt('aes-128-ecb', input, key16))────────────────────────┐ -│ aes-128-ecb │ 4603E6862B0D94BBEC68E0B0DF51D60F │ -│ aes-128-ecb │ 3004851B86D3F3950672DE7085D27C03 │ -│ aes-128-ecb │ E807F8C8D40A11F65076361AFC7D8B68D8658C5FAA6457985CAA380F16B3F7E4 │ -└─────────────┴──────────────────────────────────────────────────────────────────┘ -``` - -Пример с `iv`: - -Запрос: - -``` sql -SELECT 'aes-256-ctr' AS mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test; -``` - -Результат: - -``` text -┌─mode────────┬─hex(encrypt('aes-256-ctr', input, key32, iv))─┐ -│ aes-256-ctr │ │ -│ aes-256-ctr │ 7FB039F7 │ -│ aes-256-ctr │ 5CBD20F7ABD3AC41FCAA1A5C0E119E2B325949 │ -└─────────────┴───────────────────────────────────────────────┘ +┌─comment─────────────────────────────┬─hex(secret)──────────────────────┐ +│ aes-256-cfb128 no IV │ B4972BDC4459 │ +│ aes-256-cfb128 no IV, different key │ 2FF57C092DC9 │ +│ aes-256-cfb128 with IV │ 5E6CB398F653 │ +│ aes-256-cbc no IV │ 1BC0629A92450D9E73A00E7D02CF4142 │ +└─────────────────────────────────────┴──────────────────────────────────┘ ``` Пример в режиме `-gcm`: @@ -110,41 +91,27 @@ SELECT 'aes-256-ctr' AS mode, hex(encrypt(mode, input, key32, iv)) FROM encrypti Запрос: ``` sql -SELECT 'aes-256-gcm' AS mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test; +INSERT INTO encryption_test VALUES('aes-256-gcm', encrypt('aes-256-gcm', 'Secret', '12345678910121314151617181920212', 'iviviviviviviviv')), \ +('aes-256-gcm with AAD', encrypt('aes-256-gcm', 'Secret', '12345678910121314151617181920212', 'iviviviviviviviv', 'aad')); + +SELECT comment, hex(secret) FROM encryption_test WHERE comment LIKE '%gcm%'; ``` Результат: ``` text -┌─mode────────┬─hex(encrypt('aes-256-gcm', input, key32, iv))──────────────────────────┐ -│ aes-256-gcm │ E99DBEBC01F021758352D7FBD9039EFA │ -│ aes-256-gcm │ 8742CE3A7B0595B281C712600D274CA881F47414 │ -│ aes-256-gcm │ 
A44FD73ACEB1A64BDE2D03808A2576EDBB60764CC6982DB9AF2C33C893D91B00C60DC5 │
-└─────────────┴────────────────────────────────────────────────────────────────────────┘
-```
-
-Пример в режиме `-gcm` и с `aad`:
-
-Запрос:
-
-``` sql
-SELECT 'aes-192-gcm' AS mode, hex(encrypt(mode, input, key24, iv, 'AAD')) FROM encryption_test;
-```
-
-Результат:
-
-``` text
-┌─mode────────┬─hex(encrypt('aes-192-gcm', input, key24, iv, 'AAD'))───────────────────┐
-│ aes-192-gcm │ 04C13E4B1D62481ED22B3644595CB5DB │
-│ aes-192-gcm │ 9A6CF0FD2B329B04EAD18301818F016DF8F77447 │
-│ aes-192-gcm │ B961E9FD9B940EBAD7ADDA75C9F198A40797A5EA1722D542890CC976E21113BBB8A7AA │
-└─────────────┴────────────────────────────────────────────────────────────────────────┘
+┌─comment──────────────┬─hex(secret)──────────────────────────────────┐
+│ aes-256-gcm │ A8A3CCBC6426CFEEB60E4EAE03D3E94204C1B09E0254 │
+│ aes-256-gcm with AAD │ A8A3CCBC6426D9A1017A0A932322F1852260A4AD6837 │
+└──────────────────────┴──────────────────────────────────────────────┘
```

## aes_encrypt_mysql {#aes_encrypt_mysql}

Совместима с шифрованием MySQL, результат может быть расшифрован функцией [AES_DECRYPT](https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-decrypt).

+При одинаковых вводных зашифрованный текст будет совпадать с результатом `encrypt`. Однако, когда `key` или `iv` длиннее, чем должны быть, `aes_encrypt_mysql` будет работать аналогично MySQL `aes_encrypt`: свернет ключ и проигнорирует лишнюю часть `iv`.
+
Функция поддерживает шифрование данных следующими режимами:

- aes-128-ecb, aes-192-ecb, aes-256-ecb
- aes-128-cbc, aes-192-cbc, aes-256-cbc
- aes-128-cfb1, aes-192-cfb1, aes-256-cfb1
- aes-128-cfb8, aes-192-cfb8, aes-256-cfb8
- aes-128-cfb128, aes-192-cfb128, aes-256-cfb128
- aes-128-ofb, aes-192-ofb, aes-256-ofb

**Синтаксис**

``` sql
aes_encrypt_mysql('mode', 'plaintext', 'key' [, iv])
```

**Аргументы**

- `mode` — режим шифрования. [String](../../sql-reference/data-types/string.md#string).
- `plaintext` — текст, который будет зашифрован. [String](../../sql-reference/data-types/string.md#string).
-- `key` — ключ шифрования. [String](../../sql-reference/data-types/string.md#string).
-- `iv` — инициализирующий вектор. Необязателен. [String](../../sql-reference/data-types/string.md#string).
+- `key` — ключ шифрования. Если ключ длиннее, чем требует режим шифрования, производится специфичная для MySQL свертка ключа. [String](../../sql-reference/data-types/string.md#string).
+- `iv` — инициализирующий вектор. Необязателен, учитываются только первые 16 байтов. [String](../../sql-reference/data-types/string.md#string).

**Возвращаемое значение**

- Двоичная зашифрованная строка. [String](../../sql-reference/data-types/string.md#string).

**Примеры**

При одинаковых вводных результаты шифрования `encrypt` и `aes_encrypt_mysql` будут совпадать.
Запрос: ``` sql -CREATE TABLE encryption_test -( - input String, - key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'), - iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'), - key32 String DEFAULT substring(key, 1, 32), - key24 String DEFAULT substring(key, 1, 24), - key16 String DEFAULT substring(key, 1, 16) -) Engine = Memory; -``` - -Вставим эти данные: - -Запрос: - -``` sql -INSERT INTO encryption_test (input) VALUES (''), ('text'), ('What Is ClickHouse?'); -``` - -Пример без `iv`: - -Запрос: - -``` sql -SELECT 'aes-128-cbc' AS mode, hex(aes_encrypt_mysql(mode, input, key32)) FROM encryption_test; +SELECT encrypt('aes-256-cfb128', 'Secret', '12345678910121314151617181920212', 'iviviviviviviviv') = aes_encrypt_mysql('aes-256-cfb128', 'Secret', '12345678910121314151617181920212', 'iviviviviviviviv') AS ciphertexts_equal; ``` Результат: ``` text -┌─mode────────┬─hex(aes_encrypt_mysql('aes-128-cbc', input, key32))──────────────┐ -│ aes-128-cbc │ FEA8CFDE6EE2C6E7A2CC6ADDC9F62C83 │ -│ aes-128-cbc │ 78B16CD4BE107660156124C5FEE6454A │ -│ aes-128-cbc │ 67C0B119D96F18E2823968D42871B3D179221B1E7EE642D628341C2B29BA2E18 │ -└─────────────┴──────────────────────────────────────────────────────────────────┘ +┌─ciphertexts_equal─┐ +│ 1 │ +└───────────────────┘ ``` -Пример с `iv`: +Но `encrypt` генерирует исключение, когда `key` или `iv` длиннее, чем нужно: Запрос: ``` sql -SELECT 'aes-256-cfb128' AS mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test; +SELECT encrypt('aes-256-cfb128', 'Secret', '123456789101213141516171819202122', 'iviviviviviviviv123'); ``` Результат: ``` text -┌─mode───────────┬─hex(aes_encrypt_mysql('aes-256-cfb128', input, key32, iv))─┐ -│ aes-256-cfb128 │ │ -│ aes-256-cfb128 │ 7FB039F7 │ -│ aes-256-cfb128 │ 5CBD20F7ABD3AC41FCAA1A5C0E119E2BB5174F │ -└────────────────┴────────────────────────────────────────────────────────────┘ +Received exception from server (version 21.1.2): +Code: 36. DB::Exception: Received from localhost:9000. DB::Exception: Invalid key size: 33 expected 32: While processing encrypt('aes-256-cfb128', 'Secret', '123456789101213141516171819202122', 'iviviviviviviviv123'). 
+``` + +Тогда как `aes_encrypt_mysql` возвращает совместимый с MySQL вывод: + +Запрос: + +``` sql +SELECT hex(aes_encrypt_mysql('aes-256-cfb128', 'Secret', '123456789101213141516171819202122', 'iviviviviviviviv123')) AS ciphertext; +``` + +Результат: + +```text +┌─ciphertext───┐ +│ 24E9E4966469 │ +└──────────────┘ +``` + +Если передать `iv` еще длиннее, результат останется таким же: + +Запрос: + +``` sql +SELECT hex(aes_encrypt_mysql('aes-256-cfb128', 'Secret', '123456789101213141516171819202122', 'iviviviviviviviv123456')) AS ciphertext +``` + +Результат: + +``` text +┌─ciphertext───┐ +│ 24E9E4966469 │ +└──────────────┘ +``` + +Это совпадает с тем, что выводит MySQL с такими же вводными: + +``` sql +mysql> SET block_encryption_mode='aes-256-cfb128'; +Query OK, 0 rows affected (0.00 sec) + +mysql> SELECT aes_encrypt('Secret', '123456789101213141516171819202122', 'iviviviviviviviv123456') as ciphertext; ++------------------------+ +| ciphertext | ++------------------------+ +| 0x24E9E4966469 | ++------------------------+ +1 row in set (0.00 sec) ``` ## decrypt {#decrypt} -Функция поддерживает расшифровку данных следующими режимами: +Функция расшифровывает зашифрованный текст в обычный следующими режимами: - aes-128-ecb, aes-192-ecb, aes-256-ecb - aes-128-cbc, aes-192-cbc, aes-256-cbc @@ -247,7 +232,7 @@ SELECT 'aes-256-cfb128' AS mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) **Синтаксис** -```sql +``` sql decrypt('mode', 'ciphertext', 'key' [, iv, aad]) ``` @@ -265,51 +250,57 @@ decrypt('mode', 'ciphertext', 'key' [, iv, aad]) **Примеры** -Создадим такую таблицу: +Используется таблица из [encrypt](#encrypt). Запрос: ``` sql -CREATE TABLE encryption_test -( - input String, - key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'), - iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'), - key32 String DEFAULT substring(key, 1, 32), - key24 String DEFAULT substring(key, 1, 24), - key16 String DEFAULT substring(key, 1, 16) -) Engine = Memory; -``` - -Вставим эти данные: - -Запрос: - -``` sql -INSERT INTO encryption_test (input) VALUES (''), ('text'), ('What Is ClickHouse?'); -``` - -Запрос: - -``` sql - -SELECT 'aes-128-ecb' AS mode, decrypt(mode, encrypt(mode, input, key16), key16) FROM encryption_test; +SELECT comment, hex(secret) FROM encryption_test; ``` Результат: -```text -┌─mode────────┬─decrypt('aes-128-ecb', encrypt('aes-128-ecb', input, key16), key16)─┐ -│ aes-128-ecb │ │ -│ aes-128-ecb │ text │ -│ aes-128-ecb │ What Is ClickHouse? 
│
└─────────────┴─────────────────────────────────────────────────────────────────────┘
```

┌─comment──────────────┬─hex(secret)──────────────────────────────────┐
│ aes-256-gcm │ A8A3CCBC6426CFEEB60E4EAE03D3E94204C1B09E0254 │
│ aes-256-gcm with AAD │ A8A3CCBC6426D9A1017A0A932322F1852260A4AD6837 │
└──────────────────────┴──────────────────────────────────────────────┘
┌─comment─────────────────────────────┬─hex(secret)──────────────────────┐
│ aes-256-cfb128 no IV │ B4972BDC4459 │
│ aes-256-cfb128 no IV, different key │ 2FF57C092DC9 │
│ aes-256-cfb128 with IV │ 5E6CB398F653 │
│ aes-256-cbc no IV │ 1BC0629A92450D9E73A00E7D02CF4142 │
└─────────────────────────────────────┴──────────────────────────────────┘
```

Теперь попытаемся расшифровать эти данные:

Запрос:

``` sql
SELECT comment, decrypt('aes-256-cfb128', secret, '12345678910121314151617181920212') as plaintext FROM encryption_test
```

Результат:

``` text
┌─comment─────────────────────────────┬─plaintext─┐
│ aes-256-cfb128 no IV │ Secret │
│ aes-256-cfb128 no IV, different key │ �4�
 � │
│ aes-256-cfb128 with IV │ ���6�~ │
 │aes-256-cbc no IV │ �2*4�h3c�4w��@
└─────────────────────────────────────┴───────────┘
```

Обратите внимание, что только часть данных была расшифрована, а остальное является бессмыслицей, так как `mode`, `key`, или `iv`были другими во время шифрования.

## aes_decrypt_mysql {#aes_decrypt_mysql}

Совместима с шифрованием MySQL и может расшифровать данные, зашифрованные функцией [AES_ENCRYPT](https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-encrypt).

При одинаковых вводных расшифрованный текст будет совпадать с результатом `decrypt`. Однако, когда `key` или `iv` длиннее, чем должны быть, `aes_decrypt_mysql` будет работать аналогично MySQL `aes_decrypt`: свернет ключ и проигнорирует лишнюю часть `iv`.

Функция поддерживает расшифровку данных следующими режимами:

- aes-128-ecb, aes-192-ecb, aes-256-ecb
- aes-128-cbc, aes-192-cbc, aes-256-cbc
- aes-128-cfb1, aes-192-cfb1, aes-256-cfb1
- aes-128-cfb8, aes-192-cfb8, aes-256-cfb8
- aes-128-cfb128, aes-192-cfb128, aes-256-cfb128
- aes-128-ofb, aes-192-ofb, aes-256-ofb

**Синтаксис**

``` sql
aes_decrypt_mysql('mode', 'ciphertext', 'key' [, iv])
```

**Аргументы**

- `mode` — режим шифрования. [String](../../sql-reference/data-types/string.md#string).
- `ciphertext` — зашифрованный текст, который будет расшифрован. [String](../../sql-reference/data-types/string.md#string).
- `key` — ключ шифрования. [String](../../sql-reference/data-types/string.md#string).
- `iv` — инициализирующий вектор. Необязателен. [String](../../sql-reference/data-types/string.md#string).

**Возвращаемое значение**

- Расшифрованная строка. [String](../../sql-reference/data-types/string.md#string).
**Примеры** -Создадим такую таблицу: +Расшифруем данные, которые до этого зашифровали с помощью MySQL: -Запрос: ``` sql -CREATE TABLE encryption_test -( - input String, - key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'), - iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'), - key32 String DEFAULT substring(key, 1, 32), - key24 String DEFAULT substring(key, 1, 24), - key16 String DEFAULT substring(key, 1, 16) -) Engine = Memory; -``` +mysql> SET block_encryption_mode='aes-256-cfb128'; +Query OK, 0 rows affected (0.00 sec) -Вставим эти данные: - -Запрос: - -``` sql -INSERT INTO encryption_test (input) VALUES (''), ('text'), ('What Is ClickHouse?'); +mysql> SELECT aes_encrypt('Secret', '123456789101213141516171819202122', 'iviviviviviviviv123456') as ciphertext; ++------------------------+ +| ciphertext | ++------------------------+ +| 0x24E9E4966469 | ++------------------------+ +1 row in set (0.00 sec) ``` Запрос: ``` sql -SELECT 'aes-128-cbc' AS mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key), key) FROM encryption_test; +SELECT aes_decrypt_mysql('aes-256-cfb128', unhex('24E9E4966469'), '123456789101213141516171819202122', 'iviviviviviviviv123456') AS plaintext ``` Результат: ``` text -┌─mode────────┬─aes_decrypt_mysql('aes-128-cbc', aes_encrypt_mysql('aes-128-cbc', input, key), key)─┐ -│ aes-128-cbc │ │ -│ aes-128-cbc │ text │ -│ aes-128-cbc │ What Is ClickHouse? │ -└─────────────┴─────────────────────────────────────────────────────────────────────────────────────┘ +┌─plaintext─┐ +│ Secret │ +└───────────┘ ``` - [Original article](https://clickhouse.tech/docs/ru/sql-reference/functions/encryption_functions/) From 2a9a6cf4048969d1fa670fb7afac18d57b86649a Mon Sep 17 00:00:00 2001 From: George Date: Thu, 11 Feb 2021 19:46:23 +0300 Subject: [PATCH 08/86] Edited and translated parametric-functions --- .../sql-reference/aggregate-functions/parametric-functions.md | 2 +- .../sql-reference/aggregate-functions/parametric-functions.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/sql-reference/aggregate-functions/parametric-functions.md b/docs/en/sql-reference/aggregate-functions/parametric-functions.md index 4b3bf12aa8c..2d2df3bd6cb 100644 --- a/docs/en/sql-reference/aggregate-functions/parametric-functions.md +++ b/docs/en/sql-reference/aggregate-functions/parametric-functions.md @@ -241,7 +241,7 @@ windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN) **Parameters** -- `window` — Length of the sliding window. The unit of `window` depends on the timestamp itself and varies. Determined using the expression `timestamp of cond2 <= timestamp of cond1 + window`. +- `window` — Length of the sliding window. The unit of `window` depends on the `timestamp` itself and varies. Determined using the expression `timestamp of cond2 <= timestamp of cond1 + window`. - `mode` - It is an optional argument. - `'strict'` - When the `'strict'` is set, the windowFunnel() applies conditions only for the unique values. - `timestamp` — Name of the column containing the timestamp. Data types supported: [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md#data_type-datetime) and other unsigned integer types (note that even though timestamp supports the `UInt64` type, it’s value can’t exceed the Int64 maximum, which is 2^63 - 1). 
diff --git a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md index f20acaa45c3..2c367882714 100644 --- a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md +++ b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md @@ -239,7 +239,7 @@ windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN) **Параметры** -- `window` — ширина скользящего окна по времени в секундах. [UInt](../../sql-reference/aggregate-functions/parametric-functions.md). +- `window` — ширина скользящего окна по времени. Единица измерения зависит от `timestamp` и может варьироваться. Определяется выражением `timestamp от cond2 <= timestamp от cond1 + window`. - `mode` - необязательный параметр. Если установлено значение `'strict'`, то функция `windowFunnel()` применяет условия только для уникальных значений. - `timestamp` — имя столбца, содержащего временные отметки. [Date](../../sql-reference/aggregate-functions/parametric-functions.md), [DateTime](../../sql-reference/aggregate-functions/parametric-functions.md#data_type-datetime) и другие параметры с типом `Integer`. В случае хранения меток времени в столбцах с типом `UInt64`, максимально допустимое значение соответствует ограничению для типа `Int64`, т.е. равно `2^63-1`. - `cond` — условия или данные, описывающие цепочку событий. [UInt8](../../sql-reference/aggregate-functions/parametric-functions.md). From cd11212bba784958174fdfbd334622a533686756 Mon Sep 17 00:00:00 2001 From: George Date: Thu, 11 Feb 2021 19:57:41 +0300 Subject: [PATCH 09/86] Edited and translated settings --- docs/en/operations/settings/settings.md | 4 ++-- docs/ru/operations/settings/settings.md | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index c7ee48c11bf..70809885a99 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1956,8 +1956,8 @@ Default value: 16. **See Also** -- [Kafka](../../engines/table-engines/integrations/kafka.md#kafka) engine -- [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md#rabbitmq-engine) engine +- [Kafka](../../engines/table-engines/integrations/kafka.md#kafka) engine. +- [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md#rabbitmq-engine) engine. ## validate_polygons {#validate_polygons} diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md index 1352fe850df..fed10d21920 100644 --- a/docs/ru/operations/settings/settings.md +++ b/docs/ru/operations/settings/settings.md @@ -1939,6 +1939,21 @@ SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 1; Значение по умолчанию: 16. +## background_message_broker_schedule_pool_size {#background_message_broker_schedule_pool_size} + +Задает количество потоков для вывода потокового вывода сообщений. Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе. + +Допустимые значения: + +- Положительное целое число. + +Значение по умолчанию: 16. + +**Смотрите также** + +- Движок [Kafka](../../engines/table-engines/integrations/kafka.md#kafka). +- Движок [RabbitMQ](../../engines/table-engines/integrations/rabbitmq.md#rabbitmq-engine). 
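Посмотреть текущее значение можно, например, таким запросом (набросок; предполагается, что настройка отображается в таблице `system.settings`):

``` sql
SELECT name, value
FROM system.settings
WHERE name = 'background_message_broker_schedule_pool_size';
```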
+ ## format_avro_schema_registry_url {#format_avro_schema_registry_url} Задает URL реестра схем [Confluent](https://docs.confluent.io/current/schema-registry/index.html) для использования с форматом [AvroConfluent](../../interfaces/formats.md#data-format-avro-confluent). From 93ea1e5e82da3a3eb07dbe9daa355d3ab31accf5 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 11 Feb 2021 20:13:59 +0300 Subject: [PATCH 10/86] Comment output --- .../QueryPlan/Optimizations/filterPushDown.cpp | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index 2a42b08af73..a5f1d37e2f2 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -42,14 +42,11 @@ size_t tryPushDownLimit(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes) for (auto pos : params.keys) keys.push_back(params.src_header.getByPosition(pos).name); - std::cerr << "Filter: \n" << expression->dumpDAG() << std::endl; + // std::cerr << "Filter: \n" << expression->dumpDAG() << std::endl; if (auto split_filter = expression->splitActionsForFilter(filter_column_name, removes_filter, keys)) { - std::cerr << "===============\n" << expression->dumpDAG() << std::endl; - std::cerr << "---------------\n" << split_filter->dumpDAG() << std::endl; - - //if (split_filter) - // throw Exception("!!!!", 0); + // std::cerr << "===============\n" << expression->dumpDAG() << std::endl; + // std::cerr << "---------------\n" << split_filter->dumpDAG() << std::endl; auto it = expression->getIndex().find(filter_column_name); if (it == expression->getIndex().end()) From 838dab756491d5bdcd6151fb5075756d0807b807 Mon Sep 17 00:00:00 2001 From: George Date: Thu, 11 Feb 2021 21:07:38 +0300 Subject: [PATCH 11/86] Edit and translated Kafka --- .../table-engines/integrations/kafka.md | 22 +++++++++---------- .../table-engines/integrations/kafka.md | 19 +++++++++------- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/docs/en/engines/table-engines/integrations/kafka.md b/docs/en/engines/table-engines/integrations/kafka.md index c519d6bb136..fb1df62bb15 100644 --- a/docs/en/engines/table-engines/integrations/kafka.md +++ b/docs/en/engines/table-engines/integrations/kafka.md @@ -38,20 +38,20 @@ SETTINGS Required parameters: -- `kafka_broker_list` – A comma-separated list of brokers (for example, `localhost:9092`). -- `kafka_topic_list` – A list of Kafka topics. -- `kafka_group_name` – A group of Kafka consumers. Reading margins are tracked for each group separately. If you don’t want messages to be duplicated in the cluster, use the same group name everywhere. -- `kafka_format` – Message format. Uses the same notation as the SQL `FORMAT` function, such as `JSONEachRow`. For more information, see the [Formats](../../../interfaces/formats.md) section. +- `kafka_broker_list` — A comma-separated list of brokers (for example, `localhost:9092`). +- `kafka_topic_list` — A list of Kafka topics. +- `kafka_group_name` — A group of Kafka consumers. Reading margins are tracked for each group separately. If you don’t want messages to be duplicated in the cluster, use the same group name everywhere. +- `kafka_format` — Message format. Uses the same notation as the SQL `FORMAT` function, such as `JSONEachRow`. For more information, see the [Formats](../../../interfaces/formats.md) section. 
Optional parameters: -- `kafka_row_delimiter` – Delimiter character, which ends the message. -- `kafka_schema` – Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object. -- `kafka_num_consumers` – The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition. -- `kafka_max_block_size` - The maximum batch size (in messages) for poll (default: `max_block_size`). -- `kafka_skip_broken_messages` – Kafka message parser tolerance to schema-incompatible messages per block. Default: `0`. If `kafka_skip_broken_messages = N` then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data). -- `kafka_commit_every_batch` - Commit every consumed and handled batch instead of a single commit after writing a whole block (default: `0`). -- `kafka_thread_per_consumer` - Provide independent thread for each consumer (default: `0`). When enabled, every consumer flush the data independently, in parallel (otherwise - rows from several consumers squashed to form one block). +- `kafka_row_delimiter` — Delimiter character, which ends the message. +- `kafka_schema` — Parameter that must be used if the format requires a schema definition. For example, [Cap’n Proto](https://capnproto.org/) requires the path to the schema file and the name of the root `schema.capnp:Message` object. +- `kafka_num_consumers` — The number of consumers per table. Default: `1`. Specify more consumers if the throughput of one consumer is insufficient. The total number of consumers should not exceed the number of partitions in the topic, since only one consumer can be assigned per partition. +- `kafka_max_block_size` — The maximum batch size (in messages) for poll (default: `max_block_size`). +- `kafka_skip_broken_messages` — Kafka message parser tolerance to schema-incompatible messages per block. Default: `0`. If `kafka_skip_broken_messages = N` then the engine skips *N* Kafka messages that cannot be parsed (a message equals a row of data). +- `kafka_commit_every_batch` — Commit every consumed and handled batch instead of a single commit after writing a whole block (default: `0`). +- `kafka_thread_per_consumer` — Provide independent thread for each consumer (default: `0`). When enabled, every consumer flush the data independently, in parallel (otherwise — rows from several consumers squashed to form one block). Examples: diff --git a/docs/ru/engines/table-engines/integrations/kafka.md b/docs/ru/engines/table-engines/integrations/kafka.md index 940fee2452b..2b9dfcd49da 100644 --- a/docs/ru/engines/table-engines/integrations/kafka.md +++ b/docs/ru/engines/table-engines/integrations/kafka.md @@ -31,21 +31,24 @@ SETTINGS [kafka_schema = '',] [kafka_num_consumers = N,] [kafka_skip_broken_messages = N] + [kafka_commit_every_batch = 0,] + [kafka_thread_per_consumer = 0] ``` Обязательные параметры: -- `kafka_broker_list` – перечень брокеров, разделенный запятыми (`localhost:9092`). -- `kafka_topic_list` – перечень необходимых топиков Kafka. -- `kafka_group_name` – группа потребителя Kafka. Отступы для чтения отслеживаются для каждой группы отдельно. Если необходимо, чтобы сообщения не повторялись на кластере, используйте везде одно имя группы. 
-- `kafka_format` – формат сообщений. Названия форматов должны быть теми же, что можно использовать в секции `FORMAT`, например, `JSONEachRow`. Подробнее читайте в разделе [Форматы](../../../interfaces/formats.md). +- `kafka_broker_list` — перечень брокеров, разделенный запятыми (`localhost:9092`). +- `kafka_topic_list` — перечень необходимых топиков Kafka. +- `kafka_group_name` — группа потребителя Kafka. Отступы для чтения отслеживаются для каждой группы отдельно. Если необходимо, чтобы сообщения не повторялись на кластере, используйте везде одно имя группы. +- `kafka_format` — формат сообщений. Названия форматов должны быть теми же, что можно использовать в секции `FORMAT`, например, `JSONEachRow`. Подробнее читайте в разделе [Форматы](../../../interfaces/formats.md). Опциональные параметры: -- `kafka_row_delimiter` – символ-разделитель записей (строк), которым завершается сообщение. -- `kafka_schema` – опциональный параметр, необходимый, если используется формат, требующий определения схемы. Например, [Cap’n Proto](https://capnproto.org/) требует путь к файлу со схемой и название корневого объекта `schema.capnp:Message`. -- `kafka_num_consumers` – количество потребителей (consumer) на таблицу. По умолчанию: `1`. Укажите больше потребителей, если пропускная способность одного потребителя недостаточна. Общее число потребителей не должно превышать количество партиций в топике, так как на одну партицию может быть назначено не более одного потребителя. -- `kafka_skip_broken_messages` – максимальное количество некорректных сообщений в блоке. Если `kafka_skip_broken_messages = N`, то движок отбрасывает `N` сообщений Кафки, которые не получилось обработать. Одно сообщение в точности соответствует одной записи (строке). Значение по умолчанию – 0. +- `kafka_row_delimiter` — символ-разделитель записей (строк), которым завершается сообщение. +- `kafka_schema` — опциональный параметр, необходимый, если используется формат, требующий определения схемы. Например, [Cap’n Proto](https://capnproto.org/) требует путь к файлу со схемой и название корневого объекта `schema.capnp:Message`. +- `kafka_num_consumers` — количество потребителей (consumer) на таблицу. По умолчанию: `1`. Укажите больше потребителей, если пропускная способность одного потребителя недостаточна. Общее число потребителей не должно превышать количество партиций в топике, так как на одну партицию может быть назначено не более одного потребителя. +- `kafka_skip_broken_messages` — максимальное количество некорректных сообщений в блоке. Если `kafka_skip_broken_messages = N`, то движок отбрасывает `N` сообщений Кафки, которые не получилось обработать. Одно сообщение в точности соответствует одной записи (строке). Значение по умолчанию – 0. +- `kafka_thread_per_consumer` — снабжает каждого потребителя независимым потоком (по умолчанию `0`). При включенном состоянии каждый потребитель сбрасывает данные независимо и параллельно (иначе — строки от нескольких потребителей склеиваются в один блок). 
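Набросок минимального объявления таблицы с обязательными параметрами (адрес брокера, имена топика и группы условные):

``` sql
CREATE TABLE queue
(
    timestamp UInt64,
    level String,
    message String
) ENGINE = Kafka
SETTINGS kafka_broker_list = 'localhost:9092',
         kafka_topic_list = 'topic',
         kafka_group_name = 'group1',
         kafka_format = 'JSONEachRow';
```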
Примеры

From 4c8632bd9ab32322af29abb04cf70c39c6cd3c79 Mon Sep 17 00:00:00 2001
From: George
Date: Fri, 12 Feb 2021 00:22:55 +0300
Subject: [PATCH 12/86] Minor fixes

---
 docs/ru/operations/settings/settings.md | 2 +-
 docs/ru/sql-reference/functions/encryption-functions.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md
index fed10d21920..a7754cfc421 100644
--- a/docs/ru/operations/settings/settings.md
+++ b/docs/ru/operations/settings/settings.md
@@ -1941,7 +1941,7 @@ SELECT idx, i FROM null_in WHERE i IN (1, NULL) SETTINGS transform_null_in = 1;

 ## background_message_broker_schedule_pool_size {#background_message_broker_schedule_pool_size}

-Задает количество потоков для вывода потокового вывода сообщений. Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе.
+Задает количество потоков для фонового потокового вывода сообщений. Настройка применяется при запуске сервера ClickHouse и не может быть изменена в пользовательском сеансе.

diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md
index 14ce97f5513..91b26a2415d 100644
--- a/docs/ru/sql-reference/functions/encryption-functions.md
+++ b/docs/ru/sql-reference/functions/encryption-functions.md
@@ -293,7 +293,7 @@ SELECT comment, decrypt('aes-256-cfb128', secret, '12345678910121314151617181920
 └─────────────────────────────────────┴───────────┘
 ```

-Обратите внимание, что только часть данных была расшифрована, а остальное является бессмыслицей, так как `mode`, `key`, или `iv`были другими во время шифрования.
+Обратите внимание, что только часть данных была расшифрована, а остальное является бессмыслицей, так как `mode`, `key`, или `iv` были другими во время шифрования.

From 7e75965af887d7a7d68699b7bac5e0401cbf02c7 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov
Date: Fri, 12 Feb 2021 12:35:26 +0300
Subject: [PATCH 13/86] Fix ActionsDAG::splitActionsForFilter

---
 src/Interpreters/ActionsDAG.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp
index 78254e5139a..6a7dbc47230 100644
--- a/src/Interpreters/ActionsDAG.cpp
+++ b/src/Interpreters/ActionsDAG.cpp
@@ -1168,7 +1168,7 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name,
 {
     auto & inputs_list = inputs_map[name];
     if (inputs_list.empty())
-        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot find input {} in ActionsDAG. DAG:\n{}", name, dumpDAG());
+        continue;

     allowed_nodes.emplace(inputs_list.front());
     inputs_list.pop_front();

From 443a3e7e6fd2452bf3efa8e4ab2a349feaf3b29f Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov
Date: Fri, 12 Feb 2021 13:12:31 +0300
Subject: [PATCH 14/86] Fix limit push down.

---
 src/Processors/QueryPlan/Optimizations/Optimizations.h | 4 ++--
 src/Processors/QueryPlan/Optimizations/filterPushDown.cpp | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/Processors/QueryPlan/Optimizations/Optimizations.h b/src/Processors/QueryPlan/Optimizations/Optimizations.h
index be7f81e5db0..a5c3af488a9 100644
--- a/src/Processors/QueryPlan/Optimizations/Optimizations.h
+++ b/src/Processors/QueryPlan/Optimizations/Optimizations.h
@@ -40,7 +40,7 @@ size_t tryMergeExpressions(QueryPlan::Node * parent_node, QueryPlan::Nodes &);
 /// Move FilterStep down if possible.
/// May split FilterStep and push down only part of it. -size_t tryPushDownLimit(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes); +size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes); inline const auto & getOptimizations() { @@ -50,7 +50,7 @@ inline const auto & getOptimizations() {tryPushDownLimit, "pushDownLimit"}, {trySplitFilter, "splitFilter"}, {tryMergeExpressions, "mergeExpressions"}, - {tryPushDownLimit, "pushDownFilter"}, + {tryPushDownFilter, "pushDownFilter"}, }}; return optimizations; diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index a5f1d37e2f2..ac95d69d237 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -42,11 +42,11 @@ size_t tryPushDownLimit(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes) for (auto pos : params.keys) keys.push_back(params.src_header.getByPosition(pos).name); - // std::cerr << "Filter: \n" << expression->dumpDAG() << std::endl; + std::cerr << "Filter: \n" << expression->dumpDAG() << std::endl; if (auto split_filter = expression->splitActionsForFilter(filter_column_name, removes_filter, keys)) { - // std::cerr << "===============\n" << expression->dumpDAG() << std::endl; - // std::cerr << "---------------\n" << split_filter->dumpDAG() << std::endl; + std::cerr << "===============\n" << expression->dumpDAG() << std::endl; + std::cerr << "---------------\n" << split_filter->dumpDAG() << std::endl; auto it = expression->getIndex().find(filter_column_name); if (it == expression->getIndex().end()) From 93e1428f2119ecc5b3979ff5bff0d0304327579c Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 12 Feb 2021 13:51:16 +0300 Subject: [PATCH 15/86] Fix limit push down. 
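The rename in the optimizations table above (the `pushDownFilter` entry now points at `tryPushDownFilter` instead of invoking `tryPushDownLimit` a second time) is what actually activates the new pass; this commit only mutes the temporary debug output. A rough sketch of the behavior the pass is meant to produce, borrowing the query shape from the tests added later in this series:

``` sql
explain actions = 1 select s, y from (
    select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y
) where y != 0
settings enable_optimize_predicate_expression = 0;
-- Expected plan fragment: a Filter on the grouping key y appears
-- below Aggregating, so rows are filtered before aggregation.
```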
--- src/Processors/QueryPlan/Optimizations/filterPushDown.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index ac95d69d237..ec005e59729 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -15,7 +15,7 @@ namespace DB::ErrorCodes namespace DB::QueryPlanOptimizations { -size_t tryPushDownLimit(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes) +size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes) { if (parent_node->children.size() != 1) return 0; @@ -42,11 +42,11 @@ size_t tryPushDownLimit(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes) for (auto pos : params.keys) keys.push_back(params.src_header.getByPosition(pos).name); - std::cerr << "Filter: \n" << expression->dumpDAG() << std::endl; + // std::cerr << "Filter: \n" << expression->dumpDAG() << std::endl; if (auto split_filter = expression->splitActionsForFilter(filter_column_name, removes_filter, keys)) { - std::cerr << "===============\n" << expression->dumpDAG() << std::endl; - std::cerr << "---------------\n" << split_filter->dumpDAG() << std::endl; + // std::cerr << "===============\n" << expression->dumpDAG() << std::endl; + // std::cerr << "---------------\n" << split_filter->dumpDAG() << std::endl; auto it = expression->getIndex().find(filter_column_name); if (it == expression->getIndex().end()) From 683d793cc289ec12b8885efe1405b79a22350a36 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 12 Feb 2021 14:31:14 +0300 Subject: [PATCH 16/86] Update test. --- .../01655_plan_optimizations.reference | 33 +++++++++++- .../0_stateless/01655_plan_optimizations.sh | 51 ++++++++++++++++++- 2 files changed, 80 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference index fda40305f9d..510224146ed 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations.reference @@ -1,7 +1,7 @@ -sipHash should be calculated after filtration +> sipHash should be calculated after filtration FUNCTION sipHash64 Filter column: equals -sorting steps should know about limit +> sorting steps should know about limit Limit 10 MergingSorted Limit 10 @@ -9,3 +9,32 @@ MergeSorting Limit 10 PartialSorting Limit 10 +-- filter push down -- +> filter should be pushed down after aggregating +Aggregating +Filter +> filter should be pushed down after aggregating, column after aggregation is const +COLUMN Const(UInt8) -> notEquals(y, 0) +Aggregating +Filter +Filter +> one condition of filter should be pushed down after aggregating, other condition is aliased +Filter column +ALIAS notEquals(s, 4) :: 1 -> and(notEquals(y, 0), notEquals(s, 4)) +Aggregating +Filter column: notEquals(y, 0) +> one condition of filter should be pushed down after aggregating, other condition is casted +Filter column +FUNCTION CAST(minus(s, 4) :: 1, UInt8 :: 3) -> and(notEquals(y, 0), minus(s, 4)) +Aggregating +Filter column: notEquals(y, 0) +> one condition of filter should be pushed down after aggregating, other two conditions are ANDed +Filter column +FUNCTION and(minus(s, 4) :: 2, minus(s, 8) :: 1) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4)) +Aggregating +Filter column: notEquals(y, 0) +> two conditions of filter should be pushed down 
after aggregating and ANDed, one condition is aliased +Filter column +ALIAS notEquals(s, 8) :: 1 -> and(notEquals(y, 0), notEquals(s, 8), minus(y, 4)) +Aggregating +Filter column: and(minus(y, 4), notEquals(y, 0)) diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index 4f3541f9dde..ea76d15c648 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -4,7 +4,54 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -echo "sipHash should be calculated after filtration" +echo "> sipHash should be calculated after filtration" $CLICKHOUSE_CLIENT -q "explain actions = 1 select sum(x), sum(y) from (select sipHash64(number) as x, bitAnd(number, 1024) as y from numbers_mt(1000000000) limit 1000000000) where y = 0" | grep -o "FUNCTION sipHash64\|Filter column: equals" -echo "sorting steps should know about limit" +echo "> sorting steps should know about limit" $CLICKHOUSE_CLIENT -q "explain actions = 1 select number from (select number from numbers(500000000) order by -number) limit 10" | grep -o "MergingSorted\|MergeSorting\|PartialSorting\|Limit 10" + +echo "-- filter push down --" +echo "> filter should be pushed down after aggregating" +$CLICKHOUSE_CLIENT -q " + explain select * from (select sum(x), y from ( + select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 + settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter" + +echo "> filter should be pushed down after aggregating, column after aggregation is const" +$CLICKHOUSE_CLIENT -q " + explain actions = 1 select *, y != 0 from (select sum(x), y from ( + select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 + settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter\|COLUMN Const(UInt8) -> notEquals(y, 0)" + +echo "> one condition of filter should be pushed down after aggregating, other condition is aliased" +$CLICKHOUSE_CLIENT -q " + explain actions = 1 select * from ( + select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 and s != 4 + settings enable_optimize_predicate_expression=0" | + grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|ALIAS notEquals(s, 4) :: 1 -> and(notEquals(y, 0), notEquals(s, 4))" + +echo "> one condition of filter should be pushed down after aggregating, other condition is casted" +$CLICKHOUSE_CLIENT -q " + explain actions = 1 select * from ( + select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 and s - 4 + settings enable_optimize_predicate_expression=0" | + grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|FUNCTION CAST(minus(s, 4) :: 1, UInt8 :: 3) -> and(notEquals(y, 0), minus(s, 4))" + +echo "> one condition of filter should be pushed down after aggregating, other two conditions are ANDed" +$CLICKHOUSE_CLIENT -q " + explain actions = 1 select * from ( + select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 and s - 8 and s - 4 + settings enable_optimize_predicate_expression=0" | + grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|FUNCTION and(minus(s, 4) :: 2, minus(s, 8) :: 1) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4))" + +echo "> two conditions of filter should be 
pushed down after aggregating and ANDed, one condition is aliased" +$CLICKHOUSE_CLIENT -q " + explain optimize = 1, actions = 1 select * from ( + select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 and s != 8 and y - 4 + settings enable_optimize_predicate_expression=0" | + grep -o "Aggregating\|Filter column\|Filter column: and(minus(y, 4), notEquals(y, 0))\|ALIAS notEquals(s, 8) :: 1 -> and(notEquals(y, 0), notEquals(s, 8), minus(y, 4))" From bbed905461d9e08adaa1303f71c228d2f62fff8c Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 12 Feb 2021 18:20:54 +0300 Subject: [PATCH 17/86] Fix ActionsDAG::removeUnusedResult --- src/Interpreters/ActionsDAG.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 6a7dbc47230..255c774bbf9 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -490,6 +490,11 @@ bool ActionsDAG::removeUnusedResult(const std::string & column_name) if (col == child) return false; + /// Do not remove input if it was mentioned in index several times. + for (const auto * node : index) + if (col == node) + return false; + /// Remove from nodes and inputs. for (auto jt = nodes.begin(); jt != nodes.end(); ++jt) { From 90c7cf5a5293a32654e97cc8b4f8cb1d2090d3be Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 12 Feb 2021 18:24:31 +0300 Subject: [PATCH 18/86] Push down for ArrayJoin --- .../Optimizations/filterPushDown.cpp | 116 ++++++++++++------ 1 file changed, 80 insertions(+), 36 deletions(-) diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index ec005e59729..98e923249f3 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -3,7 +3,9 @@ #include #include #include +#include #include +#include #include #include @@ -15,6 +17,68 @@ namespace DB::ErrorCodes namespace DB::QueryPlanOptimizations { +static size_t tryAddNewFilterStep( + QueryPlan::Node * parent_node, + QueryPlan::Nodes & nodes, + const Names & allowed_inputs) +{ + QueryPlan::Node * child_node = parent_node->children.front(); + + auto & parent = parent_node->step; + auto & child = child_node->step; + + auto * filter = static_cast(parent.get()); + const auto & expression = filter->getExpression(); + const auto & filter_column_name = filter->getFilterColumnName(); + bool removes_filter = filter->removesFilterColumn(); + + // std::cerr << "Filter: \n" << expression->dumpDAG() << std::endl; + + auto split_filter = expression->splitActionsForFilter(filter_column_name, removes_filter, allowed_inputs); + if (!split_filter) + return 0; + + // std::cerr << "===============\n" << expression->dumpDAG() << std::endl; + // std::cerr << "---------------\n" << split_filter->dumpDAG() << std::endl; + + const auto & index = expression->getIndex(); + auto it = index.begin(); + for (; it != index.end(); ++it) + if ((*it)->result_name == filter_column_name) + break; + + if (it == expression->getIndex().end()) + { + if (!removes_filter) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Filter column {} was removed from ActionsDAG but it is needed in result. 
DAG:\n{}", + filter_column_name, expression->dumpDAG()); + + std::cerr << "replacing to expr because filter " << filter_column_name << " was removed\n"; + parent = std::make_unique(child->getOutputStream(), expression); + } + else if ((*it)->column && isColumnConst(*(*it)->column)) + { + std::cerr << "replacing to expr because filter is const\n"; + parent = std::make_unique(child->getOutputStream(), expression); + } + + /// Add new Filter step before Aggregating. + /// Expression/Filter -> Aggregating -> Something + auto & node = nodes.emplace_back(); + node.children.swap(child_node->children); + child_node->children.emplace_back(&node); + /// Expression/Filter -> Aggregating -> Filter -> Something + + /// New filter column is added to the end. + auto split_filter_column_name = (*split_filter->getIndex().rbegin())->result_name; + node.step = std::make_unique( + node.children.at(0)->step->getOutputStream(), + std::move(split_filter), std::move(split_filter_column_name), true); + + return 3; +} + size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes) { if (parent_node->children.size() != 1) @@ -29,10 +93,6 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes if (!filter) return 0; - const auto & expression = filter->getExpression(); - const auto & filter_column_name = filter->getFilterColumnName(); - bool removes_filter = filter->removesFilterColumn(); - if (auto * aggregating = typeid_cast(child.get())) { const auto & params = aggregating->getParams(); @@ -42,42 +102,26 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes for (auto pos : params.keys) keys.push_back(params.src_header.getByPosition(pos).name); - // std::cerr << "Filter: \n" << expression->dumpDAG() << std::endl; - if (auto split_filter = expression->splitActionsForFilter(filter_column_name, removes_filter, keys)) - { - // std::cerr << "===============\n" << expression->dumpDAG() << std::endl; - // std::cerr << "---------------\n" << split_filter->dumpDAG() << std::endl; + if (auto updated_steps = tryAddNewFilterStep(parent_node, nodes, keys)) + return updated_steps; + } - auto it = expression->getIndex().find(filter_column_name); - if (it == expression->getIndex().end()) - { - if (!removes_filter) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Filter column {} was removed from ActionsDAG but it is needed in result. DAG:\n{}", - filter_column_name, expression->dumpDAG()); + if (auto * array_join = typeid_cast(child.get())) + { + const auto & array_join_actions = array_join->arrayJoin(); + const auto & keys = array_join_actions->columns; + const auto & array_join_header = array_join->getInputStreams().front().header; - parent = std::make_unique(child->getOutputStream(), expression); - } - else if ((*it)->column && isColumnConst(*(*it)->column)) - { - parent = std::make_unique(child->getOutputStream(), expression); - } + Names allowed_inputs; + for (const auto & column : array_join_header) + if (keys.count(column.name) == 0) + allowed_inputs.push_back(column.name); - /// Add new Filter step before Aggregating. - /// Expression/Filter -> Aggregating -> Something - auto & node = nodes.emplace_back(); - node.children.swap(child_node->children); - child_node->children.emplace_back(&node); - /// Expression/Filter -> Aggregating -> Filter -> Something + for (const auto & name : allowed_inputs) + std::cerr << name << std::endl; - /// New filter column is added to the end. 
- auto split_filter_column_name = (*split_filter->getIndex().rbegin())->result_name; - node.step = std::make_unique( - node.children.at(0)->step->getOutputStream(), - std::move(split_filter), std::move(split_filter_column_name), true); - - return 3; - } + if (auto updated_steps = tryAddNewFilterStep(parent_node, nodes, allowed_inputs)) + return updated_steps; } return 0; From 5fd80555aa6241e01737c9a9083f663a8d7ed0eb Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 12 Feb 2021 19:06:18 +0300 Subject: [PATCH 19/86] Update test. --- .../queries/0_stateless/01655_plan_optimizations.reference | 4 ++++ tests/queries/0_stateless/01655_plan_optimizations.sh | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference index 510224146ed..1e638829c74 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations.reference @@ -38,3 +38,7 @@ Filter column ALIAS notEquals(s, 8) :: 1 -> and(notEquals(y, 0), notEquals(s, 8), minus(y, 4)) Aggregating Filter column: and(minus(y, 4), notEquals(y, 0)) +> filter is split, one part is filtered before ARRAY JOIN +Filter column: and(notEquals(y, 2), notEquals(x, 0)) +ARRAY JOIN x +Filter column: notEquals(y, 2) diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index ea76d15c648..ccd331df45e 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -55,3 +55,10 @@ $CLICKHOUSE_CLIENT -q " ) where y != 0 and s != 8 and y - 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: and(minus(y, 4), notEquals(y, 0))\|ALIAS notEquals(s, 8) :: 1 -> and(notEquals(y, 0), notEquals(s, 8), minus(y, 4))" + +echo "> filter is split, one part is filtered before ARRAY JOIN" +$CLICKHOUSE_CLIENT -q " + explain actions = 1 select x, y from ( + select range(number) as x, number + 1 as y from numbers(3) + ) array join x where y != 2 and x != 0" | + grep -o "Filter column: and(notEquals(y, 2), notEquals(x, 0))\|ARRAY JOIN x\|Filter column: notEquals(y, 2)" \ No newline at end of file From a72ef6f026eb955fe43ba9c2d07e3ad6e6646983 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 16 Feb 2021 11:26:24 +0300 Subject: [PATCH 20/86] Fix number of threads for scalar subqueries and subqueries for index. 
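In practice the change is visible on scalar subqueries: previously they were pulled through the single-threaded executor, so `max_threads` had no effect inside them. A hedged sketch, modeled on the perf test added in the next commit:

``` sql
create table tab (a UInt32, b UInt32) engine = MergeTree order by (a, b);
insert into tab values (1, 1);

-- With the asynchronous executor, the scalar subquery below may run on
-- up to 2 threads; the sleep() calls make the speedup observable.
select a, b from tab
where (1, 1) = (select min(toUInt32(number + 1)), min(toUInt32(sleep(0.1) + 1)) from numbers_mt(16))
settings max_threads = 2, max_block_size = 4;
```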
--- .../ExecuteScalarSubqueriesVisitor.cpp | 16 ++++++++++++---- src/Interpreters/ExpressionAnalyzer.cpp | 7 +++++-- .../Executors/PullingAsyncPipelineExecutor.cpp | 7 ++++++- src/Processors/Formats/LazyOutputFormat.cpp | 9 +++++++-- 4 files changed, 30 insertions(+), 9 deletions(-) diff --git a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp index e6061aabe94..7ee7bb1f301 100644 --- a/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp +++ b/src/Interpreters/ExecuteScalarSubqueriesVisitor.cpp @@ -21,7 +21,7 @@ #include -#include +#include namespace DB { @@ -122,8 +122,10 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr try { - PullingPipelineExecutor executor(io.pipeline); - if (!executor.pull(block)) + PullingAsyncPipelineExecutor executor(io.pipeline); + while (block.rows() == 0 && executor.pull(block)); + + if (block.rows() == 0) { /// Interpret subquery with empty result as Null literal auto ast_new = std::make_unique(Null()); @@ -132,7 +134,13 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTSubquery & subquery, ASTPtr return; } - if (block.rows() != 1 || executor.pull(block)) + if (block.rows() != 1) + throw Exception("Scalar subquery returned more than one row", ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY); + + Block tmp_block; + while (tmp_block.rows() == 0 && executor.pull(tmp_block)); + + if (tmp_block.rows() != 0) throw Exception("Scalar subquery returned more than one row", ErrorCodes::INCORRECT_RESULT_OF_SCALAR_SUBQUERY); } catch (const Exception & e) diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 3f65a6f3f58..cea056d6a21 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -54,7 +54,7 @@ #include #include -#include +#include #include namespace DB @@ -321,7 +321,7 @@ void SelectQueryExpressionAnalyzer::tryMakeSetForIndexFromSubquery(const ASTPtr auto interpreter_subquery = interpretSubquery(subquery_or_table_name, context, {}, query_options); auto io = interpreter_subquery->execute(); - PullingPipelineExecutor executor(io.pipeline); + PullingAsyncPipelineExecutor executor(io.pipeline); SetPtr set = std::make_shared(settings.size_limits_for_set, true, context.getSettingsRef().transform_null_in); set->setHeader(executor.getHeader()); @@ -329,6 +329,9 @@ void SelectQueryExpressionAnalyzer::tryMakeSetForIndexFromSubquery(const ASTPtr Block block; while (executor.pull(block)) { + if (block.rows() == 0) + continue; + /// If the limits have been exceeded, give up and let the default subquery processing actions take place. 
if (!set->insertFromBlock(block)) return; diff --git a/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp index e4bcf6dc0ab..21741d30dfa 100644 --- a/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp +++ b/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp @@ -133,7 +133,12 @@ bool PullingAsyncPipelineExecutor::pull(Chunk & chunk, uint64_t milliseconds) } chunk.clear(); - data->finish_event.tryWait(milliseconds); + + if (milliseconds) + data->finish_event.tryWait(milliseconds); + else + data->finish_event.wait(); + return true; } diff --git a/src/Processors/Formats/LazyOutputFormat.cpp b/src/Processors/Formats/LazyOutputFormat.cpp index 46287d1cce9..0663ff28f84 100644 --- a/src/Processors/Formats/LazyOutputFormat.cpp +++ b/src/Processors/Formats/LazyOutputFormat.cpp @@ -16,8 +16,13 @@ Chunk LazyOutputFormat::getChunk(UInt64 milliseconds) } Chunk chunk; - if (!queue.tryPop(chunk, milliseconds)) - return {}; + if (milliseconds) + { + if (!queue.tryPop(chunk, milliseconds)) + return {}; + } + else + queue.pop(chunk); if (chunk) info.update(chunk.getNumRows(), chunk.allocatedBytes()); From 10f1432c5cb1dc77c0c31cd960a275480fa380dd Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 16 Feb 2021 11:31:17 +0300 Subject: [PATCH 21/86] Added perftest. --- tests/performance/subqueries.xml | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 tests/performance/subqueries.xml diff --git a/tests/performance/subqueries.xml b/tests/performance/subqueries.xml new file mode 100644 index 00000000000..f1481a78c7e --- /dev/null +++ b/tests/performance/subqueries.xml @@ -0,0 +1,7 @@ + + create table tab (a UInt32, b UInt32) engine = MergeTree order by (a, b) + insert into tab values (1, 1) + select a, b from tab where (a, b) in (select toUInt32(number) as x, toUInt32(sleep(0.1) + 1) from numbers_mt(16)) settings max_threads = 2, max_block_size = 4 + select a, b from tab where (1, 1) = (select min(toUInt32(number + 1)) as x, min(toUInt32(sleep(0.1) + 1)) from numbers_mt(16)) settings max_threads = 2, max_block_size = 4 + DROP TABLE tab + \ No newline at end of file From a1cd07b9a00ff0ea4bc4e98d03af9b5046e6854f Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:24:49 +0300 Subject: [PATCH 22/86] Update docs/ru/sql-reference/aggregate-functions/parametric-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- .../sql-reference/aggregate-functions/parametric-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md index 2c367882714..d96f7a13bcc 100644 --- a/docs/ru/sql-reference/aggregate-functions/parametric-functions.md +++ b/docs/ru/sql-reference/aggregate-functions/parametric-functions.md @@ -239,7 +239,7 @@ windowFunnel(window, [mode])(timestamp, cond1, cond2, ..., condN) **Параметры** -- `window` — ширина скользящего окна по времени. Единица измерения зависит от `timestamp` и может варьироваться. Определяется выражением `timestamp от cond2 <= timestamp от cond1 + window`. +- `window` — ширина скользящего окна по времени. Единица измерения зависит от `timestamp` и может варьироваться. Должно соблюдаться условие `timestamp события cond2 <= timestamp события cond1 + window`. - `mode` - необязательный параметр. 
Если установлено значение `'strict'`, то функция `windowFunnel()` применяет условия только для уникальных значений. - `timestamp` — имя столбца, содержащего временные отметки. [Date](../../sql-reference/aggregate-functions/parametric-functions.md), [DateTime](../../sql-reference/aggregate-functions/parametric-functions.md#data_type-datetime) и другие параметры с типом `Integer`. В случае хранения меток времени в столбцах с типом `UInt64`, максимально допустимое значение соответствует ограничению для типа `Int64`, т.е. равно `2^63-1`. - `cond` — условия или данные, описывающие цепочку событий. [UInt8](../../sql-reference/aggregate-functions/parametric-functions.md). From b8be90cdf9c8505714cfaeb94ac6ffa296a0778d Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:32:59 +0300 Subject: [PATCH 23/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index 91b26a2415d..adf084a6b21 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -110,7 +110,7 @@ SELECT comment, hex(secret) FROM encryption_test WHERE comment LIKE '%gcm%'; Совместима с шифрованием myqsl, результат может быть расшифрован функцией [AES_DECRYPT](https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-decrypt). -При одинаковых вводных зашифрованный текст будет совпадать с результатом `encrypt`. Однако, когда `key` или `iv` длиннее, чем должны быть, `aes_encrypt_mysql` будет работать аналогично MySQL `aes_encrypt`: свернет ключ и проигнорирует лишнюю часть `iv`. +При одинаковых входящих значениях зашифрованный текст будет совпадать с результатом, возвращаемым функцией `encrypt`. Однако если `key` или `iv` длиннее, чем должны быть, `aes_encrypt_mysql` будет работать аналогично функции `aes_encrypt` в MySQL: свернет ключ и проигнорирует лишнюю часть `iv`. Функция поддерживает шифрофание данных следующими режимами: From a642dbce46f1734b1893f6528ad591641edbdc70 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:33:19 +0300 Subject: [PATCH 24/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index adf084a6b21..0e8e7d2a33a 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -140,7 +140,7 @@ aes_encrypt_mysql('mode', 'plaintext', 'key' [, iv]) **Примеры** -При одинаковых вводных результаты шифрования `encrypt` и `aes_encrypt_mysql` будут совпадать. +При одинаковых входящих значениях результаты шифрования у функций `encrypt` и `aes_encrypt_mysql` совпадают. 
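A compact way to check the equivalence claimed above; the key and IV here have exactly the required lengths (32 and 16 bytes), and the values themselves are only illustrative:

``` sql
SELECT
    hex(encrypt('aes-256-cfb128', 'Secret', '12345678910121314151617181920212', 'iviviviviviviviv')) AS ciphertext,
    hex(aes_encrypt_mysql('aes-256-cfb128', 'Secret', '12345678910121314151617181920212', 'iviviviviviviviv')) AS ciphertext_mysql;
-- Both columns are expected to hold the same value.
```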
Запрос: From 22ab639287ea47b9a2dba80982170e15c9edd3a0 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:33:32 +0300 Subject: [PATCH 25/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index 0e8e7d2a33a..a72866121c4 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -156,7 +156,7 @@ SELECT encrypt('aes-256-cfb128', 'Secret', '12345678910121314151617181920212', ' └───────────────────┘ ``` -Но `encrypt` генерирует исключение, когда `key` или `iv` длиннее, чем нужно: +Функция `encrypt` генерирует исключение, если `key` или `iv` длиннее чем нужно: Запрос: From d213039fe58fa8efe4340fdd4e3b14564139c71f Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:33:57 +0300 Subject: [PATCH 26/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index a72866121c4..90aa3268922 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -171,7 +171,7 @@ Received exception from server (version 21.1.2): Code: 36. DB::Exception: Received from localhost:9000. DB::Exception: Invalid key size: 33 expected 32: While processing encrypt('aes-256-cfb128', 'Secret', '123456789101213141516171819202122', 'iviviviviviviviv123'). 
``` -Тогда как `aes_encrypt_mysql` возвращает совместимый с MySQL вывод: +Однако функция `aes_encrypt_mysql` в аналогичном случае возвращает результат, который может быть обработан MySQL: Запрос: From 66d6b7a3a088be7e72cab7ced29b1c7fa5c4f418 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:34:33 +0300 Subject: [PATCH 27/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index 90aa3268922..f75e7bcc1a3 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -220,7 +220,7 @@ mysql> SELECT aes_encrypt('Secret', '123456789101213141516171819202122', 'iviviv ## decrypt {#decrypt} -Функция расшифровывает зашифрованный текст в обычный следующими режимами: +Функция расшифровывает зашифрованный текст и может работать в следующих режимах: - aes-128-ecb, aes-192-ecb, aes-256-ecb - aes-128-cbc, aes-192-cbc, aes-256-cbc From 5edba428658e60f9ee0be3681e17b638e8f2d254 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:34:43 +0300 Subject: [PATCH 28/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index f75e7bcc1a3..c4e0968d6f9 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -203,7 +203,7 @@ SELECT hex(aes_encrypt_mysql('aes-256-cfb128', 'Secret', '1234567891012131415161 └──────────────┘ ``` -Это совпадает с тем, что выводит MySQL с такими же вводными: +Это совпадает с результатом, возвращаемым MySQL при таких же входящих значениях: ``` sql mysql> SET block_encryption_mode='aes-256-cfb128'; From a26f2b77cb84e5d5629a706f42bd5a0c8214c694 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:35:07 +0300 Subject: [PATCH 29/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index c4e0968d6f9..92e8d62faca 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -250,7 +250,7 @@ decrypt('mode', 'ciphertext', 'key' [, iv, aad]) **Примеры** -Используется таблица из [encrypt](#encrypt). +Рассмотрим таблицу из примера для функции [encrypt](#encrypt). 
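Before looking at that table, a self-contained round trip makes the contract clear: mode, key, and IV must match the ones used for encryption (the values are illustrative):

``` sql
SELECT decrypt('aes-256-cfb128',
               encrypt('aes-256-cfb128', 'Secret', '12345678910121314151617181920212', 'iviviviviviviviv'),
               '12345678910121314151617181920212',
               'iviviviviviviviv') AS plaintext;
-- Expected result: 'Secret'.
```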
Запрос: From 7a910d38a10c92f1aae4d13e5de34a73e10e978e Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:35:12 +0300 Subject: [PATCH 30/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index 92e8d62faca..faddf314fe7 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -278,7 +278,7 @@ SELECT comment, hex(secret) FROM encryption_test; Запрос: ``` sql -SELECT comment, decrypt('aes-256-cfb128', secret, '12345678910121314151617181920212') as plaintext FROM encryption_test +SELECT comment, decrypt('aes-256-cfb128', secret, '12345678910121314151617181920212') as plaintext FROM encryption_test; ``` Результат: From 07795335cecc9352b7d4164bbd6c63599d19bda1 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:35:23 +0300 Subject: [PATCH 31/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index faddf314fe7..0f46f3c1fd5 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -293,7 +293,7 @@ SELECT comment, decrypt('aes-256-cfb128', secret, '12345678910121314151617181920 └─────────────────────────────────────┴───────────┘ ``` -Обратите внимание, что только часть данных была расшифрована, а остальное является бессмыслицей, как как `mode`, `key`, или `iv` были другими во время шифрования. +Обратите внимание, что только часть данных была расшифрована верно. Оставшаяся часть расшифрована некорректно, так как при шифровании использовались другие значения `mode`, `key`, или `iv`. ## aes_decrypt_mysql {#aes_decrypt_mysql} From 579f8a95bcaa804b4264e8047d68474af5ef3ec6 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:35:43 +0300 Subject: [PATCH 32/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index 0f46f3c1fd5..6cf5b520f23 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -299,7 +299,7 @@ SELECT comment, decrypt('aes-256-cfb128', secret, '12345678910121314151617181920 Совместима с шифрованием myqsl и может расшифровать данные, зашифрованные функцией [AES_ENCRYPT](https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-encrypt). -При одинаковых вводных расшифрованный текст будет совпадать с результатом `decrypt`. 
Однако, когда `key` или `iv` длиннее, чем должны быть, `aes_decrypt_mysql` будет работать аналогично MySQL `aes_decrypt`: свернет ключ и проигнорирует лишнюю часть `iv`. +При одинаковых входящих значениях расшифрованный текст будет совпадать с результатом, возвращаемым функцией `decrypt`. Однако если `key` или `iv` длиннее, чем должны быть, `aes_decrypt_mysql` будет работать аналогично функции `aes_decrypt` в MySQL: свернет ключ и проигнорирует лишнюю часть `iv`. Функция поддерживает расшифровку данных следующими режимами: From b82e564076203733a292d53ebcf843ad0289ace9 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:35:48 +0300 Subject: [PATCH 33/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index 6cf5b520f23..04a74fe8107 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -301,7 +301,7 @@ SELECT comment, decrypt('aes-256-cfb128', secret, '12345678910121314151617181920 При одинаковых входящих значениях расшифрованный текст будет совпадать с результатом, возвращаемым функцией `decrypt`. Однако если `key` или `iv` длиннее, чем должны быть, `aes_decrypt_mysql` будет работать аналогично функции `aes_decrypt` в MySQL: свернет ключ и проигнорирует лишнюю часть `iv`. -Функция поддерживает расшифровку данных следующими режимами: +Функция поддерживает расшифровку данных в следующих режимах: - aes-128-ecb, aes-192-ecb, aes-256-ecb - aes-128-cbc, aes-192-cbc, aes-256-cbc From c10485d21a29ab7e1ec405ef19fad35ca306185a Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:35:55 +0300 Subject: [PATCH 34/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index 04a74fe8107..3c2f9e3e682 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -348,7 +348,7 @@ mysql> SELECT aes_encrypt('Secret', '123456789101213141516171819202122', 'iviviv Запрос: ``` sql -SELECT aes_decrypt_mysql('aes-256-cfb128', unhex('24E9E4966469'), '123456789101213141516171819202122', 'iviviviviviviviv123456') AS plaintext +SELECT aes_decrypt_mysql('aes-256-cfb128', unhex('24E9E4966469'), '123456789101213141516171819202122', 'iviviviviviviviv123456') AS plaintext; ``` Результат: From 236b9cfeff06a9ac5115736041586a9ae119d761 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:36:01 +0300 Subject: [PATCH 35/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md 
index 3c2f9e3e682..5406112624f 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -329,7 +329,7 @@ aes_decrypt_mysql('mode', 'ciphertext', 'key' [, iv]) **Примеры** -Расшифруем данные, которые до этого зашифровали с помощью MySQL: +Расшифруем данные, которые до этого были зашифрованы в MySQL: ``` sql From f2c7c38c18b817bf101769d4d69e1ab78075778e Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Tue, 16 Feb 2021 22:38:20 +0300 Subject: [PATCH 36/86] Update docs/ru/sql-reference/functions/encryption-functions.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index 5406112624f..e2c5560e4f6 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -11,7 +11,7 @@ toc_title: "\u0424\u0443\u043d\u043a\u0446\u0438\u0438 \u0434\u043b\u044f \u0448 Длина инициализирующего вектора всегда 16 байт (лишнии байты игнорируются). -Обратите внимание, что до версии Clickhouse 21.1 эти функции работают медленно. +Обратите внимание, что до версии Clickhouse 21.1 эти функции работали медленно. ## encrypt {#encrypt} From 2858151d09b70b018a9626a2c4efda6d1535ec8b Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Wed, 17 Feb 2021 00:25:34 +0300 Subject: [PATCH 37/86] Update kafka.md --- docs/ru/engines/table-engines/integrations/kafka.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/ru/engines/table-engines/integrations/kafka.md b/docs/ru/engines/table-engines/integrations/kafka.md index 2b9dfcd49da..a1528edfd1d 100644 --- a/docs/ru/engines/table-engines/integrations/kafka.md +++ b/docs/ru/engines/table-engines/integrations/kafka.md @@ -47,7 +47,9 @@ SETTINGS - `kafka_row_delimiter` — символ-разделитель записей (строк), которым завершается сообщение. - `kafka_schema` — опциональный параметр, необходимый, если используется формат, требующий определения схемы. Например, [Cap’n Proto](https://capnproto.org/) требует путь к файлу со схемой и название корневого объекта `schema.capnp:Message`. - `kafka_num_consumers` — количество потребителей (consumer) на таблицу. По умолчанию: `1`. Укажите больше потребителей, если пропускная способность одного потребителя недостаточна. Общее число потребителей не должно превышать количество партиций в топике, так как на одну партицию может быть назначено не более одного потребителя. +- `kafka_max_block_size` — максимальный размер пачек (в сообщениях) для poll (по умолчанию `max_block_size`). - `kafka_skip_broken_messages` — максимальное количество некорректных сообщений в блоке. Если `kafka_skip_broken_messages = N`, то движок отбрасывает `N` сообщений Кафки, которые не получилось обработать. Одно сообщение в точности соответствует одной записи (строке). Значение по умолчанию – 0. +- `kafka_commit_every_batch` — фиксирует каждый обработанный и потребленный пакет вместо отдельной фиксации после записи целого блока (по умолчанию `0`). - `kafka_thread_per_consumer` — снабжает каждого потребителя независимым потоком (по умолчанию `0`). При включенном состоянии каждый потребитель сбрасывает данные независимо и параллельно (иначе — строки от нескольких потребителей склеиваются в один блок). 
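The two settings added above combine as in this hedged sketch (sizes, topic, and table names are placeholders):

``` sql
CREATE TABLE kafka_queue_batched
(
    key UInt64,
    value String
) ENGINE = Kafka
SETTINGS kafka_broker_list = 'localhost:9092',
         kafka_topic_list = 'topic1',
         kafka_group_name = 'group1',
         kafka_format = 'JSONEachRow',
         kafka_max_block_size = 65536,  -- poll batch size, in messages
         kafka_commit_every_batch = 1;  -- commit after each batch instead of per block
```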
Примеры From 23754e46e8a8c54ff00537546908fa629f8ece71 Mon Sep 17 00:00:00 2001 From: gyuton <40863448+gyuton@users.noreply.github.com> Date: Wed, 17 Feb 2021 01:41:47 +0300 Subject: [PATCH 38/86] Update docs/ru/engines/table-engines/integrations/kafka.md Co-authored-by: olgarev <56617294+olgarev@users.noreply.github.com> --- docs/ru/engines/table-engines/integrations/kafka.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ru/engines/table-engines/integrations/kafka.md b/docs/ru/engines/table-engines/integrations/kafka.md index a1528edfd1d..5a6971b1ae6 100644 --- a/docs/ru/engines/table-engines/integrations/kafka.md +++ b/docs/ru/engines/table-engines/integrations/kafka.md @@ -49,8 +49,8 @@ SETTINGS - `kafka_num_consumers` — количество потребителей (consumer) на таблицу. По умолчанию: `1`. Укажите больше потребителей, если пропускная способность одного потребителя недостаточна. Общее число потребителей не должно превышать количество партиций в топике, так как на одну партицию может быть назначено не более одного потребителя. - `kafka_max_block_size` — максимальный размер пачек (в сообщениях) для poll (по умолчанию `max_block_size`). - `kafka_skip_broken_messages` — максимальное количество некорректных сообщений в блоке. Если `kafka_skip_broken_messages = N`, то движок отбрасывает `N` сообщений Кафки, которые не получилось обработать. Одно сообщение в точности соответствует одной записи (строке). Значение по умолчанию – 0. -- `kafka_commit_every_batch` — фиксирует каждый обработанный и потребленный пакет вместо отдельной фиксации после записи целого блока (по умолчанию `0`). -- `kafka_thread_per_consumer` — снабжает каждого потребителя независимым потоком (по умолчанию `0`). При включенном состоянии каждый потребитель сбрасывает данные независимо и параллельно (иначе — строки от нескольких потребителей склеиваются в один блок). +- `kafka_commit_every_batch` — включает или отключает режим записи каждой принятой и обработанной пачки по отдельности вместо единой записи целого блока (по умолчанию `0`). +- `kafka_thread_per_consumer` — включает или отключает предоставление отдельного потока каждому потребителю (по умолчанию `0`). При включенном режиме каждый потребитель сбрасывает данные независимо и параллельно, при отключённом — строки с данными от нескольких потребителей собираются в один блок. 
Примеры From e5cef576e589f4307f35074cf45e8dbb08801c65 Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Wed, 17 Feb 2021 12:39:40 +0300 Subject: [PATCH 39/86] Update subqueries.xml --- tests/performance/subqueries.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/performance/subqueries.xml b/tests/performance/subqueries.xml index f1481a78c7e..0d41099841b 100644 --- a/tests/performance/subqueries.xml +++ b/tests/performance/subqueries.xml @@ -1,7 +1,7 @@ - create table tab (a UInt32, b UInt32) engine = MergeTree order by (a, b) + create table tab (a UInt32, b UInt32) engine = MergeTree order by (a, b) insert into tab values (1, 1) select a, b from tab where (a, b) in (select toUInt32(number) as x, toUInt32(sleep(0.1) + 1) from numbers_mt(16)) settings max_threads = 2, max_block_size = 4 select a, b from tab where (1, 1) = (select min(toUInt32(number + 1)) as x, min(toUInt32(sleep(0.1) + 1)) from numbers_mt(16)) settings max_threads = 2, max_block_size = 4 DROP TABLE tab - \ No newline at end of file + From a8647096ed96fb348aea73edf54b5e7bedea4284 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Feb 2021 13:27:47 +0300 Subject: [PATCH 40/86] Try fix tests. --- src/Interpreters/ActionsDAG.cpp | 20 +++++++++++++------ .../Optimizations/filterPushDown.cpp | 4 ++-- .../QueryPlan/Optimizations/optimizeTree.cpp | 8 ++++++++ 3 files changed, 24 insertions(+), 8 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index e9e9d1628a8..691905bed27 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1245,14 +1245,14 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, { struct Frame { - const Node * node; + Node * node; bool is_predicate = false; size_t next_child_to_visit = 0; size_t num_allowed_children = 0; }; std::stack stack; - std::unordered_set visited_nodes; + std::unordered_set visited_nodes; stack.push(Frame{.node = *it, .is_predicate = true}); visited_nodes.insert(*it); @@ -1290,12 +1290,12 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, else if (is_conjunction) { for (auto * child : cur.node->children) - { if (allowed_nodes.count(child)) selected_predicates.insert(child); - else - other_predicates.insert(child); - } + } + else if (cur.is_predicate) + { + other_predicates.insert(cur.node); } stack.pop(); @@ -1311,6 +1311,14 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, return nullptr; } + // std::cerr << "************* Selectecd predicates\n"; + // for (const auto * p : selected_predicates) + // std::cerr << p->result_name << std::endl; + + // std::cerr << "............. 
Other predicates\n"; + // for (const auto * p : other_predicates) + // std::cerr << p->result_name << std::endl; + auto actions = cloneEmpty(); actions->settings.project_input = false; diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index 98e923249f3..39f24a32b45 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -117,8 +117,8 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes if (keys.count(column.name) == 0) allowed_inputs.push_back(column.name); - for (const auto & name : allowed_inputs) - std::cerr << name << std::endl; + // for (const auto & name : allowed_inputs) + // std::cerr << name << std::endl; if (auto updated_steps = tryAddNewFilterStep(parent_node, nodes, allowed_inputs)) return updated_steps; diff --git a/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp b/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp index e5ccc173ed8..cc81a7f39fc 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp @@ -23,6 +23,9 @@ void optimizeTree(QueryPlan::Node & root, QueryPlan::Nodes & nodes) std::stack stack; stack.push(Frame{.node = &root}); + size_t max_optimizations_to_apply = 0; + size_t total_applied_optimizations = 0; + while (!stack.empty()) { auto & frame = stack.top(); @@ -54,8 +57,13 @@ void optimizeTree(QueryPlan::Node & root, QueryPlan::Nodes & nodes) if (!optimization.apply) continue; + if (max_optimizations_to_apply && max_optimizations_to_apply < total_applied_optimizations) + continue; + /// Try to apply optimization. auto update_depth = optimization.apply(frame.node, nodes); + if (update_depth) + ++total_applied_optimizations; max_update_depth = std::max(max_update_depth, update_depth); } From 6522bfc402260b2b4edfd4c2f0ab55a662296e63 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Feb 2021 19:54:11 +0300 Subject: [PATCH 41/86] Support for DIstinct, sorting steps. 
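The pass now also looks through Distinct and the sorting steps. A distilled example for Distinct, in the shape used by the updated tests:

``` sql
explain actions = 1 select x, y from (
    select distinct x, y from (select number % 2 as x, number % 3 as y from numbers(10))
) where y != 2
settings enable_optimize_predicate_expression = 0;
-- Expected plan fragment: "Filter column: notEquals(y, 2)"
-- placed below both Distinct steps.
```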
--- src/Interpreters/ActionsDAG.cpp | 2 +- src/Processors/QueryPlan/CreatingSetsStep.h | 2 +- src/Processors/QueryPlan/CubeStep.cpp | 5 ++ src/Processors/QueryPlan/CubeStep.h | 2 + src/Processors/QueryPlan/FillingStep.h | 2 + .../Optimizations/filterPushDown.cpp | 68 +++++++++++++++++-- 6 files changed, 74 insertions(+), 7 deletions(-) diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 691905bed27..8b6013a4365 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1389,7 +1389,7 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, for (const auto * predicate : selected_predicates) args.emplace_back(nodes_mapping[predicate]); - result_predicate = &actions->addFunction(func_builder_and, args, {}, true); + result_predicate = &actions->addFunction(func_builder_and, args, {}, true, false); } actions->index.insert(result_predicate); diff --git a/src/Processors/QueryPlan/CreatingSetsStep.h b/src/Processors/QueryPlan/CreatingSetsStep.h index ec13ab2052e..97821cb63d3 100644 --- a/src/Processors/QueryPlan/CreatingSetsStep.h +++ b/src/Processors/QueryPlan/CreatingSetsStep.h @@ -34,7 +34,7 @@ private: class CreatingSetsStep : public IQueryPlanStep { public: - CreatingSetsStep(DataStreams input_streams_); + explicit CreatingSetsStep(DataStreams input_streams_); String getName() const override { return "CreatingSets"; } diff --git a/src/Processors/QueryPlan/CubeStep.cpp b/src/Processors/QueryPlan/CubeStep.cpp index de8bb2b3d43..6a0ec33402b 100644 --- a/src/Processors/QueryPlan/CubeStep.cpp +++ b/src/Processors/QueryPlan/CubeStep.cpp @@ -43,4 +43,9 @@ void CubeStep::transformPipeline(QueryPipeline & pipeline) }); } +const Aggregator::Params & CubeStep::getParams() const +{ + return params->params; +} + } diff --git a/src/Processors/QueryPlan/CubeStep.h b/src/Processors/QueryPlan/CubeStep.h index 707f62ce7d6..f67a03dc7e2 100644 --- a/src/Processors/QueryPlan/CubeStep.h +++ b/src/Processors/QueryPlan/CubeStep.h @@ -1,6 +1,7 @@ #pragma once #include #include +#include namespace DB { @@ -18,6 +19,7 @@ public: void transformPipeline(QueryPipeline & pipeline) override; + const Aggregator::Params & getParams() const; private: AggregatingTransformParamsPtr params; }; diff --git a/src/Processors/QueryPlan/FillingStep.h b/src/Processors/QueryPlan/FillingStep.h index 85736464a6c..c8d1f74c6ca 100644 --- a/src/Processors/QueryPlan/FillingStep.h +++ b/src/Processors/QueryPlan/FillingStep.h @@ -17,6 +17,8 @@ public: void describeActions(FormatSettings & settings) const override; + const SortDescription & getSortDescription() const { return sort_description; } + private: SortDescription sort_description; }; diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index 39f24a32b45..74c4fa6f329 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -4,9 +4,15 @@ #include #include #include +#include #include #include #include +#include "Processors/QueryPlan/FinishSortingStep.h" +#include "Processors/QueryPlan/MergeSortingStep.h" +#include "Processors/QueryPlan/MergingSortedStep.h" +#include "Processors/QueryPlan/PartialSortingStep.h" +#include #include namespace DB::ErrorCodes @@ -79,6 +85,30 @@ static size_t tryAddNewFilterStep( return 3; } +static Names getAggregatinKeys(const Aggregator::Params & params) +{ + Names keys; + keys.reserve(params.keys.size()); + for (auto 
pos : params.keys) + keys.push_back(params.src_header.getByPosition(pos).name); + + return keys; +} + +// static NameSet getColumnNamesFromSortDescription(const SortDescription & sort_desc, const Block & header) +// { +// NameSet names; +// for (const auto & column : sort_desc) +// { +// if (!column.column_name.empty()) +// names.insert(column.column_name); +// else +// names.insert(header.safeGetByPosition(column.column_number).name); +// } + +// return names; +// } + size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes) { if (parent_node->children.size() != 1) @@ -96,11 +126,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes if (auto * aggregating = typeid_cast(child.get())) { const auto & params = aggregating->getParams(); - - Names keys; - keys.reserve(params.keys.size()); - for (auto pos : params.keys) - keys.push_back(params.src_header.getByPosition(pos).name); + Names keys = getAggregatinKeys(params); if (auto updated_steps = tryAddNewFilterStep(parent_node, nodes, keys)) return updated_steps; @@ -124,6 +150,38 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes return updated_steps; } + if (auto * distinct = typeid_cast(child.get())) + { + Names allowed_inputs = distinct->getOutputStream().header.getNames(); + if (auto updated_steps = tryAddNewFilterStep(parent_node, nodes, allowed_inputs)) + return updated_steps; + } + + /// TODO. + /// We can filter earlier if expression does not depend on WITH FILL columns. + /// But we cannot just push down condition, because other column may be filled with defaults. + /// + /// It is possible to filter columns before and after WITH FILL, but such change is not idempotent. + /// So, appliying this to pair (Filter -> Filling) several times will create several similar filters. + // if (auto * filling = typeid_cast(child.get())) + // { + // } + + /// Same reason for Cube + // if (auto * cube = typeid_cast(child.get())) + // { + // } + + if (typeid_cast(child.get()) + || typeid_cast(child.get()) + || typeid_cast(child.get()) + || typeid_cast(child.get())) + { + Names allowed_inputs = child->getOutputStream().header.getNames(); + if (auto updated_steps = tryAddNewFilterStep(parent_node, nodes, allowed_inputs)) + return updated_steps; + } + return 0; } From e5b9c42860cce08b0b94f7863dbeb6f38b066d83 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Feb 2021 19:54:37 +0300 Subject: [PATCH 42/86] Update test. 
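For the sorting steps, the reference below encodes this query; distilled, it reads:

``` sql
explain actions = 1 select x, y from (
    select number % 2 as x, number % 3 as y from numbers(6) order by y desc
) where x != 0 and y != 0
settings enable_optimize_predicate_expression = 0;
-- Expected plan fragment: "Filter column: and(notEquals(x, 0), notEquals(y, 0))"
-- below MergingSorted, MergeSorting and PartialSorting.
```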
--- .../01655_plan_optimizations.reference | 70 +++++++++++++++ .../0_stateless/01655_plan_optimizations.sh | 85 +++++++++++++++++-- 2 files changed, 149 insertions(+), 6 deletions(-) diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference index 1e638829c74..7bc75dc0bf6 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations.reference @@ -13,32 +13,102 @@ Limit 10 > filter should be pushed down after aggregating Aggregating Filter +0 1 +1 2 +2 3 +3 4 +4 5 +5 6 +6 7 +7 8 +8 9 +9 10 > filter should be pushed down after aggregating, column after aggregation is const COLUMN Const(UInt8) -> notEquals(y, 0) Aggregating Filter Filter +0 1 1 +1 2 1 +2 3 1 +3 4 1 +4 5 1 +5 6 1 +6 7 1 +7 8 1 +8 9 1 +9 10 1 > one condition of filter should be pushed down after aggregating, other condition is aliased Filter column ALIAS notEquals(s, 4) :: 1 -> and(notEquals(y, 0), notEquals(s, 4)) Aggregating Filter column: notEquals(y, 0) +0 1 +1 2 +2 3 +3 4 +5 6 +6 7 +7 8 +8 9 +9 10 > one condition of filter should be pushed down after aggregating, other condition is casted Filter column FUNCTION CAST(minus(s, 4) :: 1, UInt8 :: 3) -> and(notEquals(y, 0), minus(s, 4)) Aggregating Filter column: notEquals(y, 0) +0 1 +1 2 +2 3 +3 4 +5 6 +6 7 +7 8 +8 9 +9 10 > one condition of filter should be pushed down after aggregating, other two conditions are ANDed Filter column FUNCTION and(minus(s, 4) :: 2, minus(s, 8) :: 1) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4)) Aggregating Filter column: notEquals(y, 0) +0 1 +1 2 +2 3 +3 4 +5 6 +6 7 +7 8 +9 10 > two conditions of filter should be pushed down after aggregating and ANDed, one condition is aliased Filter column ALIAS notEquals(s, 8) :: 1 -> and(notEquals(y, 0), notEquals(s, 8), minus(y, 4)) Aggregating Filter column: and(minus(y, 4), notEquals(y, 0)) +0 1 +1 2 +2 3 +4 5 +5 6 +6 7 +7 8 +9 10 > filter is split, one part is filtered before ARRAY JOIN Filter column: and(notEquals(y, 2), notEquals(x, 0)) ARRAY JOIN x Filter column: notEquals(y, 2) +1 3 +> filter is pushed down before Distinct +Distinct +Distinct +Filter column: notEquals(y, 2) +0 0 +0 1 +1 0 +1 1 +> filter is pushed down before sorting steps +MergingSorted +MergeSorting +PartialSorting +Filter column: and(notEquals(x, 0), notEquals(y, 0)) +1 2 +1 1 diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index ccd331df45e..f770643fc41 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -16,49 +16,122 @@ $CLICKHOUSE_CLIENT -q " select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter" +$CLICKHOUSE_CLIENT -q " + select s, y from (select sum(x) as s, y from ( + select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 order by s, y + settings enable_optimize_predicate_expression=0" echo "> filter should be pushed down after aggregating, column after aggregation is const" $CLICKHOUSE_CLIENT -q " - explain actions = 1 select *, y != 0 from (select sum(x), y from ( + explain actions = 1 select s, y, y != 0 from (select sum(x) as s, y from ( select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 settings enable_optimize_predicate_expression=0" | grep -o 
"Aggregating\|Filter\|COLUMN Const(UInt8) -> notEquals(y, 0)" +$CLICKHOUSE_CLIENT -q " + select s, y, y != 0 from (select sum(x) as s, y from ( + select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 order by s, y, y != 0 + settings enable_optimize_predicate_expression=0" echo "> one condition of filter should be pushed down after aggregating, other condition is aliased" $CLICKHOUSE_CLIENT -q " - explain actions = 1 select * from ( + explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s != 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|ALIAS notEquals(s, 4) :: 1 -> and(notEquals(y, 0), notEquals(s, 4))" +$CLICKHOUSE_CLIENT -q " + select s, y from ( + select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 and s != 4 order by s, y + settings enable_optimize_predicate_expression=0" echo "> one condition of filter should be pushed down after aggregating, other condition is casted" $CLICKHOUSE_CLIENT -q " - explain actions = 1 select * from ( + explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|FUNCTION CAST(minus(s, 4) :: 1, UInt8 :: 3) -> and(notEquals(y, 0), minus(s, 4))" +$CLICKHOUSE_CLIENT -q " + select s, y from ( + select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 and s - 4 order by s, y + settings enable_optimize_predicate_expression=0" echo "> one condition of filter should be pushed down after aggregating, other two conditions are ANDed" $CLICKHOUSE_CLIENT -q " - explain actions = 1 select * from ( + explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 8 and s - 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|FUNCTION and(minus(s, 4) :: 2, minus(s, 8) :: 1) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4))" +$CLICKHOUSE_CLIENT -q " + select s, y from ( + select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 and s - 8 and s - 4 order by s, y + settings enable_optimize_predicate_expression=0" echo "> two conditions of filter should be pushed down after aggregating and ANDed, one condition is aliased" $CLICKHOUSE_CLIENT -q " - explain optimize = 1, actions = 1 select * from ( + explain actions = 1 select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s != 8 and y - 4 settings enable_optimize_predicate_expression=0" | grep -o "Aggregating\|Filter column\|Filter column: and(minus(y, 4), notEquals(y, 0))\|ALIAS notEquals(s, 8) :: 1 -> and(notEquals(y, 0), notEquals(s, 8), minus(y, 4))" +$CLICKHOUSE_CLIENT -q " + select s, y from ( + select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y + ) where y != 0 and s != 8 and y - 4 order by s, y + settings enable_optimize_predicate_expression=0" echo "> filter is split, one part is filtered before ARRAY JOIN" $CLICKHOUSE_CLIENT -q " explain actions = 1 select x, y from ( 
select range(number) as x, number + 1 as y from numbers(3)
    ) array join x where y != 2 and x != 0" |
-    grep -o "Filter column: and(notEquals(y, 2), notEquals(x, 0))\|ARRAY JOIN x\|Filter column: notEquals(y, 2)"
\ No newline at end of file
+    grep -o "Filter column: and(notEquals(y, 2), notEquals(x, 0))\|ARRAY JOIN x\|Filter column: notEquals(y, 2)"
+$CLICKHOUSE_CLIENT -q "
+    select x, y from (
+        select range(number) as x, number + 1 as y from numbers(3)
+    ) array join x where y != 2 and x != 0 order by x, y"
+
+# echo "> filter is split, one part is filtered before Aggregating and Cube"
+# $CLICKHOUSE_CLIENT -q "
+#     explain actions = 1 select * from (
+#         select sum(x) as s, x, y from (select number as x, number + 1 as y from numbers(10)) group by x, y with cube
+#     ) where y != 0 and s != 4
+#     settings enable_optimize_predicate_expression=0" |
+#     grep -o "Cube\|Aggregating\|Filter column: notEquals(y, 0)"
+# $CLICKHOUSE_CLIENT -q "
+#     select s, x, y from (
+#         select sum(x) as s, x, y from (select number as x, number + 1 as y from numbers(10)) group by x, y with cube
+#     ) where y != 0 and s != 4 order by s, x, y
+#     settings enable_optimize_predicate_expression=0"
+
+echo "> filter is pushed down before Distinct"
+$CLICKHOUSE_CLIENT -q "
+    explain actions = 1 select x, y from (
+        select distinct x, y from (select number % 2 as x, number % 3 as y from numbers(10))
+    ) where y != 2
+    settings enable_optimize_predicate_expression=0" |
+    grep -o "Distinct\|Filter column: notEquals(y, 2)"
+$CLICKHOUSE_CLIENT -q "
+    select x, y from (
+        select distinct x, y from (select number % 2 as x, number % 3 as y from numbers(10))
+    ) where y != 2 order by x, y
+    settings enable_optimize_predicate_expression=0"
+
+echo "> filter is pushed down before sorting steps"
+$CLICKHOUSE_CLIENT -q "
+    explain actions = 1 select x, y from (
+        select number % 2 as x, number % 3 as y from numbers(6) order by y desc
+    ) where x != 0 and y != 0
+    settings enable_optimize_predicate_expression = 0" |
+    grep -o "MergingSorted\|MergeSorting\|PartialSorting\|Filter column: and(notEquals(x, 0), notEquals(y, 0))"
+$CLICKHOUSE_CLIENT -q "
+    select x, y from (
+        select number % 2 as x, number % 3 as y from numbers(6) order by y desc
+    ) where x != 0 and y != 0
+    settings enable_optimize_predicate_expression = 0"

From f6278ed429dc2231d68aa5179e63b3bb635d081a Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov 
Date: Wed, 17 Feb 2021 19:56:17 +0300
Subject: [PATCH 43/86] Support for Distinct, sorting steps.

---
 src/Processors/QueryPlan/Optimizations/filterPushDown.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
index 74c4fa6f329..02e1914504d 100644
--- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
+++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
@@ -60,12 +60,12 @@ static size_t tryAddNewFilterStep(
             "Filter column {} was removed from ActionsDAG but it is needed in result. 
DAG:\n{}", filter_column_name, expression->dumpDAG()); - std::cerr << "replacing to expr because filter " << filter_column_name << " was removed\n"; + // std::cerr << "replacing to expr because filter " << filter_column_name << " was removed\n"; parent = std::make_unique(child->getOutputStream(), expression); } else if ((*it)->column && isColumnConst(*(*it)->column)) { - std::cerr << "replacing to expr because filter is const\n"; + // std::cerr << "replacing to expr because filter is const\n"; parent = std::make_unique(child->getOutputStream(), expression); } From 56a5d1dafaa7cb08719277886000349490c47eda Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Feb 2021 21:48:26 +0300 Subject: [PATCH 44/86] Skip stateful functions --- src/Processors/QueryPlan/Optimizations/filterPushDown.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index 02e1914504d..456faeb72c2 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -123,6 +123,9 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes if (!filter) return 0; + if (filter->getExpression()->hasStatefulFunctions()) + return 0; + if (auto * aggregating = typeid_cast(child.get())) { const auto & params = aggregating->getParams(); From ec4dafaa5f914e99acc8cede5b60e85458eab134 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Wed, 17 Feb 2021 22:19:39 +0300 Subject: [PATCH 45/86] Fix build. --- src/CMakeLists.txt | 4 ++-- src/Processors/ya.make | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 86db7742c97..7a7f160dd81 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -100,8 +100,8 @@ endif() list (APPEND clickhouse_common_io_sources ${CONFIG_BUILD}) list (APPEND clickhouse_common_io_headers ${CONFIG_VERSION} ${CONFIG_COMMON}) -list (APPEND dbms_sources Functions/IFunction.cpp Functions/FunctionFactory.cpp Functions/FunctionHelpers.cpp Functions/extractTimeZoneFromFunctionArguments.cpp Functions/replicate.cpp) -list (APPEND dbms_headers Functions/IFunctionImpl.h Functions/FunctionFactory.h Functions/FunctionHelpers.h Functions/extractTimeZoneFromFunctionArguments.h Functions/replicate.h) +list (APPEND dbms_sources Functions/IFunction.cpp Functions/FunctionFactory.cpp Functions/FunctionHelpers.cpp Functions/extractTimeZoneFromFunctionArguments.cpp Functions/replicate.cpp Functions/FunctionsLogical.cpp) +list (APPEND dbms_headers Functions/IFunctionImpl.h Functions/FunctionFactory.h Functions/FunctionHelpers.h Functions/extractTimeZoneFromFunctionArguments.h Functions/replicate.h Functions/FunctionsLogical.h) list (APPEND dbms_sources AggregateFunctions/AggregateFunctionFactory.cpp diff --git a/src/Processors/ya.make b/src/Processors/ya.make index 34ff61d03c5..71ddd07f6a2 100644 --- a/src/Processors/ya.make +++ b/src/Processors/ya.make @@ -114,6 +114,7 @@ SRCS( QueryPlan/MergingFinal.cpp QueryPlan/MergingSortedStep.cpp QueryPlan/OffsetStep.cpp + QueryPlan/Optimizations/filterPushDown.cpp QueryPlan/Optimizations/liftUpArrayJoin.cpp QueryPlan/Optimizations/limitPushDown.cpp QueryPlan/Optimizations/mergeExpressions.cpp From 7231a97085b34d0ee6fa14a23a085a0bd60cc01f Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 18 Feb 2021 14:15:16 +0300 Subject: [PATCH 46/86] Remove MaterializingStep --- .../QueryPlan/MaterializingStep.cpp | 
39 ------------------- src/Processors/QueryPlan/MaterializingStep.h | 18 --------- src/Processors/ya.make | 1 - src/Storages/StorageView.cpp | 6 ++- 4 files changed, 4 insertions(+), 60 deletions(-) delete mode 100644 src/Processors/QueryPlan/MaterializingStep.cpp delete mode 100644 src/Processors/QueryPlan/MaterializingStep.h diff --git a/src/Processors/QueryPlan/MaterializingStep.cpp b/src/Processors/QueryPlan/MaterializingStep.cpp deleted file mode 100644 index f5313369020..00000000000 --- a/src/Processors/QueryPlan/MaterializingStep.cpp +++ /dev/null @@ -1,39 +0,0 @@ -#include -#include -#include - -#include - -namespace DB -{ - -static ITransformingStep::Traits getTraits() -{ - return ITransformingStep::Traits - { - { - .preserves_distinct_columns = true, - .returns_single_stream = false, - .preserves_number_of_streams = true, - .preserves_sorting = true, - }, - { - .preserves_number_of_rows = true, - } - }; -} - -MaterializingStep::MaterializingStep(const DataStream & input_stream_) - : ITransformingStep(input_stream_, materializeBlock(input_stream_.header), getTraits()) -{ -} - -void MaterializingStep::transformPipeline(QueryPipeline & pipeline) -{ - pipeline.addSimpleTransform([&](const Block & header) - { - return std::make_shared(header); - }); -} - -} diff --git a/src/Processors/QueryPlan/MaterializingStep.h b/src/Processors/QueryPlan/MaterializingStep.h deleted file mode 100644 index 72b3133dfe4..00000000000 --- a/src/Processors/QueryPlan/MaterializingStep.h +++ /dev/null @@ -1,18 +0,0 @@ -#pragma once -#include - -namespace DB -{ - -/// Materialize constants. See MaterializingTransform. -class MaterializingStep : public ITransformingStep -{ -public: - explicit MaterializingStep(const DataStream & input_stream_); - - String getName() const override { return "Materializing"; } - - void transformPipeline(QueryPipeline & pipeline) override; -}; - -} diff --git a/src/Processors/ya.make b/src/Processors/ya.make index 71ddd07f6a2..a44272cf9c0 100644 --- a/src/Processors/ya.make +++ b/src/Processors/ya.make @@ -108,7 +108,6 @@ SRCS( QueryPlan/ITransformingStep.cpp QueryPlan/LimitByStep.cpp QueryPlan/LimitStep.cpp - QueryPlan/MaterializingStep.cpp QueryPlan/MergeSortingStep.cpp QueryPlan/MergingAggregatedStep.cpp QueryPlan/MergingFinal.cpp diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 38349ef8df9..1ee5ab3d0ca 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -15,7 +15,6 @@ #include #include -#include #include #include @@ -87,7 +86,10 @@ void StorageView::read( /// It's expected that the columns read from storage are not constant. /// Because method 'getSampleBlockForColumns' is used to obtain a structure of result in InterpreterSelectQuery. - auto materializing = std::make_unique(query_plan.getCurrentDataStream()); + auto materializing_actions = std::make_shared(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName()); + materializing_actions->addMaterializingOutputActions(); + + auto materializing = std::make_unique(query_plan.getCurrentDataStream(), std::move(materializing_actions)); materializing->setStepDescription("Materialize constants after VIEW subquery"); query_plan.addStep(std::move(materializing)); From 0449546bca7319132a99693b6634ca8684aa41f3 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 18 Feb 2021 16:13:09 +0300 Subject: [PATCH 47/86] Support TotalsHaving. Update test. 
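
Note: this pushdown changes the TOTALS row. For the query used in the new
test below (the same one quoted in the code comment),

    select * from (
        select y, sum(x) from (select number as x, number % 4 as y from numbers(10))
        group by y with totals
    ) where y != 2

the totals row becomes (0, 37) instead of (0, 45), because totals are now
computed over the filtered rows only; enable_optimize_predicate_expression = 1
already behaves this way.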
---
 .../Optimizations/filterPushDown.cpp          | 38 ++++++++++++++++---
 src/Processors/QueryPlan/TotalsHavingStep.h   |  2 +
 .../01655_plan_optimizations.reference        |  9 +++++
 .../0_stateless/01655_plan_optimizations.sh   | 12 ++++++
 4 files changed, 56 insertions(+), 5 deletions(-)

diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
index 456faeb72c2..4d01235e2fc 100644
--- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
+++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
@@ -5,14 +5,17 @@
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
 #include 
 #include 
 #include 
-#include "Processors/QueryPlan/FinishSortingStep.h"
-#include "Processors/QueryPlan/MergeSortingStep.h"
-#include "Processors/QueryPlan/MergingSortedStep.h"
-#include "Processors/QueryPlan/PartialSortingStep.h"
-#include 
+#include 

 #include 

namespace DB::ErrorCodes
@@ -135,6 +138,31 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes
         return updated_steps;
     }
 
+    if (auto * totals_having = typeid_cast(child.get()))
+    {
+        /// If totals step has HAVING expression, skip it for now.
+        /// TODO:
+        /// We can merge HAVING expression with current filter.
+        /// Alos, we can push down part of HAVING which depend only on aggregation keys.
+        if (totals_having->getActions())
+            return 0;
+
+        Names keys;
+        const auto & header = totals_having->getInputStreams().front().header;
+        for (const auto & column : header)
+            if (typeid_cast(column.type.get()) == nullptr)
+                keys.push_back(column.name);
+
+        /// NOTE: this optimization changes TOTALS value. Example:
+        ///   `select * from (select y, sum(x) from (
+        ///        select number as x, number % 4 as y from numbers(10)
+        ///    ) group by y with totals) where y != 2`
+        ///   Optimization will change the totals row `y, sum(x)` from `(0, 45)` to `(0, 37)`.
+        /// It is expected to be ok, because AST optimization `enable_optimize_predicate_expression = 1` also breaks it. 
+ if (auto updated_steps = tryAddNewFilterStep(parent_node, nodes, keys)) + return updated_steps; + } + if (auto * array_join = typeid_cast(child.get())) { const auto & array_join_actions = array_join->arrayJoin(); diff --git a/src/Processors/QueryPlan/TotalsHavingStep.h b/src/Processors/QueryPlan/TotalsHavingStep.h index 7c1638013e5..57d5cf7aad5 100644 --- a/src/Processors/QueryPlan/TotalsHavingStep.h +++ b/src/Processors/QueryPlan/TotalsHavingStep.h @@ -28,6 +28,8 @@ public: void describeActions(FormatSettings & settings) const override; + const ActionsDAGPtr & getActions() const { return actions_dag; } + private: bool overflow_row; ActionsDAGPtr actions_dag; diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference index 7bc75dc0bf6..fa83c098412 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations.reference @@ -112,3 +112,12 @@ PartialSorting Filter column: and(notEquals(x, 0), notEquals(y, 0)) 1 2 1 1 +> filter is pushed down before TOTALS HAVING and aggregating +TotalsHaving +Aggregating +Filter column: notEquals(y, 2) +0 12 +1 15 +3 10 + +0 37 diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index f770643fc41..e47b03661e4 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -135,3 +135,15 @@ $CLICKHOUSE_CLIENT -q " select number % 2 as x, number % 3 as y from numbers(6) order by y desc ) where x != 0 and y != 0 settings enable_optimize_predicate_expression = 0" + +echo "> filter is pushed down before TOTALS HAVING and aggregating" +$CLICKHOUSE_CLIENT -q " + explain actions = 1 select * from ( + select y, sum(x) from (select number as x, number % 4 as y from numbers(10)) group by y with totals + ) where y != 2 + settings enable_optimize_predicate_expression=0" | + grep -o "TotalsHaving\|Aggregating\|Filter column: notEquals(y, 2)" +$CLICKHOUSE_CLIENT -q " + select * from ( + select y, sum(x) from (select number as x, number % 4 as y from numbers(10)) group by y with totals + ) where y != 2" \ No newline at end of file From f4b0b1110cb77c6901243cc1120615d9735a2da3 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 18 Feb 2021 23:53:40 +0300 Subject: [PATCH 48/86] Fix test. 
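
Note: the reference change below (totals row `0 2` -> `0 1`) appears to be the
TOTALS effect of the previous commit: with the filter pushed below
TotalsHaving, totals are computed over filtered rows only. A hypothetical
query of the affected shape (not the actual 01272 test body, which is not
shown here):

    select x, c from (
        select number % 2 as x, count() as c from numbers(4)
        group by x with totals
    ) where x = 1;

Before the change the totals row would count all four source rows; after it,
only the rows that pass the filter.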
---
 tests/queries/0_stateless/01272_totals_and_filter_bug.reference | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/queries/0_stateless/01272_totals_and_filter_bug.reference b/tests/queries/0_stateless/01272_totals_and_filter_bug.reference
index 0db840561fd..5b407738cb8 100644
--- a/tests/queries/0_stateless/01272_totals_and_filter_bug.reference
+++ b/tests/queries/0_stateless/01272_totals_and_filter_bug.reference
@@ -1,6 +1,6 @@
 1 1
 
-0 2
+0 1
 -
 test1
 10 0

From 6e9bf682179229b4ae3d7f97ec3ab5c83229704b Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov 
Date: Thu, 18 Feb 2021 23:54:42 +0300
Subject: [PATCH 49/86] Fix typo

---
 src/Processors/QueryPlan/Optimizations/filterPushDown.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
index 4d01235e2fc..1b84fee4857 100644
--- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
+++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
@@ -143,7 +143,7 @@ size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes
         /// If totals step has HAVING expression, skip it for now.
         /// TODO:
         /// We can merge HAVING expression with current filter.
-        /// Alos, we can push down part of HAVING which depend only on aggregation keys.
+        /// Also, we can push down part of HAVING which depend only on aggregation keys.

From f0396661b3cf74b98ea2b562d96edb18949e9df8 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov 
Date: Sat, 20 Feb 2021 19:13:36 +0300
Subject: [PATCH 50/86] Refactor ActionsDAG::splitActionsForFilter

---
 src/Interpreters/ActionsDAG.cpp | 411 ++++++++++++++++++--------------
 src/Interpreters/ActionsDAG.h   |   2 +
 2 files changed, 228 insertions(+), 185 deletions(-)

diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp
index 8b6013a4365..b3f86313a1c 100644
--- a/src/Interpreters/ActionsDAG.cpp
+++ b/src/Interpreters/ActionsDAG.cpp
@@ -1212,112 +1212,120 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsForFilter(const std::string & co
     return split(split_nodes);
 }
 
-ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, bool can_remove_filter, const Names & available_inputs)
+namespace
 {
-    std::unordered_map> inputs_map;
-    for (const auto & input : inputs)
-        inputs_map[input->result_name].emplace_back(input);
 
-    std::unordered_set allowed_nodes;
-    for (const auto & name : available_inputs)
+struct ConjinctionNodes
+{
+    std::unordered_set allowed;
+    std::unordered_set rejected;
+};
+
+/// Take a node which result is predicate.
+/// Assuming predicate is a conjunction (probably, trivial).
+/// Find separate conjunction nodes. Split nodes into allowed and rejected sets.
+/// Allowed predicate is a predicate which can be calculated using only nodes from allowed_nodes set. 
+ConjinctionNodes getConjinctionNodes(ActionsDAG::Node * predicate, std::unordered_set allowed_nodes) +{ + ConjinctionNodes conjunction; + + struct Frame { - auto & inputs_list = inputs_map[name]; - if (inputs_list.empty()) - continue; + ActionsDAG::Node * node; + bool is_predicate = false; + size_t next_child_to_visit = 0; + size_t num_allowed_children = 0; + }; - allowed_nodes.emplace(inputs_list.front()); - inputs_list.pop_front(); - } - - auto it = index.begin(); - for (; it != index.end(); ++it) - if ((*it)->result_name == filter_name) - break; - - if (it == index.end()) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Index for ActionsDAG does not contain filter column name {}. DAG:\n{}", - filter_name, dumpDAG()); - - std::unordered_set selected_predicates; - std::unordered_set other_predicates; + std::stack stack; + std::unordered_set visited_nodes; + stack.push(Frame{.node = predicate, .is_predicate = true}); + visited_nodes.insert(predicate); + while (!stack.empty()) { - struct Frame + auto & cur = stack.top(); + bool is_conjunction = cur.is_predicate + && cur.node->type == ActionsDAG::ActionType::FUNCTION + && cur.node->function_base->getName() == "and"; + + /// At first, visit all children. + while (cur.next_child_to_visit < cur.node->children.size()) { - Node * node; - bool is_predicate = false; - size_t next_child_to_visit = 0; - size_t num_allowed_children = 0; - }; + auto * child = cur.node->children[cur.next_child_to_visit]; - std::stack stack; - std::unordered_set visited_nodes; - - stack.push(Frame{.node = *it, .is_predicate = true}); - visited_nodes.insert(*it); - while (!stack.empty()) - { - auto & cur = stack.top(); - bool is_conjunction = cur.is_predicate - && cur.node->type == ActionType::FUNCTION - && cur.node->function_base->getName() == "and"; - - /// At first, visit all children. 
- while (cur.next_child_to_visit < cur.node->children.size()) + if (visited_nodes.count(child) == 0) { - auto * child = cur.node->children[cur.next_child_to_visit]; - - if (visited_nodes.count(child) == 0) - { - visited_nodes.insert(child); - stack.push({.node = child, .is_predicate = is_conjunction}); - break; - } - - if (allowed_nodes.contains(child)) - ++cur.num_allowed_children; - ++cur.next_child_to_visit; + visited_nodes.insert(child); + stack.push({.node = child, .is_predicate = is_conjunction}); + break; } - if (cur.next_child_to_visit == cur.node->children.size()) - { - if (cur.num_allowed_children == cur.node->children.size()) - { - if (cur.node->type != ActionType::ARRAY_JOIN && cur.node->type != ActionType::INPUT) - allowed_nodes.emplace(cur.node); - } - else if (is_conjunction) - { - for (auto * child : cur.node->children) - if (allowed_nodes.count(child)) - selected_predicates.insert(child); - } - else if (cur.is_predicate) - { - other_predicates.insert(cur.node); - } + if (allowed_nodes.contains(child)) + ++cur.num_allowed_children; + ++cur.next_child_to_visit; + } - stack.pop(); + if (cur.next_child_to_visit == cur.node->children.size()) + { + if (cur.num_allowed_children == cur.node->children.size()) + { + if (cur.node->type != ActionsDAG::ActionType::ARRAY_JOIN && cur.node->type != ActionsDAG::ActionType::INPUT) + allowed_nodes.emplace(cur.node); } + else if (is_conjunction) + { + for (auto * child : cur.node->children) + if (allowed_nodes.count(child)) + conjunction.allowed.insert(child); + } + else if (cur.is_predicate) + { + conjunction.rejected.insert(cur.node); + } + + stack.pop(); } } - if (selected_predicates.empty()) + if (conjunction.allowed.empty()) { - if (allowed_nodes.count(*it)) - selected_predicates.insert(*it); - else - return nullptr; + if (allowed_nodes.count(predicate)) + conjunction.allowed.insert(predicate); } - // std::cerr << "************* Selectecd predicates\n"; - // for (const auto * p : selected_predicates) - // std::cerr << p->result_name << std::endl; + return conjunction; +} - // std::cerr << "............. Other predicates\n"; - // for (const auto * p : other_predicates) - // std::cerr << p->result_name << std::endl; +ColumnsWithTypeAndName prepareFunctionArguments(const std::vector nodes) +{ + ColumnsWithTypeAndName arguments; + arguments.reserve(nodes.size()); + + for (const auto * child : nodes) + { + ColumnWithTypeAndName argument; + argument.column = child->column; + argument.type = child->result_type; + argument.name = child->result_name; + + arguments.emplace_back(std::move(argument)); + } + + return arguments; +} + +} + +/// Create actions which calculate conjunction of selected nodes. +/// Assume conjunction nodes are predicates (and may be used as arguments of function AND). +/// +/// Result actions add single column with conjunction result (it is always last in index). +/// No other columns are added or removed. +ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(std::unordered_set conjunction) +{ + if (conjunction.empty()) + return nullptr; auto actions = cloneEmpty(); actions->settings.project_input = false; @@ -1327,82 +1335,128 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, std::make_unique( std::make_shared())); - std::unordered_map nodes_mapping; + std::unordered_map nodes_mapping; + struct Frame { - struct Frame + const ActionsDAG::Node * node; + size_t next_child_to_visit = 0; + }; + + std::stack stack; + + /// DFS. Clone actions. 
+ for (const auto * predicate : conjunction) + { + if (nodes_mapping.count(predicate)) + continue; + + stack.push({.node = predicate}); + while (!stack.empty()) { - const Node * node; - size_t next_child_to_visit = 0; - }; - - std::stack stack; - - for (const auto * predicate : selected_predicates) - { - if (nodes_mapping.count(predicate)) - continue; - - stack.push({.node = predicate}); - while (!stack.empty()) + auto & cur = stack.top(); + /// At first, visit all children. + while (cur.next_child_to_visit < cur.node->children.size()) { - auto & cur = stack.top(); - /// At first, visit all children. - while (cur.next_child_to_visit < cur.node->children.size()) + auto * child = cur.node->children[cur.next_child_to_visit]; + + if (nodes_mapping.count(child) == 0) { - auto * child = cur.node->children[cur.next_child_to_visit]; - - if (nodes_mapping.count(child) == 0) - { - stack.push({.node = child}); - break; - } - - ++cur.next_child_to_visit; + stack.push({.node = child}); + break; } - if (cur.next_child_to_visit == cur.node->children.size()) + ++cur.next_child_to_visit; + } + + if (cur.next_child_to_visit == cur.node->children.size()) + { + auto & node = actions->nodes.emplace_back(*cur.node); + nodes_mapping[cur.node] = &node; + + for (auto & child : node.children) + child = nodes_mapping[child]; + + if (node.type == ActionType::INPUT) { - auto & node = actions->nodes.emplace_back(*cur.node); - nodes_mapping[cur.node] = &node; - - for (auto & child : node.children) - child = nodes_mapping[child]; - - if (node.type == ActionType::INPUT) - { - actions->inputs.emplace_back(&node); - actions->index.insert(&node); - } - - stack.pop(); + actions->inputs.emplace_back(&node); + actions->index.insert(&node); } + + stack.pop(); } } - - Node * result_predicate = nodes_mapping[*selected_predicates.begin()]; - - if (selected_predicates.size() > 1) - { - std::vector args; - args.reserve(selected_predicates.size()); - for (const auto * predicate : selected_predicates) - args.emplace_back(nodes_mapping[predicate]); - - result_predicate = &actions->addFunction(func_builder_and, args, {}, true, false); - } - - actions->index.insert(result_predicate); } - if (selected_predicates.count(*it)) + Node * result_predicate = nodes_mapping[*conjunction.begin()]; + + if (conjunction.size() > 1) + { + std::vector args; + args.reserve(conjunction.size()); + for (const auto * predicate : conjunction) + args.emplace_back(nodes_mapping[predicate]); + + result_predicate = &actions->addFunction(func_builder_and, args, {}, true, false); + } + + actions->index.insert(result_predicate); + return actions; +} + +ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, bool can_remove_filter, const Names & available_inputs) +{ + Node * predicate; + + { + auto it = index.begin(); + for (; it != index.end(); ++it) + if ((*it)->result_name == filter_name) + break; + + if (it == index.end()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Index for ActionsDAG does not contain filter column name {}. DAG:\n{}", + filter_name, dumpDAG()); + + predicate = *it; + } + + std::unordered_set allowed_nodes; + + /// Get input nodes from available_inputs names. 
+    {
+        std::unordered_map> inputs_map;
+        for (const auto & input : inputs)
+            inputs_map[input->result_name].emplace_back(input);
+
+        for (const auto & name : available_inputs)
+        {
+            auto & inputs_list = inputs_map[name];
+            if (inputs_list.empty())
+                continue;
+
+            allowed_nodes.emplace(inputs_list.front());
+            inputs_list.pop_front();
+        }
+    }
+
+    auto conjunction = getConjinctionNodes(predicate, allowed_nodes);
+    auto actions = cloneActionsForConjunction(conjunction.allowed);
+    if (!actions)
+        return nullptr;
+
+    /// Now, when actions are created, update current DAG.
+
+    if (conjunction.allowed.count(predicate))
     {
         /// The whole predicate was split.
         if (can_remove_filter)
         {
+            /// If filter column is not needed, remove it from index.
             for (auto i = index.begin(); i != index.end(); ++i)
             {
-                if (*i == *it)
+                if (*i == predicate)
                 {
                     index.remove(i);
                     break;
         }
         else
         {
+            /// Replace predicate result to constant 1.
             Node node;
             node.type = ActionType::COLUMN;
-            node.result_name = std::move((*it)->result_name);
-            node.result_type = std::move((*it)->result_type);
+            node.result_name = std::move(predicate->result_name);
+            node.result_type = std::move(predicate->result_type);
             node.column = node.result_type->createColumnConst(0, 1);
-            *(*it) = std::move(node);
+            *predicate = std::move(node);
         }
 
         removeUnusedActions(false);
     }
-    else if ((*it)->type == ActionType::FUNCTION && (*it)->function_base->getName() == "and")
+    else
     {
-        std::vector new_children(other_predicates.begin(), other_predicates.end());
+        /// Predicate is conjunction, where both allowed and rejected sets are not empty.
+        /// Replace this node to conjunction of rejected predicates.
+
+        std::vector new_children(conjunction.rejected.begin(), conjunction.rejected.end());
 
         if (new_children.size() == 1)
        {
-            if (new_children.front()->result_type->equals(*((*it)->result_type)))
+            /// Rejected set has only one predicate.
+            if (new_children.front()->result_type->equals(*predicate->result_type))
             {
+                /// If its type is the same, just add an alias.
                 Node node;
                 node.type = ActionType::ALIAS;
-                node.result_name = (*it)->result_name;
-                node.result_type = (*it)->result_type;
+                node.result_name = predicate->result_name;
+                node.result_type = predicate->result_type;
                 node.children.swap(new_children);
-                *(*it) = std::move(node);
+                *predicate = std::move(node);
             }
             else
             {
+                /// If type is different, cast column.
+                /// This case is possible, because AND can use any numeric type as an argument. 
Node node; node.type = ActionType::COLUMN; - node.result_name = (*it)->result_type->getName(); + node.result_name = predicate->result_type->getName(); node.column = DataTypeString().createColumnConst(0, node.result_name); node.result_type = std::make_shared(); auto * right_arg = &nodes.emplace_back(std::move(node)); auto * left_arg = new_children.front(); - - (*it)->children = {left_arg, right_arg}; - ColumnsWithTypeAndName arguments; - arguments.reserve((*it)->children.size()); - - for (const auto * child : (*it)->children) - { - ColumnWithTypeAndName argument; - argument.column = child->column; - argument.type = child->result_type; - argument.name = child->result_name; - - arguments.emplace_back(std::move(argument)); - } + predicate->children = {left_arg, right_arg}; + auto arguments = prepareFunctionArguments(predicate->children); FunctionOverloadResolverPtr func_builder_cast = std::make_shared( CastOverloadResolver::createImpl(false)); - (*it)->function_builder = func_builder_cast; - (*it)->function_base = (*it)->function_builder->build(arguments); - (*it)->function = (*it)->function_base->prepare(arguments); + predicate->function_builder = func_builder_cast; + predicate->function_base = predicate->function_builder->build(arguments); + predicate->function = predicate->function_base->prepare(arguments); } } else { - (*it)->children.swap(new_children); - ColumnsWithTypeAndName arguments; - arguments.reserve((*it)->children.size()); + /// Predicate is function AND, which still have more then one argument. + /// Just update children and rebuild it. + predicate->children.swap(new_children); + auto arguments = prepareFunctionArguments(predicate->children); - for (const auto * child : (*it)->children) - { - ColumnWithTypeAndName argument; - argument.column = child->column; - argument.type = child->result_type; - argument.name = child->result_name; - - arguments.emplace_back(std::move(argument)); - } - - (*it)->function_base = (*it)->function_builder->build(arguments); - (*it)->function = (*it)->function_base->prepare(arguments); + predicate->function_base = predicate->function_builder->build(arguments); + predicate->function = predicate->function_base->prepare(arguments); } removeUnusedActions(false); diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index bd1dcd347df..87cf03f6edd 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -313,6 +313,8 @@ private: void addAliases(const NamesWithAliases & aliases, std::vector & result_nodes); void compileFunctions(); + + ActionsDAGPtr cloneActionsForConjunction(std::unordered_set conjunction); }; From 2ae0b47edbf1b01d45461e64c1c8df59ed2a7361 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Sat, 20 Feb 2021 19:25:47 +0300 Subject: [PATCH 51/86] Refactor tryPushDownFilter optimization. 
---
 .../Optimizations/filterPushDown.cpp | 25 +++++++++++++------
 1 file changed, 18 insertions(+), 7 deletions(-)

diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
index 1b84fee4857..01e38e81092 100644
--- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
+++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp
@@ -56,19 +56,30 @@ static size_t tryAddNewFilterStep(
         if ((*it)->result_name == filter_column_name)
             break;
 
+    const bool found_filter_column = it != expression->getIndex().end();
+
+    if (!found_filter_column && removes_filter)
+        throw Exception(ErrorCodes::LOGICAL_ERROR,
+            "Filter column {} was removed from ActionsDAG but it is needed in result. DAG:\n{}",
+            filter_column_name, expression->dumpDAG());
+
+    const bool filter_is_constant = found_filter_column && (*it)->column && isColumnConst(*(*it)->column);
+
+    if (!found_filter_column || filter_is_constant)
+        /// This means that all predicates of filter were pushed down.
+        /// Replace current actions to expression, as we don't need to filter anything.
+        parent = std::make_unique(child->getOutputStream(), expression);
+
     if (it == expression->getIndex().end())
     {
-        if (!removes_filter)
-            throw Exception(ErrorCodes::LOGICAL_ERROR,
-                "Filter column {} was removed from ActionsDAG but it is needed in result. DAG:\n{}",
-                filter_column_name, expression->dumpDAG());
+        /// Filter was removed after split.
+
+
 
-        // std::cerr << "replacing to expr because filter " << filter_column_name << " was removed\n";
-        parent = std::make_unique(child->getOutputStream(), expression);
     }
     else if ((*it)->column && isColumnConst(*(*it)->column))
     {
-        // std::cerr << "replacing to expr because filter is const\n";
+        /// Filter column was replaced to constant.
         parent = std::make_unique(child->getOutputStream(), expression);
     }

From 00e0dbc3e5d39bb8bd0ff79b5001d69866c3a9cf Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov 
Date: Sat, 20 Feb 2021 20:42:06 +0300
Subject: [PATCH 52/86] Fix test.

---
 src/Interpreters/ActionsDAG.cpp               | 23 +++++++++-----
 src/Interpreters/ActionsDAG.h                 |  2 +-
 .../Optimizations/filterPushDown.cpp          | 30 ++-----------------
 .../01655_plan_optimizations.reference        |  4 +--
 .../0_stateless/01655_plan_optimizations.sh   |  4 +--
 5 files changed, 23 insertions(+), 40 deletions(-)

diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp
index b3f86313a1c..1406eecc5c0 100644
--- a/src/Interpreters/ActionsDAG.cpp
+++ b/src/Interpreters/ActionsDAG.cpp
@@ -1228,6 +1228,8 @@ struct ConjinctionNodes ConjinctionNodes getConjinctionNodes(ActionsDAG::Node * predicate, std::unordered_set allowed_nodes) { ConjinctionNodes conjunction; + std::unordered_set allowed; + std::unordered_set rejected; struct Frame { @@ -1276,12 +1278,19 @@ ConjinctionNodes getConjinctionNodes(ActionsDAG::Node * predicate, std::unordere else if (is_conjunction) { for (auto * child : cur.node->children) + { if (allowed_nodes.count(child)) - conjunction.allowed.insert(child); + { + if (allowed.insert(child).second) + conjunction.allowed.push_back(child); + + } + } } else if (cur.is_predicate) { - conjunction.rejected.insert(cur.node); + if (rejected.insert(cur.node).second) + conjunction.rejected.push_back(cur.node); } stack.pop(); @@ -1291,7 +1300,7 @@ ConjinctionNodes getConjinctionNodes(ActionsDAG::Node * predicate, std::unordere if (conjunction.allowed.empty()) { if (allowed_nodes.count(predicate)) - conjunction.allowed.insert(predicate); + conjunction.allowed.push_back(predicate); } return conjunction; @@ -1322,7 +1331,7 @@ ColumnsWithTypeAndName prepareFunctionArguments(const std::vector conjunction) +ActionsDAGPtr ActionsDAG::cloneActionsForConjunction(std::vector conjunction) { if (conjunction.empty()) return nullptr; @@ -1448,7 +1457,7 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, /// Now, when actions are created, update current DAG. - if (conjunction.allowed.count(predicate)) + if (conjunction.rejected.empty()) { /// The whole predicate was split. if (can_remove_filter) diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 87cf03f6edd..2e3baa181fd 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -314,7 +314,7 @@ private: void compileFunctions(); - ActionsDAGPtr cloneActionsForConjunction(std::unordered_set conjunction); + ActionsDAGPtr cloneActionsForConjunction(std::vector conjunction); }; diff --git a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp index 01e38e81092..d64f082b7ee 100644 --- a/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp +++ b/src/Processors/QueryPlan/Optimizations/filterPushDown.cpp @@ -58,11 +58,12 @@ static size_t tryAddNewFilterStep( const bool found_filter_column = it != expression->getIndex().end(); - if (!found_filter_column && removes_filter) + if (!found_filter_column && !removes_filter) throw Exception(ErrorCodes::LOGICAL_ERROR, "Filter column {} was removed from ActionsDAG but it is needed in result. DAG:\n{}", filter_column_name, expression->dumpDAG()); + /// Filter column was replaced to constant. const bool filter_is_constant = found_filter_column && (*it)->column && isColumnConst(*(*it)->column); if (!found_filter_column || filter_is_constant) @@ -70,19 +71,6 @@ static size_t tryAddNewFilterStep( /// Replace current actions to expression, as we don't need to filter anything. parent = std::make_unique(child->getOutputStream(), expression); - if (it == expression->getIndex().end()) - { - /// Filter was removed after split. - - - - } - else if ((*it)->column && isColumnConst(*(*it)->column)) - { - /// Filter column was replaced to constant. - parent = std::make_unique(child->getOutputStream(), expression); - } - /// Add new Filter step before Aggregating. 
/// Expression/Filter -> Aggregating -> Something auto & node = nodes.emplace_back(); @@ -109,20 +97,6 @@ static Names getAggregatinKeys(const Aggregator::Params & params) return keys; } -// static NameSet getColumnNamesFromSortDescription(const SortDescription & sort_desc, const Block & header) -// { -// NameSet names; -// for (const auto & column : sort_desc) -// { -// if (!column.column_name.empty()) -// names.insert(column.column_name); -// else -// names.insert(header.safeGetByPosition(column.column_number).name); -// } - -// return names; -// } - size_t tryPushDownFilter(QueryPlan::Node * parent_node, QueryPlan::Nodes & nodes) { if (parent_node->children.size() != 1) diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference index fa83c098412..f261e134494 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations.reference @@ -68,7 +68,7 @@ Filter column: notEquals(y, 0) 9 10 > one condition of filter should be pushed down after aggregating, other two conditions are ANDed Filter column -FUNCTION and(minus(s, 4) :: 2, minus(s, 8) :: 1) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4)) +FUNCTION and(minus(s, 8) :: 1, minus(s, 4) :: 2) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4)) Aggregating Filter column: notEquals(y, 0) 0 1 @@ -83,7 +83,7 @@ Filter column: notEquals(y, 0) Filter column ALIAS notEquals(s, 8) :: 1 -> and(notEquals(y, 0), notEquals(s, 8), minus(y, 4)) Aggregating -Filter column: and(minus(y, 4), notEquals(y, 0)) +Filter column: and(notEquals(y, 0), minus(y, 4)) 0 1 1 2 2 3 diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index e47b03661e4..84452fe651f 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -66,7 +66,7 @@ $CLICKHOUSE_CLIENT -q " select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s - 8 and s - 4 settings enable_optimize_predicate_expression=0" | - grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|FUNCTION and(minus(s, 4) :: 2, minus(s, 8) :: 1) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4))" + grep -o "Aggregating\|Filter column\|Filter column: notEquals(y, 0)\|FUNCTION and(minus(s, 8) :: 1, minus(s, 4) :: 2) -> and(notEquals(y, 0), minus(s, 8), minus(s, 4))" $CLICKHOUSE_CLIENT -q " select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y @@ -79,7 +79,7 @@ $CLICKHOUSE_CLIENT -q " select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y ) where y != 0 and s != 8 and y - 4 settings enable_optimize_predicate_expression=0" | - grep -o "Aggregating\|Filter column\|Filter column: and(minus(y, 4), notEquals(y, 0))\|ALIAS notEquals(s, 8) :: 1 -> and(notEquals(y, 0), notEquals(s, 8), minus(y, 4))" + grep -o "Aggregating\|Filter column\|Filter column: and(notEquals(y, 0), minus(y, 4))\|ALIAS notEquals(s, 8) :: 1 -> and(notEquals(y, 0), notEquals(s, 8), minus(y, 4))" $CLICKHOUSE_CLIENT -q " select s, y from ( select sum(x) as s, y from (select number as x, number + 1 as y from numbers(10)) group by y From f2b8f81e40eeadf3b85a1dae873828f4da40c8e7 Mon Sep 17 00:00:00 2001 From: Denis Zhuravlev Date: Mon, 22 Feb 2021 21:05:25 -0400 Subject: [PATCH 53/86] test for window functions --- 
.../01592_window_functions.reference | 81 +++++++++++++ .../0_stateless/01592_window_functions.sql | 107 ++++++++++++++++++ .../01592_window_functions1.reference | 4 + .../0_stateless/01592_window_functions1.sql | 35 ++++++ 4 files changed, 227 insertions(+) create mode 100644 tests/queries/0_stateless/01592_window_functions.reference create mode 100644 tests/queries/0_stateless/01592_window_functions.sql create mode 100644 tests/queries/0_stateless/01592_window_functions1.reference create mode 100644 tests/queries/0_stateless/01592_window_functions1.sql diff --git a/tests/queries/0_stateless/01592_window_functions.reference b/tests/queries/0_stateless/01592_window_functions.reference new file mode 100644 index 00000000000..8ca5086821b --- /dev/null +++ b/tests/queries/0_stateless/01592_window_functions.reference @@ -0,0 +1,81 @@ +---- Q1 ---- +Dell Vostro 800.00 Laptop 850 +HP Elite 1200.00 Laptop 850 +Lenovo Thinkpad 700.00 Laptop 850 +Sony VAIO 700.00 Laptop 850 +HTC One 400.00 Smartphone 500 +Microsoft Lumia 200.00 Smartphone 500 +Nexus 500.00 Smartphone 500 +iPhone 900.00 Smartphone 500 +Kindle Fire 150.00 Tablet 350 +Samsung Galaxy Tab 200.00 Tablet 350 +iPad 700.00 Tablet 350 +---- Q2 ---- +Lenovo Thinkpad Laptop 700.00 1 +Sony VAIO Laptop 700.00 1 +Dell Vostro Laptop 800.00 3 +HP Elite Laptop 1200.00 4 +Microsoft Lumia Smartphone 200.00 1 +HTC One Smartphone 400.00 2 +Nexus Smartphone 500.00 3 +iPhone Smartphone 900.00 4 +Kindle Fire Tablet 150.00 1 +Samsung Galaxy Tab Tablet 200.00 2 +iPad Tablet 700.00 3 +---- Q3 ---- +HP Elite Laptop 1200.00 1 +Dell Vostro Laptop 800.00 2 +Lenovo Thinkpad Laptop 700.00 3 +Sony VAIO Laptop 700.00 4 +iPhone Smartphone 900.00 1 +Nexus Smartphone 500.00 2 +HTC One Smartphone 400.00 3 +Microsoft Lumia Smartphone 200.00 4 +iPad Tablet 700.00 1 +Samsung Galaxy Tab Tablet 200.00 2 +Kindle Fire Tablet 150.00 3 +---- Q4 ---- +Lenovo Thinkpad Laptop 700.00 700.00 1 +Sony VAIO Laptop 700.00 700.00 1 +Dell Vostro Laptop 800.00 700.00 2 +HP Elite Laptop 1200.00 700.00 3 +Microsoft Lumia Smartphone 200.00 200.00 1 +HTC One Smartphone 400.00 200.00 2 +Nexus Smartphone 500.00 200.00 3 +iPhone Smartphone 900.00 200.00 4 +---- Q5 ---- +Sony VAIO Laptop 700.00 700.00 +Lenovo Thinkpad Laptop 700.00 700.00 +HP Elite Laptop 1200.00 700.00 +Dell Vostro Laptop 800.00 700.00 +iPhone Smartphone 900.00 900.00 +Nexus Smartphone 500.00 900.00 +Microsoft Lumia Smartphone 200.00 900.00 +HTC One Smartphone 400.00 900.00 +iPad Tablet 700.00 700.00 +Samsung Galaxy Tab Tablet 200.00 700.00 +Kindle Fire Tablet 150.00 700.00 +---- Q6 ---- +Dell Vostro Laptop 800.00 1200.00 +HP Elite Laptop 1200.00 1200.00 +Lenovo Thinkpad Laptop 700.00 1200.00 +Sony VAIO Laptop 700.00 1200.00 +HTC One Smartphone 400.00 900.00 +Microsoft Lumia Smartphone 200.00 900.00 +Nexus Smartphone 500.00 900.00 +iPhone Smartphone 900.00 900.00 +Kindle Fire Tablet 150.00 700.00 +Samsung Galaxy Tab Tablet 200.00 700.00 +iPad Tablet 700.00 700.00 +---- Q7 ---- +Dell Vostro 800.00 Laptop 733 850 +HP Elite 1200.00 Laptop 850 850 +Lenovo Thinkpad 700.00 Laptop 700 850 +Sony VAIO 700.00 Laptop 700 850 +HTC One 400.00 Smartphone 300 500 +Microsoft Lumia 200.00 Smartphone 200 500 +Nexus 500.00 Smartphone 367 500 +iPhone 900.00 Smartphone 500 500 +Kindle Fire 150.00 Tablet 150 350 +Samsung Galaxy Tab 200.00 Tablet 175 350 +iPad 700.00 Tablet 350 350 diff --git a/tests/queries/0_stateless/01592_window_functions.sql b/tests/queries/0_stateless/01592_window_functions.sql new file mode 100644 index 00000000000..8d5033fc821 
--- /dev/null +++ b/tests/queries/0_stateless/01592_window_functions.sql @@ -0,0 +1,107 @@ +set allow_experimental_window_functions = 1; + +drop table if exists product_groups; +drop table if exists products; + +CREATE TABLE product_groups ( + group_id Int64, + group_name String +) Engine = Memory; + + +CREATE TABLE products ( + product_id Int64, + product_name String, + price DECIMAL(11, 2), + group_id Int64 +) Engine = Memory; + +INSERT INTO product_groups VALUES (1, 'Smartphone'),(2, 'Laptop'),(3, 'Tablet'); + +INSERT INTO products (product_id,product_name, group_id,price) VALUES (1, 'Microsoft Lumia', 1, 200), (2, 'HTC One', 1, 400), (3, 'Nexus', 1, 500), (4, 'iPhone', 1, 900),(5, 'HP Elite', 2, 1200),(6, 'Lenovo Thinkpad', 2, 700),(7, 'Sony VAIO', 2, 700),(8, 'Dell Vostro', 2, 800),(9, 'iPad', 3, 700),(10, 'Kindle Fire', 3, 150),(11, 'Samsung Galaxy Tab', 3, 200); + +select '---- Q1 ----'; + +SELECT + product_name, + price, + group_name, + AVG(price) OVER (PARTITION BY group_name) +FROM products INNER JOIN product_groups USING (group_id) +order by group_name, product_name, price; + +select '---- Q2 ----'; + +SELECT + product_name, + group_name, + price, + rank() OVER (PARTITION BY group_name ORDER BY price) rank +FROM products INNER JOIN product_groups USING (group_id) +order by group_name, rank, price; + +select '---- Q3 ----'; +SELECT + product_name, + group_name, + price, + row_number() OVER (PARTITION BY group_name ORDER BY price desc) rn +FROM products INNER JOIN product_groups USING (group_id) +ORDER BY group_name, rn; + +select '---- Q4 ----'; +SELECT * +FROM +( + SELECT + product_name, + group_name, + price, + min(price) OVER (PARTITION BY group_name) AS min_price, + dense_rank() OVER (PARTITION BY group_name ORDER BY price ASC) AS r + FROM products + INNER JOIN product_groups USING (group_id) +) AS t +WHERE min_price > 160 +ORDER BY + group_name ASC, + r ASC, + product_name ASC; + +select '---- Q5 ----'; +SELECT + product_name, + group_name, + price, + FIRST_VALUE (price) OVER (PARTITION BY group_name ORDER BY product_name desc) AS price_per_group_per_alphab +FROM products INNER JOIN product_groups USING (group_id) +order by group_name, product_name desc; + +select '---- Q6 ----'; +SELECT + product_name, + group_name, + price, + LAST_VALUE (price) OVER (PARTITION BY group_name ORDER BY + price RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING + ) AS highest_price_per_group +FROM + products +INNER JOIN product_groups USING (group_id) +order by group_name, product_name; + +select '---- Q7 ----'; +select product_name, price, group_name, round(avg0), round(avg1) +from ( +SELECT + product_name, + price, + group_name, + avg(price) OVER (PARTITION BY group_name ORDER BY price) avg0, + avg(price) OVER (PARTITION BY group_name ORDER BY + price RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) avg1 +FROM products INNER JOIN product_groups USING (group_id)) t +order by group_name, product_name, price; + +drop table product_groups; +drop table products; diff --git a/tests/queries/0_stateless/01592_window_functions1.reference b/tests/queries/0_stateless/01592_window_functions1.reference new file mode 100644 index 00000000000..5160cca9c3e --- /dev/null +++ b/tests/queries/0_stateless/01592_window_functions1.reference @@ -0,0 +1,4 @@ +---- arrays ---- +6360452672161319041 +---- window f ---- +6360452672161319041 diff --git a/tests/queries/0_stateless/01592_window_functions1.sql b/tests/queries/0_stateless/01592_window_functions1.sql new file mode 100644 index 
00000000000..c7751ab9f33
--- /dev/null
+++ b/tests/queries/0_stateless/01592_window_functions1.sql
@@ -0,0 +1,35 @@
+drop table if exists stack;
+
+set allow_experimental_window_functions = 1;
+
+create table stack(item_id Int64, brand_id Int64, rack_id Int64, dt DateTime, expiration_dt DateTime, quantity UInt64)
+Engine = MergeTree
+partition by toYYYYMM(dt)
+order by (brand_id, toStartOfHour(dt));
+
+insert into stack
+select number%99991, number%11, number%1111, toDateTime('2020-01-01 00:00:00')+number/100,
+   toDateTime('2020-02-01 00:00:00')+number/10, intDiv(number,100)+1
+from numbers(10000000);
+
+select '---- arrays ----';
+
+select cityHash64( toString( groupArray (tuple(*) ) )) from (
+  select brand_id, rack_id, arrayJoin(arraySlice(arraySort(groupArray(quantity)),1,2)) quantity
+  from stack
+  group by brand_id, rack_id
+  order by brand_id, rack_id, quantity
+) t;
+
+
+select '---- window f ----';
+
+select cityHash64( toString( groupArray (tuple(*) ) )) from (
+  select brand_id, rack_id, quantity from
+     ( select brand_id, rack_id, quantity, row_number() over (partition by brand_id, rack_id order by quantity) rn
+       from stack ) as t0
+  where rn <= 2
+  order by brand_id, rack_id, quantity
+) t;
+
+drop table if exists stack;

From 2ebae14f12af45a89ace4dc6ace681669935698f Mon Sep 17 00:00:00 2001
From: Denis Zhuravlev 
Date: Mon, 22 Feb 2021 23:40:38 -0400
Subject: [PATCH 54/86] mark test as long

---
 ...unctions1.reference => 01592_long_window_functions1.reference} | 0
 ...592_window_functions1.sql => 01592_long_window_functions1.sql} | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename tests/queries/0_stateless/{01592_window_functions1.reference => 01592_long_window_functions1.reference} (100%)
 rename tests/queries/0_stateless/{01592_window_functions1.sql => 01592_long_window_functions1.sql} (100%)

diff --git a/tests/queries/0_stateless/01592_window_functions1.reference b/tests/queries/0_stateless/01592_long_window_functions1.reference
similarity index 100%
rename from tests/queries/0_stateless/01592_window_functions1.reference
rename to tests/queries/0_stateless/01592_long_window_functions1.reference
diff --git a/tests/queries/0_stateless/01592_window_functions1.sql b/tests/queries/0_stateless/01592_long_window_functions1.sql
similarity index 100%
rename from tests/queries/0_stateless/01592_window_functions1.sql
rename to tests/queries/0_stateless/01592_long_window_functions1.sql

From d966725f3320ce48b76d43e0092aaeb5161b2d23 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov 
Date: Wed, 24 Feb 2021 17:10:35 +0300
Subject: [PATCH 55/86] Try to fix hang in void PullingAsyncPipelineExecutor::cancel()

---
 .../PullingAsyncPipelineExecutor.cpp          | 21 +++++++++----------
 src/Processors/Formats/LazyOutputFormat.h     |  2 ++
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp
index 21741d30dfa..3270d1186f6 100644
--- a/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp
+++ b/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp
@@ -14,6 +14,7 @@ struct PullingAsyncPipelineExecutor::Data
 {
     PipelineExecutorPtr executor;
     std::exception_ptr exception;
+    LazyOutputFormat * lazy_format = nullptr;
     std::atomic_bool is_finished = false;
     std::atomic_bool has_exception = false;
     ThreadFromGlobalPool thread;
@@ -82,6 +83,10 @@ static void threadFunction(PullingAsyncPipelineExecutor::Data & data, ThreadGrou
     {
         data.exception = std::current_exception();
         data.has_exception = 
true;
+
+        /// Finish lazy format in case of exception. Otherwise thread.join() may hang.
+        if (data.lazy_format)
+            data.lazy_format->cancel();
     }
 
     data.is_finished = true;
@@ -95,6 +100,7 @@ bool PullingAsyncPipelineExecutor::pull(Chunk & chunk, uint64_t milliseconds)
     {
         data = std::make_unique();
         data->executor = pipeline.execute();
+        data->lazy_format = lazy_format.get();
 
         auto func = [&, thread_group = CurrentThread::getGroup()]()
         {
@@ -105,14 +111,7 @@ bool PullingAsyncPipelineExecutor::pull(Chunk & chunk, uint64_t milliseconds)
     }
 
     if (data->has_exception)
-    {
-        /// Finish lazy format in case of exception. Otherwise thread.join() may hang.
-        if (lazy_format)
-            lazy_format->finish();
-
-        data->has_exception = false;
         std::rethrow_exception(std::move(data->exception));
-    }
 
     bool is_execution_finished = lazy_format ? lazy_format->isFinished() : data->is_finished.load();
@@ -172,14 +171,14 @@
 
 void PullingAsyncPipelineExecutor::cancel()
 {
-    /// Cancel execution if it wasn't finished.
-    if (data && !data->is_finished && data->executor)
-        data->executor->cancel();
-
     /// Finish lazy format. Otherwise thread.join() may hang.
     if (lazy_format && !lazy_format->isFinished())
         lazy_format->finish();
 
+    /// Cancel execution if it wasn't finished.
+    if (data && !data->is_finished && data->executor)
+        data->executor->cancel();
+
     /// Join thread here to wait for possible exception.
     if (data && data->thread.joinable())
         data->thread.join();
diff --git a/src/Processors/Formats/LazyOutputFormat.h b/src/Processors/Formats/LazyOutputFormat.h
index 06ec116f3dd..647daa33889 100644
--- a/src/Processors/Formats/LazyOutputFormat.h
+++ b/src/Processors/Formats/LazyOutputFormat.h
@@ -36,6 +36,8 @@ public:
         queue.clear();
     }
 
+    void onCancel() override { finalize(); }
+
 protected:
     void consume(Chunk chunk) override
     {

From 9ded4dd3966c9359bdddad75b638c0431f836057 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov 
Date: Wed, 24 Feb 2021 17:13:29 +0300
Subject: [PATCH 56/86] Update test.

---
 tests/queries/0_stateless/00205_scalar_subqueries.sql | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/queries/0_stateless/00205_scalar_subqueries.sql b/tests/queries/0_stateless/00205_scalar_subqueries.sql
index 14244377e5f..03bcd0a3ebc 100644
--- a/tests/queries/0_stateless/00205_scalar_subqueries.sql
+++ b/tests/queries/0_stateless/00205_scalar_subqueries.sql
@@ -7,3 +7,4 @@ SELECT (SELECT toDate('2015-01-02'), 'Hello');
 SELECT (SELECT toDate('2015-01-02'), 'Hello') AS x, x, identity((SELECT 1)), identity((SELECT 1) AS y);
 
 -- SELECT (SELECT uniqState(''));
+ SELECT ( SELECT throwIf(1 + dummy) ); -- { serverError 395 }

From aa8632a1bc2eb6379e83655301d9ca00cc156b33 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov 
Date: Thu, 25 Feb 2021 00:27:47 +0300
Subject: [PATCH 57/86] Fix tests.

---
 .../Executors/PullingAsyncPipelineExecutor.cpp |  4 ++--
 src/Processors/Formats/LazyOutputFormat.h      | 17 ++++++++---------
 2 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp
index 3270d1186f6..d058ea9e6ac 100644
--- a/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp
+++ b/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp
@@ -86,7 +86,7 @@ static void threadFunction(PullingAsyncPipelineExecutor::Data & data, ThreadGrou
 
         /// Finish lazy format in case of exception. Otherwise thread.join() may hang. 
if (data.lazy_format) - data.lazy_format->cancel(); + data.lazy_format->finalize(); } data.is_finished = true; @@ -120,7 +120,7 @@ bool PullingAsyncPipelineExecutor::pull(Chunk & chunk, uint64_t milliseconds) { /// If lazy format is finished, we don't cancel pipeline but wait for main thread to be finished. data->is_finished = true; - /// Wait thread ant rethrow exception if any. + /// Wait thread and rethrow exception if any. cancel(); return false; } diff --git a/src/Processors/Formats/LazyOutputFormat.h b/src/Processors/Formats/LazyOutputFormat.h index 647daa33889..7188458dd82 100644 --- a/src/Processors/Formats/LazyOutputFormat.h +++ b/src/Processors/Formats/LazyOutputFormat.h @@ -36,7 +36,14 @@ public: queue.clear(); } - void onCancel() override { finalize(); } + void finalize() override + { + std::cerr << StackTrace().toString() << std::endl; + finished_processing = true; + + /// In case we are waiting for result. + queue.emplace(Chunk()); + } protected: void consume(Chunk chunk) override @@ -48,14 +55,6 @@ protected: void consumeTotals(Chunk chunk) override { totals = std::move(chunk); } void consumeExtremes(Chunk chunk) override { extremes = std::move(chunk); } - void finalize() override - { - finished_processing = true; - - /// In case we are waiting for result. - queue.emplace(Chunk()); - } - private: ConcurrentBoundedQueue queue; From 6f5d4ba8cd9ffdec8743c9c7cdff996324b5481c Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 25 Feb 2021 00:31:15 +0300 Subject: [PATCH 58/86] Fix tests. --- src/Processors/Executors/PullingAsyncPipelineExecutor.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp b/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp index d058ea9e6ac..c975153d317 100644 --- a/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp +++ b/src/Processors/Executors/PullingAsyncPipelineExecutor.cpp @@ -171,14 +171,14 @@ bool PullingAsyncPipelineExecutor::pull(Block & block, uint64_t milliseconds) void PullingAsyncPipelineExecutor::cancel() { - /// Finish lazy format. Otherwise thread.join() may hung. - if (lazy_format && !lazy_format->isFinished()) - lazy_format->finish(); - /// Cancel execution if it wasn't finished. if (data && !data->is_finished && data->executor) data->executor->cancel(); + /// Finish lazy format. Otherwise thread.join() may hung. + if (lazy_format && !lazy_format->isFinished()) + lazy_format->finish(); + /// Join thread here to wait for possible exception. if (data && data->thread.joinable()) data->thread.join(); From 33364f6bdf15fe164321bff7da0fdd9e000a3947 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 25 Feb 2021 13:50:35 +0300 Subject: [PATCH 59/86] Remove debug output --- src/Processors/Formats/LazyOutputFormat.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Processors/Formats/LazyOutputFormat.h b/src/Processors/Formats/LazyOutputFormat.h index 7188458dd82..15ea5022f82 100644 --- a/src/Processors/Formats/LazyOutputFormat.h +++ b/src/Processors/Formats/LazyOutputFormat.h @@ -38,7 +38,6 @@ public: void finalize() override { - std::cerr << StackTrace().toString() << std::endl; finished_processing = true; /// In case we are waiting for result. 
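Patches 55 through 59 circle one deadlock: PullingAsyncPipelineExecutor::cancel() joins a worker thread that may be blocked pushing a chunk into the bounded queue inside LazyOutputFormat. The fix has two halves: the queue must be finished (all waiters woken) before any join(), and an exception raised on the worker thread must be stashed and rethrown to the caller instead of being lost. A toy model of that contract, using simplified stand-in types rather than the real ClickHouse classes:

```cpp
#include <condition_variable>
#include <deque>
#include <exception>
#include <iostream>
#include <mutex>
#include <thread>

/// Stand-in for the bounded queue inside LazyOutputFormat: push() blocks
/// while the queue is full, finish() wakes every waiter up for shutdown.
class ToyQueue
{
public:
    bool push(int value)
    {
        std::unique_lock lock(mutex);
        /// This wait is where the producer thread gets stuck when nobody
        /// consumes: an unconditional thread.join() would then hang.
        not_full.wait(lock, [this] { return finished || items.size() < 4; });
        if (finished)
            return false;
        items.push_back(value);
        return true;
    }

    /// Analogue of LazyOutputFormat::finish()/finalize().
    void finish()
    {
        {
            std::lock_guard lock(mutex);
            finished = true;
        }
        not_full.notify_all();
    }

private:
    std::mutex mutex;
    std::condition_variable not_full;
    std::deque<int> items;
    bool finished = false;
};

int main()
{
    ToyQueue queue;
    std::exception_ptr exception;  /// plays the role of Data::exception

    std::thread producer([&]
    {
        try
        {
            for (int i = 0; i < 1000 && queue.push(i); ++i)
                ;
        }
        catch (...)
        {
            /// As in patch 55: on error, finish the output from inside the
            /// thread too, so the consumer does not wait for a chunk forever.
            exception = std::current_exception();
            queue.finish();
        }
    });

    /// The cancel() contract: unblock the queue before join(), then join,
    /// then rethrow whatever the worker thread stored.
    queue.finish();
    producer.join();
    if (exception)
        std::rethrow_exception(exception);

    std::cout << "cancelled cleanly" << std::endl;
}
```

The ordering patch 58 settles on in the real cancel() is: cancel the executor first, then finish the lazy format, then join. The invariant shared by every version is that join() is never called while a thread can still be asleep inside the queue.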
From 70b8d6d5657efe0157babeabf107453dad07e42a Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 25 Feb 2021 14:19:26 +0300 Subject: [PATCH 60/86] Update encryption-functions.md --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index e2c5560e4f6..4388f327eab 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -41,7 +41,7 @@ encrypt('mode', 'plaintext', 'key' [, iv, aad]) **Возвращаемое значение** -- Двоичная зашифрованная строка. [String](../../sql-reference/data-types/string.md#string). +- Бинарная зашифрованная строка. [String](../../sql-reference/data-types/string.md#string). **Примеры** From d4e48f44f5e69d553e4c67f8a158307206cca132 Mon Sep 17 00:00:00 2001 From: Nikita Mikhaylov Date: Thu, 25 Feb 2021 14:20:42 +0300 Subject: [PATCH 61/86] Update encryption-functions.md --- docs/ru/sql-reference/functions/encryption-functions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ru/sql-reference/functions/encryption-functions.md b/docs/ru/sql-reference/functions/encryption-functions.md index 4388f327eab..0216a6b2356 100644 --- a/docs/ru/sql-reference/functions/encryption-functions.md +++ b/docs/ru/sql-reference/functions/encryption-functions.md @@ -136,7 +136,7 @@ aes_encrypt_mysql('mode', 'plaintext', 'key' [, iv]) **Возвращаемое значение** -- Двоичная зашифрованная строка. [String](../../sql-reference/data-types/string.md#string). +- Бинарная зашифрованная строка. [String](../../sql-reference/data-types/string.md#string). **Примеры** From d328bfa41f7a4c6d5796b2d8b8747dbe077984a8 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 26 Feb 2021 19:29:56 +0300 Subject: [PATCH 62/86] Review fixes. Add setting max_optimizations_to_apply. 
--- src/Common/ErrorCodes.cpp | 3 ++- src/Core/Settings.h | 1 + src/Interpreters/ActionsDAG.cpp | 9 ++++---- src/Interpreters/ActionsDAG.h | 3 +++ .../ClusterProxy/SelectStreamFactory.cpp | 2 +- src/Interpreters/InterpreterExplainQuery.cpp | 6 +++--- src/Interpreters/InterpreterSelectQuery.cpp | 2 +- .../InterpreterSelectWithUnionQuery.cpp | 2 +- src/Interpreters/MutationsInterpreter.cpp | 2 +- .../QueryPlan/Optimizations/Optimizations.h | 2 +- .../QueryPlanOptimizationSettings.cpp | 12 +++++++++++ .../QueryPlanOptimizationSettings.h | 20 ++++++++++++++++++ .../QueryPlan/Optimizations/optimizeTree.cpp | 21 +++++++++++++++---- src/Processors/QueryPlan/QueryPlan.cpp | 12 +++++------ src/Processors/QueryPlan/QueryPlan.h | 9 ++++---- .../MergeTree/StorageFromMergeTreeDataPart.h | 2 +- src/Storages/StorageBuffer.cpp | 2 +- src/Storages/StorageDistributed.cpp | 2 +- src/Storages/StorageMaterializedView.cpp | 2 +- src/Storages/StorageMergeTree.cpp | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- src/Storages/StorageView.cpp | 2 +- .../01655_plan_optimizations.reference | 2 ++ .../0_stateless/01655_plan_optimizations.sh | 5 ++++- 24 files changed, 92 insertions(+), 35 deletions(-) create mode 100644 src/Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.cpp create mode 100644 src/Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h diff --git a/src/Common/ErrorCodes.cpp b/src/Common/ErrorCodes.cpp index d0d83448b68..d14c9a7e45e 100644 --- a/src/Common/ErrorCodes.cpp +++ b/src/Common/ErrorCodes.cpp @@ -535,12 +535,13 @@ M(566, CANNOT_RMDIR) \ M(567, DUPLICATED_PART_UUIDS) \ M(568, RAFT_ERROR) \ + M(569, TOO_MANY_QUERY_PLAN_OPTIMIZATIONS) \ \ M(999, KEEPER_EXCEPTION) \ M(1000, POCO_EXCEPTION) \ M(1001, STD_EXCEPTION) \ M(1002, UNKNOWN_EXCEPTION) \ - M(1003, INVALID_SHARD_ID) + M(1003, INVALID_SHARD_ID) \ /* See END */ diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 9bb9ad30f15..549a1716a44 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -431,6 +431,7 @@ class IColumn; M(UnionMode, union_default_mode, UnionMode::Unspecified, "Set default Union Mode in SelectWithUnion query. Possible values: empty string, 'ALL', 'DISTINCT'. If empty, query without Union Mode will throw exception.", 0) \ M(Bool, optimize_aggregators_of_group_by_keys, true, "Eliminates min/max/any/anyLast aggregators of GROUP BY keys in SELECT section", 0) \ M(Bool, optimize_group_by_function_keys, true, "Eliminates functions of other keys in GROUP BY section", 0) \ + M(UInt64, query_plan_max_optimizations_to_apply, 10000, "Limit the total number of optimizations applied to query plan. If zero, ignored. If limit reached, throw exception", 0) \ // End of COMMON_SETTINGS // Please add settings related to formats into the FORMAT_FACTORY_SETTINGS below. diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index 1406eecc5c0..e994a6a0ef6 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -1215,7 +1215,7 @@ ActionsDAG::SplitResult ActionsDAG::splitActionsForFilter(const std::string & co namespace { -struct ConjinctionNodes +struct ConjunctionNodes { std::vector allowed; std::vector rejected; @@ -1225,9 +1225,9 @@ struct ConjinctionNodes /// Assuming predicate is a conjunction (probably, trivial). /// Find separate conjunctions nodes. Split nodes into allowed and rejected sets. /// Allowed predicate is a predicate which can be calculated using only nodes from allowed_nodes set. 
-ConjinctionNodes getConjinctionNodes(ActionsDAG::Node * predicate, std::unordered_set allowed_nodes) +ConjunctionNodes getConjunctionNodes(ActionsDAG::Node * predicate, std::unordered_set allowed_nodes) { - ConjinctionNodes conjunction; + ConjunctionNodes conjunction; std::unordered_set allowed; std::unordered_set rejected; @@ -1299,6 +1299,7 @@ ConjinctionNodes getConjinctionNodes(ActionsDAG::Node * predicate, std::unordere if (conjunction.allowed.empty()) { + /// If nothing was added to conjunction, check if it is trivial. if (allowed_nodes.count(predicate)) conjunction.allowed.push_back(predicate); } @@ -1450,7 +1451,7 @@ ActionsDAGPtr ActionsDAG::splitActionsForFilter(const std::string & filter_name, } } - auto conjunction = getConjinctionNodes(predicate, allowed_nodes); + auto conjunction = getConjunctionNodes(predicate, allowed_nodes); auto actions = cloneActionsForConjunction(conjunction.allowed); if (!actions) return nullptr; diff --git a/src/Interpreters/ActionsDAG.h b/src/Interpreters/ActionsDAG.h index 2e3baa181fd..14ed5df949b 100644 --- a/src/Interpreters/ActionsDAG.h +++ b/src/Interpreters/ActionsDAG.h @@ -152,6 +152,9 @@ public: } }; + /// NOTE: std::list is an implementation detail. + /// It allows to add and remove new nodes inplace without reallocation. + /// Raw pointers to nodes remain valid. using Nodes = std::list; using Inputs = std::vector; diff --git a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp index 47726e49d50..e0fcc4738ba 100644 --- a/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp +++ b/src/Interpreters/ClusterProxy/SelectStreamFactory.cpp @@ -284,7 +284,7 @@ void SelectStreamFactory::createForShard( if (try_results.empty() || local_delay < max_remote_delay) { auto plan = createLocalPlan(modified_query_ast, header, context, stage); - return QueryPipeline::getPipe(std::move(*plan->buildQueryPipeline())); + return QueryPipeline::getPipe(std::move(*plan->buildQueryPipeline(QueryPlanOptimizationSettings(context.getSettingsRef())))); } else { diff --git a/src/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp index b13350d7ba2..5135e40e4dd 100644 --- a/src/Interpreters/InterpreterExplainQuery.cpp +++ b/src/Interpreters/InterpreterExplainQuery.cpp @@ -117,7 +117,7 @@ struct QueryPlanSettings { QueryPlan::ExplainPlanOptions query_plan_options; - /// Apply query plan optimisations. + /// Apply query plan optimizations. 
bool optimize = true; constexpr static char name[] = "PLAN"; @@ -251,7 +251,7 @@ BlockInputStreamPtr InterpreterExplainQuery::executeImpl() interpreter.buildQueryPlan(plan); if (settings.optimize) - plan.optimize(); + plan.optimize(QueryPlanOptimizationSettings(context.getSettingsRef())); plan.explainPlan(buf, settings.query_plan_options); } @@ -265,7 +265,7 @@ BlockInputStreamPtr InterpreterExplainQuery::executeImpl() InterpreterSelectWithUnionQuery interpreter(ast.getExplainedQuery(), context, SelectQueryOptions()); interpreter.buildQueryPlan(plan); - auto pipeline = plan.buildQueryPipeline(); + auto pipeline = plan.buildQueryPipeline(QueryPlanOptimizationSettings(context.getSettingsRef())); if (settings.graph) { diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 9f97160f77f..f13f8fb4106 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -548,7 +548,7 @@ BlockIO InterpreterSelectQuery::execute() buildQueryPlan(query_plan); - res.pipeline = std::move(*query_plan.buildQueryPipeline()); + res.pipeline = std::move(*query_plan.buildQueryPipeline(QueryPlanOptimizationSettings(context->getSettingsRef()))); return res; } diff --git a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp index 59fcff61936..b894db79c7b 100644 --- a/src/Interpreters/InterpreterSelectWithUnionQuery.cpp +++ b/src/Interpreters/InterpreterSelectWithUnionQuery.cpp @@ -413,7 +413,7 @@ BlockIO InterpreterSelectWithUnionQuery::execute() QueryPlan query_plan; buildQueryPlan(query_plan); - auto pipeline = query_plan.buildQueryPipeline(); + auto pipeline = query_plan.buildQueryPipeline(QueryPlanOptimizationSettings(context->getSettingsRef())); res.pipeline = std::move(*pipeline); res.pipeline.addInterpreterContext(context); diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 528b5ec6d8e..43ab42b42c7 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -756,7 +756,7 @@ QueryPipelinePtr MutationsInterpreter::addStreamsForLaterStages(const std::vecto } } - auto pipeline = plan.buildQueryPipeline(); + auto pipeline = plan.buildQueryPipeline(QueryPlanOptimizationSettings(context.getSettingsRef())); pipeline->addSimpleTransform([&](const Block & header) { return std::make_shared(header); diff --git a/src/Processors/QueryPlan/Optimizations/Optimizations.h b/src/Processors/QueryPlan/Optimizations/Optimizations.h index a5c3af488a9..f96237fc71a 100644 --- a/src/Processors/QueryPlan/Optimizations/Optimizations.h +++ b/src/Processors/QueryPlan/Optimizations/Optimizations.h @@ -9,7 +9,7 @@ namespace QueryPlanOptimizations { /// This is the main function which optimizes the whole QueryPlan tree. -void optimizeTree(QueryPlan::Node & root, QueryPlan::Nodes & nodes); +void optimizeTree(const QueryPlanOptimizationSettings & settings, QueryPlan::Node & root, QueryPlan::Nodes & nodes); /// Optimization is a function applied to QueryPlan::Node. /// It can read and update subtree of specified node. 
diff --git a/src/Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.cpp b/src/Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.cpp new file mode 100644 index 00000000000..cbd38d46ebf --- /dev/null +++ b/src/Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.cpp @@ -0,0 +1,12 @@ +#include +#include + +namespace DB +{ + +QueryPlanOptimizationSettings::QueryPlanOptimizationSettings(const Settings & settings) +{ + max_optimizations_to_apply = settings.query_plan_max_optimizations_to_apply; +} + +} diff --git a/src/Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h b/src/Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h new file mode 100644 index 00000000000..074298e24a1 --- /dev/null +++ b/src/Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h @@ -0,0 +1,20 @@ +#pragma once + +#include + +namespace DB +{ + +struct Settings; + +struct QueryPlanOptimizationSettings +{ + QueryPlanOptimizationSettings() = delete; + explicit QueryPlanOptimizationSettings(const Settings & settings); + + /// If not zero, throw if too many optimizations were applied to query plan. + /// It helps to avoid infinite optimization loop. + size_t max_optimizations_to_apply = 0; +}; + +} diff --git a/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp b/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp index cc81a7f39fc..858bde9c660 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp @@ -1,10 +1,20 @@ #include +#include +#include #include -namespace DB::QueryPlanOptimizations +namespace DB { -void optimizeTree(QueryPlan::Node & root, QueryPlan::Nodes & nodes) +namespace ErrorCodes +{ + extern const int TOO_MANY_QUERY_PLAN_OPTIMIZATIONS; +} + +namespace QueryPlanOptimizations +{ + +void optimizeTree(const QueryPlanOptimizationSettings & settings, QueryPlan::Node & root, QueryPlan::Nodes & nodes) { const auto & optimizations = getOptimizations(); @@ -23,7 +33,7 @@ void optimizeTree(QueryPlan::Node & root, QueryPlan::Nodes & nodes) std::stack stack; stack.push(Frame{.node = &root}); - size_t max_optimizations_to_apply = 0; + size_t max_optimizations_to_apply = settings.max_optimizations_to_apply; size_t total_applied_optimizations = 0; while (!stack.empty()) @@ -58,7 +68,9 @@ void optimizeTree(QueryPlan::Node & root, QueryPlan::Nodes & nodes) continue; if (max_optimizations_to_apply && max_optimizations_to_apply < total_applied_optimizations) - continue; + throw Exception(ErrorCodes::TOO_MANY_QUERY_PLAN_OPTIMIZATIONS, + "Too many optimizations applied to query plan. Current limit {}", + max_optimizations_to_apply); /// Try to apply optimization. 
auto update_depth = optimization.apply(frame.node, nodes); @@ -81,3 +93,4 @@ void optimizeTree(QueryPlan::Node & root, QueryPlan::Nodes & nodes) } } +} diff --git a/src/Processors/QueryPlan/QueryPlan.cpp b/src/Processors/QueryPlan/QueryPlan.cpp index 755944fdf9f..f5d5e0d99b7 100644 --- a/src/Processors/QueryPlan/QueryPlan.cpp +++ b/src/Processors/QueryPlan/QueryPlan.cpp @@ -130,10 +130,10 @@ void QueryPlan::addStep(QueryPlanStepPtr step) " input expected", ErrorCodes::LOGICAL_ERROR); } -QueryPipelinePtr QueryPlan::buildQueryPipeline() +QueryPipelinePtr QueryPlan::buildQueryPipeline(const QueryPlanOptimizationSettings & optimization_settings) { checkInitialized(); - optimize(); + optimize(optimization_settings); struct Frame { @@ -177,7 +177,7 @@ QueryPipelinePtr QueryPlan::buildQueryPipeline() return last_pipeline; } -Pipe QueryPlan::convertToPipe() +Pipe QueryPlan::convertToPipe(const QueryPlanOptimizationSettings & optimization_settings) { if (!isInitialized()) return {}; @@ -185,7 +185,7 @@ Pipe QueryPlan::convertToPipe() if (isCompleted()) throw Exception("Cannot convert completed QueryPlan to Pipe", ErrorCodes::LOGICAL_ERROR); - return QueryPipeline::getPipe(std::move(*buildQueryPipeline())); + return QueryPipeline::getPipe(std::move(*buildQueryPipeline(optimization_settings))); } void QueryPlan::addInterpreterContext(std::shared_ptr context) @@ -333,9 +333,9 @@ void QueryPlan::explainPipeline(WriteBuffer & buffer, const ExplainPipelineOptio } } -void QueryPlan::optimize() +void QueryPlan::optimize(const QueryPlanOptimizationSettings & optimization_settings) { - QueryPlanOptimizations::optimizeTree(*root, nodes); + QueryPlanOptimizations::optimizeTree(optimization_settings, *root, nodes); } } diff --git a/src/Processors/QueryPlan/QueryPlan.h b/src/Processors/QueryPlan/QueryPlan.h index 9d2d7d93a36..7973f9af45a 100644 --- a/src/Processors/QueryPlan/QueryPlan.h +++ b/src/Processors/QueryPlan/QueryPlan.h @@ -5,6 +5,7 @@ #include #include +#include namespace DB { @@ -27,7 +28,7 @@ class Pipe; /// A tree of query steps. /// The goal of QueryPlan is to build QueryPipeline. -/// QueryPlan let delay pipeline creation which is helpful for pipeline-level optimisations. +/// QueryPlan let delay pipeline creation which is helpful for pipeline-level optimizations. class QueryPlan { public: @@ -43,12 +44,12 @@ public: bool isCompleted() const; /// Tree is not empty and root hasOutputStream() const DataStream & getCurrentDataStream() const; /// Checks that (isInitialized() && !isCompleted()) - void optimize(); + void optimize(const QueryPlanOptimizationSettings & optimization_settings); - QueryPipelinePtr buildQueryPipeline(); + QueryPipelinePtr buildQueryPipeline(const QueryPlanOptimizationSettings & optimization_settings); /// If initialized, build pipeline and convert to pipe. Otherwise, return empty pipe. 
- Pipe convertToPipe(); + Pipe convertToPipe(const QueryPlanOptimizationSettings & optimization_settings); struct ExplainPlanOptions { diff --git a/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h index 98e1880de54..1d011effc69 100644 --- a/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h +++ b/src/Storages/MergeTree/StorageFromMergeTreeDataPart.h @@ -33,7 +33,7 @@ public: std::move(*MergeTreeDataSelectExecutor(part->storage) .readFromParts({part}, column_names, metadata_snapshot, query_info, context, max_block_size, num_streams)); - return query_plan.convertToPipe(); + return query_plan.convertToPipe(QueryPlanOptimizationSettings(context.getSettingsRef())); } diff --git a/src/Storages/StorageBuffer.cpp b/src/Storages/StorageBuffer.cpp index e28d5f4d6d1..33aa2140a1f 100644 --- a/src/Storages/StorageBuffer.cpp +++ b/src/Storages/StorageBuffer.cpp @@ -166,7 +166,7 @@ Pipe StorageBuffer::read( { QueryPlan plan; read(plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe(); + return plan.convertToPipe(QueryPlanOptimizationSettings(context.getSettingsRef())); } void StorageBuffer::read( diff --git a/src/Storages/StorageDistributed.cpp b/src/Storages/StorageDistributed.cpp index c08dc38fa2d..f66e2f782ca 100644 --- a/src/Storages/StorageDistributed.cpp +++ b/src/Storages/StorageDistributed.cpp @@ -501,7 +501,7 @@ Pipe StorageDistributed::read( { QueryPlan plan; read(plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe(); + return plan.convertToPipe(QueryPlanOptimizationSettings(context.getSettingsRef())); } void StorageDistributed::read( diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index af00b37b1d5..02654b37d68 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -126,7 +126,7 @@ Pipe StorageMaterializedView::read( { QueryPlan plan; read(plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe(); + return plan.convertToPipe(QueryPlanOptimizationSettings(context.getSettingsRef())); } void StorageMaterializedView::read( diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 202e909af0f..c8f44c78e6e 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -198,7 +198,7 @@ Pipe StorageMergeTree::read( { QueryPlan plan; read(plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe(); + return plan.convertToPipe(QueryPlanOptimizationSettings(context.getSettingsRef())); } std::optional StorageMergeTree::totalRows(const Settings &) const diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index 518577c473c..a0d96f43c17 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -3809,7 +3809,7 @@ Pipe StorageReplicatedMergeTree::read( { QueryPlan plan; read(plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe(); + return plan.convertToPipe(QueryPlanOptimizationSettings(context.getSettingsRef())); } diff --git a/src/Storages/StorageView.cpp b/src/Storages/StorageView.cpp index 
1ee5ab3d0ca..632d3807f83 100644 --- a/src/Storages/StorageView.cpp +++ b/src/Storages/StorageView.cpp @@ -59,7 +59,7 @@ Pipe StorageView::read( { QueryPlan plan; read(plan, column_names, metadata_snapshot, query_info, context, processed_stage, max_block_size, num_streams); - return plan.convertToPipe(); + return plan.convertToPipe(QueryPlanOptimizationSettings(context.getSettingsRef())); } void StorageView::read( diff --git a/tests/queries/0_stateless/01655_plan_optimizations.reference b/tests/queries/0_stateless/01655_plan_optimizations.reference index f261e134494..99b32b74ca7 100644 --- a/tests/queries/0_stateless/01655_plan_optimizations.reference +++ b/tests/queries/0_stateless/01655_plan_optimizations.reference @@ -1,3 +1,5 @@ +Too many optimizations applied to query plan +Too many optimizations applied to query plan > sipHash should be calculated after filtration FUNCTION sipHash64 Filter column: equals diff --git a/tests/queries/0_stateless/01655_plan_optimizations.sh b/tests/queries/0_stateless/01655_plan_optimizations.sh index 84452fe651f..3148dc4a597 100755 --- a/tests/queries/0_stateless/01655_plan_optimizations.sh +++ b/tests/queries/0_stateless/01655_plan_optimizations.sh @@ -4,6 +4,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +$CLICKHOUSE_CLIENT -q "select x + 1 from (select y + 2 as x from (select dummy + 3 as y)) settings query_plan_max_optimizations_to_apply = 1" 2>&1 | + grep -o "Too many optimizations applied to query plan" + echo "> sipHash should be calculated after filtration" $CLICKHOUSE_CLIENT -q "explain actions = 1 select sum(x), sum(y) from (select sipHash64(number) as x, bitAnd(number, 1024) as y from numbers_mt(1000000000) limit 1000000000) where y = 0" | grep -o "FUNCTION sipHash64\|Filter column: equals" echo "> sorting steps should know about limit" @@ -146,4 +149,4 @@ $CLICKHOUSE_CLIENT -q " $CLICKHOUSE_CLIENT -q " select * from ( select y, sum(x) from (select number as x, number % 4 as y from numbers(10)) group by y with totals - ) where y != 2" \ No newline at end of file + ) where y != 2" From b082b661f78e90632bf05d2929ae9428e030de94 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Fri, 26 Feb 2021 19:32:12 +0300 Subject: [PATCH 63/86] Fix ya.make --- src/Processors/ya.make | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Processors/ya.make b/src/Processors/ya.make index a44272cf9c0..4b376cdbfb2 100644 --- a/src/Processors/ya.make +++ b/src/Processors/ya.make @@ -113,6 +113,7 @@ SRCS( QueryPlan/MergingFinal.cpp QueryPlan/MergingSortedStep.cpp QueryPlan/OffsetStep.cpp + QueryPlan/Optimizations/QueryPlanOptimizationSettings.cpp QueryPlan/Optimizations/filterPushDown.cpp QueryPlan/Optimizations/liftUpArrayJoin.cpp QueryPlan/Optimizations/limitPushDown.cpp From f7dbcfc12a2ccaa4c57a149fb8ea624d46c511b6 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 22 Feb 2021 18:40:56 +0300 Subject: [PATCH 64/86] DDLTask: Use std::uncaught_exceptions() over deprecated std::uncaught_exception() --- src/Interpreters/DDLTask.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/DDLTask.h b/src/Interpreters/DDLTask.h index 18c1f4c80cd..45702599fcf 100644 --- a/src/Interpreters/DDLTask.h +++ b/src/Interpreters/DDLTask.h @@ -189,7 +189,7 @@ public: void commit(); - ~ZooKeeperMetadataTransaction() { assert(isExecuted() || std::uncaught_exception()); } + ~ZooKeeperMetadataTransaction() { assert(isExecuted() || std::uncaught_exceptions()); } }; } From 
78c6e0527d77683e83f483bc4abc2f698ba7b8a6 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Mon, 22 Feb 2021 18:39:10 +0300
Subject: [PATCH 65/86] DDLWorker: Fix reference to zookeeper with distributed_ddl.pool_size > 1 (thread pool)

---
 src/Interpreters/DDLWorker.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp
index 67f716c235c..8757b71c61a 100644
--- a/src/Interpreters/DDLWorker.cpp
+++ b/src/Interpreters/DDLWorker.cpp
@@ -365,7 +365,7 @@ void DDLWorker::scheduleTasks()

         if (worker_pool)
         {
-            worker_pool->scheduleOrThrowOnError([this, &saved_task, &zookeeper]()
+            worker_pool->scheduleOrThrowOnError([this, &saved_task, zookeeper]()
             {
                 setThreadName("DDLWorkerExec");
                 processTask(saved_task, zookeeper);

From b899d8d9195d5f9f7e89e61eab83112f341ea107 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Mon, 22 Feb 2021 18:43:32 +0300
Subject: [PATCH 66/86] tests: regression test for invalid reference to zookeeper with distributed_ddl.pool_size > 1

---
 tests/integration/test_distributed_ddl_parallel/test.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tests/integration/test_distributed_ddl_parallel/test.py b/tests/integration/test_distributed_ddl_parallel/test.py
index 96530b111cb..73a16bed471 100644
--- a/tests/integration/test_distributed_ddl_parallel/test.py
+++ b/tests/integration/test_distributed_ddl_parallel/test.py
@@ -87,3 +87,7 @@ def test_two_in_parallel_two_queued():
     for thread in threads:
         thread.join()
     inner_test()
+
+def test_smoke():
+    for _ in range(100):
+        initiator.query('DROP DATABASE IF EXISTS foo ON CLUSTER cluster')

From 06e8bb955d82d1180bf719292b6a6985a03ec0c7 Mon Sep 17 00:00:00 2001
From: Azat Khuzhin
Date: Mon, 22 Feb 2021 19:13:09 +0300
Subject: [PATCH 67/86] tests: catch exceptions in threads in test_distributed_ddl_parallel

Otherwise the test will not report an error.

---
 .../test_distributed_ddl_parallel/test.py | 27 ++++++++++++++++---
 1 file changed, 23 insertions(+), 4 deletions(-)

diff --git a/tests/integration/test_distributed_ddl_parallel/test.py b/tests/integration/test_distributed_ddl_parallel/test.py
index 73a16bed471..4474928815a 100644
--- a/tests/integration/test_distributed_ddl_parallel/test.py
+++ b/tests/integration/test_distributed_ddl_parallel/test.py
@@ -10,6 +10,25 @@ from helpers.cluster import ClickHouseCluster

 cluster = ClickHouseCluster(__file__)

+# By default, exceptions thrown in threads will be ignored
+# (they will not mark the test as failed, only printed to stderr).
+# +# Wrap thrading.Thread and re-throw exception on join() +class SafeThread(threading.Thread): + def __init__(self, target): + super().__init__() + self.target = target + self.exception = None + def run(self): + try: + self.target() + except Exception as e: # pylint: disable=broad-except + self.exception = e + def join(self, timeout=None): + super().join(timeout) + if self.exception: + raise self.exception + def add_instance(name): main_configs=[ 'configs/ddl.xml', @@ -68,11 +87,11 @@ def test_all_in_parallel(): def inner_test(): threads = [] for _ in range(2): - threads.append(threading.Thread(target=thread_reload_dictionary)) + threads.append(SafeThread(target=thread_reload_dictionary)) for thread in threads: thread.start() for thread in threads: - thread.join() + thread.join(60) inner_test() def test_two_in_parallel_two_queued(): @@ -81,11 +100,11 @@ def test_two_in_parallel_two_queued(): def inner_test(): threads = [] for _ in range(4): - threads.append(threading.Thread(target=thread_reload_dictionary)) + threads.append(SafeThread(target=thread_reload_dictionary)) for thread in threads: thread.start() for thread in threads: - thread.join() + thread.join(60) inner_test() def test_smoke(): From a795db57f1ed990739c340ae4ff124493acea24f Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 22 Feb 2021 19:58:08 +0300 Subject: [PATCH 68/86] tests: decrease distributed_ddl_task_timeout to 60 in test_distributed_ddl_parallel --- .../test_distributed_ddl_parallel/test.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/integration/test_distributed_ddl_parallel/test.py b/tests/integration/test_distributed_ddl_parallel/test.py index 4474928815a..a9f2f069e27 100644 --- a/tests/integration/test_distributed_ddl_parallel/test.py +++ b/tests/integration/test_distributed_ddl_parallel/test.py @@ -70,7 +70,9 @@ def longer_then(sec): # It takes 7 seconds to load slow_dict. def thread_reload_dictionary(): - initiator.query('SYSTEM RELOAD DICTIONARY ON CLUSTER cluster slow_dict') + initiator.query('SYSTEM RELOAD DICTIONARY ON CLUSTER cluster slow_dict', settings={ + 'distributed_ddl_task_timeout': 60, + }) # NOTE: uses inner function to exclude slow start_cluster() from timeout. @@ -91,7 +93,7 @@ def test_all_in_parallel(): for thread in threads: thread.start() for thread in threads: - thread.join(60) + thread.join(70) inner_test() def test_two_in_parallel_two_queued(): @@ -104,9 +106,11 @@ def test_two_in_parallel_two_queued(): for thread in threads: thread.start() for thread in threads: - thread.join(60) + thread.join(70) inner_test() def test_smoke(): for _ in range(100): - initiator.query('DROP DATABASE IF EXISTS foo ON CLUSTER cluster') + initiator.query('DROP DATABASE IF EXISTS foo ON CLUSTER cluster', settings={ + 'distributed_ddl_task_timeout': 60, + }) From 7481b05d27214f2d361fd8ac176946ed1aa2175a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 22 Feb 2021 20:33:42 +0300 Subject: [PATCH 69/86] DDLWorker: avoid comparing pointers for getting min element The current_tasks always updated with emplace_back, so front() is ok. --- src/Interpreters/DDLWorker.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 8757b71c61a..7c3db09128d 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -338,7 +338,7 @@ void DDLWorker::scheduleTasks() if (!server_startup) { /// We will recheck status of last executed tasks. 
It's useful if main thread was just restarted. - auto & min_task = *std::min_element(current_tasks.begin(), current_tasks.end()); + auto & min_task = current_tasks.front(); String min_entry_name = last_skipped_entry_name ? std::min(min_task->entry_name, *last_skipped_entry_name) : min_task->entry_name; begin_node = std::upper_bound(queue_nodes.begin(), queue_nodes.end(), min_entry_name); current_tasks.clear(); From 0cd67ed051174302591a9ddbb790f3ec7433b4a8 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 22 Feb 2021 20:35:52 +0300 Subject: [PATCH 70/86] DDLWorker: remove only completed tasks (significant for distributed_ddl.pool_size > 1) Otherwise it will SIGSEGV due to invalid-read (UAF). --- src/Interpreters/DDLWorker.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 7c3db09128d..09aae87beb2 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -341,7 +341,7 @@ void DDLWorker::scheduleTasks() auto & min_task = current_tasks.front(); String min_entry_name = last_skipped_entry_name ? std::min(min_task->entry_name, *last_skipped_entry_name) : min_task->entry_name; begin_node = std::upper_bound(queue_nodes.begin(), queue_nodes.end(), min_entry_name); - current_tasks.clear(); + current_tasks.remove_if([](const DDLTaskPtr & t) { return t->completely_processed.load(); }); } assert(current_tasks.empty()); From eeda1fe7562cf0573e2308ee0d0bcef36ef2e9f1 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 23 Feb 2021 08:22:08 +0300 Subject: [PATCH 71/86] DDLWorker: wait for pending async tasks --- src/Interpreters/DDLWorker.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 09aae87beb2..347fedadb62 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -1007,6 +1007,9 @@ void DDLWorker::runMainThread() if (Coordination::isHardwareError(e.code)) { initialized = false; + /// Wait for pending async tasks + if (1 < pool_size) + worker_pool = std::make_unique(pool_size); LOG_INFO(log, "Lost ZooKeeper connection, will try to connect again: {}", getCurrentExceptionMessage(true)); } else From 54b1496408ce8df75156028f867a3017e2d673b3 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 23 Feb 2021 08:23:24 +0300 Subject: [PATCH 72/86] DDLWorker: optimize processing of unfinished tasks Do not look at zookeeper if was_executed is false --- src/Interpreters/DDLWorker.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 347fedadb62..e5bed2c752c 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -314,11 +314,14 @@ void DDLWorker::scheduleTasks() { /// Main thread of DDLWorker was restarted, probably due to lost connection with ZooKeeper. /// We have some unfinished tasks. To avoid duplication of some queries, try to write execution status. 
- bool task_still_exists = zookeeper->exists(task->entry_path); - bool status_written = zookeeper->exists(task->getFinishedNodePath()); - if (task->was_executed && !status_written && task_still_exists) + if (task->was_executed) { - processTask(*task, zookeeper); + bool task_still_exists = zookeeper->exists(task->entry_path); + bool status_written = zookeeper->exists(task->getFinishedNodePath()); + if (!status_written && task_still_exists) + { + processTask(*task, zookeeper); + } } } From 3c7e765b270e619aca88263cd2a032c7b8727dc2 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 23 Feb 2021 08:26:46 +0300 Subject: [PATCH 73/86] DDLWorker: process unfinished tasks only after reinitialization --- src/Interpreters/DDLWorker.cpp | 25 +++++++++++++++---------- src/Interpreters/DDLWorker.h | 2 +- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index e5bed2c752c..45ef0d949f5 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -305,22 +305,25 @@ static void filterAndSortQueueNodes(Strings & all_nodes) std::sort(all_nodes.begin(), all_nodes.end()); } -void DDLWorker::scheduleTasks() +void DDLWorker::scheduleTasks(bool reinitialized) { LOG_DEBUG(log, "Scheduling tasks"); auto zookeeper = tryGetZooKeeper(); - for (auto & task : current_tasks) + /// Main thread of DDLWorker was restarted, probably due to lost connection with ZooKeeper. + /// We have some unfinished tasks. To avoid duplication of some queries, try to write execution status. + if (reinitialized) { - /// Main thread of DDLWorker was restarted, probably due to lost connection with ZooKeeper. - /// We have some unfinished tasks. To avoid duplication of some queries, try to write execution status. 
- if (task->was_executed) + for (auto & task : current_tasks) { - bool task_still_exists = zookeeper->exists(task->entry_path); - bool status_written = zookeeper->exists(task->getFinishedNodePath()); - if (!status_written && task_still_exists) + if (task->was_executed) { - processTask(*task, zookeeper); + bool task_still_exists = zookeeper->exists(task->entry_path); + bool status_written = zookeeper->exists(task->getFinishedNodePath()); + if (!status_written && task_still_exists) + { + processTask(*task, zookeeper); + } } } } @@ -992,6 +995,8 @@ void DDLWorker::runMainThread() { try { + bool reinitialized = !initialized; + /// Reinitialize DDLWorker state (including ZooKeeper connection) if required if (!initialized) { @@ -1000,7 +1005,7 @@ void DDLWorker::runMainThread() } cleanup_event->set(); - scheduleTasks(); + scheduleTasks(reinitialized); LOG_DEBUG(log, "Waiting for queue updates"); queue_updated_event->wait(); diff --git a/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h index 8b0a8f038a0..e0d8077265f 100644 --- a/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -69,7 +69,7 @@ protected: ZooKeeperPtr getAndSetZooKeeper(); /// Iterates through queue tasks in ZooKeeper, runs execution of new tasks - void scheduleTasks(); + void scheduleTasks(bool reinitialized); DDLTaskBase & saveTask(DDLTaskPtr && task); From dfd1c73b31d4f89fb5507407213c7798b40199e2 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Tue, 23 Feb 2021 23:20:31 +0300 Subject: [PATCH 74/86] Load dictionaries before starting accepting connections and DDLWorker --- programs/server/Server.cpp | 62 +++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 8fcdfc4beac..72db8f59494 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1017,17 +1017,6 @@ int Server::main(const std::vector & /*args*/) LOG_INFO(log, "Query Profiler and TraceCollector are disabled because they require PHDR cache to be created" " (otherwise the function 'dl_iterate_phdr' is not lock free and not async-signal safe)."); - if (has_zookeeper && config().has("distributed_ddl")) - { - /// DDL worker should be started after all tables were loaded - String ddl_zookeeper_path = config().getString("distributed_ddl.path", "/clickhouse/task_queue/ddl/"); - int pool_size = config().getInt("distributed_ddl.pool_size", 1); - if (pool_size < 1) - throw Exception("distributed_ddl.pool_size should be greater then 0", ErrorCodes::ARGUMENT_OUT_OF_BOUND); - global_context->setDDLWorker(std::make_unique(pool_size, ddl_zookeeper_path, *global_context, &config(), - "distributed_ddl", "DDLWorker", &CurrentMetrics::MaxDDLEntryID)); - } - std::unique_ptr dns_cache_updater; if (config().has("disable_internal_dns_cache") && config().getInt("disable_internal_dns_cache")) { @@ -1309,6 +1298,37 @@ int Server::main(const std::vector & /*args*/) std::thread::hardware_concurrency()); } + /// try to load dictionaries immediately, throw on error and die + ext::scope_guard dictionaries_xmls, models_xmls; + try + { + if (!config().getBool("dictionaries_lazy_load", true)) + { + global_context->tryCreateEmbeddedDictionaries(); + global_context->getExternalDictionariesLoader().enableAlwaysLoadEverything(true); + } + dictionaries_xmls = global_context->getExternalDictionariesLoader().addConfigRepository( + std::make_unique(config(), "dictionaries_config")); + models_xmls = global_context->getExternalModelsLoader().addConfigRepository( 
+ std::make_unique(config(), "models_config")); + } + catch (...) + { + LOG_ERROR(log, "Caught exception while loading dictionaries."); + throw; + } + + if (has_zookeeper && config().has("distributed_ddl")) + { + /// DDL worker should be started after all tables were loaded + String ddl_zookeeper_path = config().getString("distributed_ddl.path", "/clickhouse/task_queue/ddl/"); + int pool_size = config().getInt("distributed_ddl.pool_size", 1); + if (pool_size < 1) + throw Exception("distributed_ddl.pool_size should be greater then 0", ErrorCodes::ARGUMENT_OUT_OF_BOUND); + global_context->setDDLWorker(std::make_unique(pool_size, ddl_zookeeper_path, *global_context, &config(), + "distributed_ddl", "DDLWorker", &CurrentMetrics::MaxDDLEntryID)); + } + LOG_INFO(log, "Ready for connections."); SCOPE_EXIT({ @@ -1358,26 +1378,6 @@ int Server::main(const std::vector & /*args*/) } }); - /// try to load dictionaries immediately, throw on error and die - ext::scope_guard dictionaries_xmls, models_xmls; - try - { - if (!config().getBool("dictionaries_lazy_load", true)) - { - global_context->tryCreateEmbeddedDictionaries(); - global_context->getExternalDictionariesLoader().enableAlwaysLoadEverything(true); - } - dictionaries_xmls = global_context->getExternalDictionariesLoader().addConfigRepository( - std::make_unique(config(), "dictionaries_config")); - models_xmls = global_context->getExternalModelsLoader().addConfigRepository( - std::make_unique(config(), "models_config")); - } - catch (...) - { - LOG_ERROR(log, "Caught exception while loading dictionaries."); - throw; - } - std::vector> metrics_transmitters; for (const auto & graphite_key : DB::getMultipleKeysFromConfig(config(), "", "graphite")) { From 955974a8b5ba5461894d6ac7692c6593cb62cb9a Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 24 Feb 2021 08:07:31 +0300 Subject: [PATCH 75/86] DDLWorker: avoid NULL dereference on termination and failed zookeeper initialization Log snipped shows the problem: 2021.02.24 04:40:29.349181 [ 39 ] {} DDLWorker: DDLWorker is configured to use multiple threads. It's not recommended because queries can be reordered. Also it may cause some unknown issues to appear. 2021.02.24 04:40:29.349516 [ 39 ] {} Application: Ready for connections. 2021.02.24 04:40:29.349602 [ 74 ] {} DDLWorker: Started DDLWorker cleanup thread 2021.02.24 04:40:29.349639 [ 73 ] {} DDLWorker: Starting DDLWorker thread 2021.02.24 04:40:29.349698 [ 73 ] {} DDLWorker: Started DDLWorker thread 2021.02.24 04:40:29.352548 [ 73 ] {} virtual void DB::DDLWorker::initializeMainThread(): Code: 999, e.displayText() = Coordination::Exception: All connection tries failed while connecting to ZooKeeper. nodes: 192.168.112.3:2181 Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused (version 21.3.1.1), 192.168.112.3:2181 Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused (version 21.3.1.1), 192.168.112.3:2181 Poco::Exception. Code: 1000, e.code() = 111, e.displayText() = Connection refused (version 21.3.1.1), 192.168.112.3:2181 (Connection loss), Stack trace (when copying this message, always include the lines below): 0. Coordination::Exception::Exception(std::__1::basic_string, std::__1::allocator > const&, Coordination::Error, int) @ 0xfe93923 in /usr/bin/clickhouse 1. Coordination::Exception::Exception(std::__1::basic_string, std::__1::allocator > const&, Coordination::Error) @ 0xfe93ba2 in /usr/bin/clickhouse 2. 
Coordination::ZooKeeper::connect(std::__1::vector > const&, Poco::Timespan) @ 0xfed3a01 in /usr/bin/clickhouse 3. Coordination::ZooKeeper::ZooKeeper(std::__1::vector > const&, std::__1::basic_string, std::__1::allocator > const&, std::__1::basic_string, std::__1::allocator > const&, std::__1::basic_string, std::__1::allocator > const&, Poco::Timespan, Poco::Timespan, Poco::Timespan) @ 0xfed2222 in /usr/bin/clickhouse 4. zkutil::ZooKeeper::init(std::__1::basic_string, std::__1::allocator > const&, std::__1::basic_string, std::__1::allocator > const&, std::__1::basic_string, std::__1::allocator > const&, int, int, std::__1::basic_string, std::__1::allocator > const&) @ 0xfe961cd in /usr/bin/clickhouse 5. zkutil::ZooKeeper::ZooKeeper(Poco::Util::AbstractConfiguration const&, std::__1::basic_string, std::__1::allocator > const&) @ 0xfe97a96 in /usr/bin/clickhouse 6. void std::__1::allocator_traits >::__construct(std::__1::integral_constant, std::__1::allocator&, zkutil::ZooKeeper*, Poco::Util::AbstractConfiguration const&, char const (&) [10]) @ 0xed98387 in /usr/bin/clickhouse 7. DB::Context::getZooKeeper() const @ 0xed75190 in /usr/bin/clickhouse 8. DB::DDLWorker::getAndSetZooKeeper() @ 0xedb81c9 in /usr/bin/clickhouse 9. DB::DDLWorker::initializeMainThread() @ 0xedc9eb0 in /usr/bin/clickhouse 10. DB::DDLWorker::runMainThread() @ 0xedb5d01 in /usr/bin/clickhouse 11. ThreadFromGlobalPool::ThreadFromGlobalPool(void (DB::DDLWorker::*&&)(), DB::DDLWorker*&&)::'lambda'()::operator()() @ 0xedcafa1 in /usr/bin/clickhouse 12. ThreadPoolImpl::worker(std::__1::__list_iterator) @ 0x892651f in /usr/bin/clickhouse 13. ? @ 0x8929fb3 in /usr/bin/clickhouse 14. start_thread @ 0x8ea7 in /lib/x86_64-linux-gnu/libpthread-2.31.so 15. __clone @ 0xfddef in /lib/x86_64-linux-gnu/libc-2.31.so (version 21.3.1.1) ... 2021.02.24 04:40:30.025278 [ 41 ] {} BaseDaemon: Received signal 15 2021.02.24 04:40:30.025336 [ 41 ] {} Application: Received termination signal (Terminated) ... 2021.02.24 04:40:30.582078 [ 39 ] {} Application: Closed all listening sockets. 2021.02.24 04:40:30.582124 [ 39 ] {} Application: Closed connections. 2021.02.24 04:40:30.583770 [ 39 ] {} Application: Shutting down storages. 2021.02.24 04:40:30.583932 [ 39 ] {} Context: Shutdown disk data 2021.02.24 04:40:30.583951 [ 39 ] {} Context: Shutdown disk default 2021.02.24 04:40:30.584163 [ 46 ] {} SystemLog (system.query_log): Terminating 2021.02.24 04:40:30.586025 [ 39 ] {} BackgroundSchedulePool/BgSchPool: Waiting for threads to finish. 2021.02.24 04:40:34.352701 [ 73 ] {} DDLWorker: Initialized DDLWorker thread 2021.02.24 04:40:34.352758 [ 73 ] {} DDLWorker: Scheduling tasks --- src/Databases/DatabaseReplicatedWorker.cpp | 6 ++++-- src/Databases/DatabaseReplicatedWorker.h | 2 +- src/Interpreters/DDLWorker.cpp | 12 ++++++++---- src/Interpreters/DDLWorker.h | 3 ++- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/Databases/DatabaseReplicatedWorker.cpp b/src/Databases/DatabaseReplicatedWorker.cpp index e0c5717711c..25a19c4dfb2 100644 --- a/src/Databases/DatabaseReplicatedWorker.cpp +++ b/src/Databases/DatabaseReplicatedWorker.cpp @@ -22,7 +22,7 @@ DatabaseReplicatedDDLWorker::DatabaseReplicatedDDLWorker(DatabaseReplicated * db /// We also need similar graph to load tables on server startup in order of topsort. 
} -void DatabaseReplicatedDDLWorker::initializeMainThread() +bool DatabaseReplicatedDDLWorker::initializeMainThread() { while (!stop_flag) { @@ -33,7 +33,7 @@ void DatabaseReplicatedDDLWorker::initializeMainThread() database->tryConnectToZooKeeperAndInitDatabase(false); initializeReplication(); initialized = true; - return; + return true; } catch (...) { @@ -41,6 +41,8 @@ void DatabaseReplicatedDDLWorker::initializeMainThread() sleepForSeconds(5); } } + + return false; } void DatabaseReplicatedDDLWorker::shutdown() diff --git a/src/Databases/DatabaseReplicatedWorker.h b/src/Databases/DatabaseReplicatedWorker.h index 6ba46a98bca..3a45817c755 100644 --- a/src/Databases/DatabaseReplicatedWorker.h +++ b/src/Databases/DatabaseReplicatedWorker.h @@ -30,7 +30,7 @@ public: void shutdown() override; private: - void initializeMainThread() override; + bool initializeMainThread() override; void initializeReplication(); DDLTaskPtr initAndCheckTask(const String & entry_name, String & out_reason, const ZooKeeperPtr & zookeeper) override; diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 45ef0d949f5..19534c13c99 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -936,11 +936,11 @@ String DDLWorker::enqueueQuery(DDLLogEntry & entry) } -void DDLWorker::initializeMainThread() +bool DDLWorker::initializeMainThread() { assert(!initialized); setThreadName("DDLWorker"); - LOG_DEBUG(log, "Started DDLWorker thread"); + LOG_DEBUG(log, "Initializing DDLWorker thread"); while (!stop_flag) { @@ -949,7 +949,7 @@ void DDLWorker::initializeMainThread() auto zookeeper = getAndSetZooKeeper(); zookeeper->createAncestors(fs::path(queue_dir) / ""); initialized = true; - return; + return true; } catch (const Coordination::Exception & e) { @@ -970,6 +970,8 @@ void DDLWorker::initializeMainThread() /// Avoid busy loop when ZooKeeper is not available. 
sleepForSeconds(5); } + + return false; } void DDLWorker::runMainThread() @@ -1000,7 +1002,9 @@ void DDLWorker::runMainThread() /// Reinitialize DDLWorker state (including ZooKeeper connection) if required if (!initialized) { - initializeMainThread(); + /// Stopped + if (!initializeMainThread()) + break; LOG_DEBUG(log, "Initialized DDLWorker thread"); } diff --git a/src/Interpreters/DDLWorker.h b/src/Interpreters/DDLWorker.h index e0d8077265f..0ef7456430f 100644 --- a/src/Interpreters/DDLWorker.h +++ b/src/Interpreters/DDLWorker.h @@ -104,7 +104,8 @@ protected: /// Init task node void createStatusDirs(const std::string & node_path, const ZooKeeperPtr & zookeeper); - virtual void initializeMainThread(); + /// Return false if the worker was stopped (stop_flag = true) + virtual bool initializeMainThread(); void runMainThread(); void runCleanupThread(); From 0e68fc67aaef9531440d7c5eb3686d47b5db1963 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 24 Feb 2021 21:22:36 +0300 Subject: [PATCH 76/86] Add other distributed_ddl settings into config.xml as an example --- programs/server/config.xml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/programs/server/config.xml b/programs/server/config.xml index ba9b8b04b05..b72cf53ca03 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -892,6 +892,19 @@ + + + + + + + + + + + From 7d51ae321239f67357acaedc881cf4a0f7a1c215 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 24 Feb 2021 21:22:36 +0300 Subject: [PATCH 77/86] DDLWorker: Fix processing of tasks in parallel (correct queue begin) Otherwise it will: - start from incorrect task and bail because finished node already exists - and also process alreayd processed items --- src/Interpreters/DDLWorker.cpp | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/Interpreters/DDLWorker.cpp b/src/Interpreters/DDLWorker.cpp index 19534c13c99..4da0d21791b 100644 --- a/src/Interpreters/DDLWorker.cpp +++ b/src/Interpreters/DDLWorker.cpp @@ -338,19 +338,23 @@ void DDLWorker::scheduleTasks(bool reinitialized) else if (max_tasks_in_queue < queue_nodes.size()) cleanup_event->set(); - bool server_startup = current_tasks.empty(); + /// Detect queue start, using: + /// - skipped tasks + /// - in memory tasks (that are currently active) auto begin_node = queue_nodes.begin(); - - if (!server_startup) + UInt64 last_task_id = 0; + if (!current_tasks.empty()) { - /// We will recheck status of last executed tasks. It's useful if main thread was just restarted. - auto & min_task = current_tasks.front(); - String min_entry_name = last_skipped_entry_name ? 
std::min(min_task->entry_name, *last_skipped_entry_name) : min_task->entry_name; - begin_node = std::upper_bound(queue_nodes.begin(), queue_nodes.end(), min_entry_name); - current_tasks.remove_if([](const DDLTaskPtr & t) { return t->completely_processed.load(); }); + auto & last_task = current_tasks.back(); + last_task_id = DDLTaskBase::getLogEntryNumber(last_task->entry_name); + begin_node = std::upper_bound(queue_nodes.begin(), queue_nodes.end(), last_task->entry_name); + } + if (last_skipped_entry_name) + { + UInt64 last_skipped_entry_id = DDLTaskBase::getLogEntryNumber(*last_skipped_entry_name); + if (last_skipped_entry_id > last_task_id) + begin_node = std::upper_bound(queue_nodes.begin(), queue_nodes.end(), *last_skipped_entry_name); } - - assert(current_tasks.empty()); for (auto it = begin_node; it != queue_nodes.end() && !stop_flag; ++it) { From d42d4cfd6b3ddce7ec92fb35b5b11e96f4da3e66 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Wed, 24 Feb 2021 21:22:36 +0300 Subject: [PATCH 78/86] Improve test_distributed_ddl_parallel to cover more cases Refs: #21264 --- .../configs/ddl_a.xml | 5 ++ .../configs/ddl_b.xml | 5 ++ .../configs/dict.xml | 70 +++++++++++++------ .../configs/remote_servers.xml | 18 ++++- .../test_distributed_ddl_parallel/test.py | 67 ++++++++++++++---- 5 files changed, 126 insertions(+), 39 deletions(-) create mode 100644 tests/integration/test_distributed_ddl_parallel/configs/ddl_a.xml create mode 100644 tests/integration/test_distributed_ddl_parallel/configs/ddl_b.xml diff --git a/tests/integration/test_distributed_ddl_parallel/configs/ddl_a.xml b/tests/integration/test_distributed_ddl_parallel/configs/ddl_a.xml new file mode 100644 index 00000000000..b926f99c687 --- /dev/null +++ b/tests/integration/test_distributed_ddl_parallel/configs/ddl_a.xml @@ -0,0 +1,5 @@ + + + 2 + + diff --git a/tests/integration/test_distributed_ddl_parallel/configs/ddl_b.xml b/tests/integration/test_distributed_ddl_parallel/configs/ddl_b.xml new file mode 100644 index 00000000000..2f038919032 --- /dev/null +++ b/tests/integration/test_distributed_ddl_parallel/configs/ddl_b.xml @@ -0,0 +1,5 @@ + + + 20 + + diff --git a/tests/integration/test_distributed_ddl_parallel/configs/dict.xml b/tests/integration/test_distributed_ddl_parallel/configs/dict.xml index 610d55841a0..d94b3f61dd9 100644 --- a/tests/integration/test_distributed_ddl_parallel/configs/dict.xml +++ b/tests/integration/test_distributed_ddl_parallel/configs/dict.xml @@ -1,26 +1,50 @@ - - slow_dict - - - sleep 7 - TabSeparated - - - - - - - - id - - - value - String - - - - 0 - + + slow_dict_7 + + + sleep 7 + TabSeparated + + + + + + + + id + + + value + String + + + + 0 + + + + slow_dict_3 + + + sleep 3 + TabSeparated + + + + + + + + id + + + value + String + + + + 0 + diff --git a/tests/integration/test_distributed_ddl_parallel/configs/remote_servers.xml b/tests/integration/test_distributed_ddl_parallel/configs/remote_servers.xml index 8ffa9f024d7..eb0ee60186b 100644 --- a/tests/integration/test_distributed_ddl_parallel/configs/remote_servers.xml +++ b/tests/integration/test_distributed_ddl_parallel/configs/remote_servers.xml @@ -1,6 +1,6 @@ - + n1 @@ -13,6 +13,20 @@ 9000 - + + + + + n3 + 9000 + + + + + n4 + 9000 + + + diff --git a/tests/integration/test_distributed_ddl_parallel/test.py b/tests/integration/test_distributed_ddl_parallel/test.py index a9f2f069e27..44971ca3d9e 100644 --- a/tests/integration/test_distributed_ddl_parallel/test.py +++ b/tests/integration/test_distributed_ddl_parallel/test.py @@ -29,11 +29,12 @@ 
class SafeThread(threading.Thread): if self.exception: raise self.exception -def add_instance(name): +def add_instance(name, ddl_config=None): main_configs=[ - 'configs/ddl.xml', 'configs/remote_servers.xml', ] + if ddl_config: + main_configs.append(ddl_config) dictionaries=[ 'configs/dict.xml', ] @@ -43,8 +44,12 @@ def add_instance(name): with_zookeeper=True) initiator = add_instance('initiator') -n1 = add_instance('n1') -n2 = add_instance('n2') +# distributed_ddl.pool_size = 2 +n1 = add_instance('n1', 'configs/ddl_a.xml') +n2 = add_instance('n2', 'configs/ddl_a.xml') +# distributed_ddl.pool_size = 20 +n3 = add_instance('n3', 'configs/ddl_b.xml') +n4 = add_instance('n4', 'configs/ddl_b.xml') @pytest.fixture(scope='module', autouse=True) def start_cluster(): @@ -68,19 +73,32 @@ def longer_then(sec): return inner return wrapper -# It takes 7 seconds to load slow_dict. -def thread_reload_dictionary(): - initiator.query('SYSTEM RELOAD DICTIONARY ON CLUSTER cluster slow_dict', settings={ +# It takes 7 seconds to load slow_dict_7. +def execute_reload_dictionary_slow_dict_7(): + initiator.query('SYSTEM RELOAD DICTIONARY ON CLUSTER cluster_a slow_dict_7', settings={ + 'distributed_ddl_task_timeout': 60, + }) +def execute_reload_dictionary_slow_dict_3(): + initiator.query('SYSTEM RELOAD DICTIONARY ON CLUSTER cluster_b slow_dict_3', settings={ + 'distributed_ddl_task_timeout': 60, + }) +def execute_smoke_query(): + initiator.query('DROP DATABASE IF EXISTS foo ON CLUSTER cluster_b', settings={ 'distributed_ddl_task_timeout': 60, }) +def check_log(): + # ensure that none of tasks processed multiple times + for _, instance in list(cluster.instances.items()): + assert not instance.contains_in_log('Coordination::Exception: Node exists') + # NOTE: uses inner function to exclude slow start_cluster() from timeout. 
-def test_dict_load(): +def test_slow_dict_load_7(): @pytest.mark.timeout(10) @longer_then(7) def inner_test(): - initiator.query('SYSTEM RELOAD DICTIONARY slow_dict') + initiator.query('SYSTEM RELOAD DICTIONARY slow_dict_7') inner_test() def test_all_in_parallel(): @@ -89,12 +107,13 @@ def test_all_in_parallel(): def inner_test(): threads = [] for _ in range(2): - threads.append(SafeThread(target=thread_reload_dictionary)) + threads.append(SafeThread(target=execute_reload_dictionary_slow_dict_7)) for thread in threads: thread.start() for thread in threads: thread.join(70) inner_test() + check_log() def test_two_in_parallel_two_queued(): @pytest.mark.timeout(19) @@ -102,15 +121,35 @@ def test_two_in_parallel_two_queued(): def inner_test(): threads = [] for _ in range(4): - threads.append(SafeThread(target=thread_reload_dictionary)) + threads.append(SafeThread(target=execute_reload_dictionary_slow_dict_7)) for thread in threads: thread.start() for thread in threads: thread.join(70) inner_test() + check_log() def test_smoke(): for _ in range(100): - initiator.query('DROP DATABASE IF EXISTS foo ON CLUSTER cluster', settings={ - 'distributed_ddl_task_timeout': 60, - }) + execute_smoke_query() + check_log() + +def test_smoke_parallel(): + threads = [] + for _ in range(100): + threads.append(SafeThread(target=execute_smoke_query)) + for thread in threads: + thread.start() + for thread in threads: + thread.join(70) + check_log() + +def test_smoke_parallel_dict_reload(): + threads = [] + for _ in range(100): + threads.append(SafeThread(target=execute_reload_dictionary_slow_dict_3)) + for thread in threads: + thread.start() + for thread in threads: + thread.join(70) + check_log() From 366fba4b0493904cf9a68f158b437cf6ac5a7114 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Mon, 1 Mar 2021 16:06:44 +0300 Subject: [PATCH 79/86] Update 01592_long_window_functions1.sql --- tests/queries/0_stateless/01592_long_window_functions1.sql | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01592_long_window_functions1.sql b/tests/queries/0_stateless/01592_long_window_functions1.sql index c7751ab9f33..bb0f77ff60a 100644 --- a/tests/queries/0_stateless/01592_long_window_functions1.sql +++ b/tests/queries/0_stateless/01592_long_window_functions1.sql @@ -1,6 +1,7 @@ drop table if exists stack; set allow_experimental_window_functions = 1; +set max_insert_threads = 4; create table stack(item_id Int64, brand_id Int64, rack_id Int64, dt DateTime, expiration_dt DateTime, quantity UInt64) Engine = MergeTree @@ -10,7 +11,7 @@ order by (brand_id, toStartOfHour(dt)); insert into stack select number%99991, number%11, number%1111, toDateTime('2020-01-01 00:00:00')+number/100, toDateTime('2020-02-01 00:00:00')+number/10, intDiv(number,100)+1 -from numbers(10000000); +from numbers_mt(10000000); select '---- arrays ----'; From a4f2ee0752278198833f495aa9643e0b56ac0685 Mon Sep 17 00:00:00 2001 From: Roman Bug Date: Mon, 1 Mar 2021 16:51:38 +0300 Subject: [PATCH 80/86] DOCSUP-6144: Edit and translate PR to Russian (#19780) Co-authored-by: Anna <42538400+adevyatova@users.noreply.github.com> Co-authored-by: romanzhukov Co-authored-by: Vladimir --- docs/en/operations/system-tables/index.md | 4 +- .../en/sql-reference/statements/select/all.md | 6 +-- docs/ru/operations/system-tables/index.md | 45 +++++++++++++++---- .../external-dicts-dict-sources.md | 2 +- .../ru/sql-reference/statements/select/all.md | 22 +++++++++ 5 files changed, 65 
insertions(+), 14 deletions(-)
 create mode 100644 docs/ru/sql-reference/statements/select/all.md

diff --git a/docs/en/operations/system-tables/index.md b/docs/en/operations/system-tables/index.md
index 5dc23aee686..e66f082167e 100644
--- a/docs/en/operations/system-tables/index.md
+++ b/docs/en/operations/system-tables/index.md
@@ -20,7 +20,7 @@ System tables:
 
 Most of system tables store their data in RAM. A ClickHouse server creates such system tables at the start.
 
-Unlike other system tables, the system log tables [metric_log](../../operations/system-tables/metric_log.md), [query_log](../../operations/system-tables/query_log.md), [query_thread_log](../../operations/system-tables/query_thread_log.md), [trace_log](../../operations/system-tables/trace_log.md), [part_log](../../operations/system-tables/part_log.md), crash_log and [text_log](../../operations/system-tables/text_log.md) are served by [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine and store their data in a storage filesystem by default. If you remove a table from a filesystem, the ClickHouse server creates the empty one again at the time of the next data writing. If system table schema changed in a new release, then ClickHouse renames the current table and creates a new one.
+Unlike other system tables, the system log tables [metric_log](../../operations/system-tables/metric_log.md), [query_log](../../operations/system-tables/query_log.md), [query_thread_log](../../operations/system-tables/query_thread_log.md), [trace_log](../../operations/system-tables/trace_log.md), [part_log](../../operations/system-tables/part_log.md), [crash_log](../../operations/system-tables/crash-log.md) and [text_log](../../operations/system-tables/text_log.md) are served by [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) table engine and store their data in a filesystem by default. If you remove a table from a filesystem, the ClickHouse server creates the empty one again at the time of the next data writing. If system table schema changed in a new release, then ClickHouse renames the current table and creates a new one.
 
 System log tables can be customized by creating a config file with the same name as the table under `/etc/clickhouse-server/config.d/`, or setting corresponding elements in `/etc/clickhouse-server/config.xml`. Elements can be customized are:
 
@@ -33,7 +33,7 @@ System log tables can be customized by creating a config file with the same name
 
 An example:
 
-```
+```xml
 <yandex>
     <query_log>
         <database>system</database>
diff --git a/docs/en/sql-reference/statements/select/all.md b/docs/en/sql-reference/statements/select/all.md
index 5e0de4c142b..891b82c4319 100644
--- a/docs/en/sql-reference/statements/select/all.md
+++ b/docs/en/sql-reference/statements/select/all.md
@@ -4,10 +4,8 @@ toc_title: ALL
 
 # ALL Clause {#select-all}
 
-`SELECT ALL` is identical to `SELECT` without `DISTINCT`.
+If there are multiple matching rows in the table, then `ALL` returns all of them. `SELECT ALL` is identical to `SELECT` without `DISTINCT`. If both `ALL` and `DISTINCT` are specified, an exception will be thrown.
 
-- If `ALL` specified, ignore it.
-- If both `ALL` and `DISTINCT` specified, exception will be thrown.
 
`ALL` can also be specified inside aggregate function with the same effect(noop), for instance:
@@ -19,3 +17,5 @@ equals to
 ```sql
 SELECT sum(number) FROM numbers(10);
 ```
+
+[Original article](https://clickhouse.tech/docs/en/sql-reference/statements/select/all)
diff --git a/docs/ru/operations/system-tables/index.md b/docs/ru/operations/system-tables/index.md
index 93ea1c92068..cdea6102a81 100644
--- a/docs/ru/operations/system-tables/index.md
+++ b/docs/ru/operations/system-tables/index.md
@@ -9,25 +9,54 @@ toc_title: "\u0421\u0438\u0441\u0442\u0435\u043c\u043d\u044b\u0435\u0020\u0442\u
 
 Системные таблицы содержат информацию о:
 
-- Состоянии сервера, процессов и окружении.
-- Внутренних процессах сервера.
+- состоянии сервера, процессов и окружении.
+- внутренних процессах сервера.
 
 Системные таблицы:
 
-- Находятся в базе данных `system`.
-- Доступны только для чтения данных.
-- Не могут быть удалены или изменены, но их можно отсоединить.
+- находятся в базе данных `system`.
+- доступны только для чтения данных.
+- не могут быть удалены или изменены, но их можно отсоединить.
 
-Системные таблицы `metric_log`, `query_log`, `query_thread_log`, `trace_log` системные таблицы хранят данные в файловой системе. Остальные системные таблицы хранят свои данные в оперативной памяти. Сервер ClickHouse создает такие системные таблицы при запуске.
+Большинство системных таблиц хранят свои данные в оперативной памяти. Сервер ClickHouse создает эти системные таблицы при старте.
+
+В отличие от других системных таблиц, таблицы с системными логами [metric_log](../../operations/system-tables/metric_log.md), [query_log](../../operations/system-tables/query_log.md), [query_thread_log](../../operations/system-tables/query_thread_log.md), [trace_log](../../operations/system-tables/trace_log.md), [part_log](../../operations/system-tables/part_log.md), [crash_log](../../operations/system-tables/crash-log.md) и [text_log](../../operations/system-tables/text_log.md) используют движок таблиц [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) и по умолчанию хранят свои данные в файловой системе. Если удалить таблицу из файловой системы, сервер ClickHouse снова создаст пустую таблицу во время следующей записи данных. Если схема системной таблицы изменилась в новом релизе, то ClickHouse переименует текущую таблицу и создаст новую.
+
+Таблицы с системными логами можно настроить, создав конфигурационный файл с тем же именем, что и таблица, в разделе `/etc/clickhouse-server/config.d/`, или указав соответствующие элементы в `/etc/clickhouse-server/config.xml`. Настраиваться могут следующие элементы:
+
+- `database` — база данных, к которой принадлежит системная таблица. Эта опция на текущий момент устарела. Все системные таблицы находятся в базе данных `system`.
+- `table` — таблица для добавления данных.
+- `partition_by` — [ключ партиционирования](../../engines/table-engines/mergetree-family/custom-partitioning-key.md).
+- `ttl` — [время жизни](../../sql-reference/statements/alter/ttl.md) таблицы.
+- `flush_interval_milliseconds` — интервал сброса данных на диск, в миллисекундах.
+- `engine` — полное имя движка (начиная с `ENGINE =`) с параметрами. Эта опция противоречит `partition_by` и `ttl`. Если указать оба параметра вместе, сервер вернет ошибку и завершит работу.
+
+Пример:
+
+```xml
+<yandex>
+    <query_log>
+        <database>system</database>
+        <table>query_log</table>
+        <partition_by>toYYYYMM(event_date)</partition_by>
+        <ttl>event_date + INTERVAL 30 DAY DELETE</ttl>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+    </query_log>
+</yandex>
+```
+
+По умолчанию размер таблицы не ограничен. Управлять размером таблицы можно, используя [TTL](../../sql-reference/statements/alter/ttl.md#manipuliatsii-s-ttl-tablitsy) для удаления устаревших записей журнала. Также вы можете использовать функцию партиционирования для таблиц `MergeTree`.
 
 ### Источники системных показателей
 
 Для сбора системных показателей сервер ClickHouse использует:
 
-- Возможности `CAP_NET_ADMIN`.
+- возможности `CAP_NET_ADMIN`.
 - [procfs](https://ru.wikipedia.org/wiki/Procfs) (только Linux).
 
-**procfs**
 
 Если для сервера ClickHouse не включено `CAP_NET_ADMIN`, он пытается обратиться к `ProcfsMetricsProvider`. `ProcfsMetricsProvider` позволяет собирать системные показатели для каждого запроса (для CPU и I/O).
diff --git a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
index 3bb11b638b2..77275b65a05 100644
--- a/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
+++ b/docs/ru/sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md
@@ -572,7 +572,7 @@ SOURCE(CLICKHOUSE(
 или
 
 ``` sql
-SOURCE(MONGO(
+SOURCE(MONGODB(
     host 'localhost'
     port 27017
     user ''
diff --git a/docs/ru/sql-reference/statements/select/all.md b/docs/ru/sql-reference/statements/select/all.md
new file mode 100644
index 00000000000..4049d77a173
--- /dev/null
+++ b/docs/ru/sql-reference/statements/select/all.md
@@ -0,0 +1,22 @@
+---
+toc_title: ALL
+---
+
+# Секция ALL {#select-all}
+
+Если в таблице несколько совпадающих строк, то `ALL` возвращает все из них. Поведение запроса `SELECT ALL` точно такое же, как и `SELECT` без аргумента `DISTINCT`. Если указаны оба аргумента: `ALL` и `DISTINCT`, будет выброшено исключение.
+
+
+`ALL` может быть указан внутри агрегатной функции, например, результат выполнения запроса:
+
+```sql
+SELECT sum(ALL number) FROM numbers(10);
+```
+
+равен результату выполнения запроса:
+
+```sql
+SELECT sum(number) FROM numbers(10);
+```
+
+[Оригинальная статья](https://clickhouse.tech/docs/ru/sql-reference/statements/select/all)
From 10aa26576cf120f9c2e9fa2996667973a55027a2 Mon Sep 17 00:00:00 2001
From: tavplubix
Date: Mon, 1 Mar 2021 17:11:45 +0300
Subject: [PATCH 81/86] Update DatabaseReplicatedWorker.cpp

---
 src/Databases/DatabaseReplicatedWorker.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Databases/DatabaseReplicatedWorker.cpp b/src/Databases/DatabaseReplicatedWorker.cpp
index 25a19c4dfb2..ee5a3b5eed0 100644
--- a/src/Databases/DatabaseReplicatedWorker.cpp
+++ b/src/Databases/DatabaseReplicatedWorker.cpp
@@ -63,7 +63,7 @@ void DatabaseReplicatedDDLWorker::initializeReplication()
     if (our_log_ptr == 0 || our_log_ptr + logs_to_keep < max_log_ptr)
         database->recoverLostReplica(current_zookeeper, our_log_ptr, max_log_ptr);
     else
-        last_skipped_entry_name.emplace(log_ptr_str);
+        last_skipped_entry_name.emplace(DDLTaskBase::getLogEntryName(our_log_ptr));
 }
 
 String DatabaseReplicatedDDLWorker::enqueueQuery(DDLLogEntry & entry)
From 3764a2a2beb106d595ba5c90b2c72ead12b58c58 Mon Sep 17 00:00:00 2001
From: Maksim Kita
Date: Mon, 1 Mar 2021 19:15:59 +0300
Subject: [PATCH 82/86] AggregateFunctionSumMap better comment message

---
 src/AggregateFunctions/AggregateFunctionSumMap.h | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/AggregateFunctions/AggregateFunctionSumMap.h b/src/AggregateFunctions/AggregateFunctionSumMap.h
index f6a473546f9..30efb3d5fa0 100644
--- a/src/AggregateFunctions/AggregateFunctionSumMap.h
+++ b/src/AggregateFunctions/AggregateFunctionSumMap.h
@@ -118,6 +118,8 @@ public:
         WhichDataType value_type_to_check(value_type);
 
         /// Do not promote decimal because of implementation issues of this function design
+        /// Currently, in case of decimal, we cannot get the result column type, because we cannot get the decimal scale
+        /// in the method void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
        /// If we decide to make this function more efficient we should promote decimal type during summ
         if (value_type_to_check.isDecimal())
             result_type = value_type_without_nullable;
@@ -337,7 +339,11 @@ public:
                 if (elem.second[col].isNull())
                     to_values_col.insertDefault();
                 else
+                {
+                    auto element_field = elem.second[col];
+
                     to_values_col.insert(elem.second[col]);
+                }
             }
         }
     }
From 019a2090c71898719858ad451a4e83de842fcf30 Mon Sep 17 00:00:00 2001
From: Maksim Kita
Date: Mon, 1 Mar 2021 19:18:14 +0300
Subject: [PATCH 83/86] Fix unused variable

---
 src/AggregateFunctions/AggregateFunctionSumMap.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/AggregateFunctions/AggregateFunctionSumMap.h b/src/AggregateFunctions/AggregateFunctionSumMap.h
index 30efb3d5fa0..8af20fe0e2d 100644
--- a/src/AggregateFunctions/AggregateFunctionSumMap.h
+++ b/src/AggregateFunctions/AggregateFunctionSumMap.h
@@ -340,8 +340,6 @@ public:
                     to_values_col.insertDefault();
                 else
                 {
-                    auto element_field = elem.second[col];
-
                     to_values_col.insert(elem.second[col]);
                 }
             }
From 7ed5900251ef5945dcca32d0b0c1280b2c920800 Mon Sep 17 00:00:00 2001
From: Maksim Kita
Date: Mon, 1 Mar 2021 19:18:56 +0300
Subject: [PATCH 84/86] Updated style

---
 src/AggregateFunctions/AggregateFunctionSumMap.h | 2 --
 1 file changed, 2 deletions(-)

diff --git
a/src/AggregateFunctions/AggregateFunctionSumMap.h b/src/AggregateFunctions/AggregateFunctionSumMap.h
index 8af20fe0e2d..3233199f01e 100644
--- a/src/AggregateFunctions/AggregateFunctionSumMap.h
+++ b/src/AggregateFunctions/AggregateFunctionSumMap.h
@@ -339,9 +339,7 @@ public:
                 if (elem.second[col].isNull())
                     to_values_col.insertDefault();
                 else
-                {
                     to_values_col.insert(elem.second[col]);
-                }
             }
         }
     }
From 15b3f379a56c8f648cff38695ec2aa617c13fe58 Mon Sep 17 00:00:00 2001
From: olgarev <56617294+olgarev@users.noreply.github.com>
Date: Mon, 1 Mar 2021 19:41:16 +0300
Subject: [PATCH 85/86] DOCSUP-4915: documented geo data types (#21294)

* Initial

* Fix links

* Fix master

* Mistakes corrected

* Minor fix

* Fixes and intro.

* Update geo.md

* Apply suggestions from code review

Co-authored-by: Anton Popov

Co-authored-by: Olga Revyakina
Co-authored-by: Anton Popov
---
 .../template-data-type.md | 2 +-
 docs/en/operations/settings/settings.md | 15 ++-
 docs/en/sql-reference/data-types/geo.md | 106 ++++++++++++++++++
 docs/ru/operations/settings/settings.md | 11 ++
 docs/ru/sql-reference/data-types/geo.md | 106 ++++++++++++++++++
 5 files changed, 237 insertions(+), 3 deletions(-)
 create mode 100644 docs/en/sql-reference/data-types/geo.md
 create mode 100644 docs/ru/sql-reference/data-types/geo.md

diff --git a/docs/_description_templates/template-data-type.md b/docs/_description_templates/template-data-type.md
index edb6586ee7d..5e560b9325d 100644
--- a/docs/_description_templates/template-data-type.md
+++ b/docs/_description_templates/template-data-type.md
@@ -26,4 +26,4 @@ The name of an additional section can be any, for example, **Usage**.
 
 - [link](#)
 
-[Original article](https://clickhouse.tech/docs/en/data_types//)
+[Original article](https://clickhouse.tech/docs/en/data-types//)
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index 1988e2ec6fa..3c343e09fd3 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -2658,8 +2658,6 @@ Result:
 
 Note that this setting influences [Materialized view](../../sql-reference/statements/create/view.md#materialized) and [MaterializeMySQL](../../engines/database-engines/materialize-mysql.md) behaviour.
 
-[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/)
-
 ## engine_file_empty_if_not_exists {#engine-file-empty_if-not-exists}
 
 Allows to select data from a file engine table without file.
@@ -2679,3 +2677,16 @@ Possible values:
 
 - 1 — Enabled.
 
 Default value: `0`.
+
+## allow_experimental_geo_types {#allow-experimental-geo-types}
+
+Allows working with experimental [geo data types](../../sql-reference/data-types/geo.md).
+
+Possible values:
+
+- 0 — Working with geo data types is disabled.
+- 1 — Working with geo data types is enabled.
+
+Default value: `0`.
+
+[Original article](https://clickhouse.tech/docs/en/operations/settings/settings/)
diff --git a/docs/en/sql-reference/data-types/geo.md b/docs/en/sql-reference/data-types/geo.md
new file mode 100644
index 00000000000..9ed328e0de6
--- /dev/null
+++ b/docs/en/sql-reference/data-types/geo.md
@@ -0,0 +1,106 @@
+---
+toc_priority: 62
+toc_title: Geo
+---
+
+# Geo Data Types {#geo-data-types}
+
+ClickHouse supports data types for representing geographical objects — locations, lands, etc.
+
+!!! warning "Warning"
+    Currently geo data types are an experimental feature. To work with them you must set `allow_experimental_geo_types = 1`.
+
+**See Also**
+- [Representing simple geographical features](https://en.wikipedia.org/wiki/GeoJSON).
+- [allow_experimental_geo_types](../../operations/settings/settings.md#allow-experimental-geo-types) setting.
+
+## Point {#point-data-type}
+
+`Point` is represented by its X and Y coordinates, stored as a [Tuple](tuple.md)([Float64](float.md), [Float64](float.md)).
+
+**Example**
+
+Query:
+
+```sql
+SET allow_experimental_geo_types = 1;
+CREATE TABLE geo_point (p Point) ENGINE = Memory();
+INSERT INTO geo_point VALUES((10, 10));
+SELECT p, toTypeName(p) FROM geo_point;
+```
+Result:
+
+``` text
+┌─p───────┬─toTypeName(p)─┐
+│ (10,10) │ Point         │
+└─────────┴───────────────┘
+```
+
+## Ring {#ring-data-type}
+
+`Ring` is a simple polygon without holes, stored as an array of points: [Array](array.md)([Point](#point-data-type)).
+
+**Example**
+
+Query:
+
+```sql
+SET allow_experimental_geo_types = 1;
+CREATE TABLE geo_ring (r Ring) ENGINE = Memory();
+INSERT INTO geo_ring VALUES([(0, 0), (10, 0), (10, 10), (0, 10)]);
+SELECT r, toTypeName(r) FROM geo_ring;
+```
+Result:
+
+``` text
+┌─r─────────────────────────────┬─toTypeName(r)─┐
+│ [(0,0),(10,0),(10,10),(0,10)] │ Ring          │
+└───────────────────────────────┴───────────────┘
+```
+
+## Polygon {#polygon-data-type}
+
+`Polygon` is a polygon with holes, stored as an array of rings: [Array](array.md)([Ring](#ring-data-type)). The first element of the outer array is the outer shape of the polygon and all the following elements are holes.
+
+**Example**
+
+This is a polygon with one hole:
+
+```sql
+SET allow_experimental_geo_types = 1;
+CREATE TABLE geo_polygon (pg Polygon) ENGINE = Memory();
+INSERT INTO geo_polygon VALUES([[(20, 20), (50, 20), (50, 50), (20, 50)], [(30, 30), (50, 50), (50, 30)]]);
+SELECT pg, toTypeName(pg) FROM geo_polygon;
+```
+
+Result:
+
+``` text
+┌─pg────────────────────────────────────────────────────────────┬─toTypeName(pg)─┐
+│ [[(20,20),(50,20),(50,50),(20,50)],[(30,30),(50,50),(50,30)]] │ Polygon        │
+└───────────────────────────────────────────────────────────────┴────────────────┘
+```
+
+## MultiPolygon {#multipolygon-data-type}
+
+`MultiPolygon` consists of multiple polygons and is stored as an array of polygons: [Array](array.md)([Polygon](#polygon-data-type)).
+
+**Example**
+
+This multipolygon consists of two separate polygons — the first one without holes, and the second with one hole:
+
+```sql
+SET allow_experimental_geo_types = 1;
+CREATE TABLE geo_multipolygon (mpg MultiPolygon) ENGINE = Memory();
+INSERT INTO geo_multipolygon VALUES([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]]);
+SELECT mpg, toTypeName(mpg) FROM geo_multipolygon;
+```
+Result:
+
+``` text
+┌─mpg─────────────────────────────────────────────────────────────────────────────────────────────┬─toTypeName(mpg)─┐
+│ [[[(0,0),(10,0),(10,10),(0,10)]],[[(20,20),(50,20),(50,50),(20,50)],[(30,30),(50,50),(50,30)]]] │ MultiPolygon    │
+└─────────────────────────────────────────────────────────────────────────────────────────────────┴─────────────────┘
+```
+
+[Original article](https://clickhouse.tech/docs/en/data-types/geo/)
diff --git a/docs/ru/operations/settings/settings.md b/docs/ru/operations/settings/settings.md
index a67b4a283fa..f8f587c8a36 100644
--- a/docs/ru/operations/settings/settings.md
+++ b/docs/ru/operations/settings/settings.md
@@ -2552,4 +2552,15 @@ SELECT * FROM test2;
 
 Обратите внимание на то, что эта настройка влияет на поведение [материализованных представлений](../../sql-reference/statements/create/view.md#materialized) и БД [MaterializeMySQL](../../engines/database-engines/materialize-mysql.md).
 
+## allow_experimental_geo_types {#allow-experimental-geo-types}
+
+Разрешает использование экспериментальных типов данных для работы с [географическими структурами](../../sql-reference/data-types/geo.md).
+
+Возможные значения:
+
+- 0 — Использование типов данных для работы с географическими структурами не поддерживается.
+- 1 — Использование типов данных для работы с географическими структурами поддерживается.
+
+Значение по умолчанию: `0`.
+
 [Оригинальная статья](https://clickhouse.tech/docs/ru/operations/settings/settings/)
diff --git a/docs/ru/sql-reference/data-types/geo.md b/docs/ru/sql-reference/data-types/geo.md
new file mode 100644
index 00000000000..23293b30927
--- /dev/null
+++ b/docs/ru/sql-reference/data-types/geo.md
@@ -0,0 +1,106 @@
+---
+toc_priority: 62
+toc_title: Географические структуры
+---
+
+# Типы данных для работы с географическими структурами {#geo-data-types}
+
+ClickHouse поддерживает типы данных для отображения географических объектов — точек (местоположений), территорий и т.п.
+
+!!! warning "Предупреждение"
+    Сейчас использование типов данных для работы с географическими структурами является экспериментальной возможностью. Чтобы использовать эти типы данных, включите настройку `allow_experimental_geo_types = 1`.
+
+**См. также**
+- [Хранение географических структур данных](https://ru.wikipedia.org/wiki/GeoJSON).
+- Настройка [allow_experimental_geo_types](../../operations/settings/settings.md#allow-experimental-geo-types).
+
+## Point {#point-data-type}
+
+Тип `Point` (точка) определяется парой координат X и Y и хранится в виде кортежа [Tuple](tuple.md)([Float64](float.md), [Float64](float.md)).
+
+**Пример**
+
+Запрос:
+
+```sql
+SET allow_experimental_geo_types = 1;
+CREATE TABLE geo_point (p Point) ENGINE = Memory();
+INSERT INTO geo_point VALUES((10, 10));
+SELECT p, toTypeName(p) FROM geo_point;
+```
+Результат:
+
+``` text
+┌─p───────┬─toTypeName(p)─┐
+│ (10,10) │ Point         │
+└─────────┴───────────────┘
+```
+
+## Ring {#ring-data-type}
+
+Тип `Ring` описывает простой многоугольник без внутренних областей (дыр) и хранится в виде массива точек: [Array](array.md)([Point](#point-data-type)).
+
+**Пример**
+
+Запрос:
+
+```sql
+SET allow_experimental_geo_types = 1;
+CREATE TABLE geo_ring (r Ring) ENGINE = Memory();
+INSERT INTO geo_ring VALUES([(0, 0), (10, 0), (10, 10), (0, 10)]);
+SELECT r, toTypeName(r) FROM geo_ring;
+```
+Результат:
+
+``` text
+┌─r─────────────────────────────┬─toTypeName(r)─┐
+│ [(0,0),(10,0),(10,10),(0,10)] │ Ring          │
+└───────────────────────────────┴───────────────┘
+```
+
+## Polygon {#polygon-data-type}
+
+Тип `Polygon` описывает многоугольник с внутренними областями (дырами) и хранится в виде массива: [Array](array.md)([Ring](#ring-data-type)). Первый элемент массива описывает внешний многоугольник (контур), а остальные элементы описывают дыры.
+
+**Пример**
+
+Запись в этой таблице описывает многоугольник с одной дырой:
+
+```sql
+SET allow_experimental_geo_types = 1;
+CREATE TABLE geo_polygon (pg Polygon) ENGINE = Memory();
+INSERT INTO geo_polygon VALUES([[(20, 20), (50, 20), (50, 50), (20, 50)], [(30, 30), (50, 50), (50, 30)]]);
+SELECT pg, toTypeName(pg) FROM geo_polygon;
+```
+
+Результат:
+
+``` text
+┌─pg────────────────────────────────────────────────────────────┬─toTypeName(pg)─┐
+│ [[(20,20),(50,20),(50,50),(20,50)],[(30,30),(50,50),(50,30)]] │ Polygon        │
+└───────────────────────────────────────────────────────────────┴────────────────┘
+```
+
+## MultiPolygon {#multipolygon-data-type}
+
+Тип `MultiPolygon` описывает элемент, состоящий из нескольких простых многоугольников (полигональную сетку). Он хранится в виде массива многоугольников: [Array](array.md)([Polygon](#polygon-data-type)).
+
+**Пример**
+
+Запись в этой таблице описывает элемент, состоящий из двух многоугольников — первый без дыр, а второй с одной дырой:
+
+```sql
+SET allow_experimental_geo_types = 1;
+CREATE TABLE geo_multipolygon (mpg MultiPolygon) ENGINE = Memory();
+INSERT INTO geo_multipolygon VALUES([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]]);
+SELECT mpg, toTypeName(mpg) FROM geo_multipolygon;
+```
+Результат:
+
+``` text
+┌─mpg─────────────────────────────────────────────────────────────────────────────────────────────┬─toTypeName(mpg)─┐
+│ [[[(0,0),(10,0),(10,10),(0,10)]],[[(20,20),(50,20),(50,50),(20,50)],[(30,30),(50,50),(50,30)]]] │ MultiPolygon    │
+└─────────────────────────────────────────────────────────────────────────────────────────────────┴─────────────────┘
+```
+
+[Оригинальная статья](https://clickhouse.tech/docs/ru/data-types/geo/)
From f82e00ff3c47fd99e4bb7ec912392dcf269e5206 Mon Sep 17 00:00:00 2001
From: Nikolai Kochetov
Date: Mon, 1 Mar 2021 23:10:50 +0300
Subject: [PATCH 86/86] Fix inconsistent column type for Values input format.
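
Values-format INSERTs that contain non-literal constant expressions are
parsed and evaluated through ConstantExpressionTemplate. The evaluated
column was returned as-is, so its type could differ from the type expected
by the block header (for example, a plain String column where the header
expects LowCardinality(String), as in the test added below). Pass the
expected type into evaluateAll() and cast the result to it. A minimal
sketch of the affected case, mirroring the added test (table and column
names are illustrative):

    CREATE TABLE lc_test (id LowCardinality(String)) ENGINE = MergeTree ORDER BY id;
    -- toString('a') is a non-literal expression, so it is evaluated via the template
    INSERT INTO lc_test VALUES (toString('a'));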
---
 .../Formats/Impl/ConstantExpressionTemplate.cpp | 14 +++++++++-----
 .../Formats/Impl/ConstantExpressionTemplate.h | 2 +-
 .../Formats/Impl/ValuesBlockInputFormat.cpp | 13 +++++++++----
 .../01746_lc_values_format_bug.reference | 1 +
 .../0_stateless/01746_lc_values_format_bug.sql | 14 ++++++++++++++
 5 files changed, 34 insertions(+), 10 deletions(-)
 create mode 100644 tests/queries/0_stateless/01746_lc_values_format_bug.reference
 create mode 100644 tests/queries/0_stateless/01746_lc_values_format_bug.sql

diff --git a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp
index 4aed9979ab2..f599b7c87e9 100644
--- a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp
+++ b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <Interpreters/castColumn.h>
 #include
 #include
 #include
@@ -589,7 +590,7 @@ bool ConstantExpressionTemplate::parseLiteralAndAssertType(ReadBuffer & istr, co
     }
 }
 
-ColumnPtr ConstantExpressionTemplate::evaluateAll(BlockMissingValues & nulls, size_t column_idx, size_t offset)
+ColumnPtr ConstantExpressionTemplate::evaluateAll(BlockMissingValues & nulls, size_t column_idx, const DataTypePtr & expected_type, size_t offset)
 {
     Block evaluated = structure->literals.cloneWithColumns(std::move(columns));
     columns = structure->literals.cloneEmptyColumns();
@@ -607,12 +608,13 @@ ColumnPtr ConstantExpressionTemplate::evaluateAll(BlockMissingValues & nulls, si
                         ErrorCodes::LOGICAL_ERROR);
 
     rows_count = 0;
-    ColumnPtr res = evaluated.getByName(structure->result_column_name).column->convertToFullColumnIfConst();
+    auto res = evaluated.getByName(structure->result_column_name);
+    res.column = res.column->convertToFullColumnIfConst();
     if (!structure->null_as_default)
-        return res;
+        return castColumn(res, expected_type);
 
     /// Extract column with evaluated expression and mask for NULLs
-    const auto & tuple = assert_cast<const ColumnTuple &>(*res);
+    const auto & tuple = assert_cast<const ColumnTuple &>(*res.column);
     if (tuple.tupleSize() != 2)
         throw Exception("Invalid tuple size, it'a a bug", ErrorCodes::LOGICAL_ERROR);
     const auto & is_null = assert_cast<const ColumnUInt8 &>(tuple.getColumn(1));
@@ -621,7 +623,9 @@ ColumnPtr ConstantExpressionTemplate::evaluateAll(BlockMissingValues & nulls, si
         if (is_null.getUInt(i))
             nulls.setBit(column_idx, offset + i);
 
-    return tuple.getColumnPtr(0);
+    res.column = tuple.getColumnPtr(0);
+    res.type = assert_cast<const DataTypeTuple &>(*res.type).getElements()[0];
+    return castColumn(res, expected_type);
 }
 
 void ConstantExpressionTemplate::TemplateStructure::addNodesToCastResult(const IDataType & result_column_type, ASTPtr & expr, bool null_as_default)
diff --git a/src/Processors/Formats/Impl/ConstantExpressionTemplate.h b/src/Processors/Formats/Impl/ConstantExpressionTemplate.h
index 931b05673c6..299ce4c9925 100644
--- a/src/Processors/Formats/Impl/ConstantExpressionTemplate.h
+++ b/src/Processors/Formats/Impl/ConstantExpressionTemplate.h
@@ -72,7 +72,7 @@ public:
 
     /// Evaluate batch of expressions were parsed using template.
     /// If template was deduced with null_as_default == true, set bits in nulls for NULL values in column_idx, starting from offset.
- ColumnPtr evaluateAll(BlockMissingValues & nulls, size_t column_idx, size_t offset = 0); + ColumnPtr evaluateAll(BlockMissingValues & nulls, size_t column_idx, const DataTypePtr & expected_type, size_t offset = 0); size_t rowsCount() const { return rows_count; } diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp index 3fde84a4090..1455b8f6740 100644 --- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp @@ -73,11 +73,13 @@ Chunk ValuesBlockInputFormat::generate() { if (!templates[i] || !templates[i]->rowsCount()) continue; + + const auto & expected_type = header.getByPosition(i).type; if (columns[i]->empty()) - columns[i] = IColumn::mutate(templates[i]->evaluateAll(block_missing_values, i)); + columns[i] = IColumn::mutate(templates[i]->evaluateAll(block_missing_values, i, expected_type)); else { - ColumnPtr evaluated = templates[i]->evaluateAll(block_missing_values, i, columns[i]->size()); + ColumnPtr evaluated = templates[i]->evaluateAll(block_missing_values, i, expected_type, columns[i]->size()); columns[i]->insertRangeFrom(*evaluated, 0, evaluated->size()); } } @@ -135,13 +137,16 @@ bool ValuesBlockInputFormat::tryParseExpressionUsingTemplate(MutableColumnPtr & return true; } + const auto & header = getPort().getHeader(); + const auto & expected_type = header.getByPosition(column_idx).type; + /// Expression in the current row is not match template deduced on the first row. /// Evaluate expressions, which were parsed using this template. if (column->empty()) - column = IColumn::mutate(templates[column_idx]->evaluateAll(block_missing_values, column_idx)); + column = IColumn::mutate(templates[column_idx]->evaluateAll(block_missing_values, column_idx, expected_type)); else { - ColumnPtr evaluated = templates[column_idx]->evaluateAll(block_missing_values, column_idx, column->size()); + ColumnPtr evaluated = templates[column_idx]->evaluateAll(block_missing_values, column_idx, expected_type, column->size()); column->insertRangeFrom(*evaluated, 0, evaluated->size()); } /// Do not use this template anymore diff --git a/tests/queries/0_stateless/01746_lc_values_format_bug.reference b/tests/queries/0_stateless/01746_lc_values_format_bug.reference new file mode 100644 index 00000000000..78981922613 --- /dev/null +++ b/tests/queries/0_stateless/01746_lc_values_format_bug.reference @@ -0,0 +1 @@ +a diff --git a/tests/queries/0_stateless/01746_lc_values_format_bug.sql b/tests/queries/0_stateless/01746_lc_values_format_bug.sql new file mode 100644 index 00000000000..6717b9ae5e3 --- /dev/null +++ b/tests/queries/0_stateless/01746_lc_values_format_bug.sql @@ -0,0 +1,14 @@ +drop table if exists lc_test; + +CREATE TABLE lc_test +( + `id` LowCardinality(String) +) +ENGINE = MergeTree +PARTITION BY tuple() +ORDER BY id; + +insert into lc_test values (toString('a')); + +select id from lc_test; +drop table if exists lc_test;