Merge pull request #47383 from kitaisreal/analyzer-planner-fixes-before-enable-by-default

Analyzer and planner fixes before enabling the analyzer by default
Maksim Kita 2023-03-11 22:09:04 +03:00 committed by GitHub
commit 114d30d2bb
150 changed files with 1894 additions and 842 deletions

View File

@@ -2,18 +2,21 @@
#include <Common/SipHash.h>
#include <Common/FieldVisitorToString.h>
#include <DataTypes/IDataType.h>
#include <Analyzer/ConstantNode.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <DataTypes/IDataType.h>
#include <DataTypes/DataTypeSet.h>
#include <Parsers/ASTFunction.h>
#include <Functions/IFunction.h>
#include <AggregateFunctions/IAggregateFunction.h>
#include <Analyzer/Utils.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/IdentifierNode.h>
namespace DB
@@ -44,17 +47,29 @@ const DataTypes & FunctionNode::getArgumentTypes() const
ColumnsWithTypeAndName FunctionNode::getArgumentColumns() const
{
const auto & arguments = getArguments().getNodes();
size_t arguments_size = arguments.size();
ColumnsWithTypeAndName argument_columns;
argument_columns.reserve(arguments.size());
for (const auto & arg : arguments)
for (size_t i = 0; i < arguments_size; ++i)
{
ColumnWithTypeAndName argument;
argument.type = arg->getResultType();
if (auto * constant = arg->as<ConstantNode>())
argument.column = argument.type->createColumnConst(1, constant->getValue());
argument_columns.push_back(std::move(argument));
const auto & argument = arguments[i];
ColumnWithTypeAndName argument_column;
if (isNameOfInFunction(function_name) && i == 1)
argument_column.type = std::make_shared<DataTypeSet>();
else
argument_column.type = argument->getResultType();
auto * constant = argument->as<ConstantNode>();
if (constant && !isNotCreatable(argument_column.type))
argument_column.column = argument_column.type->createColumnConst(1, constant->getValue());
argument_columns.push_back(std::move(argument_column));
}
return argument_columns;
}
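
The rewritten loop above indexes the arguments instead of iterating them so it can special-case the second argument of IN: that argument describes a set rather than a readable column, so it gets DataTypeSet and is excluded from constant folding (its type is not creatable). A minimal standalone sketch of the same pattern, using simplified stand-in types instead of the real FunctionNode/DataTypeSet API:

#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Stand-in for a resolved argument node: its result type name and,
// if it is a constant node, its value.
struct ArgumentNode
{
    std::string result_type;
    std::optional<std::string> constant_value;
};

static bool isNameOfInFunction(const std::string & name)
{
    return name == "in" || name == "notIn" || name == "nullIn" || name == "globalIn";
}

int main()
{
    std::string function_name = "in";
    std::vector<ArgumentNode> arguments{{"UInt64", {}}, {"Tuple(UInt64)", "(1, 2, 3)"}};

    for (size_t i = 0; i < arguments.size(); ++i)
    {
        // The set argument of IN is not a readable column: give it a marker
        // type and skip constant folding for it.
        bool is_set_argument = isNameOfInFunction(function_name) && i == 1;
        std::string column_type = is_set_argument ? "Set" : arguments[i].result_type;

        bool folded_to_const = arguments[i].constant_value.has_value() && !is_set_argument;
        std::cout << "argument " << i << ": " << column_type
                  << (folded_to_const ? " (const)" : "") << '\n';
    }
    return 0;
}
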

View File

@@ -99,8 +99,9 @@ class InDepthQueryTreeVisitorWithContext
public:
using VisitQueryTreeNodeType = std::conditional_t<const_visitor, const QueryTreeNodePtr, QueryTreeNodePtr>;
explicit InDepthQueryTreeVisitorWithContext(ContextPtr context)
explicit InDepthQueryTreeVisitorWithContext(ContextPtr context, size_t initial_subquery_depth = 0)
: current_context(std::move(context))
, subquery_depth(initial_subquery_depth)
{}
/// Return true if visitor should traverse tree top to bottom, false otherwise
@@ -125,11 +126,17 @@ public:
return current_context->getSettingsRef();
}
size_t getSubqueryDepth() const
{
return subquery_depth;
}
void visit(VisitQueryTreeNodeType & query_tree_node)
{
auto current_scope_context_ptr = current_context;
SCOPE_EXIT(
current_context = std::move(current_scope_context_ptr);
--subquery_depth;
);
if (auto * query_node = query_tree_node->template as<QueryNode>())
@@ -137,6 +144,8 @@ public:
else if (auto * union_node = query_tree_node->template as<UnionNode>())
current_context = union_node->getContext();
++subquery_depth;
bool traverse_top_to_bottom = getDerived().shouldTraverseTopToBottom();
if (!traverse_top_to_bottom)
visitChildren(query_tree_node);
@@ -145,7 +154,12 @@ public:
if (traverse_top_to_bottom)
visitChildren(query_tree_node);
getDerived().leaveImpl(query_tree_node);
}
void leaveImpl(VisitQueryTreeNodeType & node [[maybe_unused]])
{}
private:
Derived & getDerived()
{
@@ -172,6 +186,7 @@ private:
}
ContextPtr current_context;
size_t subquery_depth = 0;
};
template <typename Derived>
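
The new subquery_depth counter above is visible to every visitor built on this base class, with initial_subquery_depth letting a nested visitor start from the depth of its enclosing scope, and SCOPE_EXIT undoing the increment (and the context switch) on the way back up. A standalone sketch of that save-increment-restore pattern, with a saved local standing in for the SCOPE_EXIT macro and a simplified node type:

#include <iostream>
#include <memory>
#include <vector>

// Stand-in for a query tree node; is_query models QueryNode/UnionNode.
struct Node
{
    bool is_query = false;
    std::vector<std::shared_ptr<Node>> children;
};

struct Visitor
{
    size_t subquery_depth = 0;

    void visit(const std::shared_ptr<Node> & node)
    {
        // What the SCOPE_EXIT block restores in the hunk above: the previous
        // depth (and, in the real class, the previous context).
        size_t saved_depth = subquery_depth;

        ++subquery_depth;
        if (node->is_query)
            std::cout << "query node visited at depth " << subquery_depth << '\n';

        for (const auto & child : node->children)
            visit(child);

        subquery_depth = saved_depth;   // restore on the way out
    }
};

int main()
{
    auto inner = std::make_shared<Node>(Node{true, {}});
    auto outer = std::make_shared<Node>(Node{true, {inner}});
    Visitor{}.visit(outer);
    return 0;
}
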

View File

@@ -106,6 +106,12 @@ public:
return locality;
}
/// Set join locality
void setLocality(JoinLocality locality_value)
{
locality = locality_value;
}
/// Get join strictness
JoinStrictness getStrictness() const
{

View File

@@ -42,7 +42,7 @@ private:
return;
const auto & storage = table_node ? table_node->getStorage() : table_function_node->getStorage();
bool is_final_supported = storage && storage->supportsFinal() && !storage->isRemote();
bool is_final_supported = storage && storage->supportsFinal();
if (!is_final_supported)
return;

View File

@@ -7,8 +7,6 @@
#include <Analyzer/ConstantNode.h>
#include <Analyzer/HashUtils.h>
#include <DataTypes/DataTypeString.h>
namespace DB
{
@@ -100,6 +98,9 @@ private:
}
}
if (and_operands.size() == function_node.getArguments().getNodes().size())
return;
if (and_operands.size() == 1)
{
/// AND operator can have UInt8 or bool as its type.
@@ -207,6 +208,9 @@ private:
or_operands.push_back(std::move(in_function));
}
if (or_operands.size() == function_node.getArguments().getNodes().size())
return;
if (or_operands.size() == 1)
{
/// if the result type of operand is the same as the result type of OR

View File

@@ -69,8 +69,7 @@ private:
for (auto it = function_arguments.rbegin(); it != function_arguments.rend(); ++it)
candidates.push_back({ *it, is_deterministic });
// Using DFS we traverse function tree and try to find if it uses other keys as function arguments.
// TODO: Also process CONSTANT here. We can simplify GROUP BY x, x + 1 to GROUP BY x.
/// Using DFS we traverse function tree and try to find if it uses other keys as function arguments.
while (!candidates.empty())
{
auto [candidate, parents_are_only_deterministic] = candidates.back();
@@ -108,6 +107,7 @@ private:
return false;
}
}
return true;
}

View File

@@ -193,13 +193,9 @@ namespace ErrorCodes
* lookup should not be continued, and exception must be thrown because if lookup continues identifier can be resolved from parent scope.
*
* TODO: Update exception messages
* TODO: JOIN TREE subquery constant columns
* TODO: Table identifiers with optional UUID.
* TODO: Lookup functions arrayReduce(sum, [1, 2, 3]);
* TODO: SELECT (compound_expression).*, (compound_expression).COLUMNS are not supported on parser level.
* TODO: SELECT a.b.c.*, a.b.c.COLUMNS. Qualified matcher where identifier size is greater than 2 are not supported on parser level.
* TODO: Support function identifier resolve from parent query scope, if lambda in parent scope does not capture any columns.
* TODO: Scalar subqueries cache.
*/
namespace
@@ -701,7 +697,9 @@ struct IdentifierResolveScope
}
if (auto * union_node = scope_node->as<UnionNode>())
{
context = union_node->getContext();
}
else if (auto * query_node = scope_node->as<QueryNode>())
{
context = query_node->getContext();
@@ -1336,6 +1334,9 @@ private:
/// Global resolve expression node to projection names map
std::unordered_map<QueryTreeNodePtr, ProjectionNames> resolved_expressions;
/// Global resolve expression node to tree size
std::unordered_map<QueryTreeNodePtr, size_t> node_to_tree_size;
/// Global scalar subquery to scalar value map
std::unordered_map<QueryTreeNodePtrWithHash, Block> scalar_subquery_to_scalar_value;
@@ -1864,7 +1865,10 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, Iden
Block scalar_block;
QueryTreeNodePtrWithHash node_with_hash(node);
auto node_without_alias = node->clone();
node_without_alias->removeAlias();
QueryTreeNodePtrWithHash node_with_hash(node_without_alias);
auto scalar_value_it = scalar_subquery_to_scalar_value.find(node_with_hash);
if (scalar_value_it != scalar_subquery_to_scalar_value.end())
@@ -1954,21 +1958,7 @@
*
* Example: SELECT (SELECT 2 AS x, x)
*/
NameSet block_column_names;
size_t unique_column_name_counter = 1;
for (auto & column_with_type : block)
{
if (!block_column_names.contains(column_with_type.name))
{
block_column_names.insert(column_with_type.name);
continue;
}
column_with_type.name += '_';
column_with_type.name += std::to_string(unique_column_name_counter);
++unique_column_name_counter;
}
makeUniqueColumnNamesInBlock(block);
scalar_block.insert({
ColumnTuple::create(block.getColumns()),
@@ -2348,7 +2338,13 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveTableIdentifierFromDatabaseCatalog(con
storage_id = context->resolveStorageID(storage_id);
bool is_temporary_table = storage_id.getDatabaseName() == DatabaseCatalog::TEMPORARY_DATABASE;
auto storage = DatabaseCatalog::instance().tryGetTable(storage_id, context);
StoragePtr storage;
if (is_temporary_table)
storage = DatabaseCatalog::instance().getTable(storage_id, context);
else
storage = DatabaseCatalog::instance().tryGetTable(storage_id, context);
if (!storage)
return {};
@@ -2914,7 +2910,10 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromTableExpression(const Id
break;
IdentifierLookup column_identifier_lookup = {qualified_identifier_with_removed_part, IdentifierLookupContext::EXPRESSION};
if (tryBindIdentifierToAliases(column_identifier_lookup, scope) ||
if (tryBindIdentifierToAliases(column_identifier_lookup, scope))
break;
if (table_expression_data.should_qualify_columns &&
tryBindIdentifierToTableExpressions(column_identifier_lookup, table_expression_node, scope))
break;
@@ -3018,11 +3017,39 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromJoin(const IdentifierLoo
resolved_identifier = std::move(result_column_node);
}
else if (scope.joins_count == 1 && scope.context->getSettingsRef().single_join_prefer_left_table)
else if (left_resolved_identifier->isEqual(*right_resolved_identifier, IQueryTreeNode::CompareOptions{.compare_aliases = false}))
{
const auto & identifier_path_part = identifier_lookup.identifier.front();
auto * left_resolved_identifier_column = left_resolved_identifier->as<ColumnNode>();
auto * right_resolved_identifier_column = right_resolved_identifier->as<ColumnNode>();
if (left_resolved_identifier_column && right_resolved_identifier_column)
{
const auto & left_column_source_alias = left_resolved_identifier_column->getColumnSource()->getAlias();
const auto & right_column_source_alias = right_resolved_identifier_column->getColumnSource()->getAlias();
/** If column from right table was resolved using alias, we prefer column from right table.
*
* Example: SELECT dummy FROM system.one JOIN system.one AS A ON A.dummy = system.one.dummy;
*
* If alias is specified for left table, and alias is not specified for right table and identifier was resolved
* without using left table alias, we prefer column from right table.
*
* Example: SELECT dummy FROM system.one AS A JOIN system.one ON A.dummy = system.one.dummy;
*
* Otherwise we prefer column from left table.
*/
if (identifier_path_part == right_column_source_alias)
return right_resolved_identifier;
else if (!left_column_source_alias.empty() &&
right_column_source_alias.empty() &&
identifier_path_part != left_column_source_alias)
return right_resolved_identifier;
}
return left_resolved_identifier;
}
else if (left_resolved_identifier->isEqual(*right_resolved_identifier, IQueryTreeNode::CompareOptions{.compare_aliases = false}))
else if (scope.joins_count == 1 && scope.context->getSettingsRef().single_join_prefer_left_table)
{
return left_resolved_identifier;
}
@@ -4466,6 +4493,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
bool is_special_function_dict_get = false;
bool is_special_function_join_get = false;
bool is_special_function_exists = false;
bool is_special_function_if = false;
if (!lambda_expression_untyped)
{
@@ -4473,6 +4501,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
is_special_function_dict_get = functionIsDictGet(function_name);
is_special_function_join_get = functionIsJoinGet(function_name);
is_special_function_exists = function_name == "exists";
is_special_function_if = function_name == "if";
auto function_name_lowercase = Poco::toLower(function_name);
@@ -4571,6 +4600,60 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
is_special_function_in = true;
}
if (is_special_function_if && !function_node_ptr->getArguments().getNodes().empty())
{
/** Handle special case with constant If function, even if some of the arguments are invalid.
*
* SELECT if(hasColumnInTable('system', 'numbers', 'not_existing_column'), not_existing_column, 5) FROM system.numbers;
*/
auto & if_function_arguments = function_node_ptr->getArguments().getNodes();
auto if_function_condition = if_function_arguments[0];
resolveExpressionNode(if_function_condition, scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);
auto constant_condition = tryExtractConstantFromConditionNode(if_function_condition);
if (constant_condition.has_value() && if_function_arguments.size() == 3)
{
QueryTreeNodePtr constant_if_result_node;
QueryTreeNodePtr possibly_invalid_argument_node;
if (*constant_condition)
{
possibly_invalid_argument_node = if_function_arguments[2];
constant_if_result_node = if_function_arguments[1];
}
else
{
possibly_invalid_argument_node = if_function_arguments[1];
constant_if_result_node = if_function_arguments[2];
}
bool apply_constant_if_optimization = false;
try
{
resolveExpressionNode(possibly_invalid_argument_node,
scope,
false /*allow_lambda_expression*/,
false /*allow_table_expression*/);
}
catch (...)
{
apply_constant_if_optimization = true;
}
if (apply_constant_if_optimization)
{
auto result_projection_names = resolveExpressionNode(constant_if_result_node,
scope,
false /*allow_lambda_expression*/,
false /*allow_table_expression*/);
node = std::move(constant_if_result_node);
return result_projection_names;
}
}
}
/// Resolve function arguments
bool allow_table_expressions = is_special_function_in;
@@ -5059,7 +5142,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
/// Do not constant fold get scalar functions
bool disable_constant_folding = function_name == "__getScalar" || function_name == "shardNum" ||
function_name == "shardCount";
function_name == "shardCount" || function_name == "hostName";
/** If function is suitable for constant folding try to convert it to constant.
* Example: SELECT plus(1, 1);
@@ -5085,7 +5168,8 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
/** Do not perform constant folding if there are aggregate or arrayJoin functions inside function.
* Example: SELECT toTypeName(sum(number)) FROM numbers(10);
*/
if (column && isColumnConst(*column) && (!hasAggregateFunctionNodes(node) && !hasFunctionNode(node, "arrayJoin")))
if (column && isColumnConst(*column) && !typeid_cast<const ColumnConst *>(column.get())->getDataColumn().isDummy() &&
(!hasAggregateFunctionNodes(node) && !hasFunctionNode(node, "arrayJoin")))
{
/// Replace function node with result constant node
Field column_constant_value;
@@ -5433,9 +5517,9 @@ ProjectionNames QueryAnalyzer::resolveExpressionNode(QueryTreeNodePtr & node, Id
}
}
if (node
&& scope.nullable_group_by_keys.contains(node)
&& !scope.expressions_in_resolve_process_stack.hasAggregateFunction())
validateTreeSize(node, scope.context->getSettingsRef().max_expanded_ast_elements, node_to_tree_size);
if (scope.nullable_group_by_keys.contains(node) && !scope.expressions_in_resolve_process_stack.hasAggregateFunction())
{
node = node->clone();
node->convertToNullable();
@@ -6592,6 +6676,17 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
/// Resolve query node sections.
NamesAndTypes projection_columns;
if (!scope.group_by_use_nulls)
{
projection_columns = resolveProjectionExpressionNodeList(query_node_typed.getProjectionNode(), scope);
if (query_node_typed.getProjection().getNodes().empty())
throw Exception(ErrorCodes::EMPTY_LIST_OF_COLUMNS_QUERIED,
"Empty list of columns in projection. In scope {}",
scope.scope_node->formatASTForErrorMessage());
}
if (query_node_typed.hasWith())
resolveExpressionNodeList(query_node_typed.getWithNode(), scope, true /*allow_lambda_expression*/, false /*allow_table_expression*/);
@@ -6686,11 +6781,14 @@
convertLimitOffsetExpression(query_node_typed.getOffset(), "OFFSET", scope);
}
auto projection_columns = resolveProjectionExpressionNodeList(query_node_typed.getProjectionNode(), scope);
if (scope.group_by_use_nulls)
{
projection_columns = resolveProjectionExpressionNodeList(query_node_typed.getProjectionNode(), scope);
if (query_node_typed.getProjection().getNodes().empty())
throw Exception(ErrorCodes::EMPTY_LIST_OF_COLUMNS_QUERIED,
"Empty list of columns in projection. In scope {}",
scope.scope_node->formatASTForErrorMessage());
}
/** Resolve nodes with duplicate aliases.
* Table expressions cannot have duplicate aliases.
@@ -6757,6 +6855,15 @@
validateAggregates(query_node, { .group_by_use_nulls = scope.group_by_use_nulls });
for (const auto & column : projection_columns)
{
if (isNotCreatable(column.type))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Invalid projection column with type {}. In scope {}",
column.type->getName(),
scope.scope_node->formatASTForErrorMessage());
}
/** WITH section can be safely removed, because WITH section only can provide aliases to query expressions
* and CTE for other sections to use.
*
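
One notable addition in this file is the constant if() shortcut in resolveFunction above: the condition is resolved first, and when it folds to a constant (and the call has three arguments), the dead branch is allowed to fail resolution, with the live branch replacing the whole call. A standalone sketch of that try-the-dead-branch pattern, using a hypothetical resolver callback and plain strings instead of query tree nodes:

#include <functional>
#include <iostream>
#include <stdexcept>
#include <string>

using Resolver = std::function<void(const std::string &)>;

std::string resolveConstantIf(bool condition,
                              const std::string & then_branch,
                              const std::string & else_branch,
                              const Resolver & resolve)
{
    const std::string & live_branch = condition ? then_branch : else_branch;
    const std::string & dead_branch = condition ? else_branch : then_branch;

    bool apply_constant_if_optimization = false;
    try
    {
        resolve(dead_branch);   // the dead branch may reference invalid names
    }
    catch (...)
    {
        apply_constant_if_optimization = true;
    }

    if (apply_constant_if_optimization)
        return live_branch;     // replace the whole if(...) with the live branch

    return "if(" + std::string(condition ? "1" : "0") + ", " + then_branch + ", " + else_branch + ")";
}

int main()
{
    Resolver resolve = [](const std::string & name)
    {
        if (name == "not_existing_column")
            throw std::runtime_error("unknown identifier: " + name);
    };

    // Models the example from the hunk: the condition folded to false, so the
    // invalid THEN branch is discarded and 5 survives.
    std::cout << resolveConstantIf(false, "not_existing_column", "5", resolve) << '\n';
    return 0;
}
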

View File

@@ -60,13 +60,18 @@ bool TableNode::isEqualImpl(const IQueryTreeNode & rhs) const
}
void TableNode::updateTreeHashImpl(HashState & state) const
{
if (!temporary_table_name.empty())
{
state.update(temporary_table_name.size());
state.update(temporary_table_name);
}
else
{
auto full_name = storage_id.getFullNameNotQuoted();
state.update(full_name.size());
state.update(full_name);
state.update(temporary_table_name.size());
state.update(temporary_table_name);
}
if (table_expression_modifiers)
table_expression_modifiers->updateTreeHash(state);
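
The hash above feeds each name's size into the state before its bytes. A standalone illustration of why that length prefix matters when several strings go through one hash state, with a toy std::hash-based combiner standing in for SipHash:

#include <functional>
#include <iostream>
#include <string>

// Toy hash state; SipHash plays this role in the tree hash above.
struct HashState
{
    size_t state = 0;

    void update(size_t value)
    {
        state = std::hash<size_t>{}(state ^ (value + 0x9e3779b97f4a7c15ULL));
    }

    void update(const std::string & value)
    {
        for (unsigned char c : value)
            update(static_cast<size_t>(c));
    }
};

size_t hashTwoFields(const std::string & a, const std::string & b)
{
    HashState state;
    state.update(a.size());   // length prefix before the bytes
    state.update(a);
    state.update(b.size());
    state.update(b);
    return state.state;
}

int main()
{
    // Without the length prefixes, ("ab", "c") and ("a", "bc") would feed an
    // identical byte sequence into the state and hash equal.
    std::cout << (hashTwoFields("ab", "c") != hashTwoFields("a", "bc")) << '\n';   // 1
    return 0;
}
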

View File

@@ -8,6 +8,7 @@
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypeTuple.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/FunctionFactory.h>
@@ -32,6 +33,7 @@ namespace DB
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int BAD_ARGUMENTS;
}
bool isNodePartOfTree(const IQueryTreeNode * node, const IQueryTreeNode * root)
@@ -79,6 +81,75 @@ bool isNameOfInFunction(const std::string & function_name)
return is_special_function_in;
}
bool isNameOfLocalInFunction(const std::string & function_name)
{
bool is_special_function_in = function_name == "in" ||
function_name == "notIn" ||
function_name == "nullIn" ||
function_name == "notNullIn" ||
function_name == "inIgnoreSet" ||
function_name == "notInIgnoreSet" ||
function_name == "nullInIgnoreSet" ||
function_name == "notNullInIgnoreSet";
return is_special_function_in;
}
bool isNameOfGlobalInFunction(const std::string & function_name)
{
bool is_special_function_in = function_name == "globalIn" ||
function_name == "globalNotIn" ||
function_name == "globalNullIn" ||
function_name == "globalNotNullIn" ||
function_name == "globalInIgnoreSet" ||
function_name == "globalNotInIgnoreSet" ||
function_name == "globalNullInIgnoreSet" ||
function_name == "globalNotNullInIgnoreSet";
return is_special_function_in;
}
std::string getGlobalInFunctionNameForLocalInFunctionName(const std::string & function_name)
{
if (function_name == "in")
return "globalIn";
else if (function_name == "notIn")
return "globalNotIn";
else if (function_name == "nullIn")
return "globalNullIn";
else if (function_name == "notNullIn")
return "globalNotNullIn";
else if (function_name == "inIgnoreSet")
return "globalInIgnoreSet";
else if (function_name == "notInIgnoreSet")
return "globalNotInIgnoreSet";
else if (function_name == "nullInIgnoreSet")
return "globalNullInIgnoreSet";
else if (function_name == "notNullInIgnoreSet")
return "globalNotNullInIgnoreSet";
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Invalid local IN function name {}", function_name);
}
void makeUniqueColumnNamesInBlock(Block & block)
{
NameSet block_column_names;
size_t unique_column_name_counter = 1;
for (auto & column_with_type : block)
{
if (!block_column_names.contains(column_with_type.name))
{
block_column_names.insert(column_with_type.name);
continue;
}
column_with_type.name += '_';
column_with_type.name += std::to_string(unique_column_name_counter);
++unique_column_name_counter;
}
}
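
A standalone demonstration of the renaming scheme this function applies, with plain strings in place of a Block: first occurrences keep their name, and later duplicates draw suffixes from one shared counter (the renamed value is not re-checked for collisions, mirroring the function above):

#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

int main()
{
    std::vector<std::string> names{"x", "x", "y", "x"};

    std::unordered_set<std::string> seen;
    size_t unique_column_name_counter = 1;
    for (auto & name : names)
    {
        if (seen.insert(name).second)
            continue;                       // first occurrence keeps its name
        name += '_' + std::to_string(unique_column_name_counter);
        ++unique_column_name_counter;
    }

    for (const auto & name : names)
        std::cout << name << '\n';          // x, x_1, y, x_2
    return 0;
}
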
QueryTreeNodePtr buildCastFunction(const QueryTreeNodePtr & expression,
const DataTypePtr & type,
const ContextPtr & context,
@@ -102,6 +173,27 @@ QueryTreeNodePtr buildCastFunction(const QueryTreeNodePtr & expression,
return cast_function_node;
}
std::optional<bool> tryExtractConstantFromConditionNode(const QueryTreeNodePtr & condition_node)
{
const auto * constant_node = condition_node->as<ConstantNode>();
if (!constant_node)
return {};
const auto & value = constant_node->getValue();
auto constant_type = constant_node->getResultType();
constant_type = removeNullable(removeLowCardinality(constant_type));
auto which_constant_type = WhichDataType(constant_type);
if (!which_constant_type.isUInt8() && !which_constant_type.isNothing())
return {};
if (value.isNull())
return false;
UInt8 predicate_value = value.safeGet<UInt8>();
return predicate_value > 0;
}
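
The three outcomes of tryExtractConstantFromConditionNode, modeled standalone with std::optional and std::variant in place of ConstantNode: not a constant yields "unknown", a NULL constant behaves as false, and a UInt8 constant is compared with zero:

#include <iostream>
#include <optional>
#include <variant>

struct Null {};
using ConditionValue = std::variant<Null, unsigned char>;   // NULL or UInt8

std::optional<bool> tryExtractConstant(const std::optional<ConditionValue> & constant)
{
    if (!constant)                                // not a constant node
        return {};
    if (std::holds_alternative<Null>(*constant))
        return false;                             // NULL condition acts as false
    return std::get<unsigned char>(*constant) > 0;
}

int main()
{
    std::cout << tryExtractConstant(std::nullopt).has_value() << '\n';            // 0: unknown
    std::cout << *tryExtractConstant(ConditionValue{Null{}}) << '\n';             // 0: false
    std::cout << *tryExtractConstant(ConditionValue{(unsigned char)2}) << '\n';   // 1: true
    return 0;
}
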
static ASTPtr convertIntoTableExpressionAST(const QueryTreeNodePtr & table_expression_node)
{
ASTPtr table_expression_node_ast;

View File

@@ -13,6 +13,18 @@ bool isNodePartOfTree(const IQueryTreeNode * node, const IQueryTreeNode * root);
/// Returns true if function name is name of IN function or its variations, false otherwise
bool isNameOfInFunction(const std::string & function_name);
/// Returns true if function name is name of local IN function or its variations, false otherwise
bool isNameOfLocalInFunction(const std::string & function_name);
/// Returns true if function name is name of global IN function or its variations, false otherwise
bool isNameOfGlobalInFunction(const std::string & function_name);
/// Returns global IN function name for local IN function name
std::string getGlobalInFunctionNameForLocalInFunctionName(const std::string & function_name);
/// Add unique suffix to names of duplicate columns in block
void makeUniqueColumnNamesInBlock(Block & block);
/** Build cast function that cast expression into type.
* If resolve = true, then result cast function is resolved during build, otherwise
* result cast function is not resolved during build.
@@ -22,6 +34,9 @@ QueryTreeNodePtr buildCastFunction(const QueryTreeNodePtr & expression,
const ContextPtr & context,
bool resolve = true);
/// Try extract boolean constant from condition node
std::optional<bool> tryExtractConstantFromConditionNode(const QueryTreeNodePtr & condition_node);
/** Add table expression in tables in select query children.
* If table expression node is not of identifier node, table node, query node, table function node, join node or array join node type throws logical error exception.
*/

View File

@@ -16,6 +16,7 @@ namespace ErrorCodes
{
extern const int NOT_AN_AGGREGATE;
extern const int NOT_IMPLEMENTED;
extern const int BAD_ARGUMENTS;
}
class ValidateGroupByColumnsVisitor : public ConstInDepthQueryTreeVisitor<ValidateGroupByColumnsVisitor>
@@ -283,4 +284,52 @@ void assertNoFunctionNodes(const QueryTreeNodePtr & node,
visitor.visit(node);
}
void validateTreeSize(const QueryTreeNodePtr & node,
size_t max_size,
std::unordered_map<QueryTreeNodePtr, size_t> & node_to_tree_size)
{
size_t tree_size = 0;
std::vector<std::pair<QueryTreeNodePtr, bool>> nodes_to_process;
nodes_to_process.emplace_back(node, false);
while (!nodes_to_process.empty())
{
const auto [node_to_process, processed_children] = nodes_to_process.back();
nodes_to_process.pop_back();
if (processed_children)
{
++tree_size;
node_to_tree_size.emplace(node_to_process, tree_size);
continue;
}
auto node_to_size_it = node_to_tree_size.find(node_to_process);
if (node_to_size_it != node_to_tree_size.end())
{
tree_size += node_to_size_it->second;
continue;
}
nodes_to_process.emplace_back(node_to_process, true);
for (const auto & node_to_process_child : node_to_process->getChildren())
{
if (!node_to_process_child)
continue;
nodes_to_process.emplace_back(node_to_process_child, false);
}
auto * constant_node = node_to_process->as<ConstantNode>();
if (constant_node && constant_node->hasSourceExpression())
nodes_to_process.emplace_back(constant_node->getSourceExpression(), false);
}
if (tree_size > max_size)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Query tree is too big. Maximum: {}",
max_size);
}
}
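
The walk above measures a possibly DAG-shaped tree without recursion: an explicit stack carries a visited flag per node, and already-measured subtrees are re-counted from the map instead of being traversed again. A standalone variant of the same technique, with a simplified Node type, the memo keyed by pointer and storing per-subtree sizes, and the ConstantNode source-expression wrinkle omitted:

#include <iostream>
#include <memory>
#include <unordered_map>
#include <vector>

struct Node
{
    std::vector<std::shared_ptr<Node>> children;
};

struct Frame
{
    std::shared_ptr<Node> node;
    bool processed_children = false;
    size_t size_before = 0;   // total count when this node was first seen
};

size_t treeSize(const std::shared_ptr<Node> & root,
                std::unordered_map<const Node *, size_t> & node_to_tree_size)
{
    size_t tree_size = 0;
    std::vector<Frame> nodes_to_process;
    nodes_to_process.push_back({root, false, 0});

    while (!nodes_to_process.empty())
    {
        Frame frame = nodes_to_process.back();
        nodes_to_process.pop_back();

        if (frame.processed_children)
        {
            ++tree_size;   // count the node once its children are counted
            node_to_tree_size.emplace(frame.node.get(), tree_size - frame.size_before);
            continue;
        }

        if (auto it = node_to_tree_size.find(frame.node.get()); it != node_to_tree_size.end())
        {
            tree_size += it->second;   // subtree seen before: reuse its size
            continue;
        }

        nodes_to_process.push_back({frame.node, true, tree_size});
        for (const auto & child : frame.node->children)
            nodes_to_process.push_back({child, false, 0});
    }

    return tree_size;
}

int main()
{
    auto leaf = std::make_shared<Node>();
    auto shared_subtree = std::make_shared<Node>(Node{{leaf}});
    auto root = std::make_shared<Node>(Node{{shared_subtree, shared_subtree}});

    std::unordered_map<const Node *, size_t> sizes;
    std::cout << treeSize(root, sizes) << '\n';   // 5: root plus two uses of a size-2 subtree
    return 0;
}
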

View File

@@ -7,7 +7,7 @@ namespace DB
struct ValidationParams
{
bool group_by_use_nulls;
bool group_by_use_nulls = false;
};
/** Validate aggregates in query node.
@@ -31,4 +31,11 @@ void assertNoFunctionNodes(const QueryTreeNodePtr & node,
std::string_view exception_function_name,
std::string_view exception_place_message);
/** Validate tree size. If size of tree is greater than max size throws exception.
* Additionally for each node in tree, update node to tree size map.
*/
void validateTreeSize(const QueryTreeNodePtr & node,
size_t max_size,
std::unordered_map<QueryTreeNodePtr, size_t> & node_to_tree_size);
}

View File

@@ -113,11 +113,17 @@ ASTPtr WindowNode::toASTImpl() const
window_definition->parent_window_name = parent_window_name;
if (hasPartitionBy())
{
window_definition->children.push_back(getPartitionByNode()->toAST());
window_definition->partition_by = window_definition->children.back();
}
if (hasOrderBy())
{
window_definition->children.push_back(getOrderByNode()->toAST());
window_definition->order_by = window_definition->children.back();
}
window_definition->frame_is_default = window_frame.is_default;
window_definition->frame_type = window_frame.type;

View File

@@ -38,6 +38,8 @@ public:
String getName() const override { return name; }
size_t getNumberOfArguments() const override { return 0; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
bool isDeterministic() const override { return false; }
bool isDeterministicInScopeOfQuery() const override { return false; }
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{

View File

@@ -44,6 +44,10 @@ public:
const auto & on_expr = table_join->getOnlyClause();
bool support_conditions = !on_expr.on_filter_condition_left && !on_expr.on_filter_condition_right;
if (!on_expr.analyzer_left_filter_condition_column_name.empty() ||
!on_expr.analyzer_right_filter_condition_column_name.empty())
support_conditions = false;
/// Key column can change nullability and it's not handled on type conversion stage, so algorithm should be aware of it
bool support_using_and_nulls = !table_join->hasUsing() || !table_join->joinUseNulls();

View File

@@ -226,6 +226,12 @@ BlockIO InterpreterSelectQueryAnalyzer::execute()
return result;
}
QueryPlan & InterpreterSelectQueryAnalyzer::getQueryPlan()
{
planner.buildQueryPlanIfNeeded();
return planner.getQueryPlan();
}
QueryPlan && InterpreterSelectQueryAnalyzer::extractQueryPlan() &&
{
planner.buildQueryPlanIfNeeded();

View File

@@ -51,6 +51,8 @@ public:
BlockIO execute() override;
QueryPlan & getQueryPlan();
QueryPlan && extractQueryPlan() &&;
QueryPipelineBuilder buildQueryPipeline();

View File

@@ -214,9 +214,14 @@ public:
{
/// Constness of limit is validated during query analysis stage
limit_length = query_node.getLimit()->as<ConstantNode &>().getValue().safeGet<UInt64>();
}
if (query_node.hasOffset())
if (query_node.hasOffset() && limit_length)
{
/// Constness of offset is validated during query analysis stage
limit_offset = query_node.getOffset()->as<ConstantNode &>().getValue().safeGet<UInt64>();
}
}
else if (query_node.hasOffset())
{
/// Constness of offset is validated during query analysis stage
limit_offset = query_node.getOffset()->as<ConstantNode &>().getValue().safeGet<UInt64>();

View File

@@ -45,7 +45,7 @@ bool GlobalPlannerContext::hasColumnIdentifier(const ColumnIdentifier & column_i
return column_identifiers.contains(column_identifier);
}
PlannerContext::PlannerContext(ContextPtr query_context_, GlobalPlannerContextPtr global_planner_context_)
PlannerContext::PlannerContext(ContextMutablePtr query_context_, GlobalPlannerContextPtr global_planner_context_)
: query_context(std::move(query_context_))
, global_planner_context(std::move(global_planner_context_))
{}

View File

@@ -88,16 +88,22 @@ class PlannerContext
{
public:
/// Create planner context with query context and global planner context
PlannerContext(ContextPtr query_context_, GlobalPlannerContextPtr global_planner_context_);
PlannerContext(ContextMutablePtr query_context_, GlobalPlannerContextPtr global_planner_context_);
/// Get planner context query context
const ContextPtr & getQueryContext() const
ContextPtr getQueryContext() const
{
return query_context;
}
/// Get planner context query context
ContextPtr & getQueryContext()
/// Get planner context mutable query context
const ContextMutablePtr & getMutableQueryContext() const
{
return query_context;
}
/// Get planner context mutable query context
ContextMutablePtr & getMutableQueryContext()
{
return query_context;
}
@@ -137,12 +143,18 @@ public:
*/
TableExpressionData * getTableExpressionDataOrNull(const QueryTreeNodePtr & table_expression_node);
/// Get table expression node to data read only map
/// Get table expression node to data map
const std::unordered_map<QueryTreeNodePtr, TableExpressionData> & getTableExpressionNodeToData() const
{
return table_expression_node_to_data;
}
/// Get table expression node to data map
std::unordered_map<QueryTreeNodePtr, TableExpressionData> & getTableExpressionNodeToData()
{
return table_expression_node_to_data;
}
/** Get column node identifier.
* For column node source check if table expression data is registered.
* If table expression data is not registered exception is thrown.
@@ -184,7 +196,7 @@ public:
private:
/// Query context
ContextPtr query_context;
ContextMutablePtr query_context;
/// Global planner context
GlobalPlannerContextPtr global_planner_context;

View File

@@ -34,15 +34,13 @@ namespace
* It is client responsibility to update filter analysis result if filter column must be removed after chain is finalized.
*/
FilterAnalysisResult analyzeFilter(const QueryTreeNodePtr & filter_expression_node,
const ColumnsWithTypeAndName & current_output_columns,
const ColumnsWithTypeAndName & input_columns,
const PlannerContextPtr & planner_context,
ActionsChain & actions_chain)
{
const auto & filter_input = current_output_columns;
FilterAnalysisResult result;
result.filter_actions = buildActionsDAGFromExpressionNode(filter_expression_node, filter_input, planner_context);
result.filter_actions = buildActionsDAGFromExpressionNode(filter_expression_node, input_columns, planner_context);
result.filter_column_name = result.filter_actions->getOutputs().at(0)->result_name;
actions_chain.addStep(std::make_unique<ActionsChainStep>(result.filter_actions));
@@ -52,8 +50,8 @@ FilterAnalysisResult analyzeFilter(const QueryTreeNodePtr & filter_expression_no
/** Construct aggregation analysis result if query tree has GROUP BY or aggregates.
* Actions before aggregation are added into actions chain, if result is not null optional.
*/
std::pair<std::optional<AggregationAnalysisResult>, std::optional<ColumnsWithTypeAndName>> analyzeAggregation(const QueryTreeNodePtr & query_tree,
const ColumnsWithTypeAndName & current_output_columns,
std::optional<AggregationAnalysisResult> analyzeAggregation(const QueryTreeNodePtr & query_tree,
const ColumnsWithTypeAndName & input_columns,
const PlannerContextPtr & planner_context,
ActionsChain & actions_chain)
{
@@ -69,9 +67,7 @@ std::pair<std::optional<AggregationAnalysisResult>, std::optional<ColumnsWithTyp
Names aggregation_keys;
const auto & group_by_input = current_output_columns;
ActionsDAGPtr before_aggregation_actions = std::make_shared<ActionsDAG>(group_by_input);
ActionsDAGPtr before_aggregation_actions = std::make_shared<ActionsDAG>(input_columns);
before_aggregation_actions->getOutputs().clear();
std::unordered_set<std::string_view> before_aggregation_actions_output_node_names;
@@ -203,14 +199,14 @@ std::pair<std::optional<AggregationAnalysisResult>, std::optional<ColumnsWithTyp
aggregation_analysis_result.grouping_sets_parameters_list = std::move(grouping_sets_parameters_list);
aggregation_analysis_result.group_by_with_constant_keys = group_by_with_constant_keys;
return { aggregation_analysis_result, available_columns_after_aggregation };
return aggregation_analysis_result;
}
/** Construct window analysis result if query tree has window functions.
* Actions before window functions are added into actions chain, if result is not null optional.
*/
std::optional<WindowAnalysisResult> analyzeWindow(const QueryTreeNodePtr & query_tree,
const ColumnsWithTypeAndName & current_output_columns,
const ColumnsWithTypeAndName & input_columns,
const PlannerContextPtr & planner_context,
ActionsChain & actions_chain)
{
@@ -220,11 +216,9 @@ std::optional<WindowAnalysisResult> analyzeWindow(const QueryTreeNodePtr & query
auto window_descriptions = extractWindowDescriptions(window_function_nodes, *planner_context);
const auto & window_input = current_output_columns;
PlannerActionsVisitor actions_visitor(planner_context);
ActionsDAGPtr before_window_actions = std::make_shared<ActionsDAG>(window_input);
ActionsDAGPtr before_window_actions = std::make_shared<ActionsDAG>(input_columns);
before_window_actions->getOutputs().clear();
std::unordered_set<std::string_view> before_window_actions_output_node_names;
@@ -299,12 +293,11 @@ std::optional<WindowAnalysisResult> analyzeWindow(const QueryTreeNodePtr & query
* It is client responsibility to update projection analysis result with project names actions after chain is finalized.
*/
ProjectionAnalysisResult analyzeProjection(const QueryNode & query_node,
const ColumnsWithTypeAndName & current_output_columns,
const ColumnsWithTypeAndName & input_columns,
const PlannerContextPtr & planner_context,
ActionsChain & actions_chain)
{
const auto & projection_input = current_output_columns;
auto projection_actions = buildActionsDAGFromExpressionNode(query_node.getProjectionNode(), projection_input, planner_context);
auto projection_actions = buildActionsDAGFromExpressionNode(query_node.getProjectionNode(), input_columns, planner_context);
auto projection_columns = query_node.getProjectionColumns();
size_t projection_columns_size = projection_columns.size();
@@ -347,13 +340,11 @@ ProjectionAnalysisResult analyzeProjection(const QueryNode & query_node,
* Actions before sort are added into actions chain.
*/
SortAnalysisResult analyzeSort(const QueryNode & query_node,
const ColumnsWithTypeAndName & current_output_columns,
const ColumnsWithTypeAndName & input_columns,
const PlannerContextPtr & planner_context,
ActionsChain & actions_chain)
{
const auto & order_by_input = current_output_columns;
ActionsDAGPtr before_sort_actions = std::make_shared<ActionsDAG>(order_by_input);
ActionsDAGPtr before_sort_actions = std::make_shared<ActionsDAG>(input_columns);
auto & before_sort_actions_outputs = before_sort_actions->getOutputs();
before_sort_actions_outputs.clear();
@@ -436,13 +427,12 @@ SortAnalysisResult analyzeSort(const QueryNode & query_node,
* Actions before limit by are added into actions chain.
*/
LimitByAnalysisResult analyzeLimitBy(const QueryNode & query_node,
const ColumnsWithTypeAndName & current_output_columns,
const ColumnsWithTypeAndName & input_columns,
const PlannerContextPtr & planner_context,
const NameSet & required_output_nodes_names,
ActionsChain & actions_chain)
{
const auto & limit_by_input = current_output_columns;
auto before_limit_by_actions = buildActionsDAGFromExpressionNode(query_node.getLimitByNode(), limit_by_input, planner_context);
auto before_limit_by_actions = buildActionsDAGFromExpressionNode(query_node.getLimitByNode(), input_columns, planner_context);
NameSet limit_by_column_names_set;
Names limit_by_column_names;
@@ -480,8 +470,7 @@ PlannerExpressionsAnalysisResult buildExpressionAnalysisResult(const QueryTreeNo
std::optional<FilterAnalysisResult> where_analysis_result_optional;
std::optional<size_t> where_action_step_index_optional;
const auto * input_columns = actions_chain.getLastStepAvailableOutputColumnsOrNull();
ColumnsWithTypeAndName current_output_columns = input_columns ? *input_columns : join_tree_input_columns;
ColumnsWithTypeAndName current_output_columns = join_tree_input_columns;
if (query_node.hasWhere())
{
@@ -490,9 +479,9 @@ PlannerExpressionsAnalysisResult buildExpressionAnalysisResult(const QueryTreeNo
current_output_columns = actions_chain.getLastStepAvailableOutputColumns();
}
auto [aggregation_analysis_result_optional, aggregated_columns_optional] = analyzeAggregation(query_tree, current_output_columns, planner_context, actions_chain);
if (aggregated_columns_optional)
current_output_columns = std::move(*aggregated_columns_optional);
auto aggregation_analysis_result_optional = analyzeAggregation(query_tree, current_output_columns, planner_context, actions_chain);
if (aggregation_analysis_result_optional)
current_output_columns = actions_chain.getLastStepAvailableOutputColumns();
std::optional<FilterAnalysisResult> having_analysis_result_optional;
std::optional<size_t> having_action_step_index_optional;

View File

@@ -246,17 +246,87 @@ bool applyTrivialCountIfPossible(
return true;
}
JoinTreeQueryPlan buildQueryPlanForTableExpression(const QueryTreeNodePtr & table_expression,
const SelectQueryInfo & select_query_info,
const SelectQueryOptions & select_query_options,
PlannerContextPtr & planner_context,
bool is_single_table_expression)
void prepareBuildQueryPlanForTableExpression(const QueryTreeNodePtr & table_expression, PlannerContextPtr & planner_context)
{
const auto & query_context = planner_context->getQueryContext();
const auto & settings = query_context->getSettingsRef();
auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression);
auto columns_names = table_expression_data.getColumnNames();
auto * table_node = table_expression->as<TableNode>();
auto * table_function_node = table_expression->as<TableFunctionNode>();
auto * query_node = table_expression->as<QueryNode>();
auto * union_node = table_expression->as<UnionNode>();
/** The current user must have the SELECT privilege.
* We do not check access rights for table functions because they have been already checked in ITableFunction::execute().
*/
if (table_node)
{
auto column_names_with_aliases = columns_names;
const auto & alias_columns_names = table_expression_data.getAliasColumnsNames();
column_names_with_aliases.insert(column_names_with_aliases.end(), alias_columns_names.begin(), alias_columns_names.end());
checkAccessRights(*table_node, column_names_with_aliases, query_context);
}
if (columns_names.empty())
{
NameAndTypePair additional_column_to_read;
if (table_node || table_function_node)
{
const auto & storage = table_node ? table_node->getStorage() : table_function_node->getStorage();
const auto & storage_snapshot = table_node ? table_node->getStorageSnapshot() : table_function_node->getStorageSnapshot();
additional_column_to_read = chooseSmallestColumnToReadFromStorage(storage, storage_snapshot);
}
else if (query_node || union_node)
{
const auto & projection_columns = query_node ? query_node->getProjectionColumns() : union_node->computeProjectionColumns();
NamesAndTypesList projection_columns_list(projection_columns.begin(), projection_columns.end());
additional_column_to_read = ExpressionActions::getSmallestColumn(projection_columns_list);
}
else
{
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected table, table function, query or union. Actual {}",
table_expression->formatASTForErrorMessage());
}
auto & global_planner_context = planner_context->getGlobalPlannerContext();
const auto & column_identifier = global_planner_context->createColumnIdentifier(additional_column_to_read, table_expression);
columns_names.push_back(additional_column_to_read.name);
table_expression_data.addColumn(additional_column_to_read, column_identifier);
}
/// Limitation on the number of columns to read
if (settings.max_columns_to_read && columns_names.size() > settings.max_columns_to_read)
throw Exception(ErrorCodes::TOO_MANY_COLUMNS,
"Limit for number of columns to read exceeded. Requested: {}, maximum: {}",
columns_names.size(),
settings.max_columns_to_read);
}
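
When the query needs no columns at all (SELECT count() is the classic case), the code above still registers one column to read so the storage produces the right number of rows, and it picks the cheapest one. A standalone sketch of that selection, with hypothetical per-row size estimates standing in for chooseSmallestColumnToReadFromStorage and ExpressionActions::getSmallestColumn:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

struct ColumnDescription
{
    std::string name;
    size_t value_size;   // hypothetical per-row size estimate for the type
};

int main()
{
    std::vector<ColumnDescription> columns{
        {"payload", 256}, {"event_time", 8}, {"flag", 1}};

    std::vector<std::string> columns_to_read;   // empty, as for SELECT count()
    if (columns_to_read.empty())
    {
        auto smallest = std::min_element(columns.begin(), columns.end(),
            [](const auto & lhs, const auto & rhs) { return lhs.value_size < rhs.value_size; });
        columns_to_read.push_back(smallest->name);
    }

    std::cout << "reading column: " << columns_to_read.front() << '\n';   // flag
    return 0;
}
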
JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression,
const SelectQueryInfo & select_query_info,
const SelectQueryOptions & select_query_options,
PlannerContextPtr & planner_context,
bool is_single_table_expression,
bool wrap_read_columns_in_subquery)
{
const auto & query_context = planner_context->getQueryContext();
const auto & settings = query_context->getSettingsRef();
auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression);
QueryProcessingStage::Enum from_stage = QueryProcessingStage::Enum::FetchColumns;
if (wrap_read_columns_in_subquery)
{
auto columns = table_expression_data.getColumns();
table_expression = buildSubqueryToReadColumnsFromTableExpression(columns, table_expression, query_context);
}
auto * table_node = table_expression->as<TableNode>();
auto * table_function_node = table_expression->as<TableFunctionNode>();
auto * query_node = table_expression->as<QueryNode>();
@@ -264,8 +334,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(const QueryTreeNodePtr & tabl
QueryPlan query_plan;
auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression);
if (table_node || table_function_node)
{
const auto & storage = table_node ? table_node->getStorage() : table_function_node->getStorage();
@@ -362,32 +430,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(const QueryTreeNodePtr & tabl
auto columns_names = table_expression_data.getColumnNames();
/** The current user must have the SELECT privilege.
* We do not check access rights for table functions because they have been already checked in ITableFunction::execute().
*/
if (table_node)
{
auto column_names_with_aliases = columns_names;
const auto & alias_columns_names = table_expression_data.getAliasColumnsNames();
column_names_with_aliases.insert(column_names_with_aliases.end(), alias_columns_names.begin(), alias_columns_names.end());
checkAccessRights(*table_node, column_names_with_aliases, planner_context->getQueryContext());
}
/// Limitation on the number of columns to read
if (settings.max_columns_to_read && columns_names.size() > settings.max_columns_to_read)
throw Exception(ErrorCodes::TOO_MANY_COLUMNS,
"Limit for number of columns to read exceeded. Requested: {}, maximum: {}",
columns_names.size(),
settings.max_columns_to_read);
if (columns_names.empty())
{
auto additional_column_to_read = chooseSmallestColumnToReadFromStorage(storage, storage_snapshot);
const auto & column_identifier = planner_context->getGlobalPlannerContext()->createColumnIdentifier(additional_column_to_read, table_expression);
columns_names.push_back(additional_column_to_read.name);
table_expression_data.addColumn(additional_column_to_read, column_identifier);
}
bool need_rewrite_query_with_final = storage->needRewriteQueryWithFinal(columns_names);
if (need_rewrite_query_with_final)
{
@@ -423,6 +465,17 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(const QueryTreeNodePtr & tabl
{
from_stage = storage->getQueryProcessingStage(query_context, select_query_options.to_stage, storage_snapshot, table_expression_query_info);
storage->read(query_plan, columns_names, storage_snapshot, table_expression_query_info, query_context, from_stage, max_block_size, max_streams);
if (query_context->hasQueryContext() && !select_query_options.is_internal)
{
auto local_storage_id = storage->getStorageID();
query_context->getQueryContext()->addQueryAccessInfo(
backQuoteIfNeed(local_storage_id.getDatabaseName()),
local_storage_id.getFullTableName(),
columns_names,
{},
{});
}
}
if (query_plan.isInitialized())
@@ -464,16 +517,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(const QueryTreeNodePtr & tabl
}
else
{
if (table_expression_data.getColumnNames().empty())
{
const auto & projection_columns = query_node ? query_node->getProjectionColumns() : union_node->computeProjectionColumns();
NamesAndTypesList projection_columns_list(projection_columns.begin(), projection_columns.end());
auto additional_column_to_read = ExpressionActions::getSmallestColumn(projection_columns_list);
const auto & column_identifier = planner_context->getGlobalPlannerContext()->createColumnIdentifier(additional_column_to_read, table_expression);
table_expression_data.addColumn(additional_column_to_read, column_identifier);
}
auto subquery_options = select_query_options.subquery();
Planner subquery_planner(table_expression, subquery_options, planner_context->getGlobalPlannerContext());
/// Propagate storage limits to subquery
@@ -516,10 +559,11 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(const QueryTreeNodePtr & tabl
planner.buildQueryPlanIfNeeded();
auto expected_header = planner.getQueryPlan().getCurrentDataStream().header;
materializeBlockInplace(expected_header);
if (!blocksHaveEqualStructure(query_plan.getCurrentDataStream().header, expected_header))
{
materializeBlockInplace(expected_header);
auto rename_actions_dag = ActionsDAG::makeConvertingActions(
query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(),
expected_header.getColumnsWithTypeAndName(),
@@ -1059,14 +1103,40 @@ JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node,
const ColumnIdentifierSet & outer_scope_columns,
PlannerContextPtr & planner_context)
{
const auto & query_node_typed = query_node->as<QueryNode &>();
auto table_expressions_stack = buildTableExpressionsStack(query_node_typed.getJoinTree());
auto table_expressions_stack = buildTableExpressionsStack(query_node->as<QueryNode &>().getJoinTree());
size_t table_expressions_stack_size = table_expressions_stack.size();
bool is_single_table_expression = table_expressions_stack_size == 1;
std::vector<ColumnIdentifierSet> table_expressions_outer_scope_columns(table_expressions_stack_size);
ColumnIdentifierSet current_outer_scope_columns = outer_scope_columns;
/// For each table, table function, query, union table expressions prepare before query plan build
for (size_t i = 0; i < table_expressions_stack_size; ++i)
{
const auto & table_expression = table_expressions_stack[i];
auto table_expression_type = table_expression->getNodeType();
if (table_expression_type == QueryTreeNodeType::JOIN ||
table_expression_type == QueryTreeNodeType::ARRAY_JOIN)
continue;
prepareBuildQueryPlanForTableExpression(table_expression, planner_context);
}
/** If left most table expression query plan is planned to stage that is not equal to fetch columns,
* then left most table expression is responsible for providing valid JOIN TREE part of final query plan.
*
* Examples: Distributed, LiveView, Merge storages.
*/
auto left_table_expression = table_expressions_stack.front();
auto left_table_expression_query_plan = buildQueryPlanForTableExpression(left_table_expression,
select_query_info,
select_query_options,
planner_context,
is_single_table_expression,
false /*wrap_read_columns_in_subquery*/);
if (left_table_expression_query_plan.from_stage != QueryProcessingStage::FetchColumns)
return left_table_expression_query_plan;
for (Int64 i = static_cast<Int64>(table_expressions_stack_size) - 1; i >= 0; --i)
{
table_expressions_outer_scope_columns[i] = current_outer_scope_columns;
@@ -1120,19 +1190,23 @@
}
else
{
const auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression);
if (table_expression_data.isRemote() && i != 0)
throw Exception(ErrorCodes::UNSUPPORTED_METHOD,
"JOIN with multiple remote storages is unsupported");
if (table_expression == left_table_expression)
{
query_plans_stack.push_back(std::move(left_table_expression_query_plan));
left_table_expression = {};
continue;
}
/** If table expression is remote and it is not left most table expression, we wrap read columns from such
* table expression in subquery.
*/
bool is_remote = planner_context->getTableExpressionDataOrThrow(table_expression).isRemote();
query_plans_stack.push_back(buildQueryPlanForTableExpression(table_expression,
select_query_info,
select_query_options,
planner_context,
is_single_table_expression));
if (query_plans_stack.back().from_stage != QueryProcessingStage::FetchColumns)
break;
is_single_table_expression,
is_remote /*wrap_read_columns_in_subquery*/));
}
}

View File

@@ -18,6 +18,7 @@
#include <Functions/IFunction.h>
#include <Functions/FunctionFactory.h>
#include <Analyzer/Utils.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/TableNode.h>
@@ -61,6 +62,8 @@ void JoinClause::dump(WriteBuffer & buffer) const
for (const auto & dag_node : dag_nodes)
{
dag_nodes_dump += dag_node->result_name;
dag_nodes_dump += " ";
dag_nodes_dump += dag_node->result_type->getName();
dag_nodes_dump += ", ";
}

View File

@@ -101,6 +101,17 @@ public:
return column_names;
}
NamesAndTypes getColumns() const
{
NamesAndTypes result;
result.reserve(column_names.size());
for (const auto & column_name : column_names)
result.push_back(column_name_to_column.at(column_name));
return result;
}
ColumnIdentifiers getColumnIdentifiers() const
{
ColumnIdentifiers result;

View File

@@ -4,6 +4,7 @@
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTSubquery.h>
#include <DataTypes/DataTypesNumber.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeNullable.h>
@@ -19,6 +20,7 @@
#include <Analyzer/Utils.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/ColumnNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/QueryNode.h>
#include <Analyzer/UnionNode.h>
@@ -341,27 +343,6 @@ QueryTreeNodePtr mergeConditionNodes(const QueryTreeNodes & condition_nodes, con
return function_node;
}
std::optional<bool> tryExtractConstantFromConditionNode(const QueryTreeNodePtr & condition_node)
{
const auto * constant_node = condition_node->as<ConstantNode>();
if (!constant_node)
return {};
const auto & value = constant_node->getValue();
auto constant_type = constant_node->getResultType();
constant_type = removeNullable(removeLowCardinality(constant_type));
auto which_constant_type = WhichDataType(constant_type);
if (!which_constant_type.isUInt8() && !which_constant_type.isNothing())
return {};
if (value.isNull())
return false;
UInt8 predicate_value = value.safeGet<UInt8>();
return predicate_value > 0;
}
QueryTreeNodePtr replaceTablesAndTableFunctionsWithDummyTables(const QueryTreeNodePtr & query_node,
const ContextPtr & context,
ResultReplacementMap * result_replacement_map)
@@ -391,4 +372,36 @@ QueryTreeNodePtr replaceTablesAndTableFunctionsWithDummyTables(const QueryTreeNo
return query_node->cloneAndReplace(replacement_map);
}
QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(const NamesAndTypes & columns,
const QueryTreeNodePtr & table_expression,
const ContextPtr & context)
{
auto projection_columns = columns;
QueryTreeNodes subquery_projection_nodes;
subquery_projection_nodes.reserve(projection_columns.size());
for (const auto & column : projection_columns)
subquery_projection_nodes.push_back(std::make_shared<ColumnNode>(column, table_expression));
if (subquery_projection_nodes.empty())
{
auto constant_data_type = std::make_shared<DataTypeUInt64>();
subquery_projection_nodes.push_back(std::make_shared<ConstantNode>(1UL, constant_data_type));
projection_columns.push_back({"1", std::move(constant_data_type)});
}
auto context_copy = Context::createCopy(context);
updateContextForSubqueryExecution(context_copy);
auto query_node = std::make_shared<QueryNode>(std::move(context_copy));
query_node->resolveProjectionColumns(projection_columns);
query_node->getProjection().getNodes() = std::move(subquery_projection_nodes);
query_node->getJoinTree() = table_expression;
query_node->setIsSubquery(true);
return query_node;
}
}
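
The wrapping rule implemented above, rendered as SQL text for illustration only (the real function assembles a QueryNode, not a string): project the requested columns from the table expression, or a constant 1 when nothing is needed, so the subquery always has a valid projection. The planner uses this to wrap remote table expressions that are not the left-most JOIN input.

#include <iostream>
#include <string>
#include <vector>

std::string buildSubquerySQL(const std::vector<std::string> & columns, const std::string & table_expression)
{
    std::string projection;
    for (const auto & column : columns)
        projection += (projection.empty() ? "" : ", ") + column;

    if (projection.empty())
        projection = "1";   // the subquery must project at least one column

    return "(SELECT " + projection + " FROM " + table_expression + ")";
}

int main()
{
    std::cout << buildSubquerySQL({"id", "value"}, "remote_table") << '\n';
    std::cout << buildSubquerySQL({}, "remote_table") << '\n';
    return 0;
}
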

View File

@@ -63,13 +63,15 @@ bool queryHasWithTotalsInAnySubqueryInJoinTree(const QueryTreeNodePtr & query_no
/// Returns `and` function node that has condition nodes as its arguments
QueryTreeNodePtr mergeConditionNodes(const QueryTreeNodes & condition_nodes, const ContextPtr & context);
/// Try extract boolean constant from condition node
std::optional<bool> tryExtractConstantFromConditionNode(const QueryTreeNodePtr & condition_node);
/// Replace tables nodes and table function nodes with dummy table nodes
using ResultReplacementMap = std::unordered_map<QueryTreeNodePtr, QueryTreeNodePtr>;
QueryTreeNodePtr replaceTablesAndTableFunctionsWithDummyTables(const QueryTreeNodePtr & query_node,
const ContextPtr & context,
ResultReplacementMap * result_replacement_map = nullptr);
/// Build subquery to read specified columns from table expression
QueryTreeNodePtr buildSubqueryToReadColumnsFromTableExpression(const NamesAndTypes & columns,
const QueryTreeNodePtr & table_expression,
const ContextPtr & context);
}

View File

@@ -13,6 +13,7 @@
#include <Interpreters/getHeaderForProcessingStage.h>
#include <Interpreters/SelectQueryOptions.h>
#include <Interpreters/InterpreterSelectQuery.h>
#include <Interpreters/InterpreterSelectQueryAnalyzer.h>
#include <QueryPipeline/narrowPipe.h>
#include <QueryPipeline/Pipe.h>
#include <QueryPipeline/RemoteQueryExecutor.h>
@@ -83,8 +84,12 @@ Pipe StorageHDFSCluster::read(
auto extension = getTaskIteratorExtension(query_info.query, context);
/// Calculate the header. This is significant, because some columns could be thrown away in some cases like query with count(*)
Block header =
InterpreterSelectQuery(query_info.query, context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock();
Block header;
if (context->getSettingsRef().allow_experimental_analyzer)
header = InterpreterSelectQueryAnalyzer::getSampleBlock(query_info.query, context, SelectQueryOptions(processed_stage).analyze());
else
header = InterpreterSelectQuery(query_info.query, context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock();
const Scalars & scalars = context->hasQueryContext() ? context->getQueryContext()->getScalars() : Scalars{};

View File

@@ -201,6 +201,7 @@ MergeTreeConditionInverted::MergeTreeConditionInverted(
rpn.push_back(RPNElement::FUNCTION_UNKNOWN);
return;
}
rpn = std::move(
RPNBuilder<RPNElement>(
query_info.filter_actions_dag->getOutputs().at(0), context_,
@@ -208,10 +209,10 @@
{
return this->traverseAtomAST(node, out);
}).extractRPN());
return;
}
ASTPtr filter_node = buildFilterNode(query_info.query);
if (!filter_node)
{
rpn.push_back(RPNElement::FUNCTION_UNKNOWN);
@@ -226,7 +227,6 @@ MergeTreeConditionInverted::MergeTreeConditionInverted(
query_info.prepared_sets,
[&](const RPNBuilderTreeNode & node, RPNElement & out) { return traverseAtomAST(node, out); });
rpn = std::move(builder).extractRPN();
}
/// Keep in-sync with MergeTreeConditionFullText::alwaysUnknownOrTrue

View File

@@ -59,7 +59,7 @@ void appendColumnNameWithoutAlias(const ActionsDAG::Node & node, WriteBuffer & o
{
auto name = node.function_base->getName();
if (legacy && name == "modulo")
writeCString("moduleLegacy", out);
writeCString("moduloLegacy", out);
else
writeString(name, out);

View File

@@ -39,11 +39,16 @@
#include <Parsers/parseQuery.h>
#include <Parsers/IAST.h>
#include <Analyzer/Utils.h>
#include <Analyzer/ColumnNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/TableNode.h>
#include <Analyzer/TableFunctionNode.h>
#include <Analyzer/QueryNode.h>
#include <Analyzer/JoinNode.h>
#include <Analyzer/QueryTreeBuilder.h>
#include <Analyzer/Passes/QueryAnalysisPass.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Planner/Planner.h>
#include <Planner/Utils.h>
@@ -55,6 +60,7 @@
#include <Interpreters/ExpressionAnalyzer.h>
#include <Interpreters/InterpreterDescribeQuery.h>
#include <Interpreters/InterpreterSelectQuery.h>
#include <Interpreters/InterpreterSelectQueryAnalyzer.h>
#include <Interpreters/InterpreterInsertQuery.h>
#include <Interpreters/JoinedTables.h>
#include <Interpreters/TranslateQualifiedNamesVisitor.h>
@@ -69,12 +75,14 @@
#include <Interpreters/getCustomKeyFilterForParallelReplicas.h>
#include <Functions/IFunction.h>
#include <Functions/FunctionFactory.h>
#include <TableFunctions/TableFunctionView.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <Storages/IStorageCluster.h>
#include <Processors/Executors/PushingPipelineExecutor.h>
#include <Processors/Executors/CompletedPipelineExecutor.h>
#include <Processors/QueryPlan/QueryPlan.h>
#include <Processors/QueryPlan/BuildQueryPipelineSettings.h>
#include <Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h>
@@ -138,6 +146,7 @@ namespace ErrorCodes
extern const int DISTRIBUTED_TOO_MANY_PENDING_BYTES;
extern const int ARGUMENT_OUT_OF_BOUND;
extern const int TOO_LARGE_DISTRIBUTED_DEPTH;
extern const int DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED;
}
namespace ActionLocks
@ -634,12 +643,278 @@ StorageSnapshotPtr StorageDistributed::getStorageSnapshotForQuery(
namespace
{
QueryTreeNodePtr buildQueryTreeDistributedTableReplacedWithLocalTable(const SelectQueryInfo & query_info,
/// Visitor that collects the column source to columns mapping from the query and all subqueries
class CollectColumnSourceToColumnsVisitor : public InDepthQueryTreeVisitor<CollectColumnSourceToColumnsVisitor>
{
public:
struct Columns
{
NameSet column_names;
NamesAndTypes columns;
void addColumn(NameAndTypePair column)
{
if (column_names.contains(column.name))
return;
column_names.insert(column.name);
columns.push_back(std::move(column));
}
};
const std::unordered_map<QueryTreeNodePtr, Columns> & getColumnSourceToColumns() const
{
return column_source_to_columns;
}
void visitImpl(QueryTreeNodePtr & node)
{
auto * column_node = node->as<ColumnNode>();
if (!column_node)
return;
auto column_source = column_node->getColumnSourceOrNull();
if (!column_source)
return;
auto it = column_source_to_columns.find(column_source);
if (it == column_source_to_columns.end())
{
auto [insert_it, _] = column_source_to_columns.emplace(column_source, Columns());
it = insert_it;
}
it->second.addColumn(column_node->getColumn());
}
private:
std::unordered_map<QueryTreeNodePtr, Columns> column_source_to_columns;
};
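A sketch of what this visitor produces (hypothetical tables, not from this patch): for the query below, the mapping associates every column source with exactly the columns referenced from it, duplicates removed:

SELECT t1.a, t2.b, t1.a  -- t1.a is recorded only once
FROM t1 JOIN t2 ON t1.id = t2.id;
-- Expected mapping: t1 -> {a, id}, t2 -> {b, id}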
/** Visitor that rewrites IN functions and JOINs in the query and all subqueries according to the distributed_product_mode and
 * prefer_global_in_and_join settings.
 *
 * Additionally collects GLOBAL JOIN and GLOBAL IN query nodes.
 *
 * If distributed_product_mode = deny, the visitor throws an exception if the query contains multiple distributed tables.
 * If distributed_product_mode = local, the visitor collects a replacement map for distributed tables that must be replaced
 * with their underlying local tables.
 * If distributed_product_mode = global, or the prefer_global_in_and_join setting is true, the visitor rewrites JOINs and IN functions that
 * contain distributed tables into GLOBAL JOINs and GLOBAL IN functions.
 * If distributed_product_mode = allow, the visitor leaves the query unchanged even if it contains multiple distributed tables.
 */
class DistributedProductModeRewriteInJoinVisitor : public InDepthQueryTreeVisitorWithContext<DistributedProductModeRewriteInJoinVisitor>
{
public:
using Base = InDepthQueryTreeVisitorWithContext<DistributedProductModeRewriteInJoinVisitor>;
using Base::Base;
explicit DistributedProductModeRewriteInJoinVisitor(const ContextPtr & context_)
: Base(context_)
{}
struct InFunctionOrJoin
{
QueryTreeNodePtr query_node;
size_t subquery_depth = 0;
};
const std::unordered_map<const IQueryTreeNode *, QueryTreeNodePtr> & getReplacementMap() const
{
return replacement_map;
}
const std::vector<InFunctionOrJoin> & getGlobalInOrJoinNodes() const
{
return global_in_or_join_nodes;
}
static bool needChildVisit(QueryTreeNodePtr & parent, QueryTreeNodePtr & child)
{
auto * function_node = parent->as<FunctionNode>();
if (function_node && isNameOfGlobalInFunction(function_node->getFunctionName()))
return false;
auto * join_node = parent->as<JoinNode>();
if (join_node && join_node->getLocality() == JoinLocality::Global && join_node->getRightTableExpression() == child)
return false;
return true;
}
void visitImpl(QueryTreeNodePtr & node)
{
auto * function_node = node->as<FunctionNode>();
auto * join_node = node->as<JoinNode>();
if ((function_node && isNameOfGlobalInFunction(function_node->getFunctionName())) ||
(join_node && join_node->getLocality() == JoinLocality::Global))
{
InFunctionOrJoin in_function_or_join_entry;
in_function_or_join_entry.query_node = node;
in_function_or_join_entry.subquery_depth = getSubqueryDepth();
global_in_or_join_nodes.push_back(std::move(in_function_or_join_entry));
return;
}
if ((function_node && isNameOfLocalInFunction(function_node->getFunctionName())) ||
(join_node && join_node->getLocality() != JoinLocality::Global))
{
InFunctionOrJoin in_function_or_join_entry;
in_function_or_join_entry.query_node = node;
in_function_or_join_entry.subquery_depth = getSubqueryDepth();
in_function_or_join_stack.push_back(in_function_or_join_entry);
return;
}
if (node->getNodeType() == QueryTreeNodeType::TABLE)
tryRewriteTableNodeIfNeeded(node);
}
void leaveImpl(QueryTreeNodePtr & node)
{
if (!in_function_or_join_stack.empty() && node.get() == in_function_or_join_stack.back().query_node.get())
in_function_or_join_stack.pop_back();
}
private:
void tryRewriteTableNodeIfNeeded(const QueryTreeNodePtr & table_node)
{
const auto & table_node_typed = table_node->as<TableNode &>();
const auto * distributed_storage = typeid_cast<const StorageDistributed *>(table_node_typed.getStorage().get());
if (!distributed_storage)
return;
bool distributed_valid_for_rewrite = distributed_storage->getShardCount() >= 2;
if (!distributed_valid_for_rewrite)
return;
auto distributed_product_mode = getSettings().distributed_product_mode;
if (distributed_product_mode == DistributedProductMode::LOCAL)
{
StorageID remote_storage_id = StorageID{distributed_storage->getRemoteDatabaseName(),
distributed_storage->getRemoteTableName()};
auto resolved_remote_storage_id = getContext()->resolveStorageID(remote_storage_id);
const auto & distributed_storage_columns = table_node_typed.getStorageSnapshot()->metadata->getColumns();
auto storage = std::make_shared<StorageDummy>(resolved_remote_storage_id, distributed_storage_columns);
auto replacement_table_expression = std::make_shared<TableNode>(std::move(storage), getContext());
replacement_map.emplace(table_node.get(), std::move(replacement_table_expression));
}
else if ((distributed_product_mode == DistributedProductMode::GLOBAL || getSettings().prefer_global_in_and_join) &&
!in_function_or_join_stack.empty())
{
auto * in_or_join_node_to_modify = in_function_or_join_stack.back().query_node.get();
if (auto * in_function_to_modify = in_or_join_node_to_modify->as<FunctionNode>())
{
auto global_in_function_name = getGlobalInFunctionNameForLocalInFunctionName(in_function_to_modify->getFunctionName());
auto global_in_function_resolver = FunctionFactory::instance().get(global_in_function_name, getContext());
in_function_to_modify->resolveAsFunction(global_in_function_resolver->build(in_function_to_modify->getArgumentColumns()));
}
else if (auto * join_node_to_modify = in_or_join_node_to_modify->as<JoinNode>())
{
join_node_to_modify->setLocality(JoinLocality::Global);
}
global_in_or_join_nodes.push_back(in_function_or_join_stack.back());
}
else if (distributed_product_mode == DistributedProductMode::ALLOW)
{
return;
}
else if (distributed_product_mode == DistributedProductMode::DENY)
{
throw Exception(ErrorCodes::DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED,
"Double-distributed IN/JOIN subqueries is denied (distributed_product_mode = 'deny'). "
"You may rewrite query to use local tables "
"in subqueries, or use GLOBAL keyword, or set distributed_product_mode to suitable value.");
}
}
std::vector<InFunctionOrJoin> in_function_or_join_stack;
std::unordered_map<const IQueryTreeNode *, QueryTreeNodePtr> replacement_map;
std::vector<InFunctionOrJoin> global_in_or_join_nodes;
};
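The effect of this visitor can be sketched with a hypothetical distributed table distr that has at least two shards:

SELECT id FROM distr WHERE id IN (SELECT id FROM distr);
-- distributed_product_mode = 'deny'   -> throws DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED
-- distributed_product_mode = 'local'  -> the inner distr is replaced with its underlying local table
-- distributed_product_mode = 'global' -> rewritten to ... id GLOBAL IN (SELECT id FROM distr)
-- distributed_product_mode = 'allow'  -> the query is left unchanged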
/** Executes the subquery node and puts the result into a temporary table in the mutable context.
 * Returns a table node initialized with the temporary table storage.
 */
QueryTreeNodePtr executeSubqueryNode(const QueryTreeNodePtr & subquery_node,
ContextMutablePtr & mutable_context,
size_t subquery_depth)
{
auto subquery_hash = subquery_node->getTreeHash();
String temporary_table_name = fmt::format("_data_{}_{}", subquery_hash.first, subquery_hash.second);
const auto & external_tables = mutable_context->getExternalTables();
auto external_table_it = external_tables.find(temporary_table_name);
if (external_table_it != external_tables.end())
{
auto temporary_table_expression_node = std::make_shared<TableNode>(external_table_it->second, mutable_context);
temporary_table_expression_node->setTemporaryTableName(temporary_table_name);
return temporary_table_expression_node;
}
auto subquery_options = SelectQueryOptions(QueryProcessingStage::Complete, subquery_depth, true /*is_subquery*/);
auto context_copy = Context::createCopy(mutable_context);
updateContextForSubqueryExecution(context_copy);
InterpreterSelectQueryAnalyzer interpreter(subquery_node, context_copy, subquery_options);
auto & query_plan = interpreter.getQueryPlan();
auto sample_block_with_unique_names = query_plan.getCurrentDataStream().header;
makeUniqueColumnNamesInBlock(sample_block_with_unique_names);
if (!blocksHaveEqualStructure(sample_block_with_unique_names, query_plan.getCurrentDataStream().header))
{
auto actions_dag = ActionsDAG::makeConvertingActions(
query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName(),
sample_block_with_unique_names.getColumnsWithTypeAndName(),
ActionsDAG::MatchColumnsMode::Position);
auto converting_step = std::make_unique<ExpressionStep>(query_plan.getCurrentDataStream(), std::move(actions_dag));
query_plan.addStep(std::move(converting_step));
}
Block sample = interpreter.getSampleBlock();
NamesAndTypesList columns = sample.getNamesAndTypesList();
auto external_storage_holder = TemporaryTableHolder(
mutable_context,
ColumnsDescription{columns},
ConstraintsDescription{},
nullptr /*query*/,
true /*create_for_global_subquery*/);
StoragePtr external_storage = external_storage_holder.getTable();
auto temporary_table_expression_node = std::make_shared<TableNode>(external_storage, mutable_context);
temporary_table_expression_node->setTemporaryTableName(temporary_table_name);
auto table_out = external_storage->write({}, external_storage->getInMemoryMetadataPtr(), mutable_context);
auto io = interpreter.execute();
io.pipeline.complete(std::move(table_out));
CompletedPipelineExecutor executor(io.pipeline);
executor.execute();
mutable_context->addExternalTable(temporary_table_name, std::move(external_storage_holder));
return temporary_table_expression_node;
}
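A note on the behavior above: the temporary table name is derived from the subquery tree hash (_data_<hash1>_<hash2>), so identical GLOBAL IN subqueries within one query are materialized only once. Illustrative query (hypothetical tables):

SELECT * FROM distr
WHERE id GLOBAL IN (SELECT id FROM local_t)
  AND val GLOBAL IN (SELECT id FROM local_t);
-- both predicates hit the external-tables cache and share one temporary table;
-- the subquery is executed a single time and its result is shipped to the shards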
QueryTreeNodePtr buildQueryTreeDistributed(SelectQueryInfo & query_info,
const StorageSnapshotPtr & distributed_storage_snapshot,
const StorageID & remote_storage_id,
const ASTPtr & remote_table_function)
{
const auto & query_context = query_info.planner_context->getQueryContext();
auto & planner_context = query_info.planner_context;
const auto & query_context = planner_context->getQueryContext();
std::optional<TableExpressionModifiers> table_expression_modifiers;
if (auto * query_info_table_node = query_info.table_expression->as<TableNode>())
table_expression_modifiers = query_info_table_node->getTableExpressionModifiers();
else if (auto * query_info_table_function_node = query_info.table_expression->as<TableFunctionNode>())
table_expression_modifiers = query_info_table_function_node->getTableExpressionModifiers();
QueryTreeNodePtr replacement_table_expression;
@ -651,6 +926,9 @@ QueryTreeNodePtr buildQueryTreeDistributedTableReplacedWithLocalTable(const Sele
auto table_function_node = std::make_shared<TableFunctionNode>(remote_table_function_node.getFunctionName());
table_function_node->getArgumentsNode() = remote_table_function_node.getArgumentsNode();
if (table_expression_modifiers)
table_function_node->setTableExpressionModifiers(*table_expression_modifiers);
QueryAnalysisPass query_analysis_pass;
query_analysis_pass.run(table_function_node, query_context);
@ -660,13 +938,89 @@ QueryTreeNodePtr buildQueryTreeDistributedTableReplacedWithLocalTable(const Sele
{
auto resolved_remote_storage_id = query_context->resolveStorageID(remote_storage_id);
auto storage = std::make_shared<StorageDummy>(resolved_remote_storage_id, distributed_storage_snapshot->metadata->getColumns());
auto table_node = std::make_shared<TableNode>(std::move(storage), query_context);
replacement_table_expression = std::make_shared<TableNode>(std::move(storage), query_context);
if (table_expression_modifiers)
table_node->setTableExpressionModifiers(*table_expression_modifiers);
replacement_table_expression = std::move(table_node);
}
replacement_table_expression->setAlias(query_info.table_expression->getAlias());
return query_info.query_tree->cloneAndReplace(query_info.table_expression, std::move(replacement_table_expression));
auto query_tree_to_modify = query_info.query_tree->cloneAndReplace(query_info.table_expression, std::move(replacement_table_expression));
CollectColumnSourceToColumnsVisitor collect_column_source_to_columns_visitor;
collect_column_source_to_columns_visitor.visit(query_tree_to_modify);
const auto & column_source_to_columns = collect_column_source_to_columns_visitor.getColumnSourceToColumns();
DistributedProductModeRewriteInJoinVisitor visitor(query_info.planner_context->getQueryContext());
visitor.visit(query_tree_to_modify);
auto replacement_map = visitor.getReplacementMap();
const auto & global_in_or_join_nodes = visitor.getGlobalInOrJoinNodes();
for (const auto & global_in_or_join_node : global_in_or_join_nodes)
{
if (auto * join_node = global_in_or_join_node.query_node->as<JoinNode>())
{
auto join_right_table_expression = join_node->getRightTableExpression();
auto join_right_table_expression_node_type = join_right_table_expression->getNodeType();
QueryTreeNodePtr subquery_node;
if (join_right_table_expression_node_type == QueryTreeNodeType::QUERY ||
join_right_table_expression_node_type == QueryTreeNodeType::UNION)
{
subquery_node = join_right_table_expression;
}
else if (join_right_table_expression_node_type == QueryTreeNodeType::TABLE ||
join_right_table_expression_node_type == QueryTreeNodeType::TABLE_FUNCTION)
{
const auto & columns = column_source_to_columns.at(join_right_table_expression).columns;
subquery_node = buildSubqueryToReadColumnsFromTableExpression(columns,
join_right_table_expression,
planner_context->getQueryContext());
}
else
{
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Expected JOIN right table expression to be table, table function, query or union node. Actual {}",
join_right_table_expression->formatASTForErrorMessage());
}
auto temporary_table_expression_node = executeSubqueryNode(subquery_node,
planner_context->getMutableQueryContext(),
global_in_or_join_node.subquery_depth);
temporary_table_expression_node->setAlias(join_right_table_expression->getAlias());
replacement_map.emplace(join_right_table_expression.get(), std::move(temporary_table_expression_node));
continue;
}
else if (auto * in_function_node = global_in_or_join_node.query_node->as<FunctionNode>())
{
auto & in_function_subquery_node = in_function_node->getArguments().getNodes().at(1);
auto in_function_node_type = in_function_subquery_node->getNodeType();
if (in_function_node_type != QueryTreeNodeType::QUERY && in_function_node_type != QueryTreeNodeType::UNION)
continue;
auto temporary_table_expression_node = executeSubqueryNode(in_function_subquery_node,
planner_context->getMutableQueryContext(),
global_in_or_join_node.subquery_depth);
in_function_subquery_node = std::move(temporary_table_expression_node);
}
else
{
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Expected global IN or JOIN query node. Actual {}",
global_in_or_join_node.query_node->formatASTForErrorMessage());
}
}
if (!replacement_map.empty())
query_tree_to_modify = query_tree_to_modify->cloneAndReplace(replacement_map);
return query_tree_to_modify;
}
}
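Putting the pieces together, a GLOBAL JOIN whose right side is a plain table is first wrapped into a subquery that reads only the columns collected by CollectColumnSourceToColumnsVisitor, then executed into a temporary table (hypothetical tables):

SELECT d.id, t.name
FROM distr AS d
GLOBAL JOIN local_t AS t ON d.id = t.id;
-- local_t is wrapped into (SELECT id, name FROM local_t), executed once,
-- and the right table expression of the JOIN is replaced with the temporary table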
@ -694,17 +1048,13 @@ void StorageDistributed::read(
if (!remote_table_function_ptr)
remote_storage_id = StorageID{remote_database, remote_table};
auto query_tree_with_replaced_distributed_table = buildQueryTreeDistributedTableReplacedWithLocalTable(query_info,
auto query_tree_distributed = buildQueryTreeDistributed(query_info,
storage_snapshot,
remote_storage_id,
remote_table_function_ptr);
query_ast = queryNodeToSelectQuery(query_tree_with_replaced_distributed_table);
Planner planner(query_tree_with_replaced_distributed_table, SelectQueryOptions(processed_stage).analyze());
planner.buildQueryPlanIfNeeded();
header = planner.getQueryPlan().getCurrentDataStream().header;
query_ast = queryNodeToSelectQuery(query_tree_distributed);
header = InterpreterSelectQueryAnalyzer::getSampleBlock(query_ast, local_context, SelectQueryOptions(processed_stage).analyze());
}
else
{

View File

@ -18,6 +18,7 @@
#include <Interpreters/TransactionLog.h>
#include <Interpreters/ClusterProxy/executeQuery.h>
#include <Interpreters/ClusterProxy/SelectStreamFactory.h>
#include <Interpreters/InterpreterSelectQueryAnalyzer.h>
#include <IO/copyData.h>
#include <Parsers/ASTCheckQuery.h>
#include <Parsers/ASTFunction.h>
@ -223,8 +224,12 @@ void StorageMergeTree::read(
auto cluster = local_context->getCluster(local_context->getSettingsRef().cluster_for_parallel_replicas);
Block header =
InterpreterSelectQuery(modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock();
Block header;
if (local_context->getSettingsRef().allow_experimental_analyzer)
header = InterpreterSelectQueryAnalyzer::getSampleBlock(modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze());
else
header = InterpreterSelectQuery(modified_query_ast, local_context, SelectQueryOptions(processed_stage).analyze()).getSampleBlock();
ClusterProxy::SelectStreamFactory select_stream_factory =
ClusterProxy::SelectStreamFactory(

View File

@ -17,6 +17,7 @@
#include <Interpreters/InterpreterSelectQuery.h>
#include <Interpreters/AddDefaultDatabaseVisitor.h>
#include <Interpreters/TranslateQualifiedNamesVisitor.h>
#include <Interpreters/InterpreterSelectQueryAnalyzer.h>
#include <Processors/Transforms/AddingDefaultsTransform.h>
#include <QueryPipeline/narrowPipe.h>
#include <QueryPipeline/Pipe.h>
@ -102,7 +103,20 @@ Pipe StorageS3Cluster::read(
auto extension = getTaskIteratorExtension(query_info.query, context);
/// Calculate the header. This is significant, because some columns could be thrown away in some cases like query with count(*)
Block sample_block;
ASTPtr query_to_send = query_info.query;
if (context->getSettingsRef().allow_experimental_analyzer)
{
sample_block = InterpreterSelectQueryAnalyzer::getSampleBlock(query_info.query, context, SelectQueryOptions(processed_stage));
}
else
{
auto interpreter = InterpreterSelectQuery(query_info.query, context, SelectQueryOptions(processed_stage).analyze());
sample_block = interpreter.getSampleBlock();
query_to_send = interpreter.getQueryInfo().query->clone();
}
const Scalars & scalars = context->hasQueryContext() ? context->getQueryContext()->getScalars() : Scalars{};
@ -110,7 +124,6 @@ Pipe StorageS3Cluster::read(
const bool add_agg_info = processed_stage == QueryProcessingStage::WithMergeableState;
ASTPtr query_to_send = interpreter.getQueryInfo().query->clone();
if (!structure_argument_was_provided)
addColumnsStructureToQueryWithClusterEngine(
query_to_send, StorageDictionary::generateNamesAndTypesDescription(storage_snapshot->metadata->getColumns().getAll()), 5, getName());
@ -136,7 +149,7 @@ Pipe StorageS3Cluster::read(
shard_info.pool,
std::vector<IConnectionPool::Entry>{try_result},
queryToString(query_to_send),
interpreter.getSampleBlock(),
sample_block,
context,
/*throttler=*/nullptr,
scalars,

View File

@ -1,6 +1,7 @@
#include <Interpreters/InterpreterSelectQuery.h>
#include <Interpreters/InterpreterSelectWithUnionQuery.h>
#include <Interpreters/InterpreterSelectQueryAnalyzer.h>
#include <Interpreters/NormalizeSelectWithUnionQueryVisitor.h>
#include <Interpreters/Context.h>
#include <DataTypes/DataTypeLowCardinality.h>
@ -117,6 +118,10 @@ StorageView::StorageView(
SelectQueryDescription description;
description.inner_query = query.select->ptr();
NormalizeSelectWithUnionQueryVisitor::Data data{SetOperationMode::Unspecified};
NormalizeSelectWithUnionQueryVisitor{data}.visit(description.inner_query);
is_parameterized_view = query.isParameterizedView();
parameter_types = analyzeReceiveQueryParamsWithType(description.inner_query);
storage_metadata.setSelectQuery(description);

View File

@ -78,6 +78,7 @@ namespace ErrorCodes
extern const int SUPPORT_IS_DISABLED;
extern const int TABLE_WAS_NOT_DROPPED;
extern const int NOT_IMPLEMENTED;
extern const int UNSUPPORTED_METHOD;
}
namespace
@ -1158,6 +1159,10 @@ StorageWindowView::StorageWindowView(
, fire_signal_timeout_s(context_->getSettingsRef().wait_for_window_view_fire_signal_timeout.totalSeconds())
, clean_interval_usec(context_->getSettingsRef().window_view_clean_interval.totalMicroseconds())
{
if (context_->getSettingsRef().allow_experimental_analyzer)
throw Exception(ErrorCodes::UNSUPPORTED_METHOD,
"Experimental WINDOW VIEW feature is not supported with new infrastructure for query analysis (the setting 'allow_experimental_analyzer')");
if (!query.select)
throw Exception(ErrorCodes::INCORRECT_QUERY, "SELECT query is not specified for {}", getName());
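An illustrative reproduction of the new guard (hypothetical DDL, modeled on the window-view tests below):

SET allow_experimental_analyzer = 1;
SET allow_experimental_window_view = 1;
CREATE WINDOW VIEW wv TO dst AS
    SELECT count(a) AS count FROM mt
    GROUP BY tumble(timestamp, INTERVAL '5' SECOND) AS wid;
-- now fails immediately with UNSUPPORTED_METHOD instead of misbehaving later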

View File

@ -53,7 +53,7 @@ std::vector<size_t> TableFunctionMerge::skipAnalysisForArguments(const QueryTree
result.push_back(i);
}
return {0};
return result;
}
void TableFunctionMerge::parseArguments(const ASTPtr & ast_function, ContextPtr context)

View File

@ -9,6 +9,8 @@
#include <Storages/StorageExternalDistributed.h>
#include <Storages/NamedCollectionsHelpers.h>
#include <TableFunctions/TableFunctionFactory.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/TableFunctionNode.h>
#include <Interpreters/parseColumnsListForTableFunction.h>
#include <Interpreters/Context.h>
#include <Formats/FormatFactory.h>
@ -26,6 +28,24 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}
std::vector<size_t> TableFunctionURL::skipAnalysisForArguments(const QueryTreeNodePtr & query_node_table_function, ContextPtr) const
{
auto & table_function_node = query_node_table_function->as<TableFunctionNode &>();
auto & table_function_arguments_nodes = table_function_node.getArguments().getNodes();
size_t table_function_arguments_size = table_function_arguments_nodes.size();
std::vector<size_t> result;
for (size_t i = 0; i < table_function_arguments_size; ++i)
{
auto * function_node = table_function_arguments_nodes[i]->as<FunctionNode>();
if (function_node && function_node->getFunctionName() == "headers")
result.push_back(i);
}
return result;
}
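For context, the headers(...) argument whose analysis is skipped here appears in queries like this (illustrative values):

SELECT * FROM url(
    'http://example.com/data.csv',
    CSV,
    'c1 String, c2 UInt32',
    headers('X-My-Header' = 'value')
);
-- headers(...) is not a regular expression to be resolved, so it is excluded
-- from argument analysis by skipAnalysisForArguments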
void TableFunctionURL::parseArguments(const ASTPtr & ast, ContextPtr context)
{
const auto & ast_function = assert_cast<const ASTFunction *>(ast.get());

View File

@ -12,7 +12,7 @@ class Context;
/* url(source, format[, structure, compression]) - creates a temporary storage from url.
*/
class TableFunctionURL : public ITableFunctionFileLike
class TableFunctionURL final : public ITableFunctionFileLike
{
public:
static constexpr auto name = "url";
@ -23,10 +23,11 @@ public:
ColumnsDescription getActualTableStructure(ContextPtr context) const override;
protected:
private:
std::vector<size_t> skipAnalysisForArguments(const QueryTreeNodePtr & query_node_table_function, ContextPtr context) const override;
void parseArguments(const ASTPtr & ast, ContextPtr context) override;
private:
StoragePtr getStorage(
const String & source, const String & format_, const ColumnsDescription & columns, ContextPtr global_context,
const std::string & table_name, const String & compression_method_) const override;

View File

@ -48,10 +48,10 @@
{
"i0": "0",
"u0": "0",
"ip": "9223372036854775807",
"in": "-9223372036854775808",
"up": "18446744073709551615",
"arr": ["0"],
"ip": "0",
"in": "0",
"up": "0",
"arr": [],
"tuple": ["0","0"]
},
@ -119,7 +119,7 @@
["0", "0", "9223372036854775807", "-9223372036854775808", "18446744073709551615", ["0"], ["0","0"]]
],
"totals": ["0", "0", "9223372036854775807", "-9223372036854775808", "18446744073709551615", ["0"], ["0","0"]],
"totals": ["0", "0", "0", "0", "0", [], ["0","0"]],
"extremes":
{
@ -180,10 +180,10 @@
{
"i0": 0,
"u0": 0,
"ip": 9223372036854775807,
"in": -9223372036854775808,
"up": 18446744073709551615,
"arr": [0],
"ip": 0,
"in": 0,
"up": 0,
"arr": [],
"tuple": [0,0]
},
@ -251,7 +251,7 @@
[0, 0, 9223372036854775807, -9223372036854775808, 18446744073709551615, [0], [0,0]]
],
"totals": [0, 0, 9223372036854775807, -9223372036854775808, 18446744073709551615, [0], [0,0]],
"totals": [0, 0, 0, 0, 0, [], [0,0]],
"extremes":
{

View File

@ -2,6 +2,7 @@
SET output_format_write_statistics = 0;
SET extremes = 1;
SET allow_experimental_analyzer = 1;
SET output_format_json_quote_64bit_integers = 1;
SELECT toInt64(0) as i0, toUInt64(0) as u0, toInt64(9223372036854775807) as ip, toInt64(-9223372036854775808) as in, toUInt64(18446744073709551615) as up, [toInt64(0)] as arr, (toUInt64(0), toUInt64(0)) as tuple GROUP BY i0, u0, ip, in, up, arr, tuple WITH TOTALS FORMAT JSON;

View File

@ -22,13 +22,13 @@
13 13
14 14
\N 8
0 0
0 2
0 4
0 6
0 8
1 1
3 3
5 5
7 7
9 9
\N 0
\N 2
\N 4
\N 6
\N 8

View File

@ -1,3 +1,4 @@
SET allow_experimental_analyzer = 1;
SET join_use_nulls = 0;
SET any_join_distinct_right_table_keys = 1;

View File

@ -16,24 +16,24 @@
┌─x──────┬─name─┐
│ system │ one │
└────────┴──────┘
┌─database─┬─t.name─┐
┌─database─┬─name─┐
│ system │ one │
└──────────┴────────┘
└──────────┴──────┘
┌─db.x───┬─name─┐
│ system │ one │
└────────┴──────┘
┌─db.name─┬─t.name─┐
┌─db.name─┬─name─┐
│ system │ one │
└─────────┴────────┘
┌─db.name─┬─t.name─┐
└─────────┴──────┘
┌─db.name─┬─name─┐
│ system │ one │
└─────────┴────────┘
┌─t.database─┬─t.name─┐
└─────────┴──────┘
┌─database─┬─name─┐
│ system │ one │
└────────────┴────────┘
┌─database─┬─t.name─┐
└──────────┴──────┘
┌─database─┬─name─┐
│ system │ one │
└──────────┴────────┘
└──────────┴──────┘
2
2
2

View File

@ -1,3 +1,5 @@
-- Tags: no-parallel
SET allow_experimental_analyzer = 1;
DROP TABLE IF EXISTS one;

View File

@ -10,13 +10,13 @@ l \N \N String Nullable(String)
\N \N
\N \N
using
l \N String Nullable(String)
\N String Nullable(String)
l \N String Nullable(String)
l \N Nullable(String) Nullable(String)
l \N Nullable(String) Nullable(String)
\N \N Nullable(String) Nullable(String)
\N \N Nullable(String) Nullable(String)
l \N Nullable(String) Nullable(String)
l \N Nullable(String) Nullable(String)
\N \N Nullable(String) Nullable(String)
l \N String Nullable(String)
\N String Nullable(String)
l \N String Nullable(String)
\N \N Nullable(String) Nullable(String)
\N \N
\N \N
@ -32,13 +32,13 @@ l \N \N Nullable(String) Nullable(String)
\N \N
\N \N
using + join_use_nulls
l \N String Nullable(String)
l \N Nullable(String) Nullable(String)
\N \N Nullable(String) Nullable(String)
\N \N Nullable(String) Nullable(String)
l \N String Nullable(String)
l \N Nullable(String) Nullable(String)
\N \N Nullable(String) Nullable(String)
\N \N Nullable(String) Nullable(String)
r \N Nullable(String) Nullable(String)
r \N Nullable(String) Nullable(String)
l \N Nullable(String) Nullable(String)
l \N Nullable(String) Nullable(String)
r \N Nullable(String) Nullable(String)
r \N Nullable(String) Nullable(String)
\N \N
\N \N

View File

@ -1,4 +1,5 @@
SET any_join_distinct_right_table_keys = 1;
SET allow_experimental_analyzer = 1;
DROP TABLE IF EXISTS t1_00848;
DROP TABLE IF EXISTS t2_00848;
@ -53,16 +54,16 @@ SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 LEFT JOIN t3_00848 t3 ON t1
SELECT 'using + join_use_nulls';
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 ANY LEFT JOIN t3_00848 t3 USING(id) ORDER BY t1.id, t3.id;
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 ANY FULL JOIN t3_00848 t3 USING(id) ORDER BY t1.id, t3.id;
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2_00848 t2 ANY FULL JOIN t3_00848 t3 USING(id) ORDER BY t2.id, t3.id;
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 ANY LEFT JOIN t3_00848 t3 USING(id) ORDER BY id;
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 ANY FULL JOIN t3_00848 t3 USING(id) ORDER BY id;
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2_00848 t2 ANY FULL JOIN t3_00848 t3 USING(id) ORDER BY id;
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 LEFT JOIN t3_00848 t3 USING(id) ORDER BY t1.id, t3.id;
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 FULL JOIN t3_00848 t3 USING(id) ORDER BY t1.id, t3.id;
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2_00848 t2 FULL JOIN t3_00848 t3 USING(id) ORDER BY t2.id, t3.id;
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 LEFT JOIN t3_00848 t3 USING(id) ORDER BY id;
SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 FULL JOIN t3_00848 t3 USING(id) ORDER BY id;
SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2_00848 t2 FULL JOIN t3_00848 t3 USING(id) ORDER BY id;
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 ANY LEFT JOIN t3_00848 t3 USING(id) ORDER BY t1.id, t3.id;
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 LEFT JOIN t3_00848 t3 USING(id) ORDER BY t1.id, t3.id;
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 ANY LEFT JOIN t3_00848 t3 USING(id) ORDER BY id;
SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 LEFT JOIN t3_00848 t3 USING(id) ORDER BY id;
DROP TABLE t1_00848;
DROP TABLE t2_00848;

View File

@ -15,5 +15,5 @@ bar bar 1 2 String Nullable(String)
\N 0 1 String Nullable(String)
foo 2 0 String
bar 1 2 String
test 0 1 String
0 1 String
0 1 String

View File

@ -27,7 +27,7 @@ SELECT s1.other, s2.other, count_a, count_b, toTypeName(s1.other), toTypeName(s2
( SELECT other, count() AS count_a FROM table_a GROUP BY other ) s1
ALL FULL JOIN
( SELECT other, count() AS count_b FROM table_b GROUP BY other ) s2
USING other
ON s1.other = s2.other
ORDER BY s2.other DESC, count_a, s1.other;
SELECT s1.something, s2.something, count_a, count_b, toTypeName(s1.something), toTypeName(s2.something) FROM
@ -41,7 +41,7 @@ SELECT s1.something, s2.something, count_a, count_b, toTypeName(s1.something), t
( SELECT something, count() AS count_a FROM table_a GROUP BY something ) s1
ALL RIGHT JOIN
( SELECT something, count() AS count_b FROM table_b GROUP BY something ) s2
USING (something)
ON s1.something = s2.something
ORDER BY count_a DESC, s1.something, s2.something;
SET joined_subquery_requires_alias = 0;
@ -50,7 +50,7 @@ SELECT something, count_a, count_b, toTypeName(something) FROM
( SELECT something, count() AS count_a FROM table_a GROUP BY something ) as s1
ALL FULL JOIN
( SELECT something, count() AS count_b FROM table_b GROUP BY something ) as s2
USING (something)
ON s1.something = s2.something
ORDER BY count_a DESC, something DESC;
DROP TABLE table_a;

View File

@ -1,3 +1,4 @@
set allow_experimental_analyzer = 1;
set distributed_product_mode = 'local';
drop table if exists shard1;
@ -21,7 +22,7 @@ where distr1.id in
from distr1
join distr2 on distr1.id = distr2.id
where distr1.id > 0
); -- { serverError 288 }
);
select distinct(d0.id) from distr1 d0
where d0.id in
@ -32,15 +33,14 @@ where d0.id in
where d1.id > 0
);
-- TODO
--select distinct(distr1.id) from distr1
--where distr1.id in
--(
-- select distr1.id
-- from distr1 as d1
-- join distr2 as d2 on distr1.id = distr2.id
-- where distr1.id > 0
--);
select distinct(distr1.id) from distr1
where distr1.id in
(
select distr1.id
from distr1 as d1
join distr2 as d2 on distr1.id = distr2.id
where distr1.id > 0
);
drop table shard1;
drop table shard2;

View File

@ -1 +1,3 @@
SELECT 1 AS a, a + a AS b, b + b AS c, c + c AS d, d + d AS e, e + e AS f, f + f AS g, g + g AS h, h + h AS i, i + i AS j, j + j AS k, k + k AS l, l + l AS m, m + m AS n, n + n AS o, o + o AS p, p + p AS q, q + q AS r, r + r AS s, s + s AS t, t + t AS u, u + u AS v, v + v AS w, w + w AS x, x + x AS y, y + y AS z; -- { serverError 168 }
SET allow_experimental_analyzer = 1;
SELECT 1 AS a, a + a AS b, b + b AS c, c + c AS d, d + d AS e, e + e AS f, f + f AS g, g + g AS h, h + h AS i, i + i AS j, j + j AS k, k + k AS l, l + l AS m, m + m AS n, n + n AS o, o + o AS p, p + p AS q, q + q AS r, r + r AS s, s + s AS t, t + t AS u, u + u AS v, v + v AS w, w + w AS x, x + x AS y, y + y AS z; -- { serverError 36 }

View File

@ -3,7 +3,10 @@
SET max_memory_usage = 32000000;
SET join_on_disk_max_files_to_merge = 4;
SELECT number * 200000 as n, j FROM numbers(5) nums
SELECT n, j FROM
(
SELECT number * 200000 as n FROM numbers(5)
) nums
ANY LEFT JOIN (
SELECT number * 2 AS n, number AS j
FROM numbers(1000000)
@ -13,14 +16,20 @@ USING n; -- { serverError 241 }
SET join_algorithm = 'partial_merge';
SET default_max_bytes_in_join = 0;
SELECT number * 200000 as n, j FROM numbers(5) nums
SELECT n, j FROM
(
SELECT number * 200000 as n FROM numbers(5)
) nums
ANY LEFT JOIN (
SELECT number * 2 AS n, number AS j
FROM numbers(1000000)
) js2
USING n; -- { serverError 12 }
SELECT number * 200000 as n, j FROM numbers(5) nums
SELECT n, j FROM
(
SELECT number * 200000 as n FROM numbers(5)
) nums
ANY LEFT JOIN (
SELECT number * 2 AS n, number AS j
FROM numbers(1000000)
@ -28,7 +37,10 @@ ANY LEFT JOIN (
USING n
SETTINGS max_bytes_in_join = 30000000; -- { serverError 241 }
SELECT number * 200000 as n, j FROM numbers(5) nums
SELECT n, j FROM
(
SELECT number * 200000 as n FROM numbers(5)
) nums
ANY LEFT JOIN (
SELECT number * 2 AS n, number AS j
FROM numbers(1000000)
@ -39,7 +51,10 @@ SETTINGS max_bytes_in_join = 10000000;
SET partial_merge_join_optimizations = 1;
SELECT number * 200000 as n, j FROM numbers(5) nums
SELECT n, j FROM
(
SELECT number * 200000 as n FROM numbers(5)
) nums
LEFT JOIN (
SELECT number * 2 AS n, number AS j
FROM numbers(1000000)
@ -50,7 +65,10 @@ SETTINGS max_rows_in_join = 100000;
SET default_max_bytes_in_join = 10000000;
SELECT number * 200000 as n, j FROM numbers(5) nums
SELECT n, j FROM
(
SELECT number * 200000 as n FROM numbers(5)
) nums
JOIN (
SELECT number * 2 AS n, number AS j
FROM numbers(1000000)

View File

@ -1,12 +1,15 @@
0 0
0 0
0 0
0
0
0
0
┌─one.dummy─┬─A.dummy─┬─B.dummy─┐
0
0
┌─system.one.dummy─┬─A.dummy─┬─B.dummy─┐
│ 0 │ 0 │ 0 │
└───────────┴─────────┴─────────┘
└──────────────────┴─────────┴─────────┘
┌─A.dummy─┬─one.dummy─┬─two.dummy─┐
│ 0 │ 0 │ 0 │
└─────────┴───────────┴───────────┘

View File

@ -1,4 +1,6 @@
select * from system.one cross join system.one; -- { serverError 352 }
SET allow_experimental_analyzer = 1;
select * from system.one cross join system.one;
select * from system.one cross join system.one r;
select * from system.one l cross join system.one;
select * from system.one left join system.one using dummy;
@ -8,10 +10,10 @@ USE system;
SELECT dummy FROM one AS A JOIN one ON A.dummy = one.dummy;
SELECT dummy FROM one JOIN one AS A ON A.dummy = one.dummy;
SELECT dummy FROM one l JOIN one r ON dummy = r.dummy; -- { serverError 352 }
SELECT dummy FROM one l JOIN one r ON l.dummy = dummy; -- { serverError 352 }
SELECT dummy FROM one l JOIN one r ON one.dummy = r.dummy; -- { serverError 352 }
SELECT dummy FROM one l JOIN one r ON l.dummy = one.dummy; -- { serverError 352 }
SELECT dummy FROM one l JOIN one r ON dummy = r.dummy;
SELECT dummy FROM one l JOIN one r ON l.dummy = dummy; -- { serverError 403 }
SELECT dummy FROM one l JOIN one r ON one.dummy = r.dummy;
SELECT dummy FROM one l JOIN one r ON l.dummy = one.dummy; -- { serverError 403 }
SELECT * from one
JOIN one A ON one.dummy = A.dummy

View File

@ -1,5 +1,5 @@
122
Table dictdb_01041_01040.dict_invalidate doesn\'t exist
1
133

View File

@ -53,7 +53,7 @@ function check_exception_detected()
export -f check_exception_detected;
timeout 30 bash -c check_exception_detected 2> /dev/null
$CLICKHOUSE_CLIENT --query "SELECT last_exception FROM system.dictionaries WHERE database = 'dictdb_01041_01040' AND name = 'invalidate'" 2>&1 | grep -Eo "Table dictdb_01041_01040.dict_invalidate .* exist"
$CLICKHOUSE_CLIENT --query "SELECT last_exception FROM system.dictionaries WHERE database = 'dictdb_01041_01040' AND name = 'invalidate'" 2>&1 | grep -Eo "dictdb_01041_01040.dict_invalidate.*UNKNOWN_TABLE" | wc -l
$CLICKHOUSE_CLIENT --query "
CREATE TABLE dictdb_01041_01040.dict_invalidate

View File

@ -1,5 +1,6 @@
-- Tags: no-parallel
SET allow_experimental_analyzer = 0;
SET allow_experimental_window_view = 1;
DROP DATABASE IF EXISTS test_01047;
set allow_deprecated_database_ordinary=1;

View File

@ -1,5 +1,6 @@
-- Tags: no-parallel
SET allow_experimental_analyzer = 0;
SET allow_experimental_window_view = 1;
DROP DATABASE IF EXISTS test_01048;
set allow_deprecated_database_ordinary=1;

View File

@ -1,3 +1,4 @@
SET allow_experimental_analyzer = 0;
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;

View File

@ -1,3 +1,4 @@
SET allow_experimental_analyzer = 0;
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -18,11 +22,11 @@ INSERT INTO mt VALUES (1);
EOF
for _ in {1..100}; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "1" && echo 'OK' && break
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "1" && echo 'OK' && break
sleep .5
done
$CLICKHOUSE_CLIENT --query="SELECT count FROM dst"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count FROM dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -18,11 +22,11 @@ INSERT INTO mt VALUES (1);
EOF
for _ in {1..100}; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "1" && echo 'OK' && break
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "1" && echo 'OK' && break
sleep .5
done
$CLICKHOUSE_CLIENT --query="SELECT count FROM dst"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count FROM dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -18,11 +22,11 @@ INSERT INTO mt VALUES (1, now('US/Samoa') + 1);
EOF
for _ in {1..100}; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "1" && echo 'OK' && break
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "1" && echo 'OK' && break
sleep .5
done
$CLICKHOUSE_CLIENT --query="SELECT count FROM dst"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count FROM dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -18,11 +22,11 @@ INSERT INTO mt VALUES (1, now('US/Samoa') + 1);
EOF
for _ in {1..100}; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "1" && echo 'OK' && break
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "1" && echo 'OK' && break
sleep .5
done
$CLICKHOUSE_CLIENT --query="SELECT count FROM dst;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv;"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt;"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count FROM dst;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst;"

View File

@ -50,12 +50,12 @@ FROM
WHERE (co != 0) AND (co2 != 2)
1 0 3 1
1 0 0 1
SELECT alias AS name
SELECT name
FROM
(
SELECT name AS alias
SELECT name
FROM system.settings
WHERE alias = \'enable_optimize_predicate_expression\'
WHERE name = \'enable_optimize_predicate_expression\'
)
ANY INNER JOIN
(

View File

@ -12,8 +12,8 @@ EXPLAIN SYNTAX SELECT co,co2,co3,num FROM ( SELECT co,co2,co3,count() AS num FRO
SELECT co,co2,co3,num FROM ( SELECT co,co2,co3,count() AS num FROM ( SELECT 1 AS co,2 AS co2 ,3 AS co3 ) GROUP BY cube (co,co2,co3) ) WHERE co!=0 AND co2 !=2;
-- https://github.com/ClickHouse/ClickHouse/issues/6734
EXPLAIN SYNTAX SELECT alias AS name FROM ( SELECT name AS alias FROM system.settings ) ANY INNER JOIN ( SELECT name FROM system.settings ) USING (name) WHERE name = 'enable_optimize_predicate_expression';
SELECT alias AS name FROM ( SELECT name AS alias FROM system.settings ) ANY INNER JOIN ( SELECT name FROM system.settings ) USING (name) WHERE name = 'enable_optimize_predicate_expression';
EXPLAIN SYNTAX SELECT name FROM ( SELECT name FROM system.settings ) ANY INNER JOIN ( SELECT name FROM system.settings ) USING (name) WHERE name = 'enable_optimize_predicate_expression';
SELECT name FROM ( SELECT name FROM system.settings ) ANY INNER JOIN ( SELECT name FROM system.settings ) USING (name) WHERE name = 'enable_optimize_predicate_expression';
-- https://github.com/ClickHouse/ClickHouse/issues/6767
DROP TABLE IF EXISTS t1;

View File

@ -20,6 +20,8 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.expect(prompt)
client1.send("SET allow_experimental_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
client1.send("SET window_view_heartbeat_interval = 1")

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -24,10 +28,10 @@ INSERT INTO mt VALUES (1, '1990/01/01 12:00:30');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -24,10 +28,10 @@ INSERT INTO mt VALUES (1, '1990/01/01 12:00:30');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "6" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "6" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -20,6 +20,8 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.expect(prompt)
client1.send("SET allow_experimental_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
client1.send("SET window_view_heartbeat_interval = 1")

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -25,10 +29,10 @@ INSERT INTO mt VALUES (1, 8, '1990/01/01 12:00:30');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "7" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "7" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -25,10 +29,10 @@ INSERT INTO mt VALUES (1, '1990/01/01 12:00:30');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "6" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "6" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -20,6 +20,8 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.expect(prompt)
client1.send("SET allow_experimental_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
client1.send("SET window_view_heartbeat_interval = 1")

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -25,10 +29,10 @@ INSERT INTO mt VALUES (1, '1990/01/01 12:00:30');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -25,10 +29,10 @@ INSERT INTO mt VALUES (1, '1990/01/01 12:00:30');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "6" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "6" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -19,6 +19,8 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.expect(prompt)
client1.send("SET allow_experimental_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
client1.send("SET window_view_heartbeat_interval = 1")

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -27,10 +31,10 @@ INSERT INTO mt VALUES (1, '1990/01/01 12:00:11');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY w_end, count;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv;"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt;"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY w_end, count;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst;"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -27,10 +31,10 @@ INSERT INTO mt VALUES (1, '1990/01/01 12:00:11');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY w_end, count;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv;"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt;"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY w_end, count;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst;"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -28,10 +32,10 @@ INSERT INTO mt VALUES (1, '1990/01/01 12:00:12');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "2" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "2" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY w_end, count;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv;"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt;"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY w_end, count;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst;"

View File

@ -20,6 +20,8 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.expect(prompt)
client1.send("SET allow_experimental_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
client1.send("SET window_view_heartbeat_interval = 1")

View File

@ -20,6 +20,8 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.expect(prompt)
client1.send("SET allow_experimental_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
client1.send("SET window_view_heartbeat_interval = 1")

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS info;
@ -29,11 +33,11 @@ INSERT INTO mt VALUES (1, '1990/01/01 12:00:30');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE info"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE info"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -5,7 +5,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -19,11 +23,11 @@ INSERT INTO mt VALUES ('test1', 'test2');
EOF
for _ in {1..100}; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "1" && echo 'OK' && break
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "1" && echo 'OK' && break
sleep .5
done
$CLICKHOUSE_CLIENT --query="SELECT colA, colB FROM dst"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT colA, colB FROM dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -26,10 +30,10 @@ CREATE WINDOW VIEW wv TO dst WATERMARK=ASCENDING POPULATE AS SELECT count(a) AS
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "7" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "7" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SELECT * FROM dst ORDER BY market, w_end;
INSERT INTO mt VALUES (1, 8, '1990/01/01 12:00:35');
INSERT INTO mt VALUES (1, 8, '1990/01/01 12:00:37');
@ -38,11 +42,11 @@ SELECT '------';
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "9" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "9" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS info;
@ -30,11 +34,11 @@ CREATE WINDOW VIEW wv TO dst WATERMARK=ASCENDING POPULATE AS SELECT count(a) AS
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE info"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE info"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -19,11 +23,11 @@ CREATE WINDOW VIEW wv TO dst POPULATE AS SELECT count(a) AS count, tumbleEnd(wid
EOF
for _ in {1..100}; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "1" && echo 'OK' && break
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "1" && echo 'OK' && break
sleep .5
done
$CLICKHOUSE_CLIENT --query="SELECT count FROM dst"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count FROM dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -23,13 +27,13 @@ INSERT INTO mt VALUES (1, 5, '1990/01/01 12:00:06');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="SELECT '----ALTER TABLE...MODIFY QUERY----';"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT '----ALTER TABLE...MODIFY QUERY----';"
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
ALTER TABLE wv MODIFY QUERY SELECT count(a) AS count, mt.market * 2 as market, tumbleEnd(wid) AS w_end FROM mt GROUP BY tumble(timestamp, INTERVAL '5' SECOND, 'US/Samoa') AS wid, mt.market;
INSERT INTO mt VALUES (1, 6, '1990/01/01 12:00:10');
@ -38,10 +42,10 @@ INSERT INTO mt VALUES (1, 8, '1990/01/01 12:00:30');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "5" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "5" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS mt2;
@ -25,13 +29,13 @@ INSERT INTO mt VALUES (1, 5, '1990/01/01 12:00:06');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="SELECT '----ALTER TABLE...MODIFY QUERY----';"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT '----ALTER TABLE...MODIFY QUERY----';"
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
ALTER TABLE wv MODIFY QUERY SELECT count(a) AS count, mt2.market * 2 as market, tumbleEnd(wid) AS w_end FROM mt2 GROUP BY tumble(timestamp, INTERVAL '5' SECOND, 'US/Samoa') AS wid, mt2.market;
INSERT INTO mt2 VALUES (1, 6, '1990/01/01 12:00:10');
@ -40,11 +44,11 @@ INSERT INTO mt2 VALUES (1, 8, '1990/01/01 12:00:30');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "5" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "5" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt2"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt2"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -20,6 +20,8 @@ with client(name="client1>", log=log) as client1, client(
client2.expect(prompt)
client3.expect(prompt)
client1.send("SET allow_experimental_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
client1.send("SET window_view_heartbeat_interval = 1")

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -25,10 +29,10 @@ INSERT INTO mt VALUES (1, 8, '1990/01/01 12:00:30');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "7" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "7" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -25,10 +29,10 @@ INSERT INTO mt VALUES (1, '1990/01/01 12:00:30');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "6" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "6" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM dst ORDER BY w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS wv;
@ -23,9 +27,9 @@ INSERT INTO mt VALUES (1, 8, toDateTime('1990/01/01 12:00:30', 'US/Samoa'));
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM wv" | grep -q "7" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM wv" | grep -q "7" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"

View File

@ -19,6 +19,8 @@ with client(name="client1>", log=log) as client1, client(
client1.expect(prompt)
client2.expect(prompt)
client1.send("SET allow_experimental_analyzer = 0")
client1.expect(prompt)
client1.send("SET allow_experimental_window_view = 1")
client1.expect(prompt)
client1.send("SET window_view_heartbeat_interval = 1")

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS dst;
@ -25,11 +29,11 @@ INSERT INTO mt VALUES (1, 8, '1990/01/01 12:00:30');
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM wv" | grep -q "7" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM wv" | grep -q "7" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="SELECT market FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT market FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS wv;
@ -23,9 +27,9 @@ INSERT INTO mt VALUES (1, 8, toDateTime('1990/01/01 12:00:30', 'US/Samoa'));
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM wv" | grep -q "7" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM wv" | grep -q "7" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"

View File

@ -1,5 +1,6 @@
-- Tags: no-parallel
SET allow_experimental_analyzer = 0;
SET allow_experimental_window_view = 1;
DROP DATABASE IF EXISTS test_01085;

View File

@ -5,7 +5,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --allow_deprecated_database_ordinary=1 --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --allow_deprecated_database_ordinary=1 --multiquery <<EOF
SET allow_experimental_window_view = 1;
SET window_view_clean_interval = 1;
@ -24,19 +28,19 @@ INSERT INTO test_01086.mt VALUES (1, 5, toDateTime('1990/01/01 12:00:06', 'US/Sa
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM test_01086.\`.inner.wv\`" | grep -q "5" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM test_01086.\`.inner.wv\`" | grep -q "5" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT sleep(2);"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT sleep(2);"
$CLICKHOUSE_CLIENT --query="INSERT INTO test_01086.mt VALUES (1, 6, toDateTime('1990/01/01 12:00:11', 'US/Samoa'));"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="INSERT INTO test_01086.mt VALUES (1, 6, toDateTime('1990/01/01 12:00:11', 'US/Samoa'));"
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM test_01086.\`.inner.wv\`" | grep -q "3" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM test_01086.\`.inner.wv\`" | grep -q "3" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT market, wid FROM test_01086.\`.inner.wv\` ORDER BY market, \`windowID(timestamp, toIntervalSecond('5'), 'US/Samoa')\` as wid";
$CLICKHOUSE_CLIENT --query="DROP TABLE test_01086.wv NO DELAY;"
$CLICKHOUSE_CLIENT --query="DROP TABLE test_01086.mt NO DELAY;"
$CLICKHOUSE_CLIENT --query="DROP TABLE test_01086.dst NO DELAY;"
$CLICKHOUSE_CLIENT --query="DROP DATABASE test_01086 NO DELAY;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT market, wid FROM test_01086.\`.inner.wv\` ORDER BY market, \`windowID(timestamp, toIntervalSecond('5'), 'US/Samoa')\` as wid";
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE test_01086.wv NO DELAY;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE test_01086.mt NO DELAY;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE test_01086.dst NO DELAY;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP DATABASE test_01086 NO DELAY;"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS wv;
@ -21,13 +25,13 @@ INSERT INTO mt VALUES (1, 5, toDateTime('1990/01/01 12:00:06', 'US/Samoa'));
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM wv" | grep -q "3" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM wv" | grep -q "3" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT * FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="SELECT '----ALTER TABLE...MODIFY QUERY----';"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT * FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT '----ALTER TABLE...MODIFY QUERY----';"
$CLICKHOUSE_CLIENT --multiquery <<EOF
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
ALTER TABLE wv MODIFY QUERY SELECT count(a) AS count, mt.market * 2 as market, tumbleEnd(wid) AS w_end FROM mt GROUP BY tumble(timestamp, INTERVAL '5' SECOND, 'US/Samoa') AS wid, mt.market;
INSERT INTO mt VALUES (1, 6, toDateTime('1990/01/01 12:00:10', 'US/Samoa'));
@ -36,9 +40,9 @@ INSERT INTO mt VALUES (1, 8, toDateTime('1990/01/01 12:00:30', 'US/Samoa'));
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM wv" | grep -q "5" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM wv" | grep -q "5" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT count, market, w_end FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count, market, w_end FROM wv ORDER BY market, w_end;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt"

View File

@ -4,7 +4,11 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
$CLICKHOUSE_CLIENT --multiquery <<EOF
opts=(
"--allow_experimental_analyzer=0"
)
$CLICKHOUSE_CLIENT "${opts[@]}" --multiquery <<EOF
SET allow_experimental_window_view = 1;
DROP TABLE IF EXISTS mt;
DROP TABLE IF EXISTS wv;
@ -26,10 +30,10 @@ INSERT INTO mt VALUES (1, 8, toDateTime('1990/01/01 12:00:30', 'US/Samoa'));
EOF
while true; do
$CLICKHOUSE_CLIENT --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count(*) FROM dst" | grep -q "3" && break || sleep .5 ||:
done
$CLICKHOUSE_CLIENT --query="SELECT count, market, w_end FROM dst ORDER BY w_end, market;"
$CLICKHOUSE_CLIENT --query="DROP TABLE wv;"
$CLICKHOUSE_CLIENT --query="DROP TABLE dst;"
$CLICKHOUSE_CLIENT --query="DROP TABLE mt;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="SELECT count, market, w_end FROM dst ORDER BY w_end, market;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE wv;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE dst;"
$CLICKHOUSE_CLIENT "${opts[@]}" --query="DROP TABLE mt;"

View File

@ -22,8 +22,8 @@
2 4 109 2
1 619 1
2 537 2
1 619 1
2 537 2
-2 537 2
-1 619 1
2019-05-05 00:00:00 -45363190
2019-05-05 00:00:00 -1249512288
2019-05-05 00:00:00 345522721

View File

@ -14,7 +14,7 @@ SELECT a FROM pk_order GROUP BY a ORDER BY a;
SELECT a, b, sum(c), avg(d) FROM pk_order GROUP BY a, b ORDER BY a, b;
SELECT a, sum(c), avg(d) FROM pk_order GROUP BY a ORDER BY a;
SELECT a, sum(c), avg(d) FROM pk_order GROUP BY -a ORDER BY a;
SELECT -a, sum(c), avg(d) FROM pk_order GROUP BY -a ORDER BY -a;
DROP TABLE IF EXISTS pk_order;
@ -27,7 +27,7 @@ INSERT INTO pk_order
set max_block_size = 1;
SELECT d, max(b) FROM pk_order GROUP BY d, a ORDER BY d, a LIMIT 5;
SELECT d, avg(a) FROM pk_order GROUP BY toString(d) ORDER BY toString(d) LIMIT 5;
SELECT toString(d), avg(a) FROM pk_order GROUP BY toString(d) ORDER BY toString(d) LIMIT 5;
SELECT toStartOfHour(d) as d1, min(a), max(b) FROM pk_order GROUP BY d1 ORDER BY d1 LIMIT 5;
DROP TABLE pk_order;

Some files were not shown because too many files have changed in this diff.