Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-12-02 12:32:04 +00:00)

Commit 6e5d6ad1c6: Merge remote-tracking branch 'ClickHouse/master' into query_cache_minor_change
.github/workflows/backport_branches.yml (vendored, 4 changes)

@@ -67,8 +67,6 @@ jobs:
       test_name: Compatibility check (amd64)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
   CompatibilityCheckAarch64:
     needs: [RunConfig, BuilderDebAarch64]
     if: ${{ !failure() && !cancelled() }}
@@ -77,8 +75,6 @@ jobs:
       test_name: Compatibility check (aarch64)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
   #########################################################################################
   #################################### ORDINARY BUILDS ####################################
   #########################################################################################
.github/workflows/master.yml (vendored, 4 changes)

@@ -73,8 +73,6 @@ jobs:
       test_name: Compatibility check (amd64)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
   CompatibilityCheckAarch64:
     needs: [RunConfig, BuilderDebAarch64]
     if: ${{ !failure() && !cancelled() }}
@@ -83,8 +81,6 @@ jobs:
       test_name: Compatibility check (aarch64)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
   #########################################################################################
   #################################### ORDINARY BUILDS ####################################
   #########################################################################################
.github/workflows/pull_request.yml (vendored, 4 changes)

@@ -117,8 +117,6 @@ jobs:
       test_name: Compatibility check (amd64)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
   CompatibilityCheckAarch64:
     needs: [RunConfig, BuilderDebAarch64]
     if: ${{ !failure() && !cancelled() }}
@@ -127,8 +125,6 @@ jobs:
       test_name: Compatibility check (aarch64)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
   #########################################################################################
   #################################### ORDINARY BUILDS ####################################
   #########################################################################################
.github/workflows/release_branches.yml (vendored, 4 changes)

@@ -68,8 +68,6 @@ jobs:
       test_name: Compatibility check (amd64)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions
   CompatibilityCheckAarch64:
     needs: [RunConfig, BuilderDebAarch64]
     if: ${{ !failure() && !cancelled() }}
@@ -78,8 +76,6 @@ jobs:
       test_name: Compatibility check (aarch64)
       runner_type: style-checker
       data: ${{ needs.RunConfig.outputs.data }}
-      run_command: |
-        python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc
   #########################################################################################
   #################################### ORDINARY BUILDS ####################################
   #########################################################################################
@@ -16,6 +16,7 @@
 #ci_set_reduced
 #ci_set_arm
 #ci_set_integration
+#ci_set_analyzer
 
 ## To run specified job in CI:
 #job_<JOB NAME>
@@ -21,7 +21,7 @@ Columns:
 - `hostname` ([LowCardinality(String)](../../sql-reference/data-types/string.md)) — Hostname of the server executing the query.
 - `event_date` ([Date](../../sql-reference/data-types/date.md)) — The date when the thread has finished execution of the query.
 - `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — The date and time when the thread has finished execution of the query.
-- `event_time_microsecinds` ([DateTime](../../sql-reference/data-types/datetime.md)) — The date and time when the thread has finished execution of the query with microseconds precision.
+- `event_time_microseconds` ([DateTime](../../sql-reference/data-types/datetime.md)) — The date and time when the thread has finished execution of the query with microseconds precision.
 - `query_start_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Start time of query execution.
 - `query_start_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Start time of query execution with microsecond precision.
 - `query_duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Duration of query execution.
@@ -32,8 +32,7 @@ Columns:
 - `memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — The difference between the amount of allocated and freed memory in context of this thread.
 - `peak_memory_usage` ([Int64](../../sql-reference/data-types/int-uint.md)) — The maximum difference between the amount of allocated and freed memory in context of this thread.
 - `thread_name` ([String](../../sql-reference/data-types/string.md)) — Name of the thread.
-- `thread_number` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Internal thread ID.
-- `thread_id` ([Int32](../../sql-reference/data-types/int-uint.md)) — thread ID.
+- `thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md)) — OS thread ID.
 - `master_thread_id` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — OS initial ID of initial thread.
 - `query` ([String](../../sql-reference/data-types/string.md)) — Query string.
 - `is_initial_query` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Query type. Possible values:
@@ -6651,6 +6651,7 @@ void QueryAnalyzer::initializeTableExpressionData(const QueryTreeNodePtr & table
             if (column_default && column_default->kind == ColumnDefaultKind::Alias)
             {
                 auto alias_expression = buildQueryTree(column_default->expression, scope.context);
+                alias_expression = buildCastFunction(alias_expression, column_name_and_type.type, scope.context, false /*resolve*/);
                 auto column_node = std::make_shared<ColumnNode>(column_name_and_type, std::move(alias_expression), table_expression_node);
                 column_name_to_column_node.emplace(column_name_and_type.name, column_node);
                 alias_columns_to_resolve.emplace_back(column_name_and_type.name, column_node);
@@ -6683,9 +6684,7 @@ void QueryAnalyzer::initializeTableExpressionData(const QueryTreeNodePtr & table
             alias_column_resolve_scope,
             false /*allow_lambda_expression*/,
             false /*allow_table_expression*/);
-        auto & resolved_expression = alias_column_to_resolve->getExpression();
-        if (!resolved_expression->getResultType()->equals(*alias_column_to_resolve->getResultType()))
-            resolved_expression = buildCastFunction(resolved_expression, alias_column_to_resolve->getResultType(), scope.context, true);
+
         column_name_to_column_node = std::move(alias_column_resolve_scope.column_name_to_column_node);
         column_name_to_column_node[alias_column_to_resolve_name] = alias_column_to_resolve;
     }
@@ -252,8 +252,21 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
 
     if (which_type.isDateTime64() && src.getType() == Field::Types::Decimal64)
     {
-        /// Already in needed type.
-        return src;
+        const auto & from_type = src.get<Decimal64>();
+        const auto & to_type = static_cast<const DataTypeDateTime64 &>(type);
+
+        const auto scale_from = from_type.getScale();
+        const auto scale_to = to_type.getScale();
+        const auto scale_multiplier_diff = scale_from > scale_to ? from_type.getScaleMultiplier() / to_type.getScaleMultiplier() : to_type.getScaleMultiplier() / from_type.getScaleMultiplier();
+
+        if (scale_multiplier_diff == 1) /// Already in needed type.
+            return src;
+
+        /// in case if we need to make DateTime64(a) from DateTime64(b), a != b, we need to convert datetime value to the right scale
+        const UInt64 value = scale_from > scale_to ? from_type.getValue().value / scale_multiplier_diff : from_type.getValue().value * scale_multiplier_diff;
+        return DecimalField(
+            DecimalUtils::decimalFromComponentsWithMultiplier<DateTime64>(value, 0, 1),
+            scale_to);
     }
 
     /// For toDate('xxx') in 1::Int64, we CAST `src` to UInt64, which may
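The rescaling arithmetic introduced above is easy to sanity-check in isolation. A minimal standalone sketch of the same divide-or-multiply rule (plain unsigned integers stand in for Decimal64 ticks; rescale is an illustrative helper, not a ClickHouse API):

#include <cstdint>
#include <iostream>

// Rescale a DateTime64-style tick count between scales (digits of sub-second
// precision), mirroring the branch above: divide when precision shrinks,
// multiply when it grows, and return the value unchanged when scales match.
uint64_t rescale(uint64_t value, uint32_t scale_from, uint32_t scale_to)
{
    uint64_t multiplier_from = 1;
    uint64_t multiplier_to = 1;
    for (uint32_t i = 0; i < scale_from; ++i) multiplier_from *= 10;
    for (uint32_t i = 0; i < scale_to; ++i) multiplier_to *= 10;

    const uint64_t diff = scale_from > scale_to ? multiplier_from / multiplier_to
                                                : multiplier_to / multiplier_from;
    if (diff == 1) // Already at the needed scale.
        return value;
    return scale_from > scale_to ? value / diff : value * diff;
}

int main()
{
    std::cout << rescale(1700000000123456ULL, 6, 3) << '\n'; // 1700000000123 (truncates microseconds)
    std::cout << rescale(1700000000123ULL, 3, 6) << '\n';    // 1700000000123000 (pads with zeros)
}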
@@ -121,12 +121,7 @@ Block getHeaderForProcessingStage(
 
             auto & table_expression_data = query_info.planner_context->getTableExpressionDataOrThrow(left_table_expression);
             const auto & query_context = query_info.planner_context->getQueryContext();
-            NamesAndTypes columns;
-            const auto & column_name_to_column = table_expression_data.getColumnNameToColumn();
-            for (const auto & column_name : table_expression_data.getSelectedColumnsNames())
-                columns.push_back(column_name_to_column.at(column_name));
+            auto columns = table_expression_data.getColumns();
 
             auto new_query_node = buildSubqueryToReadColumnsFromTableExpression(columns, left_table_expression, query_context);
             query = new_query_node->toAST();
         }
@@ -29,13 +29,34 @@ namespace
 class CollectSourceColumnsVisitor : public InDepthQueryTreeVisitor<CollectSourceColumnsVisitor>
 {
 public:
-    explicit CollectSourceColumnsVisitor(PlannerContextPtr & planner_context_, bool keep_alias_columns_ = true)
+    explicit CollectSourceColumnsVisitor(PlannerContext & planner_context_)
         : planner_context(planner_context_)
-        , keep_alias_columns(keep_alias_columns_)
     {}
 
     void visitImpl(QueryTreeNodePtr & node)
     {
+        /// Special case for USING clause which contains references to ALIAS columns.
+        /// We can not modify such ColumnNode.
+        if (auto * join_node = node->as<JoinNode>())
+        {
+            if (!join_node->isUsingJoinExpression())
+                return;
+
+            auto & using_list = join_node->getJoinExpression()->as<ListNode&>();
+            for (auto & using_element : using_list)
+            {
+                auto & column_node = using_element->as<ColumnNode&>();
+                /// This list contains column nodes from left and right tables.
+                auto & columns_from_subtrees = column_node.getExpressionOrThrow()->as<ListNode&>().getNodes();
+
+                /// Visit left table column node.
+                visitUsingColumn(columns_from_subtrees[0]);
+                /// Visit right table column node.
+                visitUsingColumn(columns_from_subtrees[1]);
+            }
+            return;
+        }
+
         auto * column_node = node->as<ColumnNode>();
         if (!column_node)
             return;
@@ -51,55 +72,22 @@ public:
 
         /// JOIN using expression
         if (column_node->hasExpression() && column_source_node_type == QueryTreeNodeType::JOIN)
-        {
-            auto & columns_from_subtrees = column_node->getExpression()->as<ListNode &>().getNodes();
-            if (columns_from_subtrees.size() != 2)
-                throw Exception(ErrorCodes::LOGICAL_ERROR,
-                    "Expected two columns in JOIN using expression for column {}", column_node->dumpTree());
-
-            visit(columns_from_subtrees[0]);
-            visit(columns_from_subtrees[1]);
             return;
-        }
 
-        auto & table_expression_data = planner_context->getOrCreateTableExpressionData(column_source_node);
+        auto & table_expression_data = planner_context.getOrCreateTableExpressionData(column_source_node);
 
-        if (isAliasColumn(node))
+        if (column_node->hasExpression() && column_source_node_type != QueryTreeNodeType::ARRAY_JOIN)
         {
-            /// Column is an ALIAS column with expression
+            /// Replace ALIAS column with expression
             bool column_already_exists = table_expression_data.hasColumn(column_node->getColumnName());
             if (!column_already_exists)
             {
-                CollectSourceColumnsVisitor visitor_for_alias_column(planner_context);
-                /// While we are processing expression of ALIAS columns we should not add source columns to selected.
-                /// See also comment for `select_added_columns`
-                visitor_for_alias_column.select_added_columns = false;
-                visitor_for_alias_column.keep_alias_columns = keep_alias_columns;
-                visitor_for_alias_column.visit(column_node->getExpression());
-
-                if (!keep_alias_columns)
-                {
-                    /// For PREWHERE we can just replace ALIAS column with it's expression,
-                    /// because ActionsDAG for PREWHERE applied right on top of table expression
-                    /// and cannot affect subqueries or other table expressions.
-                    node = column_node->getExpression();
-                    return;
-                }
-
-                auto column_identifier = planner_context->getGlobalPlannerContext()->createColumnIdentifier(node);
-
-                ActionsDAGPtr alias_column_actions_dag = std::make_shared<ActionsDAG>();
-                PlannerActionsVisitor actions_visitor(planner_context, false);
-                auto outputs = actions_visitor.visit(alias_column_actions_dag, column_node->getExpression());
-                if (outputs.size() != 1)
-                    throw Exception(ErrorCodes::LOGICAL_ERROR,
-                        "Expected single output in actions dag for alias column {}. Actual {}", column_node->dumpTree(), outputs.size());
-                const auto & column_name = column_node->getColumnName();
-                const auto & alias_node = alias_column_actions_dag->addAlias(*outputs[0], column_name);
-                alias_column_actions_dag->addOrReplaceInOutputs(alias_node);
-                table_expression_data.addAliasColumn(column_node->getColumn(), column_identifier, alias_column_actions_dag, select_added_columns);
+                auto column_identifier = planner_context.getGlobalPlannerContext()->createColumnIdentifier(node);
+                table_expression_data.addAliasColumnName(column_node->getColumnName(), column_identifier);
             }
+            node = column_node->getExpression();
+            visitImpl(node);
 
             return;
         }
@@ -114,58 +102,45 @@ public:
 
         bool column_already_exists = table_expression_data.hasColumn(column_node->getColumnName());
         if (column_already_exists)
-        {
-            /// Column may be added when we collected data for ALIAS column
-            /// But now we see it directly in the query, so make sure it's marked as selected
-            if (select_added_columns)
-                table_expression_data.markSelectedColumn(column_node->getColumnName());
             return;
-        }
 
-        auto column_identifier = planner_context->getGlobalPlannerContext()->createColumnIdentifier(node);
-        table_expression_data.addColumn(column_node->getColumn(), column_identifier, select_added_columns);
-    }
-
-    static bool isAliasColumn(const QueryTreeNodePtr & node)
-    {
-        const auto * column_node = node->as<ColumnNode>();
-        if (!column_node || !column_node->hasExpression())
-            return false;
-        const auto & column_source = column_node->getColumnSourceOrNull();
-        if (!column_source)
-            return false;
-        return column_source->getNodeType() != QueryTreeNodeType::JOIN &&
-            column_source->getNodeType() != QueryTreeNodeType::ARRAY_JOIN;
+        auto column_identifier = planner_context.getGlobalPlannerContext()->createColumnIdentifier(node);
+        table_expression_data.addColumn(column_node->getColumn(), column_identifier);
     }
 
-    static bool needChildVisit(const QueryTreeNodePtr & parent_node, const QueryTreeNodePtr & child_node)
+    static bool needChildVisit(const QueryTreeNodePtr & parent, const QueryTreeNodePtr & child_node)
     {
+        if (auto * join_node = parent->as<JoinNode>())
+        {
+            if (join_node->getJoinExpression() == child_node && join_node->isUsingJoinExpression())
+                return false;
+        }
         auto child_node_type = child_node->getNodeType();
-        return !(child_node_type == QueryTreeNodeType::QUERY ||
-            child_node_type == QueryTreeNodeType::UNION ||
-            isAliasColumn(parent_node));
-    }
-
-    void setKeepAliasColumns(bool keep_alias_columns_)
-    {
-        keep_alias_columns = keep_alias_columns_;
+        return !(child_node_type == QueryTreeNodeType::QUERY || child_node_type == QueryTreeNodeType::UNION);
     }
 
 private:
-    PlannerContextPtr & planner_context;
-
-    /// Replace ALIAS columns with their expressions or register them in table expression data.
-    /// Usually we can replace them when we build some "local" actions DAG
-    /// (for example Row Policy or PREWHERE) that is applied on top of the table expression.
-    /// In other cases, we keep ALIAS columns as ColumnNode with an expression child node,
-    /// and handle them in the Planner by inserting ActionsDAG to compute them after reading from storage.
-    bool keep_alias_columns = true;
-
-    /// Flag `select_added_columns` indicates if we should mark column as explicitly selected.
-    /// For example, for table with columns (a Int32, b ALIAS a+1) and query SELECT b FROM table
-    /// Column `b` is selected explicitly by user, but not `a` (that is also read though).
-    /// Distinguishing such columns is important for checking access rights for ALIAS columns.
-    bool select_added_columns = true;
+    void visitUsingColumn(QueryTreeNodePtr & node)
+    {
+        auto & column_node = node->as<ColumnNode&>();
+        if (column_node.hasExpression())
+        {
+            auto & table_expression_data = planner_context.getOrCreateTableExpressionData(column_node.getColumnSource());
+            bool column_already_exists = table_expression_data.hasColumn(column_node.getColumnName());
+            if (column_already_exists)
+                return;
+
+            auto column_identifier = planner_context.getGlobalPlannerContext()->createColumnIdentifier(node);
+            table_expression_data.addAliasColumnName(column_node.getColumnName(), column_identifier);
+
+            visitImpl(column_node.getExpressionOrThrow());
+        }
+        else
+            visitImpl(node);
+    }
+
+    PlannerContext & planner_context;
 };
 
 class CollectPrewhereTableExpressionVisitor : public ConstInDepthQueryTreeVisitor<CollectPrewhereTableExpressionVisitor>
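The visitor above is an instance of the analyzer's in-depth traversal pattern: a CRTP base walks the tree, calls the derived class's visitImpl on every node, and consults a static needChildVisit hook to prune whole subtrees (here QUERY/UNION children and JOIN USING lists). A minimal standalone sketch of the pattern with a toy tree; every name below is illustrative, not the real ClickHouse API:

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Toy tree node standing in for a query tree node.
struct Node
{
    std::string name;
    bool is_subquery = false; // stands in for QueryTreeNodeType::QUERY
    std::vector<std::shared_ptr<Node>> children;
};

// CRTP in-depth visitor: Derived supplies visitImpl() and a static
// needChildVisit() that can prune subtrees before descending.
template <typename Derived>
struct InDepthVisitor
{
    void visit(const std::shared_ptr<Node> & node)
    {
        static_cast<Derived *>(this)->visitImpl(node);
        for (const auto & child : node->children)
            if (Derived::needChildVisit(node, child))
                visit(child);
    }
};

struct CollectNamesVisitor : InDepthVisitor<CollectNamesVisitor>
{
    void visitImpl(const std::shared_ptr<Node> & node) { names.push_back(node->name); }

    // Do not descend into subqueries, mirroring the QUERY/UNION pruning above.
    static bool needChildVisit(const std::shared_ptr<Node> &, const std::shared_ptr<Node> & child)
    {
        return !child->is_subquery;
    }

    std::vector<std::string> names;
};

int main()
{
    auto hidden = std::make_shared<Node>(Node{"hidden"});
    auto subquery = std::make_shared<Node>(Node{"subquery", true, {hidden}});
    auto root = std::make_shared<Node>(Node{"root", false, {std::make_shared<Node>(Node{"a"}), subquery}});

    CollectNamesVisitor visitor;
    visitor.visit(root);
    for (const auto & name : visitor.names)
        std::cout << name << '\n'; // prints "root" and "a"; the subquery subtree is pruned
}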
@@ -299,7 +274,7 @@ void collectTableExpressionData(QueryTreeNodePtr & query_node, PlannerContextPtr
         }
     }
 
-    CollectSourceColumnsVisitor collect_source_columns_visitor(planner_context);
+    CollectSourceColumnsVisitor collect_source_columns_visitor(*planner_context);
     for (auto & node : query_node_typed.getChildren())
     {
         if (!node || node == query_node_typed.getPrewhere())
@@ -325,26 +300,21 @@ void collectTableExpressionData(QueryTreeNodePtr & query_node, PlannerContextPtr
         }
 
         auto & table_expression_data = planner_context->getOrCreateTableExpressionData(prewhere_table_expression);
-        const auto & read_column_names = table_expression_data.getColumnNames();
-        NameSet required_column_names_without_prewhere(read_column_names.begin(), read_column_names.end());
-        const auto & selected_column_names = table_expression_data.getSelectedColumnsNames();
-        required_column_names_without_prewhere.insert(selected_column_names.begin(), selected_column_names.end());
+        const auto & column_names = table_expression_data.getColumnNames();
+        NameSet required_column_names_without_prewhere(column_names.begin(), column_names.end());
 
-        collect_source_columns_visitor.setKeepAliasColumns(false);
         collect_source_columns_visitor.visit(query_node_typed.getPrewhere());
 
         auto prewhere_actions_dag = std::make_shared<ActionsDAG>();
 
-        QueryTreeNodePtr query_tree_node = query_node_typed.getPrewhere();
-
         PlannerActionsVisitor visitor(planner_context, false /*use_column_identifier_as_action_node_name*/);
-        auto expression_nodes = visitor.visit(prewhere_actions_dag, query_tree_node);
+        auto expression_nodes = visitor.visit(prewhere_actions_dag, query_node_typed.getPrewhere());
         if (expression_nodes.size() != 1)
             throw Exception(ErrorCodes::ILLEGAL_PREWHERE,
                 "Invalid PREWHERE. Expected single boolean expression. In query {}",
                 query_node->formatASTForErrorMessage());
 
-        prewhere_actions_dag->getOutputs().push_back(expression_nodes.back());
+        prewhere_actions_dag->getOutputs().push_back(expression_nodes[0]);
 
         for (const auto & prewhere_input_node : prewhere_actions_dag->getInputs())
             if (required_column_names_without_prewhere.contains(prewhere_input_node->result_name))
@@ -354,9 +324,9 @@ void collectTableExpressionData(QueryTreeNodePtr & query_node, PlannerContextPtr
     }
 }
 
-void collectSourceColumns(QueryTreeNodePtr & expression_node, PlannerContextPtr & planner_context, bool keep_alias_columns)
+void collectSourceColumns(QueryTreeNodePtr & expression_node, PlannerContextPtr & planner_context)
 {
-    CollectSourceColumnsVisitor collect_source_columns_visitor(planner_context, keep_alias_columns);
+    CollectSourceColumnsVisitor collect_source_columns_visitor(*planner_context);
     collect_source_columns_visitor.visit(expression_node);
 }
 
@@ -19,6 +19,6 @@ void collectTableExpressionData(QueryTreeNodePtr & query_node, PlannerContextPtr
  *
  * ALIAS table column nodes are registered in table expression data and replaced in query tree with inner alias expression.
  */
-void collectSourceColumns(QueryTreeNodePtr & expression_node, PlannerContextPtr & planner_context, bool keep_alias_columns = true);
+void collectSourceColumns(QueryTreeNodePtr & expression_node, PlannerContextPtr & planner_context);
 
 }
@@ -451,7 +451,6 @@ private:
     std::unordered_map<QueryTreeNodePtr, std::string> node_to_node_name;
     const PlannerContextPtr planner_context;
     ActionNodeNameHelper action_node_name_helper;
-    bool use_column_identifier_as_action_node_name;
 };
 
 PlannerActionsVisitorImpl::PlannerActionsVisitorImpl(ActionsDAGPtr actions_dag,
@@ -459,7 +458,6 @@ PlannerActionsVisitorImpl::PlannerActionsVisitorImpl(ActionsDAGPtr actions_dag,
     bool use_column_identifier_as_action_node_name_)
     : planner_context(planner_context_)
     , action_node_name_helper(node_to_node_name, *planner_context, use_column_identifier_as_action_node_name_)
-    , use_column_identifier_as_action_node_name(use_column_identifier_as_action_node_name_)
 {
     actions_stack.emplace_back(std::move(actions_dag), nullptr);
 }
@@ -505,8 +503,7 @@ PlannerActionsVisitorImpl::NodeNameAndNodeMinLevel PlannerActionsVisitorImpl::vi
 {
     auto column_node_name = action_node_name_helper.calculateActionNodeName(node);
     const auto & column_node = node->as<ColumnNode &>();
-    if (column_node.hasExpression() && !use_column_identifier_as_action_node_name)
-        return visitImpl(column_node.getExpression());
+
     Int64 actions_stack_size = static_cast<Int64>(actions_stack.size() - 1);
     for (Int64 i = actions_stack_size; i >= 0; --i)
     {
@@ -86,7 +86,7 @@ namespace
 /// Check if current user has privileges to SELECT columns from table
 /// Throws an exception if access to any column from `column_names` is not granted
 /// If `column_names` is empty, check access to any columns and return names of accessible columns
-NameSet checkAccessRights(const TableNode & table_node, const Names & column_names, const ContextPtr & query_context)
+NameSet checkAccessRights(const TableNode & table_node, Names & column_names, const ContextPtr & query_context)
 {
     /// StorageDummy is created on preliminary stage, ignore access check for it.
     if (typeid_cast<const StorageDummy *>(table_node.getStorage().get()))
@@ -353,7 +353,9 @@ void prepareBuildQueryPlanForTableExpression(const QueryTreeNodePtr & table_expr
     NameSet columns_names_allowed_to_select;
     if (table_node)
     {
-        const auto & column_names_with_aliases = table_expression_data.getSelectedColumnsNames();
+        auto column_names_with_aliases = columns_names;
+        const auto & alias_columns_names = table_expression_data.getAliasColumnsNames();
+        column_names_with_aliases.insert(column_names_with_aliases.end(), alias_columns_names.begin(), alias_columns_names.end());
         columns_names_allowed_to_select = checkAccessRights(*table_node, column_names_with_aliases, query_context);
     }
 
@@ -862,28 +864,6 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
             max_block_size,
             max_streams);
 
-        const auto & alias_column_expressions = table_expression_data.getAliasColumnExpressions();
-        if (!alias_column_expressions.empty() && query_plan.isInitialized() && from_stage == QueryProcessingStage::FetchColumns)
-        {
-            ActionsDAGPtr merged_alias_columns_actions_dag = std::make_shared<ActionsDAG>(query_plan.getCurrentDataStream().header.getColumnsWithTypeAndName());
-            ActionsDAG::NodeRawConstPtrs action_dag_outputs = merged_alias_columns_actions_dag->getInputs();
-
-            for (const auto & [column_name, alias_column_actions_dag] : alias_column_expressions)
-            {
-                const auto & current_outputs = alias_column_actions_dag->getOutputs();
-                action_dag_outputs.insert(action_dag_outputs.end(), current_outputs.begin(), current_outputs.end());
-                merged_alias_columns_actions_dag->mergeNodes(std::move(*alias_column_actions_dag));
-            }
-
-            for (const auto * output_node : action_dag_outputs)
-                merged_alias_columns_actions_dag->addOrReplaceInOutputs(*output_node);
-            merged_alias_columns_actions_dag->removeUnusedActions(false);
-
-            auto alias_column_step = std::make_unique<ExpressionStep>(query_plan.getCurrentDataStream(), std::move(merged_alias_columns_actions_dag));
-            alias_column_step->setStepDescription("Compute alias columns");
-            query_plan.addStep(std::move(alias_column_step));
-        }
-
         for (const auto & filter_info_and_description : where_filters)
         {
             const auto & [filter_info, description] = filter_info_and_description;
@@ -927,8 +907,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
         else
         {
             /// Create step which reads from empty source if storage has no data.
-            const auto & column_names = table_expression_data.getSelectedColumnsNames();
-            auto source_header = storage_snapshot->getSampleBlockForColumns(column_names);
+            auto source_header = storage_snapshot->getSampleBlockForColumns(table_expression_data.getColumnNames());
             Pipe pipe(std::make_shared<NullSource>(source_header));
             auto read_from_pipe = std::make_unique<ReadFromPreparedSource>(std::move(pipe));
             read_from_pipe->setStepDescription("Read from NullSource");
@ -1045,6 +1024,57 @@ void joinCastPlanColumnsToNullable(QueryPlan & plan_to_add_cast, PlannerContextP
|
|||||||
plan_to_add_cast.addStep(std::move(cast_join_columns_step));
|
plan_to_add_cast.addStep(std::move(cast_join_columns_step));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Actions to calculate table columns that have a functional representation (ALIASes and subcolumns)
|
||||||
|
/// and used in USING clause of JOIN expression.
|
||||||
|
struct UsingAliasKeyActions
|
||||||
|
{
|
||||||
|
UsingAliasKeyActions(
|
||||||
|
const ColumnsWithTypeAndName & left_plan_output_columns,
|
||||||
|
const ColumnsWithTypeAndName & right_plan_output_columns
|
||||||
|
)
|
||||||
|
: left_alias_columns_keys(std::make_shared<ActionsDAG>(left_plan_output_columns))
|
||||||
|
, right_alias_columns_keys(std::make_shared<ActionsDAG>(right_plan_output_columns))
|
||||||
|
{}
|
||||||
|
|
||||||
|
void addLeftColumn(QueryTreeNodePtr & node, const ColumnsWithTypeAndName & plan_output_columns, const PlannerContextPtr & planner_context)
|
||||||
|
{
|
||||||
|
addColumnImpl(left_alias_columns_keys, node, plan_output_columns, planner_context);
|
||||||
|
}
|
||||||
|
|
||||||
|
void addRightColumn(QueryTreeNodePtr & node, const ColumnsWithTypeAndName & plan_output_columns, const PlannerContextPtr & planner_context)
|
||||||
|
{
|
||||||
|
addColumnImpl(right_alias_columns_keys, node, plan_output_columns, planner_context);
|
||||||
|
}
|
||||||
|
|
||||||
|
ActionsDAGPtr getLeftActions()
|
||||||
|
{
|
||||||
|
left_alias_columns_keys->projectInput();
|
||||||
|
return std::move(left_alias_columns_keys);
|
||||||
|
}
|
||||||
|
|
||||||
|
ActionsDAGPtr getRightActions()
|
||||||
|
{
|
||||||
|
right_alias_columns_keys->projectInput();
|
||||||
|
return std::move(right_alias_columns_keys);
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
void addColumnImpl(ActionsDAGPtr & alias_columns_keys, QueryTreeNodePtr & node, const ColumnsWithTypeAndName & plan_output_columns, const PlannerContextPtr & planner_context)
|
||||||
|
{
|
||||||
|
auto & column_node = node->as<ColumnNode&>();
|
||||||
|
if (column_node.hasExpression())
|
||||||
|
{
|
||||||
|
auto dag = buildActionsDAGFromExpressionNode(column_node.getExpressionOrThrow(), plan_output_columns, planner_context);
|
||||||
|
const auto & left_inner_column_identifier = planner_context->getColumnNodeIdentifierOrThrow(node);
|
||||||
|
dag->addOrReplaceInOutputs(dag->addAlias(*dag->getOutputs().front(), left_inner_column_identifier));
|
||||||
|
alias_columns_keys->mergeInplace(std::move(*dag));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ActionsDAGPtr left_alias_columns_keys;
|
||||||
|
ActionsDAGPtr right_alias_columns_keys;
|
||||||
|
};
|
||||||
|
|
||||||
JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_expression,
|
JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_expression,
|
||||||
JoinTreeQueryPlan left_join_tree_query_plan,
|
JoinTreeQueryPlan left_join_tree_query_plan,
|
||||||
JoinTreeQueryPlan right_join_tree_query_plan,
|
JoinTreeQueryPlan right_join_tree_query_plan,
|
||||||
@ -1113,6 +1143,8 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_
|
|||||||
|
|
||||||
if (join_node.isUsingJoinExpression())
|
if (join_node.isUsingJoinExpression())
|
||||||
{
|
{
|
||||||
|
UsingAliasKeyActions using_alias_key_actions{left_plan_output_columns, right_plan_output_columns};
|
||||||
|
|
||||||
auto & join_node_using_columns_list = join_node.getJoinExpression()->as<ListNode &>();
|
auto & join_node_using_columns_list = join_node.getJoinExpression()->as<ListNode &>();
|
||||||
for (auto & join_node_using_node : join_node_using_columns_list.getNodes())
|
for (auto & join_node_using_node : join_node_using_columns_list.getNodes())
|
||||||
{
|
{
|
||||||
@ -1122,9 +1154,13 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_
|
|||||||
auto & left_inner_column_node = inner_columns_list.getNodes().at(0);
|
auto & left_inner_column_node = inner_columns_list.getNodes().at(0);
|
||||||
auto & left_inner_column = left_inner_column_node->as<ColumnNode &>();
|
auto & left_inner_column = left_inner_column_node->as<ColumnNode &>();
|
||||||
|
|
||||||
|
using_alias_key_actions.addLeftColumn(left_inner_column_node, left_plan_output_columns, planner_context);
|
||||||
|
|
||||||
auto & right_inner_column_node = inner_columns_list.getNodes().at(1);
|
auto & right_inner_column_node = inner_columns_list.getNodes().at(1);
|
||||||
auto & right_inner_column = right_inner_column_node->as<ColumnNode &>();
|
auto & right_inner_column = right_inner_column_node->as<ColumnNode &>();
|
||||||
|
|
||||||
|
using_alias_key_actions.addRightColumn(right_inner_column_node, right_plan_output_columns, planner_context);
|
||||||
|
|
||||||
const auto & join_node_using_column_node_type = join_node_using_column_node.getColumnType();
|
const auto & join_node_using_column_node_type = join_node_using_column_node.getColumnType();
|
||||||
if (!left_inner_column.getColumnType()->equals(*join_node_using_column_node_type))
|
if (!left_inner_column.getColumnType()->equals(*join_node_using_column_node_type))
|
||||||
{
|
{
|
||||||
@ -1138,6 +1174,14 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_
|
|||||||
right_plan_column_name_to_cast_type.emplace(right_inner_column_identifier, join_node_using_column_node_type);
|
right_plan_column_name_to_cast_type.emplace(right_inner_column_identifier, join_node_using_column_node_type);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auto left_alias_columns_keys_step = std::make_unique<ExpressionStep>(left_plan.getCurrentDataStream(), using_alias_key_actions.getLeftActions());
|
||||||
|
left_alias_columns_keys_step->setStepDescription("Actions for left table alias column keys");
|
||||||
|
left_plan.addStep(std::move(left_alias_columns_keys_step));
|
||||||
|
|
||||||
|
auto right_alias_columns_keys_step = std::make_unique<ExpressionStep>(right_plan.getCurrentDataStream(), using_alias_key_actions.getRightActions());
|
||||||
|
right_alias_columns_keys_step->setStepDescription("Actions for right table alias column keys");
|
||||||
|
right_plan.addStep(std::move(right_alias_columns_keys_step));
|
||||||
}
|
}
|
||||||
|
|
||||||
auto join_cast_plan_output_nodes = [&](QueryPlan & plan_to_add_cast, std::unordered_map<std::string, DataTypePtr> & plan_column_name_to_cast_type)
|
auto join_cast_plan_output_nodes = [&](QueryPlan & plan_to_add_cast, std::unordered_map<std::string, DataTypePtr> & plan_column_name_to_cast_type)
|
||||||
|
@@ -55,7 +55,7 @@ public:
     /// Return true if column with name exists, false otherwise
     bool hasColumn(const std::string & column_name) const
     {
-        return column_name_to_column.contains(column_name);
+        return alias_columns_names.contains(column_name) || column_name_to_column.contains(column_name);
     }
 
     /** Add column in table expression data.
@@ -63,40 +63,37 @@ public:
       *
      * Logical error exception is thrown if column already exists.
      */
-    void addColumn(const NameAndTypePair & column, const ColumnIdentifier & column_identifier, bool is_selected_column = true)
+    void addColumn(const NameAndTypePair & column, const ColumnIdentifier & column_identifier)
     {
         if (hasColumn(column.name))
             throw Exception(ErrorCodes::LOGICAL_ERROR, "Column with name {} already exists", column.name);
 
-        column_names.push_back(column.name);
-        addColumnImpl(column, column_identifier, is_selected_column);
+        addColumnImpl(column, column_identifier);
     }
 
-    /// Add alias column
-    void addAliasColumn(const NameAndTypePair & column, const ColumnIdentifier & column_identifier, ActionsDAGPtr actions_dag, bool is_selected_column = true)
+    /** Add column if it does not exists in table expression data.
+      * Column identifier must be created using global planner context.
+      */
+    void addColumnIfNotExists(const NameAndTypePair & column, const ColumnIdentifier & column_identifier)
     {
-        alias_column_expressions.emplace(column.name, std::move(actions_dag));
-        addColumnImpl(column, column_identifier, is_selected_column);
+        if (hasColumn(column.name))
+            return;
+
+        addColumnImpl(column, column_identifier);
     }
 
-    /// Mark existing column as selected
-    void markSelectedColumn(const std::string & column_name)
+    /// Add alias column name
+    void addAliasColumnName(const std::string & column_name, const ColumnIdentifier & column_identifier)
     {
-        auto [_, inserted] = selected_column_names_set.emplace(column_name);
-        if (inserted)
-            selected_column_names.push_back(column_name);
+        alias_columns_names.insert(column_name);
+        column_name_to_column_identifier.emplace(column_name, column_identifier);
     }
 
-    /// Get columns that are requested from table expression, including ALIAS columns
-    const Names & getSelectedColumnsNames() const
+    /// Get alias columns names
+    const NameSet & getAliasColumnsNames() const
     {
-        return selected_column_names;
-    }
-
-    /// Get ALIAS columns names mapped to expressions
-    const std::unordered_map<std::string, ActionsDAGPtr> & getAliasColumnExpressions() const
-    {
-        return alias_column_expressions;
+        return alias_columns_names;
     }
 
     /// Get column name to column map
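The markSelectedColumn helper removed above relied on a small order-preserving deduplication idiom: a vector records first-seen order while a companion hash set turns repeated inserts into no-ops. A standalone sketch of just that idiom (illustrative names, not the ClickHouse types):

#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

// Keep first-seen order in `selected`; `selected_set` deduplicates in O(1).
struct SelectedColumns
{
    void markSelected(const std::string & name)
    {
        auto [_, inserted] = selected_set.emplace(name);
        if (inserted)
            selected.push_back(name);
    }

    std::vector<std::string> selected;
    std::unordered_set<std::string> selected_set;
};

int main()
{
    SelectedColumns columns;
    for (const auto * name : {"b", "a", "b", "c", "a"})
        columns.markSelected(name);

    for (const auto & name : columns.selected)
        std::cout << name << ' '; // prints: b a c
    std::cout << '\n';
}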
@@ -105,7 +102,7 @@ public:
         return column_name_to_column;
     }
 
-    /// Get column names that are read from table expression
+    /// Get column names
     const Names & getColumnNames() const
     {
         return column_names;
@@ -122,6 +119,23 @@ public:
         return result;
     }
 
+    ColumnIdentifiers getColumnIdentifiers() const
+    {
+        ColumnIdentifiers result;
+        result.reserve(column_identifier_to_column_name.size());
+
+        for (const auto & [column_identifier, _] : column_identifier_to_column_name)
+            result.push_back(column_identifier);
+
+        return result;
+    }
+
+    /// Get column name to column identifier map
+    const ColumnNameToColumnIdentifier & getColumnNameToIdentifier() const
+    {
+        return column_name_to_column_identifier;
+    }
+
     /// Get column identifier to column name map
     const ColumnNameToColumnIdentifier & getColumnIdentifierToColumnName() const
     {
@@ -145,6 +159,18 @@ public:
         return it->second;
     }
 
+    /** Get column for column name.
+      * Null is returned if there are no column for column name.
+      */
+    const NameAndTypePair * getColumnOrNull(const std::string & column_name) const
+    {
+        auto it = column_name_to_column.find(column_name);
+        if (it == column_name_to_column.end())
+            return nullptr;
+
+        return &it->second;
+    }
+
     /** Get column identifier for column name.
       * Exception is thrown if there are no column identifier for column name.
      */
@@ -174,6 +200,24 @@ public:
         return &it->second;
     }
 
+    /** Get column name for column identifier.
+      * Exception is thrown if there are no column name for column identifier.
+      */
+    const std::string & getColumnNameOrThrow(const ColumnIdentifier & column_identifier) const
+    {
+        auto it = column_identifier_to_column_name.find(column_identifier);
+        if (it == column_identifier_to_column_name.end())
+        {
+            auto column_identifiers = getColumnIdentifiers();
+            throw Exception(ErrorCodes::LOGICAL_ERROR,
+                "Column name for column identifier {} does not exists. There are only column identifiers: {}",
+                column_identifier,
+                fmt::join(column_identifiers.begin(), column_identifiers.end(), ", "));
+        }
+
+        return it->second;
+    }
+
     /** Get column name for column identifier.
       * Null is returned if there are no column name for column identifier.
      */
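The new getColumnNameOrThrow above formats its error with fmt::join, which renders a range with a separator inside a {fmt} format string. A standalone sketch of that call shape (requires the {fmt} library; the identifiers are made up):

#include <fmt/ranges.h> // provides fmt::join
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> column_identifiers{"__table1.id", "__table1.value"};

    // Same shape as the exception text in the diff: one placeholder for the
    // missing identifier, one for the joined list of known identifiers.
    fmt::print("Column name for column identifier {} does not exists. There are only column identifiers: {}\n",
               "__table1.unknown",
               fmt::join(column_identifiers.begin(), column_identifiers.end(), ", "));
}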
@@ -252,36 +296,23 @@ public:
     }
 
 private:
-    void addColumnImpl(const NameAndTypePair & column, const ColumnIdentifier & column_identifier, bool add_to_selected_columns)
+    void addColumnImpl(const NameAndTypePair & column, const ColumnIdentifier & column_identifier)
     {
-        if (add_to_selected_columns)
-            markSelectedColumn(column.name);
+        column_names.push_back(column.name);
 
         column_name_to_column.emplace(column.name, column);
         column_name_to_column_identifier.emplace(column.name, column_identifier);
         column_identifier_to_column_name.emplace(column_identifier, column.name);
     }
 
-    /// Set of columns that are physically read from table expression
-    /// In case of ALIAS columns it contains source column names that are used to calculate alias
-    /// This source column may be not used by user
+    /// Valid for table, table function, array join, query, union nodes
     Names column_names;
 
-    /// Set of columns that are SELECTed from table expression
-    /// It may contain ALIAS columns.
-    /// Mainly it's used to determine access to which columns to check
-    /// For example user may have an access to column `a ALIAS x + y` but not to `x` and `y`
-    /// In that case we can read `x` and `y` and calculate `a`, but not return `x` and `y` to user
-    Names selected_column_names;
-    /// To deduplicate columns in `selected_column_names`
-    NameSet selected_column_names_set;
-
-    /// Expression to calculate ALIAS columns
-    std::unordered_map<std::string, ActionsDAGPtr> alias_column_expressions;
-
     /// Valid for table, table function, array join, query, union nodes
     ColumnNameToColumn column_name_to_column;
 
+    /// Valid only for table node
+    NameSet alias_columns_names;
+
     /// Valid for table, table function, array join, query, union nodes
     ColumnNameToColumnIdentifier column_name_to_column_identifier;
 
@@ -469,19 +469,12 @@ FilterDAGInfo buildFilterInfo(ASTPtr filter_expression,
     NameSet table_expression_required_names_without_filter)
 {
     const auto & query_context = planner_context->getQueryContext();
 
     auto filter_query_tree = buildQueryTree(filter_expression, query_context);
 
     QueryAnalysisPass query_analysis_pass(table_expression);
     query_analysis_pass.run(filter_query_tree, query_context);
 
-    return buildFilterInfo(std::move(filter_query_tree), table_expression, planner_context, std::move(table_expression_required_names_without_filter));
-}
-
-FilterDAGInfo buildFilterInfo(QueryTreeNodePtr filter_query_tree,
-    const QueryTreeNodePtr & table_expression,
-    PlannerContextPtr & planner_context,
-    NameSet table_expression_required_names_without_filter)
-{
     if (table_expression_required_names_without_filter.empty())
     {
         auto & table_expression_data = planner_context->getTableExpressionDataOrThrow(table_expression);
@@ -489,7 +482,7 @@ FilterDAGInfo buildFilterInfo(QueryTreeNodePtr filter_query_tree,
         table_expression_required_names_without_filter.insert(table_expression_names.begin(), table_expression_names.end());
     }
 
-    collectSourceColumns(filter_query_tree, planner_context, false /*keep_alias_columns*/);
+    collectSourceColumns(filter_query_tree, planner_context);
     collectSets(filter_query_tree, *planner_context);
 
     auto filter_actions_dag = std::make_shared<ActionsDAG>();
@@ -89,11 +89,6 @@ FilterDAGInfo buildFilterInfo(ASTPtr filter_expression,
     PlannerContextPtr & planner_context,
     NameSet table_expression_required_names_without_filter = {});
 
-FilterDAGInfo buildFilterInfo(QueryTreeNodePtr filter_query_tree,
-    const QueryTreeNodePtr & table_expression,
-    PlannerContextPtr & planner_context,
-    NameSet table_expression_required_names_without_filter = {});
-
 ASTPtr parseAdditionalResultFilter(const Settings & settings);
 
 }
@@ -1434,13 +1434,8 @@ void ReadFromMergeTree::applyFilters(ActionDAGNodes added_filter_nodes)
     if (query_info.planner_context)
     {
         const auto & table_expression_data = query_info.planner_context->getTableExpressionDataOrThrow(query_info.table_expression);
-        const auto & alias_column_expressions = table_expression_data.getAliasColumnExpressions();
         for (const auto & [column_identifier, column_name] : table_expression_data.getColumnIdentifierToColumnName())
         {
-            /// ALIAS columns cannot be used in the filter expression without being calculated in ActionsDAG,
-            /// so they should not be added to the input nodes.
-            if (alias_column_expressions.contains(column_name))
-                continue;
             const auto & column = table_expression_data.getColumnOrThrow(column_name);
             node_name_to_input_node_column.emplace(column_identifier, ColumnWithTypeAndName(column.type, column_name));
         }
@@ -744,32 +744,6 @@ StorageSnapshotPtr StorageDistributed::getStorageSnapshotForQuery(
 namespace
 {
 
-class ReplaseAliasColumnsVisitor : public InDepthQueryTreeVisitor<ReplaseAliasColumnsVisitor>
-{
-    static QueryTreeNodePtr getColumnNodeAliasExpression(const QueryTreeNodePtr & node)
-    {
-        const auto * column_node = node->as<ColumnNode>();
-        if (!column_node || !column_node->hasExpression())
-            return nullptr;
-
-        const auto & column_source = column_node->getColumnSourceOrNull();
-        if (!column_source || column_source->getNodeType() == QueryTreeNodeType::JOIN
-            || column_source->getNodeType() == QueryTreeNodeType::ARRAY_JOIN)
-            return nullptr;
-
-        auto column_expression = column_node->getExpression();
-        column_expression->setAlias(column_node->getColumnName());
-        return column_expression;
-    }
-
-public:
-    void visitImpl(QueryTreeNodePtr & node)
-    {
-        if (auto column_expression = getColumnNodeAliasExpression(node))
-            node = column_expression;
-    }
-};
-
 QueryTreeNodePtr buildQueryTreeDistributed(SelectQueryInfo & query_info,
     const StorageSnapshotPtr & distributed_storage_snapshot,
     const StorageID & remote_storage_id,
@@ -822,8 +796,6 @@ QueryTreeNodePtr buildQueryTreeDistributed(SelectQueryInfo & query_info,
     replacement_table_expression->setAlias(query_info.table_expression->getAlias());
 
     auto query_tree_to_modify = query_info.query_tree->cloneAndReplace(query_info.table_expression, std::move(replacement_table_expression));
-    ReplaseAliasColumnsVisitor replase_alias_columns_visitor;
-    replase_alias_columns_visitor.visit(query_tree_to_modify);
 
     return buildQueryTreeForShard(query_info.planner_context, query_tree_to_modify);
 }
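In SQL terms, the removed ReplaseAliasColumnsVisitor rewrote references to ALIAS columns into their defining expressions before the query was sent to shards. A rough sketch with hypothetical table names, assuming a column `c UInt16 ALIAS a + b`:

    -- query over the Distributed table:
    SELECT c FROM dist_table;
    -- roughly what each shard received after the rewrite:
    SELECT a + b AS c FROM local_table;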
@@ -4,3 +4,4 @@ test_distributed_type_object/test.py::test_distributed_type_object
 test_merge_table_over_distributed/test.py::test_global_in
 test_merge_table_over_distributed/test.py::test_select_table_name_from_merge_over_distributed
 test_passing_max_partitions_to_read_remotely/test.py::test_default_database_on_cluster
+test_select_access_rights/test_main.py::test_alias_columns
@@ -21,6 +21,7 @@ class Labels(metaclass=WithIter):
     CI_SET_REDUCED = "ci_set_reduced"
     CI_SET_ARM = "ci_set_arm"
     CI_SET_INTEGRATION = "ci_set_integration"
+    CI_SET_ANALYZER = "ci_set_analyzer"
 
     libFuzzer = "libFuzzer"
 
@@ -398,6 +399,10 @@ bugfix_validate_check = DigestConfig(
     ],
 )
 # common test params
+compatibility_test_common_params = {
+    "digest": compatibility_check_digest,
+    "run_command": "compatibility_check.py",
+}
 statless_test_common_params = {
     "digest": stateless_check_digest,
     "run_command": 'functional_test_check.py "$CHECK_NAME" $KILL_TIMEOUT',
@@ -647,6 +652,16 @@ CI_CONFIG = CIConfig(
                 JobNames.INTEGRATION_TEST,
             ]
         ),
+        Labels.CI_SET_ANALYZER: LabelConfig(
+            run_jobs=[
+                JobNames.STYLE_CHECK,
+                JobNames.FAST_TEST,
+                Build.PACKAGE_RELEASE,
+                Build.PACKAGE_ASAN,
+                JobNames.STATELESS_TEST_ANALYZER_S3_REPLICATED_RELEASE,
+                JobNames.INTEGRATION_TEST_ASAN_ANALYZER,
+            ]
+        ),
         Labels.CI_SET_REDUCED: LabelConfig(
             run_jobs=[
                 job
@@ -1038,13 +1053,13 @@ CI_CONFIG = CIConfig(
         JobNames.COMPATIBILITY_TEST: TestConfig(
             Build.PACKAGE_RELEASE,
             job_config=JobConfig(
-                required_on_release_branch=True, digest=compatibility_check_digest
+                required_on_release_branch=True, **compatibility_test_common_params  # type: ignore
             ),
         ),
         JobNames.COMPATIBILITY_TEST_ARM: TestConfig(
             Build.PACKAGE_AARCH64,
             job_config=JobConfig(
-                required_on_release_branch=True, digest=compatibility_check_digest
+                required_on_release_branch=True, **compatibility_test_common_params  # type: ignore
            ),
        ),
        JobNames.UNIT_TEST: TestConfig(
@@ -2,6 +2,7 @@
 
 import argparse
 import logging
+import os
 import subprocess
 import sys
 from pathlib import Path
@@ -122,11 +123,7 @@ def get_run_commands_distributions(
 
 def parse_args():
     parser = argparse.ArgumentParser("Check compatibility with old distributions")
-    parser.add_argument("--check-name", required=True)
-    parser.add_argument("--check-glibc", action="store_true")
-    parser.add_argument(
-        "--check-distributions", action="store_true"
-    )  # currently hardcoded to x86, don't enable for ARM
+    parser.add_argument("--check-name", required=False)
     return parser.parse_args()
 
 
@@ -134,6 +131,13 @@ def main():
     logging.basicConfig(level=logging.INFO)
 
     args = parse_args()
+    check_name = args.check_name or os.getenv("CHECK_NAME")
+    assert check_name
+    check_glibc = True
+    # currently hardcoded to x86, don't enable for ARM
+    check_distributions = (
+        "aarch64" not in check_name.lower() and "arm64" not in check_name.lower()
+    )
 
     stopwatch = Stopwatch()
 
@@ -150,7 +154,7 @@ def main():
         "clickhouse-common-static_" in url or "clickhouse-server_" in url
     )
 
-    download_builds_filter(args.check_name, reports_path, packages_path, url_filter)
+    download_builds_filter(check_name, reports_path, packages_path, url_filter)
 
     for package in packages_path.iterdir():
         if package.suffix == ".deb":
@@ -166,11 +170,11 @@ def main():
 
     run_commands = []
 
-    if args.check_glibc:
+    if check_glibc:
         check_glibc_commands = get_run_commands_glibc(packages_path, result_path)
         run_commands.extend(check_glibc_commands)
 
-    if args.check_distributions:
+    if check_distributions:
         centos_image = pull_image(get_docker_image(IMAGE_CENTOS))
         ubuntu_image = pull_image(get_docker_image(IMAGE_UBUNTU))
         check_distributions_commands = get_run_commands_distributions(
@@ -195,9 +199,9 @@ def main():
 
     # See https://sourceware.org/glibc/wiki/Glibc%20Timeline
     max_glibc_version = ""
-    if "amd64" in args.check_name:
+    if "amd64" in check_name:
         max_glibc_version = "2.4"
-    elif "aarch64" in args.check_name:
+    elif "aarch64" in check_name:
         max_glibc_version = "2.18"  # because of build with newer sysroot?
     else:
         raise Exception("Can't determine max glibc version")
@@ -205,8 +209,8 @@ def main():
     state, description, test_results, additional_logs = process_result(
         result_path,
         server_log_path,
-        args.check_glibc,
-        args.check_distributions,
+        check_glibc,
+        check_distributions,
         max_glibc_version,
     )
 
@@ -25,7 +25,6 @@ from stopwatch import Stopwatch
 from tee_popen import TeePopen
 from upload_result_helper import upload_results
 
-NAME = "Push to Dockerhub"
 TEMP_PATH = Path(RUNNER_TEMP) / "docker_images_check"
 TEMP_PATH.mkdir(parents=True, exist_ok=True)
 
@@ -177,6 +176,9 @@ def main():
     stopwatch = Stopwatch()
 
     args = parse_args()
+
+    NAME = f"Push to Dockerhub {args.suffix}"
+
     if args.push:
         logging.info("login to docker hub")
         docker_login()
@@ -41,7 +41,7 @@ def started_cluster():
         CREATE TABLE mydb.filtered_table2 (a UInt8, b UInt8, c UInt8, d UInt8) ENGINE MergeTree ORDER BY a;
         INSERT INTO mydb.filtered_table2 values (0, 0, 0, 0), (1, 2, 3, 4), (4, 3, 2, 1), (0, 0, 6, 0);
 
-        CREATE TABLE mydb.filtered_table3 (a UInt8, b UInt8, bb ALIAS b + 1, c UInt16 ALIAS a + bb - 1) ENGINE MergeTree ORDER BY a;
+        CREATE TABLE mydb.filtered_table3 (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a;
         INSERT INTO mydb.filtered_table3 values (0, 0), (0, 1), (1, 0), (1, 1);
 
         CREATE TABLE mydb.`.filtered_table4` (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a;
@@ -60,7 +60,7 @@ def started_cluster():
         CREATE TABLE mydb.filtered_table2 (a UInt8, b UInt8, c UInt8, d UInt8) ENGINE MergeTree ORDER BY a;
         INSERT INTO mydb.filtered_table2 values (0, 0, 0, 0), (1, 2, 3, 4), (4, 3, 2, 1), (0, 0, 6, 0);
 
-        CREATE TABLE mydb.filtered_table3 (a UInt8, b UInt8, bb ALIAS b + 1, c UInt16 ALIAS a + bb - 1) ENGINE MergeTree ORDER BY a;
+        CREATE TABLE mydb.filtered_table3 (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a;
         INSERT INTO mydb.filtered_table3 values (0, 0), (0, 1), (1, 0), (1, 1);
 
         CREATE TABLE mydb.`.filtered_table4` (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE MergeTree ORDER BY a;
@@ -113,7 +113,6 @@ def test_smoke():
 
     assert node.query("SELECT a FROM mydb.filtered_table3") == TSV([[0], [1]])
    assert node.query("SELECT b FROM mydb.filtered_table3") == TSV([[1], [0]])
-    assert node.query("SELECT bb FROM mydb.filtered_table3") == TSV([[2], [1]])
     assert node.query("SELECT c FROM mydb.filtered_table3") == TSV([[1], [1]])
     assert node.query("SELECT a + b FROM mydb.filtered_table3") == TSV([[1], [1]])
     assert node.query("SELECT a FROM mydb.filtered_table3 WHERE c = 1") == TSV(
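For reference, `c UInt16 ALIAS a + b` in the DDL above is not stored: it is computed from `a` and `b` on read, which is what the expected values in these assertions reflect:

    SELECT c FROM mydb.filtered_table3;              -- computed as a + b per row
    SELECT a FROM mydb.filtered_table3 WHERE c = 1;  -- an ALIAS column is usable in WHERE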
@@ -1,63 +1,63 @@
 1
-1979-12-12 21:21:21.127 3.7 0
-2299-12-31 23:37:36.788 1.1 0
-2299-12-31 23:37:36.789 2.34 0
+1979-12-12 21:21:21.123 1.1 0
+1979-12-12 21:21:21.123 2.34 0
+1979-12-12 21:21:21.127 3.7 340.00000000000006
 1979-12-12 21:21:21.129 2.1 0
 1979-12-12 21:21:22.000 1.3345 0
 1979-12-12 21:21:23.000 1.54 0.20550000000000002
 1979-12-12 21:21:23.000 1.54 0
-1979-12-12 21:21:21.127 3.7 0
-2299-12-31 23:37:36.788 1.1 0
-2299-12-31 23:37:36.789 2.34 0
+1979-12-12 21:21:21.123 1.1 0
+1979-12-12 21:21:21.123 2.34 0
+1979-12-12 21:21:21.127 3.7 0.0000010200000000000004
 1979-12-12 21:21:21.129 2.1 0
 1979-12-12 21:21:22.000 1.3345 0
 1979-12-12 21:21:23.000 1.54 6.165000000000001e-10
 1979-12-12 21:21:23.000 1.54 0
-1979-12-12 21:21:21.127 3.7 0
-2299-12-31 23:37:36.788 1.1 0
-2299-12-31 23:37:36.789 2.34 0
+1979-12-12 21:21:21.123 1.1 0
+1979-12-12 21:21:21.123 2.34 0
+1979-12-12 21:21:21.127 3.7 0.00136
 1979-12-12 21:21:21.129 2.1 0
 1979-12-12 21:21:22.000 1.3345 0
 1979-12-12 21:21:23.000 1.54 8.22e-7
 1979-12-12 21:21:23.000 1.54 0
-1979-12-12 21:21:21.127 3.7 0
-2299-12-31 23:37:36.788 1.1 0
-2299-12-31 23:37:36.789 2.34 0
+1979-12-12 21:21:21.123 1.1 0
+1979-12-12 21:21:21.123 2.34 0
+1979-12-12 21:21:21.127 3.7 1.7000000000000004
 1979-12-12 21:21:21.129 2.1 0
 1979-12-12 21:21:22.000 1.3345 0
 1979-12-12 21:21:23.000 1.54 0.0010275000000000002
 1979-12-12 21:21:23.000 1.54 0
-1979-12-12 21:21:21.127 3.7 0
-2299-12-31 23:37:36.788 1.1 0
-2299-12-31 23:37:36.789 2.34 0
+1979-12-12 21:21:21.123 1.1 0
+1979-12-12 21:21:21.123 2.34 0
+1979-12-12 21:21:21.127 3.7 2040.0000000000005
 1979-12-12 21:21:21.129 2.1 0
 1979-12-12 21:21:22.000 1.3345 0
 1979-12-12 21:21:23.000 1.54 1.233
 1979-12-12 21:21:23.000 1.54 0
-1979-12-12 21:21:21.127 3.7 0
-2299-12-31 23:37:36.788 1.1 0
-2299-12-31 23:37:36.789 2.34 0
+1979-12-12 21:21:21.123 1.1 0
+1979-12-12 21:21:21.123 2.34 0
+1979-12-12 21:21:21.127 3.7 142800.00000000003
 1979-12-12 21:21:21.129 2.1 0
 1979-12-12 21:21:22.000 1.3345 0
 1979-12-12 21:21:23.000 1.54 86.31
 1979-12-12 21:21:23.000 1.54 0
-1979-12-12 21:21:21.127 3.7 0
-2299-12-31 23:37:36.788 1.1 0
-2299-12-31 23:37:36.789 2.34 0
+1979-12-12 21:21:21.123 1.1 0
+1979-12-12 21:21:21.123 2.34 0
+1979-12-12 21:21:21.127 3.7 9792000.000000002
 1979-12-12 21:21:21.129 2.1 0
 1979-12-12 21:21:22.000 1.3345 0
 1979-12-12 21:21:23.000 1.54 5918.400000000001
 1979-12-12 21:21:23.000 1.54 0
-1979-12-12 21:21:21.127 3.7 0
-2299-12-31 23:37:36.788 1.1 0
-2299-12-31 23:37:36.789 2.34 0
+1979-12-12 21:21:21.123 1.1 0
+1979-12-12 21:21:21.123 2.34 0
+1979-12-12 21:21:21.127 3.7 264384000.00000003
 1979-12-12 21:21:21.129 2.1 0
 1979-12-12 21:21:22.000 1.3345 0
 1979-12-12 21:21:23.000 1.54 159796.80000000002
 1979-12-12 21:21:23.000 1.54 0
-1979-12-12 21:21:21.127 3.7 0
-2299-12-31 23:37:36.788 1.1 0
-2299-12-31 23:37:36.789 2.34 0
+1979-12-12 21:21:21.123 1.1 0
+1979-12-12 21:21:21.123 2.34 0
+1979-12-12 21:21:21.127 3.7 2056320000.0000002
 1979-12-12 21:21:21.129 2.1 0
 1979-12-12 21:21:22.000 1.3345 0
 1979-12-12 21:21:23.000 1.54 1242864
@@ -18,7 +18,7 @@ SELECT (
     SELECT
         ts,
         metric,
-        nonNegativeDerivative(metric, ts) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv
+        nonNegativeDerivative(metric, ts) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv
     FROM nnd
     LIMIT 5, 1
 ) = (
@@ -29,37 +29,37 @@ SELECT (
     FROM nnd
     LIMIT 5, 1
 );
-SELECT ts, metric, nonNegativeDerivative(metric, ts) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd;
+SELECT ts, metric, nonNegativeDerivative(metric, ts) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd;
 -- Nanosecond
-SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 3 NANOSECOND) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS deriv FROM nnd;
+SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 3 NANOSECOND) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS deriv FROM nnd;
 -- Microsecond
-SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 4 MICROSECOND) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd;
+SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 4 MICROSECOND) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd;
 -- Millisecond
-SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 5 MILLISECOND) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd;
+SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 5 MILLISECOND) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd;
 -- Second
-SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 6 SECOND) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd;
+SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 6 SECOND) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd;
 -- Minute
-SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 7 MINUTE) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING) AS deriv FROM nnd;
+SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 7 MINUTE) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING) AS deriv FROM nnd;
 -- Hour
-SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 8 HOUR) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd;
+SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 8 HOUR) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd;
 -- Day
-SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 9 DAY) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 3 PRECEDING AND 3 FOLLOWING) AS deriv FROM nnd;
+SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 9 DAY) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 3 PRECEDING AND 3 FOLLOWING) AS deriv FROM nnd;
 -- Week
-SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 10 WEEK) OVER (PARTITION BY id>3 ORDER BY ts ASC Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd;
+SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 10 WEEK) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd;
 
 -- shall not work for month, quarter, year (intervals with floating number of seconds)
 -- Month
-SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 11 MONTH) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 11 MONTH) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
 -- Quarter
-SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 12 QUARTER) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 12 QUARTER) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
 -- Year
-SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 13 YEAR) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 13 YEAR) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
 
 -- test against wrong arguments/types
-SELECT ts, metric, nonNegativeDerivative(metric, 1, INTERVAL 3 NANOSECOND) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS }
+SELECT ts, metric, nonNegativeDerivative(metric, 1, INTERVAL 3 NANOSECOND) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS }
-SELECT ts, metric, nonNegativeDerivative('string not datetime', ts, INTERVAL 3 NANOSECOND) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS }
+SELECT ts, metric, nonNegativeDerivative('string not datetime', ts, INTERVAL 3 NANOSECOND) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS }
-SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 3 NANOSECOND, id) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS }
+SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 3 NANOSECOND, id) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS }
-SELECT ts, metric, nonNegativeDerivative(metric) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS }
+SELECT ts, metric, nonNegativeDerivative(metric) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS }
 
 -- cleanup
 DROP TABLE IF EXISTS nnd;
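The `ORDER BY ts` to `ORDER BY ts, metric` change above makes the window frame order deterministic: the expected output contains two rows with the same ts (1979-12-12 21:21:21.123), and with `ts` alone their relative order, and therefore the computed derivative, is unspecified. A minimal sketch of the stabilized call:

    SELECT ts, metric,
        nonNegativeDerivative(metric, ts)
            OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv
    FROM nnd;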
@@ -55,33 +55,33 @@ Header: a2 String
 Header: __table1.a2 String
 __table1.k UInt64
 __table4.d2 String
-Expression (DROP unused columns after JOIN)
+Expression ((Actions for left table alias column keys + DROP unused columns after JOIN))
 Header: __table1.a2 String
 __table1.k UInt64
 Join (JOIN FillRightFirst)
 Header: __table1.a2 String
 __table1.k UInt64
-Expression (DROP unused columns after JOIN)
+Expression ((Actions for left table alias column keys + DROP unused columns after JOIN))
 Header: __table1.a2 String
 __table1.k UInt64
 Join (JOIN FillRightFirst)
 Header: __table1.a2 String
 __table1.k UInt64
-Expression (Change column names to column identifiers)
+Expression ((Actions for left table alias column keys + Change column names to column identifiers))
 Header: __table1.a2 String
 __table1.k UInt64
 ReadFromMemoryStorage
 Header: a2 String
 k UInt64
-Expression (Change column names to column identifiers)
+Expression ((Actions for right table alias column keys + Change column names to column identifiers))
 Header: __table2.k UInt64
 ReadFromMemoryStorage
 Header: k UInt64
-Expression (Change column names to column identifiers)
+Expression ((Actions for right table alias column keys + Change column names to column identifiers))
 Header: __table3.k UInt64
 ReadFromMemoryStorage
 Header: k UInt64
-Expression (Change column names to column identifiers)
+Expression ((Actions for right table alias column keys + Change column names to column identifiers))
 Header: __table4.d2 String
 __table4.k UInt64
 ReadFromMemoryStorage
@@ -1,37 +0,0 @@
-The reference time point is 2023-06-30 23:59:30
-───────────────────────────────────────────────
-The argument is before the reference time point
-───────────────────────────────────────────────
-Row 1:
-──────
-syslog_arg: Jun 30 23:58:30
-res: 2023-06-30 23:58:30
-res_null: 2023-06-30 23:58:30
-res_zero: 2023-06-30 23:58:30
-res_us: 2023-06-30 23:58:30
-res_us_null: 2023-06-30 23:58:30
-res_us_zero: 2023-06-30 23:58:30
-res64: 2023-06-30 23:58:30.000
-res64_null: 2023-06-30 23:58:30.000
-res64_zero: 2023-06-30 23:58:30.000
-res64_us: 2023-06-30 23:58:30.000
-res64_us_null: 2023-06-30 23:58:30.000
-res64_us_zero: 2023-06-30 23:58:30.000
-──────────────────────────────────────────────
-The argument is after the reference time point
-──────────────────────────────────────────────
-Row 1:
-──────
-syslog_arg: Jul 1 00:00:30
-res: 2022-07-01 00:00:30
-res_null: 2022-07-01 00:00:30
-res_zero: 2022-07-01 00:00:30
-res_us: 2022-07-01 00:00:30
-res_us_null: 2022-07-01 00:00:30
-res_us_zero: 2022-07-01 00:00:30
-res64: 2022-07-01 00:00:30.000
-res64_null: 2022-07-01 00:00:30.000
-res64_zero: 2022-07-01 00:00:30.000
-res64_us: 2022-07-01 00:00:30.000
-res64_us_null: 2022-07-01 00:00:30.000
-res64_us_zero: 2022-07-01 00:00:30.000
@@ -1,54 +0,0 @@
-SET session_timezone = 'UTC';
-
-SELECT 'The reference time point is 2023-06-30 23:59:30';
-SELECT '───────────────────────────────────────────────';
-SELECT 'The argument is before the reference time point';
-SELECT '───────────────────────────────────────────────';
-
-WITH
-    toDateTime('2023-06-30 23:59:30') AS dt_ref,
-    now() AS dt_now,
-    date_sub(MINUTE, 1, dt_now) as dt_before,
-    dateDiff(SECOND, dt_ref, dt_now) AS time_shift,
-    formatDateTime(dt_before, '%b %e %T') AS syslog_before
-SELECT
-    formatDateTime(dt_before - time_shift, '%b %e %T') AS syslog_arg,
-    parseDateTimeBestEffort(syslog_before) - time_shift AS res,
-    parseDateTimeBestEffortOrNull(syslog_before) - time_shift AS res_null,
-    parseDateTimeBestEffortOrZero(syslog_before) - time_shift AS res_zero,
-    parseDateTimeBestEffortUS(syslog_before) - time_shift AS res_us,
-    parseDateTimeBestEffortUSOrNull(syslog_before) - time_shift AS res_us_null,
-    parseDateTimeBestEffortUSOrZero(syslog_before) - time_shift AS res_us_zero,
-    parseDateTime64BestEffort(syslog_before) - time_shift AS res64,
-    parseDateTime64BestEffortOrNull(syslog_before) - time_shift AS res64_null,
-    parseDateTime64BestEffortOrZero(syslog_before) - time_shift AS res64_zero,
-    parseDateTime64BestEffortUS(syslog_before) - time_shift AS res64_us,
-    parseDateTime64BestEffortUSOrNull(syslog_before) - time_shift AS res64_us_null,
-    parseDateTime64BestEffortUSOrZero(syslog_before) - time_shift AS res64_us_zero
-FORMAT Vertical;
-
-SELECT '──────────────────────────────────────────────';
-SELECT 'The argument is after the reference time point';
-SELECT '──────────────────────────────────────────────';
-
-WITH
-    toDateTime('2023-06-30 23:59:30') AS dt_ref,
-    now() AS dt_now,
-    date_add(MINUTE, 1, dt_now) as dt_after,
-    dateDiff(SECOND, dt_ref, dt_now) AS time_shift,
-    formatDateTime(dt_after, '%b %e %T') AS syslog_after
-SELECT
-    formatDateTime(dt_after - time_shift, '%b %e %T') AS syslog_arg,
-    parseDateTimeBestEffort(syslog_after) - time_shift AS res,
-    parseDateTimeBestEffortOrNull(syslog_after) - time_shift AS res_null,
-    parseDateTimeBestEffortOrZero(syslog_after) - time_shift AS res_zero,
-    parseDateTimeBestEffortUS(syslog_after) - time_shift AS res_us,
-    parseDateTimeBestEffortUSOrNull(syslog_after) - time_shift AS res_us_null,
-    parseDateTimeBestEffortUSOrZero(syslog_after) - time_shift AS res_us_zero,
-    parseDateTime64BestEffort(syslog_after) - time_shift AS res64,
-    parseDateTime64BestEffortOrNull(syslog_after) - time_shift AS res64_null,
-    parseDateTime64BestEffortOrZero(syslog_after) - time_shift AS res64_zero,
-    parseDateTime64BestEffortUS(syslog_after) - time_shift AS res64_us,
-    parseDateTime64BestEffortUSOrNull(syslog_after) - time_shift AS res64_us_null,
-    parseDateTime64BestEffortUSOrZero(syslog_after) - time_shift AS res64_us_zero
-FORMAT Vertical;
@@ -14,13 +14,13 @@ Expression ((Projection + Before ORDER BY))
 Parts: 1/1
 Granules: 1/1
 Expression ((Project names + Projection))
-Filter ((WHERE + (Change column names to column identifiers + Compute alias columns)))
+Filter ((WHERE + Change column names to column identifiers))
 ReadFromMergeTree (02911_support_alias_column_in_indices.test1)
 Indexes:
 PrimaryKey
 Keys:
 c
-Condition: (plus(c, 1) in [11, +Inf))
+Condition: (_CAST(plus(c, 1), \'UInt64\') in [11, +Inf))
 Parts: 1/2
 Granules: 1/2
 Skip
@@ -44,17 +44,12 @@ Expression ((Projection + Before ORDER BY))
 Parts: 1/1
 Granules: 1/1
 Expression ((Project names + Projection))
-Filter ((WHERE + (Change column names to column identifiers + Compute alias columns)))
+Filter ((WHERE + Change column names to column identifiers))
 ReadFromMergeTree (02911_support_alias_column_in_indices.test2)
 Indexes:
 PrimaryKey
 Keys:
 c
-Condition: (plus(plus(c, 1), 1) in [16, +Inf))
+Condition: (_CAST(plus(_CAST(plus(c, 1), \'UInt64\'), 1), \'UInt64\') in [16, +Inf))
 Parts: 1/2
 Granules: 1/2
-Skip
-Name: i
-Description: minmax GRANULARITY 1
-Parts: 1/1
-Granules: 1/1
@@ -0,0 +1,100 @@
+2023-01-01 00:00:00
+2023-01-01 00:00:00
+2023-01-01 01:01:01
+2023-01-01 01:01:01
+2023-01-02 02:02:02
+2023-01-02 02:02:02
+2023-01-03 03:03:03
+2023-01-03 03:03:03
+2023-01-04 04:04:04
+2023-01-04 04:04:04
+2023-01-05 05:05:05
+2023-01-05 05:05:05
+2023-01-06 06:06:06
+2023-01-06 06:06:06
+2023-01-07 07:07:07
+2023-01-07 07:07:07
+2023-01-08 08:08:08
+2023-01-08 08:08:08
+2023-01-09 09:09:09
+2023-01-09 09:09:09
+2023-01-01 00:00:00.00
+2023-01-01 00:00:00.00
+2023-01-01 01:01:01.00
+2023-01-01 01:01:01.10
+2023-01-02 02:02:02.00
+2023-01-02 02:02:02.12
+2023-01-03 03:03:03.00
+2023-01-03 03:03:03.12
+2023-01-04 04:04:04.00
+2023-01-04 04:04:04.12
+2023-01-05 05:05:05.00
+2023-01-05 05:05:05.12
+2023-01-06 06:06:06.00
+2023-01-06 06:06:06.12
+2023-01-07 07:07:07.00
+2023-01-07 07:07:07.12
+2023-01-08 08:08:08.00
+2023-01-08 08:08:08.12
+2023-01-09 09:09:09.00
+2023-01-09 09:09:09.12
+2023-01-01 00:00:00.000
+2023-01-01 00:00:00.000
+2023-01-01 01:01:01.000
+2023-01-01 01:01:01.100
+2023-01-02 02:02:02.000
+2023-01-02 02:02:02.120
+2023-01-03 03:03:03.000
+2023-01-03 03:03:03.123
+2023-01-04 04:04:04.000
+2023-01-04 04:04:04.123
+2023-01-05 05:05:05.000
+2023-01-05 05:05:05.123
+2023-01-06 06:06:06.000
+2023-01-06 06:06:06.123
+2023-01-07 07:07:07.000
+2023-01-07 07:07:07.123
+2023-01-08 08:08:08.000
+2023-01-08 08:08:08.123
+2023-01-09 09:09:09.000
+2023-01-09 09:09:09.123
+2023-01-01 00:00:00.000000
+2023-01-01 00:00:00.000000
+2023-01-01 01:01:01.000000
+2023-01-01 01:01:01.100000
+2023-01-02 02:02:02.000000
+2023-01-02 02:02:02.120000
+2023-01-03 03:03:03.000000
+2023-01-03 03:03:03.123000
+2023-01-04 04:04:04.000000
+2023-01-04 04:04:04.123400
+2023-01-05 05:05:05.000000
+2023-01-05 05:05:05.123450
+2023-01-06 06:06:06.000000
+2023-01-06 06:06:06.123456
+2023-01-07 07:07:07.000000
+2023-01-07 07:07:07.123456
+2023-01-08 08:08:08.000000
+2023-01-08 08:08:08.123456
+2023-01-09 09:09:09.000000
+2023-01-09 09:09:09.123456
+2023-01-01 00:00:00.000000
+2023-01-01 00:00:00.000000
+2023-01-01 01:01:01.000000
+2023-01-01 01:01:01.100000
+2023-01-02 02:02:02.000000
+2023-01-02 02:02:02.120000
+2023-01-03 03:03:03.000000
+2023-01-03 03:03:03.123000
+2023-01-04 04:04:04.000000
+2023-01-04 04:04:04.123400
+2023-01-05 05:05:05.000000
+2023-01-05 05:05:05.123450
+2023-01-06 06:06:06.000000
+2023-01-06 06:06:06.123456
+2023-01-07 07:07:07.000000
+2023-01-07 07:07:07.123456
+2023-01-08 08:08:08.000000
+2023-01-08 08:08:08.123456
+2023-01-09 09:09:09.000000
+2023-01-09 09:09:09.123456
@@ -0,0 +1,124 @@
+DROP TABLE IF EXISTS test_0;
+CREATE TABLE IF NOT EXISTS test_0 (a DateTime64(0)) engine = MergeTree order by a;
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-01 00:00:00', 0));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-01 01:01:01', 1));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-02 02:02:02', 2));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-03 03:03:03', 3));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-04 04:04:04', 4));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-05 05:05:05', 5));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-06 06:06:06', 6));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-07 07:07:07', 7));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-08 08:08:08', 8));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-09 09:09:09', 9));
+INSERT INTO test_0 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9));
+SELECT * FROM test_0 ORDER BY a;
+DROP TABLE test_0;
+
+DROP TABLE IF EXISTS test_2;
+CREATE TABLE IF NOT EXISTS test_2 (a DateTime64(2)) engine = MergeTree order by a;
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-01 00:00:00', 0));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-01 01:01:01', 1));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-02 02:02:02', 2));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-03 03:03:03', 3));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-04 04:04:04', 4));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-05 05:05:05', 5));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-06 06:06:06', 6));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-07 07:07:07', 7));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-08 08:08:08', 8));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-09 09:09:09', 9));
+INSERT INTO test_2 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9));
+SELECT * FROM test_2 ORDER BY a;
+DROP TABLE test_2;
+
+DROP TABLE IF EXISTS test_3;
+CREATE TABLE IF NOT EXISTS test_3 (a DateTime64(3)) engine = MergeTree order by a;
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-01 00:00:00', 0));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-01 01:01:01', 1));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-02 02:02:02', 2));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-03 03:03:03', 3));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-04 04:04:04', 4));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-05 05:05:05', 5));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-06 06:06:06', 6));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-07 07:07:07', 7));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-08 08:08:08', 8));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-09 09:09:09', 9));
+INSERT INTO test_3 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9));
+SELECT * FROM test_3 ORDER BY a;
+DROP TABLE test_3;
+
+DROP TABLE IF EXISTS test_6;
+CREATE TABLE IF NOT EXISTS test_6 (a DateTime64(6)) engine = MergeTree order by a;
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-01 00:00:00', 0));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-01 01:01:01', 1));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-02 02:02:02', 2));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-03 03:03:03', 3));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-04 04:04:04', 4));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-05 05:05:05', 5));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-06 06:06:06', 6));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-07 07:07:07', 7));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-08 08:08:08', 8));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-09 09:09:09', 9));
+INSERT INTO test_6 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9));
+SELECT * FROM test_6 ORDER BY a;
+DROP TABLE test_6;
+
+DROP TABLE IF EXISTS test_9;
+CREATE TABLE IF NOT EXISTS test_9 (a DateTime64(6)) engine = MergeTree order by a;
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-01 00:00:00', 0));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-01 01:01:01', 1));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-02 02:02:02', 2));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-03 03:03:03', 3));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-04 04:04:04', 4));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-05 05:05:05', 5));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-06 06:06:06', 6));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-07 07:07:07', 7));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-08 08:08:08', 8));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-09 09:09:09', 9));
+INSERT INTO test_9 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9));
+SELECT * FROM test_9 ORDER BY a;
+DROP TABLE test_9;
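The test above checks that a value with more fractional digits than the column scale is truncated to that scale; for example, for the DateTime64(2) case:

    CREATE TABLE IF NOT EXISTS test_2 (a DateTime64(2)) engine = MergeTree order by a;
    INSERT INTO test_2 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2));
    SELECT * FROM test_2 ORDER BY a;  -- 2023-01-02 02:02:02.12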
@@ -0,0 +1,21 @@
+CREATE TABLE attach_partition_t7 (
+    a UInt32,
+    b UInt32
+)
+ENGINE = MergeTree
+PARTITION BY a ORDER BY a;
+
+ALTER TABLE attach_partition_t7
+    ADD COLUMN mat_column
+    UInt32 MATERIALIZED a+b;
+
+insert into attach_partition_t7 values (1, 2);
+
+CREATE TABLE attach_partition_t8 (
+    a UInt32,
+    b UInt32
+)
+ENGINE = MergeTree
+PARTITION BY a ORDER BY a;
+
+ALTER TABLE attach_partition_t8 ATTACH PARTITION ID '1' FROM attach_partition_t7; -- {serverError INCOMPATIBLE_COLUMNS};
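ATTACH PARTITION ... FROM requires both tables to have the same column structure; attach_partition_t7 has mat_column while attach_partition_t8 does not, hence INCOMPATIBLE_COLUMNS. A sketch of what would let the attach succeed (an assumption, not part of the test):

    ALTER TABLE attach_partition_t8 ADD COLUMN mat_column UInt32 MATERIALIZED a+b;
    ALTER TABLE attach_partition_t8 ATTACH PARTITION ID '1' FROM attach_partition_t7;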