From dd6503bb2ba0cb8bbedcc807df7ebe77fc0310c5 Mon Sep 17 00:00:00 2001
From: avogar
Date: Wed, 18 Sep 2024 14:10:03 +0000
Subject: [PATCH 001/353] Don't allow Variant/Dynamic types in ORDER BY/GROUP BY/PARTITION BY/PRIMARY KEY by default

---
 docs/en/operations/settings/settings.md       |  22 +++
 docs/en/sql-reference/data-types/dynamic.md   |   3 +
 docs/en/sql-reference/data-types/variant.md   |   2 +
 src/Analyzer/Resolve/QueryAnalyzer.cpp        |  52 ++++-
 src/Analyzer/Resolve/QueryAnalyzer.h          |   4 +
 src/Core/Settings.h                           |   3 +
 src/Interpreters/ExpressionAnalyzer.cpp       |  42 ++++
 src/Interpreters/ExpressionAnalyzer.h         |   2 +
 src/Storages/KeyDescription.cpp               |   9 +
 ...mic_variant_in_order_by_group_by.reference | 184 ++++++++++++++++++
 ...1_dynamic_variant_in_order_by_group_by.sql | 154 +++++++++++++++
 11 files changed, 472 insertions(+), 5 deletions(-)
 create mode 100644 tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference
 create mode 100644 tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql

diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index b177ded3e32..302bc8da78f 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -5682,3 +5682,25 @@ Default value: `0`.
 Enable `IF NOT EXISTS` for `CREATE` statement by default. If either this setting or `IF NOT EXISTS` is specified and a table with the provided name already exists, no exception will be thrown.
 
 Default value: `false`.
+
+## allow_suspicious_types_in_group_by {#allow_suspicious_types_in_group_by}
+
+Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in GROUP BY keys.
+
+Possible values:
+
+- 1 — Usage of `Variant` and `Dynamic` types is not restricted.
+- 0 — Usage of `Variant` and `Dynamic` types is restricted.
+
+Default value: 0.
+
+## allow_suspicious_types_in_group_by {#allow_suspicious_types_in_group_by}
+
+Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in GROUP BY keys.
+
+Possible values:
+
+- 1 — Usage of `Variant` and `Dynamic` types is not restricted.
+- 0 — Usage of `Variant` and `Dynamic` types is restricted.
+
+Default value: 0.
diff --git a/docs/en/sql-reference/data-types/dynamic.md b/docs/en/sql-reference/data-types/dynamic.md
index f9befd166fe..4d0bf073535 100644
--- a/docs/en/sql-reference/data-types/dynamic.md
+++ b/docs/en/sql-reference/data-types/dynamic.md
@@ -411,6 +411,9 @@ SELECT d, dynamicType(d) FROM test ORDER by d;
 └─────┴────────────────┘
 ```

+**Note:** By default, the `Dynamic` type is not allowed in `GROUP BY`/`ORDER BY` keys. If you want to use it, take its special comparison rule into account and enable the `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings.
+
+
 ## Reaching the limit in number of different data types stored inside Dynamic

 `Dynamic` data type can store only limited number of different data types as separate subcolumns. By default, this limit is 32, but you can change it in type declaration using syntax `Dynamic(max_types=N)` where N is between 0 and 254 (due to implementation details, it's impossible to have more than 254 different data types that can be stored as separate subcolumns inside Dynamic).
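The restriction described in the note above can be seen end to end in a short session. This is an illustrative sketch only (the table name and inserted values are made up; the queries mirror the 03231 test added by this patch):

```sql
SET allow_experimental_dynamic_type = 1;

CREATE TABLE example (id UInt64, d Dynamic) ENGINE = Memory;
INSERT INTO example VALUES (1, 42), (2, 'str'), (3, [1, 2, 3]);

-- With the default settings both queries are rejected with ILLEGAL_COLUMN.
SELECT d FROM example ORDER BY d;
SELECT d, count() FROM example GROUP BY d;

-- Enabling the new settings explicitly allows the suspicious keys again.
SELECT d FROM example ORDER BY d SETTINGS allow_suspicious_types_in_order_by = 1;
SELECT d, count() FROM example GROUP BY d SETTINGS allow_suspicious_types_in_group_by = 1;
```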
diff --git a/docs/en/sql-reference/data-types/variant.md b/docs/en/sql-reference/data-types/variant.md
index 3c2b6e0a362..7cb0f4ad4ea 100644
--- a/docs/en/sql-reference/data-types/variant.md
+++ b/docs/en/sql-reference/data-types/variant.md
@@ -441,6 +441,8 @@ SELECT v, variantType(v) FROM test ORDER by v;
 └─────┴────────────────┘
 ```

+**Note:** By default, the `Variant` type is not allowed in `GROUP BY`/`ORDER BY` keys. If you want to use it, take its special comparison rule into account and enable the `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings.
+
 ## JSONExtract functions with Variant

 All `JSONExtract*` functions support `Variant` type:
diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp
index a18c2901a58..304338109c1 100644
--- a/src/Analyzer/Resolve/QueryAnalyzer.cpp
+++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp
@@ -3962,6 +3962,8 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
             sort_node.getExpression() = sort_column_list_node->getNodes().front();
         }

+        validateSortingKeyType(sort_node.getExpression()->getResultType(), scope);
+
         size_t sort_expression_projection_names_size = sort_expression_projection_names.size();
         if (sort_expression_projection_names_size != 1)
             throw Exception(ErrorCodes::LOGICAL_ERROR,
@@ -4047,6 +4049,24 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
     return result_projection_names;
 }

+void QueryAnalyzer::validateSortingKeyType(const DataTypePtr & sorting_key_type, const IdentifierResolveScope & scope) const
+{
+    if (scope.context->getSettingsRef().allow_suspicious_types_in_order_by)
+        return;
+
+    auto check = [](const IDataType & type)
+    {
+        if (isDynamic(type) || isVariant(type))
+            throw Exception(
+                ErrorCodes::ILLEGAL_COLUMN,
+                "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. "
+                "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it");
+    };
+
+    check(*sorting_key_type);
+    sorting_key_type->forEachChild(check);
+}
+
 namespace
 {

@@ -4086,11 +4106,12 @@ void QueryAnalyzer::resolveGroupByNode(QueryNode & query_node_typed, IdentifierR
             expandTuplesInList(group_by_list);
         }

-        if (scope.group_by_use_nulls)
+        for (const auto & grouping_set : query_node_typed.getGroupBy().getNodes())
         {
-            for (const auto & grouping_set : query_node_typed.getGroupBy().getNodes())
+            for (const auto & group_by_elem : grouping_set->as()->getNodes())
             {
-                for (const auto & group_by_elem : grouping_set->as()->getNodes())
+                validateGroupByKeyType(group_by_elem->getResultType(), scope);
+                if (scope.group_by_use_nulls)
                     scope.nullable_group_by_keys.insert(group_by_elem);
             }
         }
@@ -4106,14 +4127,35 @@ void QueryAnalyzer::resolveGroupByNode(QueryNode & query_node_typed, IdentifierR
         auto & group_by_list = query_node_typed.getGroupBy().getNodes();
         expandTuplesInList(group_by_list);

-        if (scope.group_by_use_nulls)
+        for (const auto & group_by_elem : query_node_typed.getGroupBy().getNodes())
         {
-            for (const auto & group_by_elem : query_node_typed.getGroupBy().getNodes())
+            validateGroupByKeyType(group_by_elem->getResultType(), scope);
+            if (scope.group_by_use_nulls)
                 scope.nullable_group_by_keys.insert(group_by_elem);
         }
     }
 }

+/** Validate data types of GROUP BY key.
+ */ +void QueryAnalyzer::validateGroupByKeyType(const DataTypePtr & group_by_key_type, const IdentifierResolveScope & scope) const +{ + if (scope.context->getSettingsRef().allow_suspicious_types_in_group_by) + return; + + auto check = [](const IDataType & type) + { + if (isDynamic(type) || isVariant(type)) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. " + "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); + }; + + check(*group_by_key_type); + group_by_key_type->forEachChild(check); +} + /** Resolve interpolate columns nodes list. */ void QueryAnalyzer::resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpolate_node_list, IdentifierResolveScope & scope) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.h b/src/Analyzer/Resolve/QueryAnalyzer.h index 7f9088b35e5..c90ded09876 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.h +++ b/src/Analyzer/Resolve/QueryAnalyzer.h @@ -217,8 +217,12 @@ private: ProjectionNames resolveSortNodeList(QueryTreeNodePtr & sort_node_list, IdentifierResolveScope & scope); + void validateSortingKeyType(const DataTypePtr & sorting_key_type, const IdentifierResolveScope & scope) const; + void resolveGroupByNode(QueryNode & query_node_typed, IdentifierResolveScope & scope); + void validateGroupByKeyType(const DataTypePtr & group_by_key_type, const IdentifierResolveScope & scope) const; + void resolveInterpolateColumnsNodeList(QueryTreeNodePtr & interpolate_node_list, IdentifierResolveScope & scope); void resolveWindowNodeList(QueryTreeNodePtr & window_node_list, IdentifierResolveScope & scope); diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 23dc2a8fdc5..a3c58144fd0 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -389,6 +389,9 @@ class IColumn; M(Bool, prefer_global_in_and_join, false, "If enabled, all IN/JOIN operators will be rewritten as GLOBAL IN/JOIN. It's useful when the to-be-joined tables are only available on the initiator and we need to always scatter their data on-the-fly during distributed processing with the GLOBAL keyword. It's also useful to reduce the need to access the external sources joining external tables.", 0) \ M(Bool, enable_vertical_final, true, "If enable, remove duplicated rows during FINAL by marking rows as deleted and filtering them later instead of merging rows", 0) \ \ + M(Bool, allow_suspicious_types_in_group_by, false, "Allow suspicious types like Variant/Dynamic in GROUP BY clause", 0) \ + M(Bool, allow_suspicious_types_in_order_by, false, "Allow suspicious types like Variant/Dynamic in ORDER BY clause", 0) \ + \ \ /** Limits during query execution are part of the settings. \ * Used to provide a more safe execution of queries from the user interface. 
\ diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 7063b2162a0..166b6619bdc 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1367,6 +1367,9 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain } } + for (const auto & result_column : step.getResultColumns()) + validateGroupByKeyType(result_column.type); + if (optimize_aggregation_in_order) { for (auto & child : asts) @@ -1381,6 +1384,24 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain return true; } +void SelectQueryExpressionAnalyzer::validateGroupByKeyType(const DB::DataTypePtr & key_type) const +{ + if (getContext()->getSettingsRef().allow_suspicious_types_in_group_by) + return; + + auto check = [](const IDataType & type) + { + if (isDynamic(type) || isVariant(type)) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. " + "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); + }; + + check(*key_type); + key_type->forEachChild(check); +} + void SelectQueryExpressionAnalyzer::appendAggregateFunctionsArguments(ExpressionActionsChain & chain, bool only_types) { const auto * select_query = getAggregatingQuery(); @@ -1564,6 +1585,9 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy(Expr getRootActions(select_query->orderBy(), only_types, step.actions()->dag); + for (const auto & result_column : step.getResultColumns()) + validateOrderByKeyType(result_column.type); + bool with_fill = false; for (auto & child : select_query->orderBy()->children) @@ -1643,6 +1667,24 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy(Expr return actions; } +void SelectQueryExpressionAnalyzer::validateOrderByKeyType(const DataTypePtr & key_type) const +{ + if (getContext()->getSettingsRef().allow_suspicious_types_in_order_by) + return; + + auto check = [](const IDataType & type) + { + if (isDynamic(type) || isVariant(type)) + throw Exception( + ErrorCodes::ILLEGAL_COLUMN, + "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. 
" + "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it"); + }; + + check(*key_type); + key_type->forEachChild(check); +} + bool SelectQueryExpressionAnalyzer::appendLimitBy(ExpressionActionsChain & chain, bool only_types) { const auto * select_query = getSelectQuery(); diff --git a/src/Interpreters/ExpressionAnalyzer.h b/src/Interpreters/ExpressionAnalyzer.h index dc038e10594..3b006ee2106 100644 --- a/src/Interpreters/ExpressionAnalyzer.h +++ b/src/Interpreters/ExpressionAnalyzer.h @@ -397,6 +397,7 @@ private: ActionsAndProjectInputsFlagPtr appendPrewhere(ExpressionActionsChain & chain, bool only_types); bool appendWhere(ExpressionActionsChain & chain, bool only_types); bool appendGroupBy(ExpressionActionsChain & chain, bool only_types, bool optimize_aggregation_in_order, ManyExpressionActions &); + void validateGroupByKeyType(const DataTypePtr & key_type) const; void appendAggregateFunctionsArguments(ExpressionActionsChain & chain, bool only_types); void appendWindowFunctionsArguments(ExpressionActionsChain & chain, bool only_types); @@ -409,6 +410,7 @@ private: bool appendHaving(ExpressionActionsChain & chain, bool only_types); /// appendSelect ActionsAndProjectInputsFlagPtr appendOrderBy(ExpressionActionsChain & chain, bool only_types, bool optimize_read_in_order, ManyExpressionActions &); + void validateOrderByKeyType(const DataTypePtr & key_type) const; bool appendLimitBy(ExpressionActionsChain & chain, bool only_types); /// appendProjectResult }; diff --git a/src/Storages/KeyDescription.cpp b/src/Storages/KeyDescription.cpp index 7e43966556e..bb0b6d3542d 100644 --- a/src/Storages/KeyDescription.cpp +++ b/src/Storages/KeyDescription.cpp @@ -151,6 +151,15 @@ KeyDescription KeyDescription::getSortingKeyFromAST( throw Exception(ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_KEY, "Column {} with type {} is not allowed in key expression, it's not comparable", backQuote(result.sample_block.getByPosition(i).name), result.data_types.back()->getName()); + + auto check = [&](const IDataType & type) + { + if (isDynamic(type) || isVariant(type)) + throw Exception(ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_KEY, "Column with type Variant/Dynamic is not allowed in key expression"); + }; + + check(*result.data_types.back()); + result.data_types.back()->forEachChild(check); } return result; diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference new file mode 100644 index 00000000000..a3eac1cf3fa --- /dev/null +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference @@ -0,0 +1,184 @@ +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +4 +3 +2 +0 +1 +4 +3 +2 +[4] +[3] +[2] +[0] +[1] +{'str':0} +{'str':1} +{'str':4} +{'str':3} +{'str':2} +0 +1 +4 +3 +2 +\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +4 +3 +2 +0 +1 +4 +3 +2 +[4] +[3] +[2] +[0] +[1] +{'str':0} +{'str':1} +{'str':4} +{'str':3} +{'str':2} +\N +0 +1 +4 +3 +2 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +[4] +[0] +[1] +[2] +[3] +{'str':0} +{'str':1} +{'str':2} +{'str':3} +{'str':4} +0 +1 +2 +3 +4 +\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +[4] +[0] +[1] +[2] +[3] +{'str':0} +{'str':1} +{'str':2} +{'str':3} +{'str':4} +0 +1 +2 +3 +4 +\N diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql 
b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql new file mode 100644 index 00000000000..a4ea6425622 --- /dev/null +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql @@ -0,0 +1,154 @@ +set allow_experimental_variant_type=1; +set allow_experimental_dynamic_type=1; + +drop table if exists test; + +create table test (d Dynamic) engine=MergeTree order by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() primary key d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} + +create table test (d Variant(UInt64)) engine=MergeTree order by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() primary key d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} + +create table test (d Dynamic) engine=Memory; +insert into test select * from numbers(5); + +set allow_experimental_analyzer=1; + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), 
('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d; +select * from test group by tuple(d); +select array(d) from test group by array(d); +select map('str', d) from test group by map('str', d); +select * from test group by grouping sets ((d), ('str')); + +set allow_experimental_analyzer=0; + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d; +select * from test group by tuple(d); +select array(d) from test group by array(d); +select map('str', d) from test group by map('str', d); +select * from test group by grouping sets ((d), ('str')); + +drop table test; + +create table test (d Variant(UInt64)) engine=Memory; +insert into test select * from numbers(5); + +set allow_experimental_analyzer=1; + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d; +select * from test group by tuple(d); +select array(d) from test group by array(d); +select map('str', d) from test group by map('str', d); +select * from test group by grouping sets ((d), ('str')); + +set allow_experimental_analyzer=0; + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from 
test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d; +select * from test group by tuple(d); +select array(d) from test group by array(d); +select map('str', d) from test group by map('str', d); +select * from test group by grouping sets ((d), ('str')); + +drop table test; From 3923efbabf2a3273a055e2889a0df19a517b0b6b Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 18 Sep 2024 14:11:07 +0000 Subject: [PATCH 002/353] Update settings changes history --- src/Core/SettingsChangesHistory.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 5e831c6301c..c2e5e51ab75 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -75,6 +75,8 @@ static std::initializer_list Date: Wed, 18 Sep 2024 19:54:37 +0200 Subject: [PATCH 003/353] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: János Benjamin Antal --- docs/en/operations/settings/settings.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 7dde006b14d..56341205bf7 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -5689,18 +5689,18 @@ Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) a Possible values: -- 1 — Usage of `Variant` and `Dynamic` types is not restricted. - 0 — Usage of `Variant` and `Dynamic` types is restricted. +- 1 — Usage of `Variant` and `Dynamic` types is not restricted. Default value: 0. -## allow_suspicious_types_in_group_by {#allow_suspicious_types_in_group_by} +## allow_suspicious_types_in_order_by {#allow_suspicious_types_in_order_by} -Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in GROUP BY keys. +Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in ORDER BY keys. Possible values: -- 1 — Usage of `Variant` and `Dynamic` types is not restricted. - 0 — Usage of `Variant` and `Dynamic` types is restricted. +- 1 — Usage of `Variant` and `Dynamic` types is not restricted. Default value: 0. 
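Taken together, the two settings documented above gate the validation added in this series for both plain `Variant`/`Dynamic` keys and keys nested inside arrays, tuples, and maps. A minimal sketch of the intended behavior (the table name and values are hypothetical; the queries follow the pattern of the 03231 test):

```sql
SET allow_experimental_variant_type = 1;

CREATE TABLE example_v (v Variant(UInt64, String)) ENGINE = Memory;
INSERT INTO example_v VALUES (1), ('str'), (2);

-- Rejected with ILLEGAL_COLUMN while allow_suspicious_types_in_group_by = 0 (the default).
SELECT v, count() FROM example_v GROUP BY v;

SET allow_suspicious_types_in_group_by = 1;
SET allow_suspicious_types_in_order_by = 1;

-- Now permitted; values of different underlying types are compared by the
-- Variant comparison rule rather than by plain value comparison.
SELECT v FROM example_v ORDER BY v;
SELECT v, count() FROM example_v GROUP BY v;
```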
From c0c04eabbc20d5ab69066d0c0fb8c1339602f0b5 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 18 Sep 2024 18:50:16 +0000 Subject: [PATCH 004/353] Update test --- ...mic_variant_in_order_by_group_by.reference | 10 +++---- ...1_dynamic_variant_in_order_by_group_by.sql | 28 +++++++++++++------ 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference index a3eac1cf3fa..5c7b4cb0bea 100644 --- a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference @@ -40,9 +40,9 @@ {'str':2} 0 1 -4 -3 2 +3 +4 \N 0 1 @@ -84,12 +84,12 @@ {'str':4} {'str':3} {'str':2} -\N 0 1 -4 -3 2 +3 +4 +\N 0 1 2 diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql index a4ea6425622..6e4a39c7234 100644 --- a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql @@ -28,7 +28,7 @@ insert into test select * from numbers(5); set allow_experimental_analyzer=1; -set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_group_by=1; set allow_suspicious_types_in_order_by=0; select * from test order by d; -- {serverError ILLEGAL_COLUMN} @@ -36,6 +36,9 @@ select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + select * from test group by d; -- {serverError ILLEGAL_COLUMN} select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} @@ -54,11 +57,11 @@ select * from test group by d; select * from test group by tuple(d); select array(d) from test group by array(d); select map('str', d) from test group by map('str', d); -select * from test group by grouping sets ((d), ('str')); +select * from test group by grouping sets ((d), ('str')) order by all; set allow_experimental_analyzer=0; -set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_group_by=1; set allow_suspicious_types_in_order_by=0; select * from test order by d; -- {serverError ILLEGAL_COLUMN} @@ -66,6 +69,9 @@ select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + select * from test group by d; -- {serverError ILLEGAL_COLUMN} select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} @@ -84,7 +90,7 @@ select * from test group by d; select * from test group by tuple(d); select array(d) from test group by array(d); select map('str', d) from test group by map('str', d); -select * from test group by grouping sets ((d), ('str')); +select * from test group by grouping sets ((d), ('str')) order by all; drop table test; @@ -93,7 +99,7 @@ insert into test select * from numbers(5); set allow_experimental_analyzer=1; -set 
allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_group_by=1; set allow_suspicious_types_in_order_by=0; select * from test order by d; -- {serverError ILLEGAL_COLUMN} @@ -101,6 +107,9 @@ select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + select * from test group by d; -- {serverError ILLEGAL_COLUMN} select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} @@ -119,11 +128,11 @@ select * from test group by d; select * from test group by tuple(d); select array(d) from test group by array(d); select map('str', d) from test group by map('str', d); -select * from test group by grouping sets ((d), ('str')); +select * from test group by grouping sets ((d), ('str')) order by all; set allow_experimental_analyzer=0; -set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_group_by=1; set allow_suspicious_types_in_order_by=0; select * from test order by d; -- {serverError ILLEGAL_COLUMN} @@ -131,6 +140,9 @@ select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + select * from test group by d; -- {serverError ILLEGAL_COLUMN} select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} @@ -149,6 +161,6 @@ select * from test group by d; select * from test group by tuple(d); select array(d) from test group by array(d); select map('str', d) from test group by map('str', d); -select * from test group by grouping sets ((d), ('str')); +select * from test group by grouping sets ((d), ('str')) order by all; drop table test; From cb488681eb43016e6b9af904e12243b8bb0aea27 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 18 Sep 2024 18:51:46 +0000 Subject: [PATCH 005/353] Fix style --- src/Databases/enableAllExperimentalSettings.cpp | 2 ++ src/Interpreters/ExpressionAnalyzer.cpp | 1 + 2 files changed, 3 insertions(+) diff --git a/src/Databases/enableAllExperimentalSettings.cpp b/src/Databases/enableAllExperimentalSettings.cpp index 9abe05d7bce..01e989dc10b 100644 --- a/src/Databases/enableAllExperimentalSettings.cpp +++ b/src/Databases/enableAllExperimentalSettings.cpp @@ -32,6 +32,8 @@ void enableAllExperimentalSettings(ContextMutablePtr context) context->setSetting("allow_suspicious_low_cardinality_types", 1); context->setSetting("allow_suspicious_fixed_string_types", 1); + context->setSetting("allow_suspicious_types_in_group_by", 1); + context->setSetting("allow_suspicious_types_in_order_by", 1); context->setSetting("allow_suspicious_indices", 1); context->setSetting("allow_suspicious_codecs", 1); context->setSetting("allow_hyperscan", 1); diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 9dcf4cd76e4..2df006aff9b 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -98,6 +98,7 @@ namespace ErrorCodes extern const int NOT_IMPLEMENTED; extern const int UNKNOWN_IDENTIFIER; extern const int UNKNOWN_TYPE_OF_AST_NODE; + 
extern const int ILLEGAL_COLUMN; } namespace From e290745fe113efdba60cd5c807b92ae415c03d77 Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 19 Sep 2024 12:39:57 +0000 Subject: [PATCH 006/353] Fix tests --- tests/queries/0_stateless/02989_variant_comparison.sql | 1 + tests/queries/0_stateless/03035_dynamic_sorting.sql | 1 + .../03036_dynamic_read_shared_subcolumns_small.sql.j2 | 1 + .../0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 | 1 + tests/queries/0_stateless/03096_variant_in_primary_key.sql | 1 + tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql | 1 + .../queries/0_stateless/03151_dynamic_type_scale_max_types.sql | 2 +- tests/queries/0_stateless/03158_dynamic_type_from_variant.sql | 1 + tests/queries/0_stateless/03159_dynamic_type_all_types.sql | 2 +- tests/queries/0_stateless/03162_dynamic_type_nested.sql | 1 + tests/queries/0_stateless/03163_dynamic_as_supertype.sql | 1 + .../03228_dynamic_serializations_uninitialized_value.sql | 1 + .../queries/0_stateless/03231_dynamic_not_safe_primary_key.sql | 1 + 13 files changed, 13 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02989_variant_comparison.sql b/tests/queries/0_stateless/02989_variant_comparison.sql index e0dcbc97c27..4d09933fb7b 100644 --- a/tests/queries/0_stateless/02989_variant_comparison.sql +++ b/tests/queries/0_stateless/02989_variant_comparison.sql @@ -1,4 +1,5 @@ set allow_experimental_variant_type=1; +set allow_suspicious_types_in_order_by=1; create table test (v1 Variant(String, UInt64, Array(UInt32)), v2 Variant(String, UInt64, Array(UInt32))) engine=Memory; diff --git a/tests/queries/0_stateless/03035_dynamic_sorting.sql b/tests/queries/0_stateless/03035_dynamic_sorting.sql index e0039a348c6..b2f36fed08e 100644 --- a/tests/queries/0_stateless/03035_dynamic_sorting.sql +++ b/tests/queries/0_stateless/03035_dynamic_sorting.sql @@ -1,4 +1,5 @@ set allow_experimental_dynamic_type = 1; +set allow_suspicious_types_in_order_by=1; drop table if exists test; create table test (d1 Dynamic(max_types=2), d2 Dynamic(max_types=2)) engine=Memory; diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 index dde4f3f53c3..d6732d91e74 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 @@ -1,6 +1,7 @@ set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; set allow_experimental_dynamic_type = 1; +set allow_suspicious_types_in_order_by = 1; drop table if exists test; diff --git a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 index 3253d7a6c68..daf85077160 100644 --- a/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 +++ b/tests/queries/0_stateless/03036_dynamic_read_subcolumns_small.sql.j2 @@ -1,6 +1,7 @@ set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; set allow_experimental_dynamic_type = 1; +set allow_suspicious_types_in_order_by = 1; drop table if exists test; diff --git a/tests/queries/0_stateless/03096_variant_in_primary_key.sql b/tests/queries/0_stateless/03096_variant_in_primary_key.sql index 48fbc821bcc..c422b4c3cc5 100644 --- a/tests/queries/0_stateless/03096_variant_in_primary_key.sql +++ b/tests/queries/0_stateless/03096_variant_in_primary_key.sql @@ -1,4 +1,5 @@ set 
allow_experimental_variant_type=1; +set allow_suspicious_types_in_order_by=1; drop table if exists test; create table test (id UInt64, v Variant(UInt64, String)) engine=MergeTree order by (id, v); insert into test values (1, 1), (1, 'str_1'), (1, 2), (1, 'str_2'); diff --git a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql index 71d5dd4abd1..0e5119a38e0 100644 --- a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql +++ b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql @@ -1,4 +1,5 @@ SET allow_experimental_dynamic_type=1; +SET allow_suspicious_types_in_order_by=1; DROP TABLE IF EXISTS null_table; CREATE TABLE null_table diff --git a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql index e476d34a1db..30a86dbc892 100644 --- a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql +++ b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql @@ -1,5 +1,5 @@ SET allow_experimental_dynamic_type=1; -set min_compress_block_size = 585572, max_compress_block_size = 373374, max_block_size = 60768, max_joined_block_size_rows = 18966, max_insert_threads = 5, max_threads = 50, max_read_buffer_size = 708232, connect_timeout_with_failover_ms = 2000, connect_timeout_with_failover_secure_ms = 3000, idle_connection_timeout = 36000, use_uncompressed_cache = true, stream_like_engine_allow_direct_select = true, replication_wait_for_inactive_replica_timeout = 30, compile_aggregate_expressions = false, min_count_to_compile_aggregate_expression = 0, compile_sort_description = false, group_by_two_level_threshold = 1000000, group_by_two_level_threshold_bytes = 12610083, enable_memory_bound_merging_of_aggregation_results = false, min_chunk_bytes_for_parallel_parsing = 18769830, merge_tree_coarse_index_granularity = 12, min_bytes_to_use_direct_io = 10737418240, min_bytes_to_use_mmap_io = 10737418240, log_queries = true, insert_quorum_timeout = 60000, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.05000000074505806, http_response_buffer_size = 294986, fsync_metadata = true, http_send_timeout = 60., http_receive_timeout = 60., opentelemetry_start_trace_probability = 0.10000000149011612, max_bytes_before_external_group_by = 1, max_bytes_before_external_sort = 10737418240, max_bytes_before_remerge_sort = 1326536545, max_untracked_memory = 1048576, memory_profiler_step = 1048576, log_comment = '03151_dynamic_type_scale_max_types.sql', send_logs_level = 'fatal', prefer_localhost_replica = false, optimize_read_in_order = false, optimize_aggregation_in_order = true, aggregation_in_order_max_block_bytes = 27069500, read_in_order_two_level_merge_threshold = 75, allow_introspection_functions = true, database_atomic_wait_for_drop_and_detach_synchronously = true, remote_filesystem_read_method = 'read', local_filesystem_read_prefetch = true, remote_filesystem_read_prefetch = false, merge_tree_compact_parts_min_granules_to_multibuffer_read = 119, async_insert_busy_timeout_max_ms = 5000, read_from_filesystem_cache_if_exists_otherwise_bypass_cache = true, filesystem_cache_segments_batch_size = 10, use_page_cache_for_disks_without_file_cache = true, page_cache_inject_eviction = true, allow_prefetched_read_pool_for_remote_filesystem = false, filesystem_prefetch_step_marks = 50, filesystem_prefetch_min_bytes_for_single_read_task = 16777216, filesystem_prefetch_max_memory_usage = 134217728, 
filesystem_prefetches_limit = 10, optimize_sorting_by_input_stream_properties = false, allow_experimental_dynamic_type = true, session_timezone = 'Africa/Khartoum', prefer_warmed_unmerged_parts_seconds = 2; +SET allow_suspicious_types_in_order_by=1; drop table if exists to_table; diff --git a/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql b/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql index a18f985f217..429ac21b5eb 100644 --- a/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql +++ b/tests/queries/0_stateless/03158_dynamic_type_from_variant.sql @@ -1,5 +1,6 @@ SET allow_experimental_dynamic_type=1; SET allow_experimental_variant_type=1; +SET allow_suspicious_types_in_order_by=1; CREATE TABLE test_variable (v Variant(String, UInt32, IPv6, Bool, DateTime64)) ENGINE = Memory; CREATE TABLE test_dynamic (d Dynamic) ENGINE = Memory; diff --git a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql index 28b679e2214..cf8ba687d3f 100644 --- a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql +++ b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql @@ -3,7 +3,7 @@ SET allow_experimental_dynamic_type=1; SET allow_experimental_variant_type=1; SET allow_suspicious_low_cardinality_types=1; - +SET allow_suspicious_types_in_order_by=1; CREATE TABLE t (d Dynamic(max_types=254)) ENGINE = Memory; -- Integer types: signed and unsigned integers (UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256) diff --git a/tests/queries/0_stateless/03162_dynamic_type_nested.sql b/tests/queries/0_stateless/03162_dynamic_type_nested.sql index 94007459a9e..59c22491957 100644 --- a/tests/queries/0_stateless/03162_dynamic_type_nested.sql +++ b/tests/queries/0_stateless/03162_dynamic_type_nested.sql @@ -1,4 +1,5 @@ SET allow_experimental_dynamic_type=1; +SET allow_suspicious_types_in_order_by=1; CREATE TABLE t (d Dynamic) ENGINE = Memory; diff --git a/tests/queries/0_stateless/03163_dynamic_as_supertype.sql b/tests/queries/0_stateless/03163_dynamic_as_supertype.sql index baba637eea4..e859fbd1815 100644 --- a/tests/queries/0_stateless/03163_dynamic_as_supertype.sql +++ b/tests/queries/0_stateless/03163_dynamic_as_supertype.sql @@ -1,4 +1,5 @@ SET allow_experimental_dynamic_type=1; +SET allow_suspicious_types_in_order_by=1; SELECT if(number % 2, number::Dynamic(max_types=3), ('str_' || toString(number))::Dynamic(max_types=2)) AS d, toTypeName(d), dynamicType(d) FROM numbers(4); CREATE TABLE dynamic_test_1 (d Dynamic(max_types=3)) ENGINE = Memory; INSERT INTO dynamic_test_1 VALUES ('str_1'), (42::UInt64); diff --git a/tests/queries/0_stateless/03228_dynamic_serializations_uninitialized_value.sql b/tests/queries/0_stateless/03228_dynamic_serializations_uninitialized_value.sql index 8a565fe36b9..60e2439d45f 100644 --- a/tests/queries/0_stateless/03228_dynamic_serializations_uninitialized_value.sql +++ b/tests/queries/0_stateless/03228_dynamic_serializations_uninitialized_value.sql @@ -1,4 +1,5 @@ set allow_experimental_dynamic_type=1; +set allow_suspicious_types_in_group_by=1; set cast_keep_nullable=1; SELECT toFixedString('str', 3), 3, CAST(if(1 = 0, toInt8(3), NULL), 'Int32') AS x from numbers(10) GROUP BY GROUPING SETS ((CAST(toInt32(1), 'Int32')), ('str', 3), (CAST(toFixedString('str', 3), 'Dynamic')), (CAST(toFixedString(toFixedString('str', 3), 3), 'Dynamic'))); diff --git a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql 
b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql index f207581f482..101c7cfe8fa 100644 --- a/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql +++ b/tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql @@ -1,4 +1,5 @@ SET allow_experimental_dynamic_type = 1; +SET allow_suspicious_types_in_order_by = 1; DROP TABLE IF EXISTS t0; DROP TABLE IF EXISTS t1; CREATE TABLE t0 (c0 Int) ENGINE = AggregatingMergeTree() ORDER BY (c0); From 3c8594d401d7c625a810a61776e689083d79912a Mon Sep 17 00:00:00 2001 From: divanik Date: Fri, 27 Sep 2024 14:30:07 +0000 Subject: [PATCH 007/353] Remove unnecessary changes --- .../DataLakes/DataLakeConfiguration.h | 86 +++++++++ .../DataLakes/DeltaLakeMetadata.cpp | 40 ++-- .../DataLakes/DeltaLakeMetadata.h | 12 +- .../ObjectStorage/DataLakes/HudiMetadata.cpp | 12 +- .../ObjectStorage/DataLakes/HudiMetadata.h | 8 +- .../DataLakes/IStorageDataLake.h | 172 ------------------ .../DataLakes/IcebergMetadata.cpp | 24 +-- .../ObjectStorage/DataLakes/IcebergMetadata.h | 8 +- .../DataLakes/registerDataLakeStorages.cpp | 132 -------------- .../ObjectStorage/StorageObjectStorage.cpp | 18 +- .../ObjectStorage/StorageObjectStorage.h | 21 ++- .../registerStorageObjectStorage.cpp | 105 +++++++++++ src/TableFunctions/ITableFunctionDataLake.h | 120 ------------ .../TableFunctionObjectStorage.cpp | 90 +++++++++ .../TableFunctionObjectStorage.h | 55 ++++++ .../registerDataLakeTableFunctions.cpp | 88 --------- 16 files changed, 407 insertions(+), 584 deletions(-) create mode 100644 src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h delete mode 100644 src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h delete mode 100644 src/Storages/ObjectStorage/DataLakes/registerDataLakeStorages.cpp delete mode 100644 src/TableFunctions/ITableFunctionDataLake.h delete mode 100644 src/TableFunctions/registerDataLakeTableFunctions.cpp diff --git a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h new file mode 100644 index 00000000000..6d8e64aa3b7 --- /dev/null +++ b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h @@ -0,0 +1,86 @@ +#pragma once + +#include "config.h" + +#if USE_AVRO + +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include + +# include + + +namespace DB +{ + +template +concept StorageConfiguration = std::derived_from; + +template +class DataLakeConfiguration : public BaseStorageConfiguration, public std::enable_shared_from_this +{ +public: + using Configuration = StorageObjectStorage::Configuration; + + bool isDataLakeConfiguration() const override { return true; } + + std::string getEngineName() const override { return DataLakeMetadata::name; } + + void update(ObjectStoragePtr object_storage, ContextPtr local_context) override + { + auto new_metadata = DataLakeMetadata::create(object_storage, weak_from_this(), local_context); + if (current_metadata && *current_metadata == *new_metadata) + return; + + current_metadata = std::move(new_metadata); + BaseStorageConfiguration::setPaths(current_metadata->getDataFiles()); + BaseStorageConfiguration::setPartitionColumns(current_metadata->getPartitionColumns()); + } + +private: + DataLakeMetadataPtr current_metadata; + + ReadFromFormatInfo prepareReadingFromFormat( + ObjectStoragePtr object_storage, + const Strings & requested_columns, + const StorageSnapshotPtr & storage_snapshot, + bool supports_subset_of_columns, + ContextPtr 
local_context) override + { + auto info = DB::prepareReadingFromFormat(requested_columns, storage_snapshot, supports_subset_of_columns); + if (!current_metadata) + { + current_metadata = DataLakeMetadata::create(object_storage, weak_from_this(), local_context); + } + auto column_mapping = current_metadata->getColumnNameToPhysicalNameMapping(); + if (!column_mapping.empty()) + { + for (const auto & [column_name, physical_name] : column_mapping) + { + auto & column = info.format_header.getByName(column_name); + column.name = physical_name; + } + } + return info; + } +}; + +using StorageS3IcebergConfiguration = DataLakeConfiguration; +using StorageAzureIcebergConfiguration = DataLakeConfiguration; +using StorageLocalIcebergConfiguration = DataLakeConfiguration; +using StorageS3DeltaLakeConfiguration = DataLakeConfiguration; +using StorageS3HudiConfiguration = DataLakeConfiguration; + + +} + +#endif diff --git a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp index f04e868ee5a..f437faa2e90 100644 --- a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.cpp @@ -55,22 +55,18 @@ namespace ErrorCodes struct DeltaLakeMetadataImpl { - using ConfigurationPtr = DeltaLakeMetadata::ConfigurationPtr; + using ConfigurationObservePtr = DeltaLakeMetadata::ConfigurationObservePtr; ObjectStoragePtr object_storage; - ConfigurationPtr configuration; + ConfigurationObservePtr configuration; ContextPtr context; /** * Useful links: * - https://github.com/delta-io/delta/blob/master/PROTOCOL.md#data-files */ - DeltaLakeMetadataImpl(ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, - ContextPtr context_) - : object_storage(object_storage_) - , configuration(configuration_) - , context(context_) + DeltaLakeMetadataImpl(ObjectStoragePtr object_storage_, ConfigurationObservePtr configuration_, ContextPtr context_) + : object_storage(object_storage_), configuration(configuration_), context(context_) { } @@ -110,6 +106,7 @@ struct DeltaLakeMetadataImpl }; DeltaLakeMetadata processMetadataFiles() { + auto configuration_ptr = configuration.lock(); std::set result_files; NamesAndTypesList current_schema; DataLakePartitionColumns current_partition_columns; @@ -121,7 +118,7 @@ struct DeltaLakeMetadataImpl while (true) { const auto filename = withPadding(++current_version) + metadata_file_suffix; - const auto file_path = std::filesystem::path(configuration->getPath()) / deltalake_metadata_directory / filename; + const auto file_path = std::filesystem::path(configuration_ptr->getPath()) / deltalake_metadata_directory / filename; if (!object_storage->exists(StoredObject(file_path))) break; @@ -135,7 +132,7 @@ struct DeltaLakeMetadataImpl } else { - const auto keys = listFiles(*object_storage, *configuration, deltalake_metadata_directory, metadata_file_suffix); + const auto keys = listFiles(*object_storage, *configuration_ptr, deltalake_metadata_directory, metadata_file_suffix); for (const String & key : keys) processMetadataFile(key, current_schema, current_partition_columns, result_files); } @@ -244,6 +241,8 @@ struct DeltaLakeMetadataImpl } } + auto configuration_ptr = configuration.lock(); + if (object->has("add")) { auto add_object = object->get("add").extract(); @@ -251,7 +250,7 @@ struct DeltaLakeMetadataImpl throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `add` field"); auto path = add_object->getValue("path"); - 
result.insert(fs::path(configuration->getPath()) / path); + result.insert(fs::path(configuration_ptr->getPath()) / path); auto filename = fs::path(path).filename().string(); auto it = file_partition_columns.find(filename); @@ -295,7 +294,7 @@ struct DeltaLakeMetadataImpl throw Exception(ErrorCodes::LOGICAL_ERROR, "Failed to extract `remove` field"); auto path = remove_object->getValue("path"); - result.erase(fs::path(configuration->getPath()) / path); + result.erase(fs::path(configuration_ptr->getPath()) / path); } } } @@ -486,7 +485,9 @@ struct DeltaLakeMetadataImpl */ size_t readLastCheckpointIfExists() const { - const auto last_checkpoint_file = std::filesystem::path(configuration->getPath()) / deltalake_metadata_directory / "_last_checkpoint"; + auto configuration_ptr = configuration.lock(); + const auto last_checkpoint_file + = std::filesystem::path(configuration_ptr->getPath()) / deltalake_metadata_directory / "_last_checkpoint"; if (!object_storage->exists(StoredObject(last_checkpoint_file))) return 0; @@ -552,7 +553,11 @@ struct DeltaLakeMetadataImpl return 0; const auto checkpoint_filename = withPadding(version) + ".checkpoint.parquet"; - const auto checkpoint_path = std::filesystem::path(configuration->getPath()) / deltalake_metadata_directory / checkpoint_filename; + + auto configuration_ptr = configuration.lock(); + + const auto checkpoint_path + = std::filesystem::path(configuration_ptr->getPath()) / deltalake_metadata_directory / checkpoint_filename; LOG_TRACE(log, "Using checkpoint file: {}", checkpoint_path.string()); @@ -667,7 +672,7 @@ struct DeltaLakeMetadataImpl } LOG_TEST(log, "Adding {}", path); - const auto [_, inserted] = result.insert(std::filesystem::path(configuration->getPath()) / path); + const auto [_, inserted] = result.insert(std::filesystem::path(configuration_ptr->getPath()) / path); if (!inserted) throw Exception(ErrorCodes::INCORRECT_DATA, "File already exists {}", path); } @@ -678,10 +683,7 @@ struct DeltaLakeMetadataImpl LoggerPtr log = getLogger("DeltaLakeMetadataParser"); }; -DeltaLakeMetadata::DeltaLakeMetadata( - ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, - ContextPtr context_) +DeltaLakeMetadata::DeltaLakeMetadata(ObjectStoragePtr object_storage_, ConfigurationObservePtr configuration_, ContextPtr context_) { auto impl = DeltaLakeMetadataImpl(object_storage_, configuration_, context_); auto result = impl.processMetadataFiles(); diff --git a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.h b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.h index a479a3dd293..549443f115e 100644 --- a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.h +++ b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.h @@ -12,13 +12,10 @@ namespace DB class DeltaLakeMetadata final : public IDataLakeMetadata { public: - using ConfigurationPtr = StorageObjectStorage::ConfigurationPtr; + using ConfigurationObservePtr = StorageObjectStorage::ConfigurationObservePtr; static constexpr auto name = "DeltaLake"; - DeltaLakeMetadata( - ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, - ContextPtr context_); + DeltaLakeMetadata(ObjectStoragePtr object_storage_, ConfigurationObservePtr configuration_, ContextPtr context_); Strings getDataFiles() const override { return data_files; } @@ -36,10 +33,7 @@ public: && data_files == deltalake_metadata->data_files; } - static DataLakeMetadataPtr create( - ObjectStoragePtr object_storage, - ConfigurationPtr configuration, - ContextPtr local_context) + static 
DataLakeMetadataPtr create(ObjectStoragePtr object_storage, ConfigurationObservePtr configuration, ContextPtr local_context) { return std::make_unique(object_storage, configuration, local_context); } diff --git a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp index 91a586ccbf9..8a93a0ea6d3 100644 --- a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp @@ -43,8 +43,9 @@ namespace ErrorCodes */ Strings HudiMetadata::getDataFilesImpl() const { + auto configuration_ptr = configuration.lock(); auto log = getLogger("HudiMetadata"); - const auto keys = listFiles(*object_storage, *configuration, "", Poco::toLower(configuration->format)); + const auto keys = listFiles(*object_storage, *configuration_ptr, "", Poco::toLower(configuration_ptr->format)); using Partition = std::string; using FileID = std::string; @@ -86,13 +87,8 @@ Strings HudiMetadata::getDataFilesImpl() const return result; } -HudiMetadata::HudiMetadata( - ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, - ContextPtr context_) - : WithContext(context_) - , object_storage(object_storage_) - , configuration(configuration_) +HudiMetadata::HudiMetadata(ObjectStoragePtr object_storage_, ConfigurationObservePtr configuration_, ContextPtr context_) + : WithContext(context_), object_storage(object_storage_), configuration(configuration_) { } diff --git a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.h b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.h index b060b1b0d39..b22dfacb0ad 100644 --- a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.h +++ b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.h @@ -13,13 +13,13 @@ namespace DB class HudiMetadata final : public IDataLakeMetadata, private WithContext { public: - using ConfigurationPtr = StorageObjectStorage::ConfigurationPtr; + using ConfigurationObservePtr = StorageObjectStorage::ConfigurationObservePtr; static constexpr auto name = "Hudi"; HudiMetadata( ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, + ConfigurationObservePtr configuration_, ContextPtr context_); Strings getDataFiles() const override; @@ -40,7 +40,7 @@ public: static DataLakeMetadataPtr create( ObjectStoragePtr object_storage, - ConfigurationPtr configuration, + ConfigurationObservePtr configuration, ContextPtr local_context) { return std::make_unique(object_storage, configuration, local_context); @@ -48,7 +48,7 @@ public: private: const ObjectStoragePtr object_storage; - const ConfigurationPtr configuration; + const ConfigurationObservePtr configuration; mutable Strings data_files; std::unordered_map column_name_to_physical_name; DataLakePartitionColumns partition_columns; diff --git a/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h b/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h deleted file mode 100644 index a17fd163253..00000000000 --- a/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h +++ /dev/null @@ -1,172 +0,0 @@ -#pragma once - -#include "config.h" - -#if USE_AVRO - -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -/// Storage for read-only integration with Apache Iceberg tables in Amazon S3 (see https://iceberg.apache.org/) -/// Right now it's implemented on top of StorageS3 and right now it doesn't support -/// many Iceberg features like schema evolution, partitioning, positional and equality deletes. 
-template -class IStorageDataLake final : public StorageObjectStorage -{ -public: - using Storage = StorageObjectStorage; - using ConfigurationPtr = Storage::ConfigurationPtr; - - static StoragePtr create( - ConfigurationPtr base_configuration, - ContextPtr context, - const StorageID & table_id_, - const ColumnsDescription & columns_, - const ConstraintsDescription & constraints_, - const String & comment_, - std::optional format_settings_, - LoadingStrictnessLevel mode) - { - auto object_storage = base_configuration->createObjectStorage(context, /* is_readonly */true); - DataLakeMetadataPtr metadata; - NamesAndTypesList schema_from_metadata; - const bool use_schema_from_metadata = columns_.empty(); - - if (base_configuration->format == "auto") - base_configuration->format = "Parquet"; - - ConfigurationPtr configuration = base_configuration->clone(); - - try - { - metadata = DataLakeMetadata::create(object_storage, base_configuration, context); - configuration->setPaths(metadata->getDataFiles()); - if (use_schema_from_metadata) - schema_from_metadata = metadata->getTableSchema(); - } - catch (...) - { - if (mode <= LoadingStrictnessLevel::CREATE) - throw; - - metadata.reset(); - configuration->setPaths({}); - tryLogCurrentException(__PRETTY_FUNCTION__); - } - - return std::make_shared>( - base_configuration, std::move(metadata), configuration, object_storage, - context, table_id_, - use_schema_from_metadata ? ColumnsDescription(schema_from_metadata) : columns_, - constraints_, comment_, format_settings_); - } - - String getName() const override { return DataLakeMetadata::name; } - - static ColumnsDescription getTableStructureFromData( - ObjectStoragePtr object_storage_, - ConfigurationPtr base_configuration, - const std::optional & format_settings_, - ContextPtr local_context) - { - auto metadata = DataLakeMetadata::create(object_storage_, base_configuration, local_context); - - auto schema_from_metadata = metadata->getTableSchema(); - if (!schema_from_metadata.empty()) - { - return ColumnsDescription(std::move(schema_from_metadata)); - } - else - { - ConfigurationPtr configuration = base_configuration->clone(); - configuration->setPaths(metadata->getDataFiles()); - std::string sample_path; - return Storage::resolveSchemaFromData( - object_storage_, configuration, format_settings_, sample_path, local_context); - } - } - - void updateConfiguration(ContextPtr local_context) override - { - Storage::updateConfiguration(local_context); - - auto new_metadata = DataLakeMetadata::create(Storage::object_storage, base_configuration, local_context); - if (current_metadata && *current_metadata == *new_metadata) - return; - - current_metadata = std::move(new_metadata); - auto updated_configuration = base_configuration->clone(); - updated_configuration->setPaths(current_metadata->getDataFiles()); - updated_configuration->setPartitionColumns(current_metadata->getPartitionColumns()); - - Storage::configuration = updated_configuration; - } - - template - IStorageDataLake( - ConfigurationPtr base_configuration_, - DataLakeMetadataPtr metadata_, - Args &&... args) - : Storage(std::forward(args)...) 
- , base_configuration(base_configuration_) - , current_metadata(std::move(metadata_)) - { - if (base_configuration->format == "auto") - { - base_configuration->format = Storage::configuration->format; - } - - if (current_metadata) - { - const auto & columns = current_metadata->getPartitionColumns(); - base_configuration->setPartitionColumns(columns); - Storage::configuration->setPartitionColumns(columns); - } - } - -private: - ConfigurationPtr base_configuration; - DataLakeMetadataPtr current_metadata; - - ReadFromFormatInfo prepareReadingFromFormat( - const Strings & requested_columns, - const StorageSnapshotPtr & storage_snapshot, - bool supports_subset_of_columns, - ContextPtr local_context) override - { - auto info = DB::prepareReadingFromFormat(requested_columns, storage_snapshot, supports_subset_of_columns); - if (!current_metadata) - { - Storage::updateConfiguration(local_context); - current_metadata = DataLakeMetadata::create(Storage::object_storage, base_configuration, local_context); - } - auto column_mapping = current_metadata->getColumnNameToPhysicalNameMapping(); - if (!column_mapping.empty()) - { - for (const auto & [column_name, physical_name] : column_mapping) - { - auto & column = info.format_header.getByName(column_name); - column.name = physical_name; - } - } - return info; - } -}; - -using StorageIceberg = IStorageDataLake; -using StorageDeltaLake = IStorageDataLake; -using StorageHudi = IStorageDataLake; - -} - -#endif diff --git a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp index ffc4dd09a3a..11ff749fd9d 100644 --- a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp @@ -50,7 +50,7 @@ extern const int UNSUPPORTED_METHOD; IcebergMetadata::IcebergMetadata( ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, + ConfigurationObservePtr configuration_, DB::ContextPtr context_, Int32 metadata_version_, Int32 format_version_, @@ -381,12 +381,12 @@ std::pair getMetadataFileAndVersion( } -DataLakeMetadataPtr IcebergMetadata::create( - ObjectStoragePtr object_storage, - ConfigurationPtr configuration, - ContextPtr local_context) +DataLakeMetadataPtr +IcebergMetadata::create(ObjectStoragePtr object_storage, ConfigurationObservePtr configuration, ContextPtr local_context) { - const auto [metadata_version, metadata_file_path] = getMetadataFileAndVersion(object_storage, *configuration); + auto configuration_ptr = configuration.lock(); + + const auto [metadata_version, metadata_file_path] = getMetadataFileAndVersion(object_storage, *configuration_ptr); LOG_DEBUG(getLogger("IcebergMetadata"), "Parse metadata {}", metadata_file_path); auto read_settings = local_context->getReadSettings(); auto buf = object_storage->readObject(StoredObject(metadata_file_path), read_settings); @@ -411,12 +411,13 @@ DataLakeMetadataPtr IcebergMetadata::create( if (snapshot->getValue("snapshot-id") == current_snapshot_id) { const auto path = snapshot->getValue("manifest-list"); - manifest_list_file = std::filesystem::path(configuration->getPath()) / "metadata" / std::filesystem::path(path).filename(); + manifest_list_file = std::filesystem::path(configuration_ptr->getPath()) / "metadata" / std::filesystem::path(path).filename(); break; } } - return std::make_unique(object_storage, configuration, local_context, metadata_version, format_version, manifest_list_file, schema_id, schema); + return std::make_unique( + object_storage, configuration_ptr, 
local_context, metadata_version, format_version, manifest_list_file, schema_id, schema); } /** @@ -446,6 +447,7 @@ DataLakeMetadataPtr IcebergMetadata::create( */ Strings IcebergMetadata::getDataFiles() const { + auto configuration_ptr = configuration.lock(); if (!data_files.empty()) return data_files; @@ -478,7 +480,7 @@ Strings IcebergMetadata::getDataFiles() const { const auto file_path = col_str->getDataAt(i).toView(); const auto filename = std::filesystem::path(file_path).filename(); - manifest_files.emplace_back(std::filesystem::path(configuration->getPath()) / "metadata" / filename); + manifest_files.emplace_back(std::filesystem::path(configuration_ptr->getPath()) / "metadata" / filename); } NameSet files; @@ -612,9 +614,9 @@ Strings IcebergMetadata::getDataFiles() const const auto status = status_int_column->getInt(i); const auto data_path = std::string(file_path_string_column->getDataAt(i).toView()); - const auto pos = data_path.find(configuration->getPath()); + const auto pos = data_path.find(configuration_ptr->getPath()); if (pos == std::string::npos) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected to find {} in data path: {}", configuration->getPath(), data_path); + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected to find {} in data path: {}", configuration_ptr->getPath(), data_path); const auto file_path = data_path.substr(pos); diff --git a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.h b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.h index 7b0deab91c3..7811bcd8b4b 100644 --- a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.h +++ b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.h @@ -61,13 +61,13 @@ namespace DB class IcebergMetadata : public IDataLakeMetadata, private WithContext { public: - using ConfigurationPtr = StorageObjectStorage::ConfigurationPtr; + using ConfigurationObservePtr = StorageObjectStorage::ConfigurationObservePtr; static constexpr auto name = "Iceberg"; IcebergMetadata( ObjectStoragePtr object_storage_, - ConfigurationPtr configuration_, + ConfigurationObservePtr configuration_, ContextPtr context_, Int32 metadata_version_, Int32 format_version_, @@ -94,14 +94,14 @@ public: static DataLakeMetadataPtr create( ObjectStoragePtr object_storage, - ConfigurationPtr configuration, + ConfigurationObservePtr configuration, ContextPtr local_context); private: size_t getVersion() const { return metadata_version; } const ObjectStoragePtr object_storage; - const ConfigurationPtr configuration; + const ConfigurationObservePtr configuration; Int32 metadata_version; Int32 format_version; String manifest_list_file; diff --git a/src/Storages/ObjectStorage/DataLakes/registerDataLakeStorages.cpp b/src/Storages/ObjectStorage/DataLakes/registerDataLakeStorages.cpp deleted file mode 100644 index f0bd51de375..00000000000 --- a/src/Storages/ObjectStorage/DataLakes/registerDataLakeStorages.cpp +++ /dev/null @@ -1,132 +0,0 @@ -#include "config.h" - -#if USE_AWS_S3 - -# include -# include -# include -# include -# include -# include - - -namespace DB -{ - -#if USE_AVRO /// StorageIceberg depending on Avro to parse metadata with Avro format. 
- -void registerStorageIceberg(StorageFactory & factory) -{ - factory.registerStorage( - "Iceberg", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); - - return StorageIceberg::create( - configuration, args.getContext(), args.table_id, args.columns, args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::S3, - }); - - factory.registerStorage( - "IcebergS3", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); - - return StorageIceberg::create( - configuration, args.getContext(), args.table_id, args.columns, args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::S3, - }); - - factory.registerStorage( - "IcebergAzure", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), true); - - return StorageIceberg::create( - configuration, args.getContext(), args.table_id, args.columns, args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::AZURE, - }); - - factory.registerStorage( - "IcebergLocal", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); - - return StorageIceberg::create( - configuration, args.getContext(), args.table_id, args.columns, - args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::FILE, - }); -} - -#endif - -#if USE_PARQUET -void registerStorageDeltaLake(StorageFactory & factory) -{ - factory.registerStorage( - "DeltaLake", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); - - return StorageDeltaLake::create( - configuration, args.getContext(), args.table_id, args.columns, - args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::S3, - }); -} -#endif - -void registerStorageHudi(StorageFactory & factory) -{ - factory.registerStorage( - "Hudi", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); - - return StorageHudi::create( - configuration, args.getContext(), args.table_id, args.columns, - args.constraints, args.comment, std::nullopt, args.mode); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::S3, - }); -} - -} - -#endif diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp 
b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index bc27820707c..f62e0fe20dc 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -124,12 +124,11 @@ bool StorageObjectStorage::supportsSubsetOfColumns(const ContextPtr & context) c return FormatFactory::instance().checkIfFormatSupportsSubsetOfColumns(configuration->format, context, format_settings); } -void StorageObjectStorage::updateConfiguration(ContextPtr context) +void StorageObjectStorage::Configuration::update(ObjectStoragePtr object_storage_ptr, ContextPtr context) { - IObjectStorage::ApplyNewSettingsOptions options{ .allow_client_change = !configuration->isStaticConfiguration() }; - object_storage->applyNewSettings(context->getConfigRef(), configuration->getTypeName() + ".", context, options); + IObjectStorage::ApplyNewSettingsOptions options{.allow_client_change = !isStaticConfiguration()}; + object_storage_ptr->applyNewSettings(context->getConfigRef(), getTypeName() + ".", context, options); } - namespace { class ReadFromObjectStorageStep : public SourceStepWithFilter @@ -243,7 +242,8 @@ private: }; } -ReadFromFormatInfo StorageObjectStorage::prepareReadingFromFormat( +ReadFromFormatInfo StorageObjectStorage::Configuration::prepareReadingFromFormat( + ObjectStoragePtr, const Strings & requested_columns, const StorageSnapshotPtr & storage_snapshot, bool supports_subset_of_columns, @@ -262,7 +262,7 @@ void StorageObjectStorage::read( size_t max_block_size, size_t num_streams) { - updateConfiguration(local_context); + configuration->update(object_storage, local_context); if (partition_by && configuration->withPartitionWildcard()) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, @@ -270,8 +270,8 @@ void StorageObjectStorage::read( getName()); } - const auto read_from_format_info = prepareReadingFromFormat( - column_names, storage_snapshot, supportsSubsetOfColumns(local_context), local_context); + const auto read_from_format_info = configuration->prepareReadingFromFormat( + object_storage, column_names, storage_snapshot, supportsSubsetOfColumns(local_context), local_context); const bool need_only_count = (query_info.optimize_trivial_count || read_from_format_info.requested_columns.empty()) && local_context->getSettingsRef()[Setting::optimize_count_from_files]; @@ -300,7 +300,7 @@ SinkToStoragePtr StorageObjectStorage::write( ContextPtr local_context, bool /* async_insert */) { - updateConfiguration(local_context); + configuration->update(object_storage, local_context); const auto sample_block = metadata_snapshot->getSampleBlock(); const auto & settings = configuration->getQuerySettings(local_context); diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.h b/src/Storages/ObjectStorage/StorageObjectStorage.h index f39586c23b4..9781d5dbe6e 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.h +++ b/src/Storages/ObjectStorage/StorageObjectStorage.h @@ -25,6 +25,7 @@ class StorageObjectStorage : public IStorage public: class Configuration; using ConfigurationPtr = std::shared_ptr; + using ConfigurationObservePtr = std::weak_ptr; using ObjectInfo = RelativePathWithMetadata; using ObjectInfoPtr = std::shared_ptr; using ObjectInfos = std::vector; @@ -120,16 +121,8 @@ public: const ContextPtr & context); protected: - virtual void updateConfiguration(ContextPtr local_context); - String getPathSample(StorageInMemoryMetadata metadata, ContextPtr context); - virtual ReadFromFormatInfo prepareReadingFromFormat( - const Strings & requested_columns, - const 
StorageSnapshotPtr & storage_snapshot, - bool supports_subset_of_columns, - ContextPtr local_context); - static std::unique_ptr createReadBufferIterator( const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, @@ -206,14 +199,26 @@ public: void setPartitionColumns(const DataLakePartitionColumns & columns) { partition_columns = columns; } const DataLakePartitionColumns & getPartitionColumns() const { return partition_columns; } + virtual bool isDataLakeConfiguration() const { return false; } + + virtual ReadFromFormatInfo prepareReadingFromFormat( + ObjectStoragePtr object_storage, + const Strings & requested_columns, + const StorageSnapshotPtr & storage_snapshot, + bool supports_subset_of_columns, + ContextPtr local_context); + String format = "auto"; String compression_method = "auto"; String structure = "auto"; + virtual void update(ObjectStoragePtr object_storage, ContextPtr local_context); + protected: virtual void fromNamedCollection(const NamedCollection & collection, ContextPtr context) = 0; virtual void fromAST(ASTs & args, ContextPtr context, bool with_structure) = 0; + void assertInitialized() const; bool initialized = false; diff --git a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp index d0cacc29adf..570e888da91 100644 --- a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -148,4 +149,108 @@ void registerStorageObjectStorage(StorageFactory & factory) UNUSED(factory); } +#if USE_AVRO /// StorageIceberg depending on Avro to parse metadata with Avro format. + +void registerStorageIceberg(StorageFactory & factory) +{ + factory.registerStorage( + "Iceberg", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::S3, + }); + + factory.registerStorage( + "IcebergS3", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::S3, + }); + + factory.registerStorage( + "IcebergAzure", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), true); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::AZURE, + }); + + factory.registerStorage( + "IcebergLocal", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + 
.supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::FILE, + }); +} + +#endif + +#if USE_PARQUET +void registerStorageDeltaLake(StorageFactory & factory) +{ + factory.registerStorage( + "DeltaLake", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::S3, + }); +} +#endif + +void registerStorageHudi(StorageFactory & factory) +{ + factory.registerStorage( + "Hudi", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::S3, + }); +} } diff --git a/src/TableFunctions/ITableFunctionDataLake.h b/src/TableFunctions/ITableFunctionDataLake.h deleted file mode 100644 index db8287f97bf..00000000000 --- a/src/TableFunctions/ITableFunctionDataLake.h +++ /dev/null @@ -1,120 +0,0 @@ -#pragma once - -#include "config.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -template -class ITableFunctionDataLake : public TableFunction -{ -public: - static constexpr auto name = Name::name; - std::string getName() const override { return name; } - -protected: - StoragePtr executeImpl( - const ASTPtr & /* ast_function */, - ContextPtr context, - const std::string & table_name, - ColumnsDescription cached_columns, - bool /*is_insert_query*/) const override - { - ColumnsDescription columns; - auto configuration = TableFunction::getConfiguration(); - if (configuration->structure != "auto") - columns = parseColumnsListFromString(configuration->structure, context); - else if (!cached_columns.empty()) - columns = cached_columns; - - StoragePtr storage = Storage::create( - configuration, context, StorageID(TableFunction::getDatabaseName(), table_name), - columns, ConstraintsDescription{}, String{}, std::nullopt, LoadingStrictnessLevel::CREATE); - - storage->startup(); - return storage; - } - - const char * getStorageTypeName() const override { return name; } - - ColumnsDescription getActualTableStructure(ContextPtr context, bool is_insert_query) const override - { - auto configuration = TableFunction::getConfiguration(); - if (configuration->structure == "auto") - { - context->checkAccess(TableFunction::getSourceAccessType()); - auto object_storage = TableFunction::getObjectStorage(context, !is_insert_query); - return Storage::getTableStructureFromData(object_storage, configuration, std::nullopt, context); - } - else - { - return parseColumnsListFromString(configuration->structure, context); - } - } - - void parseArguments(const ASTPtr & ast_function, ContextPtr context) override - { - auto configuration = TableFunction::getConfiguration(); - configuration->format = "Parquet"; - /// Set default format to Parquet if it's not specified in arguments. 
- TableFunction::parseArguments(ast_function, context); - } -}; - -struct TableFunctionIcebergName -{ - static constexpr auto name = "iceberg"; -}; - -struct TableFunctionIcebergS3Name -{ - static constexpr auto name = "icebergS3"; -}; - -struct TableFunctionIcebergAzureName -{ - static constexpr auto name = "icebergAzure"; -}; - -struct TableFunctionIcebergLocalName -{ - static constexpr auto name = "icebergLocal"; -}; - -struct TableFunctionDeltaLakeName -{ - static constexpr auto name = "deltaLake"; -}; - -struct TableFunctionHudiName -{ - static constexpr auto name = "hudi"; -}; - -#if USE_AVRO -# if USE_AWS_S3 -using TableFunctionIceberg = ITableFunctionDataLake; -using TableFunctionIcebergS3 = ITableFunctionDataLake; -# endif -# if USE_AZURE_BLOB_STORAGE -using TableFunctionIcebergAzure = ITableFunctionDataLake; -# endif -using TableFunctionIcebergLocal = ITableFunctionDataLake; -#endif -#if USE_AWS_S3 -# if USE_PARQUET -using TableFunctionDeltaLake = ITableFunctionDataLake; -#endif -using TableFunctionHudi = ITableFunctionDataLake; -#endif -} diff --git a/src/TableFunctions/TableFunctionObjectStorage.cpp b/src/TableFunctions/TableFunctionObjectStorage.cpp index 9cebb91044a..60409a732c4 100644 --- a/src/TableFunctions/TableFunctionObjectStorage.cpp +++ b/src/TableFunctions/TableFunctionObjectStorage.cpp @@ -225,4 +225,94 @@ template class TableFunctionObjectStorage; #endif template class TableFunctionObjectStorage; + +#if USE_AVRO +void registerTableFunctionIceberg(TableFunctionFactory & factory) +{ +# if USE_AWS_S3 + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the Iceberg table stored on S3 object store. Alias to icebergS3)", + .examples{{"iceberg", "SELECT * FROM iceberg(url, access_key_id, secret_access_key)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the Iceberg table stored on S3 object store.)", + .examples{{"icebergS3", "SELECT * FROM icebergS3(url, access_key_id, secret_access_key)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); + +# endif +# if USE_AZURE_BLOB_STORAGE + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the Iceberg table stored on Azure object store.)", + .examples{{"icebergAzure", "SELECT * FROM icebergAzure(url, access_key_id, secret_access_key)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); +# endif + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the Iceberg table stored locally.)", + .examples{{"icebergLocal", "SELECT * FROM icebergLocal(filename)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); +} +#endif + +#if USE_AWS_S3 +# if USE_PARQUET +void registerTableFunctionDeltaLake(TableFunctionFactory & factory) +{ + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the DeltaLake table stored on object store.)", + .examples{{"deltaLake", "SELECT * FROM deltaLake(url, access_key_id, secret_access_key)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); +} +# endif + +void registerTableFunctionHudi(TableFunctionFactory & factory) +{ + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the Hudi table stored on object store.)", + .examples{{"hudi", "SELECT * FROM hudi(url, access_key_id, 
secret_access_key)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); +} +#endif + +void registerDataLakeTableFunctions(TableFunctionFactory & factory) +{ + UNUSED(factory); +#if USE_AVRO + registerTableFunctionIceberg(factory); +#endif +#if USE_AWS_S3 +# if USE_PARQUET + registerTableFunctionDeltaLake(factory); +# endif + registerTableFunctionHudi(factory); +#endif +} + +#if USE_AVRO +# if USE_AWS_S3 +template class TableFunctionObjectStorage; +template class TableFunctionObjectStorage; +# endif +# if USE_AZURE_BLOB_STORAGE +template class TableFunctionObjectStorage; +# endif +template class TableFunctionObjectStorage; +#endif +#if USE_AWS_S3 +# if USE_PARQUET +template class TableFunctionObjectStorage; +# endif +template class TableFunctionObjectStorage; +#endif } diff --git a/src/TableFunctions/TableFunctionObjectStorage.h b/src/TableFunctions/TableFunctionObjectStorage.h index 6b923f93e75..3cf86f982d1 100644 --- a/src/TableFunctions/TableFunctionObjectStorage.h +++ b/src/TableFunctions/TableFunctionObjectStorage.h @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -61,6 +62,42 @@ struct LocalDefinition static constexpr auto storage_type_name = "Local"; }; +struct IcebergDefinition +{ + static constexpr auto name = "iceberg"; + static constexpr auto storage_type_name = "S3"; +}; + +struct IcebergS3Definition +{ + static constexpr auto name = "icebergS3"; + static constexpr auto storage_type_name = "S3"; +}; + +struct IcebergAzureDefinition +{ + static constexpr auto name = "icebergAzure"; + static constexpr auto storage_type_name = "Azure"; +}; + +struct IcebergLocalDefinition +{ + static constexpr auto name = "icebergLocal"; + static constexpr auto storage_type_name = "Local"; +}; + +struct DeltaLakeDefinition +{ + static constexpr auto name = "deltaLake"; + static constexpr auto storage_type_name = "S3"; +}; + +struct HudiDefinition +{ + static constexpr auto name = "hudi"; + static constexpr auto storage_type_name = "S3"; +}; + template class TableFunctionObjectStorage : public ITableFunction { @@ -137,4 +174,22 @@ using TableFunctionHDFS = TableFunctionObjectStorage; + + +#if USE_AVRO +# if USE_AWS_S3 +using TableFunctionIceberg = TableFunctionObjectStorage; +using TableFunctionIcebergS3 = TableFunctionObjectStorage; +# endif +# if USE_AZURE_BLOB_STORAGE +using TableFunctionIcebergAzure = TableFunctionObjectStorage; +# endif +using TableFunctionIcebergLocal = TableFunctionObjectStorage; +#endif +#if USE_AWS_S3 +# if USE_PARQUET +using TableFunctionDeltaLake = TableFunctionObjectStorage; +# endif +using TableFunctionHudi = TableFunctionObjectStorage; +#endif } diff --git a/src/TableFunctions/registerDataLakeTableFunctions.cpp b/src/TableFunctions/registerDataLakeTableFunctions.cpp deleted file mode 100644 index 8361d8a7977..00000000000 --- a/src/TableFunctions/registerDataLakeTableFunctions.cpp +++ /dev/null @@ -1,88 +0,0 @@ -#include -#include - -namespace DB -{ - -#if USE_AVRO -void registerTableFunctionIceberg(TableFunctionFactory & factory) -{ -# if USE_AWS_S3 - factory.registerFunction( - {.documentation - = {.description = R"(The table function can be used to read the Iceberg table stored on S3 object store. 
Alias to icebergS3)", - .examples{{"iceberg", "SELECT * FROM iceberg(url, access_key_id, secret_access_key)", ""}}, - .categories{"DataLake"}}, - .allow_readonly = false}); - factory.registerFunction( - {.documentation - = {.description = R"(The table function can be used to read the Iceberg table stored on S3 object store.)", - .examples{{"icebergS3", "SELECT * FROM icebergS3(url, access_key_id, secret_access_key)", ""}}, - .categories{"DataLake"}}, - .allow_readonly = false}); - -# endif -# if USE_AZURE_BLOB_STORAGE - factory.registerFunction( - {.documentation - = {.description = R"(The table function can be used to read the Iceberg table stored on Azure object store.)", - .examples{{"icebergAzure", "SELECT * FROM icebergAzure(url, access_key_id, secret_access_key)", ""}}, - .categories{"DataLake"}}, - .allow_readonly = false}); -# endif - factory.registerFunction( - {.documentation - = {.description = R"(The table function can be used to read the Iceberg table stored locally.)", - .examples{{"icebergLocal", "SELECT * FROM icebergLocal(filename)", ""}}, - .categories{"DataLake"}}, - .allow_readonly = false}); -} -#endif - -#if USE_AWS_S3 -# if USE_PARQUET -void registerTableFunctionDeltaLake(TableFunctionFactory & factory) -{ - factory.registerFunction( - { - .documentation = - { - .description=R"(The table function can be used to read the DeltaLake table stored on object store.)", - .examples{{"deltaLake", "SELECT * FROM deltaLake(url, access_key_id, secret_access_key)", ""}}, - .categories{"DataLake"} - }, - .allow_readonly = false - }); -} -#endif - -void registerTableFunctionHudi(TableFunctionFactory & factory) -{ - factory.registerFunction( - { - .documentation = - { - .description=R"(The table function can be used to read the Hudi table stored on object store.)", - .examples{{"hudi", "SELECT * FROM hudi(url, access_key_id, secret_access_key)", ""}}, - .categories{"DataLake"} - }, - .allow_readonly = false - }); -} -#endif - -void registerDataLakeTableFunctions(TableFunctionFactory & factory) -{ - UNUSED(factory); -#if USE_AVRO - registerTableFunctionIceberg(factory); -#endif -#if USE_AWS_S3 -# if USE_PARQUET - registerTableFunctionDeltaLake(factory); -#endif - registerTableFunctionHudi(factory); -#endif -} - -} From c7f662dc989833d707d15ef086edd69c1d5b64cd Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 30 Sep 2024 02:43:53 +0000 Subject: [PATCH 008/353] fix build, add initial fuzzing processing --- .../data_type_deserialization_fuzzer.cpp | 1 + src/Parsers/fuzzers/CMakeLists.txt | 4 +- .../fuzzers/codegen_fuzzer/CMakeLists.txt | 2 +- tests/fuzz/runner.py | 76 +++++++++++++++++-- 4 files changed, 75 insertions(+), 8 deletions(-) diff --git a/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp b/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp index f9a733647e1..216b252ad0f 100644 --- a/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp +++ b/src/DataTypes/fuzzers/data_type_deserialization_fuzzer.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include diff --git a/src/Parsers/fuzzers/CMakeLists.txt b/src/Parsers/fuzzers/CMakeLists.txt index 903319d733c..c829c26a805 100644 --- a/src/Parsers/fuzzers/CMakeLists.txt +++ b/src/Parsers/fuzzers/CMakeLists.txt @@ -2,10 +2,10 @@ clickhouse_add_executable(lexer_fuzzer lexer_fuzzer.cpp ${SRCS}) target_link_libraries(lexer_fuzzer PRIVATE clickhouse_parsers) clickhouse_add_executable(select_parser_fuzzer select_parser_fuzzer.cpp ${SRCS}) -target_link_libraries(select_parser_fuzzer PRIVATE 
clickhouse_parsers dbms) +target_link_libraries(select_parser_fuzzer PRIVATE clickhouse_parsers clickhouse_functions dbms) clickhouse_add_executable(create_parser_fuzzer create_parser_fuzzer.cpp ${SRCS}) -target_link_libraries(create_parser_fuzzer PRIVATE clickhouse_parsers dbms) +target_link_libraries(create_parser_fuzzer PRIVATE clickhouse_parsers clickhouse_functions dbms) add_subdirectory(codegen_fuzzer) diff --git a/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt b/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt index 74fdcff79f7..ee17e03fce2 100644 --- a/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt +++ b/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt @@ -47,4 +47,4 @@ target_compile_options (codegen_select_fuzzer PRIVATE -Wno-newline-eof) target_link_libraries(protoc ch_contrib::fuzzer) target_include_directories(codegen_select_fuzzer SYSTEM BEFORE PRIVATE "${CMAKE_CURRENT_BINARY_DIR}") -target_link_libraries(codegen_select_fuzzer PRIVATE ch_contrib::protobuf_mutator ch_contrib::protoc dbms) +target_link_libraries(codegen_select_fuzzer PRIVATE ch_contrib::protobuf_mutator ch_contrib::protoc clickhouse_functions dbms) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 44259228f60..5abab282afd 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -4,19 +4,70 @@ import configparser import logging import os from pathlib import Path +import re import subprocess DEBUGGER = os.getenv("DEBUGGER", "") FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") +def report(source: str, reason: str, call_stack: list, test_unit: str): + print(f"########### REPORT: {source} {reason} {test_unit}") + for line in call_stack: + print(f" {line}") + print("########### END OF REPORT ###########") + +def process_fuzzer_output(output: str): + pass + +def process_error(error: str): + ERROR = r'^==\d+== ERROR: (\S+): (.*)' + error_source = '' + error_reason = '' + SUMMARY = r'^SUMMARY: ' + TEST_UNIT_LINE = r"artifact_prefix='.*/'; Test unit written to (.*)" + test_unit = '' + CALL_STACK_LINE = r'^\s+(#\d+.*)' + call_stack = [] + is_call_stack = False + + for line_num, line in enumerate(error.splitlines(), 1): + + if is_call_stack: + match = re.search(CALL_STACK_LINE, line) + if match: + call_stack.append(match.group(1)) + continue + else: + if re.search(SUMMARY, line): + is_call_stack = False + continue + + if not call_stack and not is_call_stack: + match = re.search(ERROR, line) + if match: + error_source = match.group(1) + error_reason = match.group(2) + is_call_stack = True + continue + + match = re.search(TEST_UNIT_LINE, line) + if match: + test_unit = match.group(1) + + report(error_source, error_reason, call_stack, test_unit) def run_fuzzer(fuzzer: str): logging.info("Running fuzzer %s...", fuzzer) - corpus_dir = f"{fuzzer}.in" - with Path(corpus_dir) as path: + seed_corpus_dir = f"{fuzzer}.in" + with Path(seed_corpus_dir) as path: if not path.exists() or not path.is_dir(): - corpus_dir = "" + seed_corpus_dir = "" + + active_corpus_dir = f"{fuzzer}.corpus" + if not os.path.exists(active_corpus_dir): + os.makedirs(active_corpus_dir) + options_file = f"{fuzzer}.options" custom_libfuzzer_options = "" @@ -53,7 +104,7 @@ def run_fuzzer(fuzzer: str): for key, value in parser["fuzzer_arguments"].items() ) - cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {corpus_dir}" + cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {active_corpus_dir} {seed_corpus_dir}" if custom_libfuzzer_options: cmd_line += f" {custom_libfuzzer_options}" if fuzzer_arguments: @@ -65,8 +116,23 @@ def run_fuzzer(fuzzer: 
str): cmd_line += " < /dev/null" logging.info("...will execute: %s", cmd_line) - subprocess.check_call(cmd_line, shell=True) + #subprocess.check_call(cmd_line, shell=True) + try: + result = subprocess.run( + cmd_line, + stderr=subprocess.PIPE, + stdout=subprocess.DEVNULL, + text=True, + check=True, + shell=True + ) + except subprocess.CalledProcessError as e: +# print("Command failed with error:", e) + print("Stderr output:", e.stderr) + process_error(e.stderr) + else: + process_fuzzer_output(result.stderr) def main(): logging.basicConfig(level=logging.INFO) From abd3747806dd8f3fb75eac4f0a5cea3c6eacffc2 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 30 Sep 2024 03:43:34 +0000 Subject: [PATCH 009/353] fix style --- tests/fuzz/runner.py | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 5abab282afd..6825a072e2d 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -3,30 +3,33 @@ import configparser import logging import os -from pathlib import Path import re import subprocess +from pathlib import Path DEBUGGER = os.getenv("DEBUGGER", "") FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") + def report(source: str, reason: str, call_stack: list, test_unit: str): print(f"########### REPORT: {source} {reason} {test_unit}") for line in call_stack: print(f" {line}") print("########### END OF REPORT ###########") + def process_fuzzer_output(output: str): pass + def process_error(error: str): - ERROR = r'^==\d+== ERROR: (\S+): (.*)' - error_source = '' - error_reason = '' - SUMMARY = r'^SUMMARY: ' + ERROR = r"^==\d+== ERROR: (\S+): (.*)" + error_source = "" + error_reason = "" + SUMMARY = r"^SUMMARY: " TEST_UNIT_LINE = r"artifact_prefix='.*/'; Test unit written to (.*)" - test_unit = '' - CALL_STACK_LINE = r'^\s+(#\d+.*)' + test_unit = "" + CALL_STACK_LINE = r"^\s+(#\d+.*)" call_stack = [] is_call_stack = False @@ -56,6 +59,7 @@ def process_error(error: str): report(error_source, error_reason, call_stack, test_unit) + def run_fuzzer(fuzzer: str): logging.info("Running fuzzer %s...", fuzzer) @@ -68,7 +72,6 @@ def run_fuzzer(fuzzer: str): if not os.path.exists(active_corpus_dir): os.makedirs(active_corpus_dir) - options_file = f"{fuzzer}.options" custom_libfuzzer_options = "" fuzzer_arguments = "" @@ -104,7 +107,9 @@ def run_fuzzer(fuzzer: str): for key, value in parser["fuzzer_arguments"].items() ) - cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {active_corpus_dir} {seed_corpus_dir}" + cmd_line = ( + f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {active_corpus_dir} {seed_corpus_dir}" + ) if custom_libfuzzer_options: cmd_line += f" {custom_libfuzzer_options}" if fuzzer_arguments: @@ -116,7 +121,7 @@ def run_fuzzer(fuzzer: str): cmd_line += " < /dev/null" logging.info("...will execute: %s", cmd_line) - #subprocess.check_call(cmd_line, shell=True) + # subprocess.check_call(cmd_line, shell=True) try: result = subprocess.run( @@ -125,15 +130,16 @@ def run_fuzzer(fuzzer: str): stdout=subprocess.DEVNULL, text=True, check=True, - shell=True + shell=True, ) except subprocess.CalledProcessError as e: -# print("Command failed with error:", e) + # print("Command failed with error:", e) print("Stderr output:", e.stderr) process_error(e.stderr) else: process_fuzzer_output(result.stderr) + def main(): logging.basicConfig(level=logging.INFO) From 55ae792706177ce96940f23d7147914db06dcf39 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 30 Sep 2024 04:02:25 +0000 Subject: [PATCH 010/353] fix style --- 
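Note (illustrative, not part of the commit message): the snippet below is a quick, self-contained way to sanity-check the crash-report regular expressions introduced in tests/fuzz/runner.py against a hand-written libFuzzer-style report. The sample report, symbol names, paths and addresses are invented for demonstration; real sanitizer output will differ, and process_error() itself walks the report line by line rather than matching the whole blob with re.MULTILINE as done here.

import re

# Invented sample of what AddressSanitizer/libFuzzer typically prints on a crash.
SAMPLE = "\n".join([
    "==12345== ERROR: AddressSanitizer: heap-buffer-overflow on address 0x000001",
    "    #0 0x4f2e11 in DB::parseSomething(...) /src/Parsers/parseQuery.cpp:45",
    "    #1 0x4f3a02 in LLVMFuzzerTestOneInput /src/Parsers/fuzzers/lexer_fuzzer.cpp:12",
    "SUMMARY: AddressSanitizer: heap-buffer-overflow",
    "artifact_prefix='./'; Test unit written to ./crash-deadbeef",
])

# Same patterns as in process_error().
ERROR = r"^==\d+== ERROR: (\S+): (.*)"
CALL_STACK_LINE = r"^\s+(#\d+.*)"
TEST_UNIT_LINE = r"artifact_prefix='.*/'; Test unit written to (.*)"

# ('AddressSanitizer', 'heap-buffer-overflow on address 0x000001')
print(re.search(ERROR, SAMPLE, re.MULTILINE).groups())
# ['#0 0x4f2e11 in ...', '#1 0x4f3a02 in ...']  -- the captured call stack frames
print(re.findall(CALL_STACK_LINE, SAMPLE, re.MULTILINE))
# './crash-deadbeef'  -- the test unit written by libFuzzer
print(re.search(TEST_UNIT_LINE, SAMPLE, re.MULTILINE).group(1))
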
tests/fuzz/runner.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 6825a072e2d..deb219baff9 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -18,6 +18,7 @@ def report(source: str, reason: str, call_stack: list, test_unit: str): print("########### END OF REPORT ###########") +# pylint: disable=unused-argument def process_fuzzer_output(output: str): pass @@ -33,6 +34,7 @@ def process_error(error: str): call_stack = [] is_call_stack = False + # pylint: disable=unused-variable for line_num, line in enumerate(error.splitlines(), 1): if is_call_stack: @@ -40,10 +42,10 @@ def process_error(error: str): if match: call_stack.append(match.group(1)) continue - else: - if re.search(SUMMARY, line): - is_call_stack = False - continue + + if re.search(SUMMARY, line): + is_call_stack = False + continue if not call_stack and not is_call_stack: match = re.search(ERROR, line) From ba5a0e98e3acc83531542ed6b35b57a1a0c10fee Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 30 Sep 2024 13:03:17 +0000 Subject: [PATCH 011/353] fix build --- src/AggregateFunctions/fuzzers/CMakeLists.txt | 2 +- src/Core/fuzzers/CMakeLists.txt | 2 +- src/DataTypes/fuzzers/CMakeLists.txt | 2 +- src/Formats/fuzzers/CMakeLists.txt | 2 +- src/Interpreters/fuzzers/CMakeLists.txt | 1 + src/Storages/fuzzers/CMakeLists.txt | 2 +- 6 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/AggregateFunctions/fuzzers/CMakeLists.txt b/src/AggregateFunctions/fuzzers/CMakeLists.txt index 6a7be0d4377..f01bcb0b631 100644 --- a/src/AggregateFunctions/fuzzers/CMakeLists.txt +++ b/src/AggregateFunctions/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable(aggregate_function_state_deserialization_fuzzer aggregate_function_state_deserialization_fuzzer.cpp ${SRCS}) -target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE clickhouse_aggregate_functions) +target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE clickhouse_aggregate_functions dbms) diff --git a/src/Core/fuzzers/CMakeLists.txt b/src/Core/fuzzers/CMakeLists.txt index c60ce0e097f..51db6fa0b53 100644 --- a/src/Core/fuzzers/CMakeLists.txt +++ b/src/Core/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable (names_and_types_fuzzer names_and_types_fuzzer.cpp) -target_link_libraries (names_and_types_fuzzer PRIVATE) +target_link_libraries (names_and_types_fuzzer PRIVATE dbms) diff --git a/src/DataTypes/fuzzers/CMakeLists.txt b/src/DataTypes/fuzzers/CMakeLists.txt index 9e5b1b3f673..8dedd3470e2 100644 --- a/src/DataTypes/fuzzers/CMakeLists.txt +++ b/src/DataTypes/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable(data_type_deserialization_fuzzer data_type_deserialization_fuzzer.cpp ${SRCS}) -target_link_libraries(data_type_deserialization_fuzzer PRIVATE clickhouse_aggregate_functions) +target_link_libraries(data_type_deserialization_fuzzer PRIVATE clickhouse_aggregate_functions dbms) diff --git a/src/Formats/fuzzers/CMakeLists.txt b/src/Formats/fuzzers/CMakeLists.txt index ee1a4fd4358..83aa5eb781a 100644 --- a/src/Formats/fuzzers/CMakeLists.txt +++ b/src/Formats/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable(format_fuzzer format_fuzzer.cpp ${SRCS}) -target_link_libraries(format_fuzzer PRIVATE clickhouse_aggregate_functions) +target_link_libraries(format_fuzzer PRIVATE clickhouse_aggregate_functions dbms) diff --git a/src/Interpreters/fuzzers/CMakeLists.txt 
b/src/Interpreters/fuzzers/CMakeLists.txt index 3317bba7e30..174fae299b7 100644 --- a/src/Interpreters/fuzzers/CMakeLists.txt +++ b/src/Interpreters/fuzzers/CMakeLists.txt @@ -3,5 +3,6 @@ target_link_libraries(execute_query_fuzzer PRIVATE dbms clickhouse_table_functions clickhouse_aggregate_functions + clickhouse_functions clickhouse_dictionaries clickhouse_dictionaries_embedded) diff --git a/src/Storages/fuzzers/CMakeLists.txt b/src/Storages/fuzzers/CMakeLists.txt index 2c7c0c16fc2..719b9b77cd9 100644 --- a/src/Storages/fuzzers/CMakeLists.txt +++ b/src/Storages/fuzzers/CMakeLists.txt @@ -4,4 +4,4 @@ clickhouse_add_executable (mergetree_checksum_fuzzer mergetree_checksum_fuzzer.c target_link_libraries (mergetree_checksum_fuzzer PRIVATE dbms) clickhouse_add_executable (columns_description_fuzzer columns_description_fuzzer.cpp) -target_link_libraries (columns_description_fuzzer PRIVATE) +target_link_libraries (columns_description_fuzzer PRIVATE dbms) From 4e6180b50aaf3e39616750f8e4c6b114e0362e97 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 30 Sep 2024 13:18:44 +0000 Subject: [PATCH 012/353] Resolve conflicts, better exception message --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 8 ++++++-- src/Core/Settings.h | 2 +- src/Interpreters/ExpressionAnalyzer.cpp | 8 ++++++-- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index f3d77b0f091..56c96d41c6c 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -103,6 +103,8 @@ namespace Setting extern const SettingsBool single_join_prefer_left_table; extern const SettingsBool transform_null_in; extern const SettingsUInt64 use_structure_from_insertion_table_in_table_functions; + extern const SettingsBool allow_suspicious_types_in_group_by; + extern const SettingsBool allow_suspicious_types_in_order_by; } @@ -4100,7 +4102,7 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ void QueryAnalyzer::validateSortingKeyType(const DataTypePtr & sorting_key_type, const IdentifierResolveScope & scope) const { - if (scope.context->getSettingsRef().allow_suspicious_types_in_order_by) + if (scope.context->getSettingsRef()[Setting::allow_suspicious_types_in_order_by]) return; auto check = [](const IDataType & type) @@ -4109,6 +4111,7 @@ void QueryAnalyzer::validateSortingKeyType(const DataTypePtr & sorting_key_type, throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it"); }; @@ -4189,7 +4192,7 @@ void QueryAnalyzer::resolveGroupByNode(QueryNode & query_node_typed, IdentifierR */ void QueryAnalyzer::validateGroupByKeyType(const DataTypePtr & group_by_key_type, const IdentifierResolveScope & scope) const { - if (scope.context->getSettingsRef().allow_suspicious_types_in_group_by) + if (scope.context->getSettingsRef()[Setting::allow_suspicious_types_in_group_by]) return; auto check = [](const IDataType & type) @@ -4198,6 +4201,7 @@ void QueryAnalyzer::validateGroupByKeyType(const DataTypePtr & group_by_key_type throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. 
" + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); }; diff --git a/src/Core/Settings.h b/src/Core/Settings.h index bc2d0b423c1..5909ab6314c 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -156,4 +156,4 @@ struct Settings private: std::unique_ptr impl; }; -} \ No newline at end of file +} diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index dc7dca712a0..9a09bf8e16f 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -106,6 +106,8 @@ namespace Setting extern const SettingsBool query_plan_aggregation_in_order; extern const SettingsBool query_plan_read_in_order; extern const SettingsUInt64 use_index_for_in_with_subqueries_max_values; + extern const SettingsBool allow_suspicious_types_in_group_by; + extern const SettingsBool allow_suspicious_types_in_order_by; } @@ -1409,7 +1411,7 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain void SelectQueryExpressionAnalyzer::validateGroupByKeyType(const DB::DataTypePtr & key_type) const { - if (getContext()->getSettingsRef().allow_suspicious_types_in_group_by) + if (getContext()->getSettingsRef()[Setting::allow_suspicious_types_in_group_by]) return; auto check = [](const IDataType & type) @@ -1418,6 +1420,7 @@ void SelectQueryExpressionAnalyzer::validateGroupByKeyType(const DB::DataTypePtr throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); }; @@ -1692,7 +1695,7 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy( void SelectQueryExpressionAnalyzer::validateOrderByKeyType(const DataTypePtr & key_type) const { - if (getContext()->getSettingsRef().allow_suspicious_types_in_order_by) + if (getContext()->getSettingsRef()[Setting::allow_suspicious_types_in_order_by]) return; auto check = [](const IDataType & type) @@ -1701,6 +1704,7 @@ void SelectQueryExpressionAnalyzer::validateOrderByKeyType(const DataTypePtr & k throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). 
" "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it"); }; From 11c3c0de2447e5fcab999b13d0539cd074f3831d Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 30 Sep 2024 13:22:34 +0000 Subject: [PATCH 013/353] Even better exception message --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 6 ++++-- src/Interpreters/ExpressionAnalyzer.cpp | 6 ++++-- src/Storages/KeyDescription.cpp | 5 ++++- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 56c96d41c6c..7dc1d99efd0 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -4111,7 +4111,8 @@ void QueryAnalyzer::validateSortingKeyType(const DataTypePtr & sorting_key_type, throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. " - "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if " + "its a JSON path subcolumn) or casting this column to a specific data type. " "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it"); }; @@ -4201,7 +4202,8 @@ void QueryAnalyzer::validateGroupByKeyType(const DataTypePtr & group_by_key_type throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. " - "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if " + "its a JSON path subcolumn) or casting this column to a specific data type. " "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); }; diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 9a09bf8e16f..12e769f249a 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1420,7 +1420,8 @@ void SelectQueryExpressionAnalyzer::validateGroupByKeyType(const DB::DataTypePtr throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in GROUP BY keys, because it can lead to unexpected results. " - "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). " + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if " + "its a JSON path subcolumn) or casting this column to a specific data type. " "Set setting allow_suspicious_types_in_group_by = 1 in order to allow it"); }; @@ -1704,7 +1705,8 @@ void SelectQueryExpressionAnalyzer::validateOrderByKeyType(const DataTypePtr & k throw Exception( ErrorCodes::ILLEGAL_COLUMN, "Data types Variant/Dynamic are not allowed in ORDER BY keys, because it can lead to unexpected results. " - "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn). 
" + "Consider using a subcolumn with a specific data type instead (for example 'column.Int64' or 'json.some.path.:Int64' if " + "its a JSON path subcolumn) or casting this column to a specific data type. " "Set setting allow_suspicious_types_in_order_by = 1 in order to allow it"); }; diff --git a/src/Storages/KeyDescription.cpp b/src/Storages/KeyDescription.cpp index bb0b6d3542d..5c0449612e7 100644 --- a/src/Storages/KeyDescription.cpp +++ b/src/Storages/KeyDescription.cpp @@ -155,7 +155,10 @@ KeyDescription KeyDescription::getSortingKeyFromAST( auto check = [&](const IDataType & type) { if (isDynamic(type) || isVariant(type)) - throw Exception(ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_KEY, "Column with type Variant/Dynamic is not allowed in key expression"); + throw Exception( + ErrorCodes::DATA_TYPE_CANNOT_BE_USED_IN_KEY, + "Column with type Variant/Dynamic is not allowed in key expression. Consider using a subcolumn with a specific data " + "type instead (for example 'column.Int64' or 'json.some.path.:Int64' if its a JSON path subcolumn) or casting this column to a specific data type"); }; check(*result.data_types.back()); From dda32963fdd399c2c614b2cb630fb714549e2804 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 30 Sep 2024 13:57:19 +0000 Subject: [PATCH 014/353] Fix tests --- src/Core/SettingsChangesHistory.cpp | 6 ++---- .../03096_variant_in_primary_key.reference | 4 ---- .../0_stateless/03096_variant_in_primary_key.sql | 8 -------- .../03231_dynamic_incomplete_type_insert_bug.sql | 1 + .../03231_dynamic_not_safe_primary_key.reference | 0 .../03231_dynamic_not_safe_primary_key.sql | 11 ----------- .../0_stateless/03231_dynamic_uniq_group_by.sql | 2 ++ 7 files changed, 5 insertions(+), 27 deletions(-) delete mode 100644 tests/queries/0_stateless/03096_variant_in_primary_key.reference delete mode 100644 tests/queries/0_stateless/03096_variant_in_primary_key.sql delete mode 100644 tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.reference delete mode 100644 tests/queries/0_stateless/03231_dynamic_not_safe_primary_key.sql diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 21a42b970f2..7bc9517a6a6 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -72,6 +72,8 @@ static std::initializer_list Date: Mon, 30 Sep 2024 14:56:34 +0000 Subject: [PATCH 015/353] ignore encoding errors in fuzzers output --- tests/fuzz/runner.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index deb219baff9..6f229725d4e 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -133,6 +133,7 @@ def run_fuzzer(fuzzer: str): text=True, check=True, shell=True, + errors='replace', ) except subprocess.CalledProcessError as e: # print("Command failed with error:", e) From 07fd719c8b2be80d08f088c2849a5fc150b98bc5 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 30 Sep 2024 15:03:00 +0000 Subject: [PATCH 016/353] Automatic style fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 6f229725d4e..e6eff430d1b 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -133,7 +133,7 @@ def run_fuzzer(fuzzer: str): text=True, check=True, shell=True, - errors='replace', + errors="replace", ) except subprocess.CalledProcessError as e: # print("Command failed with error:", e) From a1a571c45e43b767d4c2f2a7c4114020513882b9 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 1 Oct 2024 
12:59:46 +0000 Subject: [PATCH 017/353] Fix tests --- tests/queries/0_stateless/01825_new_type_json_10.sql | 1 + tests/queries/0_stateless/01825_new_type_json_11.sh | 6 +++--- tests/queries/0_stateless/01825_new_type_json_12.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_13.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_6.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_7.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_ghdata.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_in_array.sql | 3 +++ .../0_stateless/01825_new_type_json_insert_select.sql | 2 ++ .../queries/0_stateless/02421_new_type_json_async_insert.sh | 2 +- .../0_stateless/03151_dynamic_type_scale_max_types.sql | 5 +++-- 11 files changed, 18 insertions(+), 11 deletions(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_10.sql b/tests/queries/0_stateless/01825_new_type_json_10.sql index f586cc4477b..9aac35e2c88 100644 --- a/tests/queries/0_stateless/01825_new_type_json_10.sql +++ b/tests/queries/0_stateless/01825_new_type_json_10.sql @@ -1,6 +1,7 @@ -- Tags: no-fasttest SET allow_experimental_json_type = 1; +SET allow_suspicious_types_in_order_by = 1; DROP TABLE IF EXISTS t_json_10; CREATE TABLE t_json_10 (o JSON) ENGINE = Memory; diff --git a/tests/queries/0_stateless/01825_new_type_json_11.sh b/tests/queries/0_stateless/01825_new_type_json_11.sh index f448b7433ab..e9b90af4499 100755 --- a/tests/queries/0_stateless/01825_new_type_json_11.sh +++ b/tests/queries/0_stateless/01825_new_type_json_11.sh @@ -57,8 +57,8 @@ $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(obj)) as $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(obj.key_1[]))) as path FROM t_json_11 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(obj.key_1[].key_3[])))) as path FROM t_json_11 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(arrayJoin(obj.key_1[].key_3[].key_4[]))))) as path FROM t_json_11 order by path;" -$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_11 ORDER BY obj.id FORMAT JSONEachRow" -$CLICKHOUSE_CLIENT -q "SELECT obj.key_1[].key_3 FROM t_json_11 ORDER BY obj.id FORMAT JSONEachRow" -$CLICKHOUSE_CLIENT -q "SELECT obj.key_1[].key_3[].key_4[].key_5, obj.key_1[].key_3[].key_7 FROM t_json_11 ORDER BY obj.id" +$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_11 ORDER BY obj.id FORMAT JSONEachRow" --allow_suspicious_types_in_order_by 1 +$CLICKHOUSE_CLIENT -q "SELECT obj.key_1[].key_3 FROM t_json_11 ORDER BY obj.id FORMAT JSONEachRow" --allow_suspicious_types_in_order_by 1 +$CLICKHOUSE_CLIENT -q "SELECT obj.key_1[].key_3[].key_4[].key_5, obj.key_1[].key_3[].key_7 FROM t_json_11 ORDER BY obj.id" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE t_json_11;" diff --git a/tests/queries/0_stateless/01825_new_type_json_12.sh b/tests/queries/0_stateless/01825_new_type_json_12.sh index d7c938d7cd1..e3909787690 100755 --- a/tests/queries/0_stateless/01825_new_type_json_12.sh +++ b/tests/queries/0_stateless/01825_new_type_json_12.sh @@ -49,6 +49,6 @@ $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(arrayJoin(obj.key_0[].key_1[].key_3[]))))) as path FROM t_json_12 order by path;" $CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_12 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 
1 $CLICKHOUSE_CLIENT -q "SELECT obj.key_0[].key_1[].key_3[].key_4, obj.key_0[].key_1[].key_3[].key_5, \ - obj.key_0[].key_1[].key_3[].key_6, obj.key_0[].key_1[].key_3[].key_7 FROM t_json_12 ORDER BY obj.id" + obj.key_0[].key_1[].key_3[].key_6, obj.key_0[].key_1[].key_3[].key_7 FROM t_json_12 ORDER BY obj.id" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE t_json_12;" diff --git a/tests/queries/0_stateless/01825_new_type_json_13.sh b/tests/queries/0_stateless/01825_new_type_json_13.sh index 316e6890d5e..e7d9f556be7 100755 --- a/tests/queries/0_stateless/01825_new_type_json_13.sh +++ b/tests/queries/0_stateless/01825_new_type_json_13.sh @@ -45,6 +45,6 @@ $CLICKHOUSE_CLIENT -q "SELECT \ obj.key_1.key_2.key_3.key_4.key_5, \ obj.key_1.key_2.key_3.key_4.key_6, \ obj.key_1.key_2.key_3.key_4.key_7 \ -FROM t_json_13 ORDER BY obj.id" +FROM t_json_13 ORDER BY obj.id" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE t_json_13;" diff --git a/tests/queries/0_stateless/01825_new_type_json_6.sh b/tests/queries/0_stateless/01825_new_type_json_6.sh index 6b9a7e71f50..a2102636c42 100755 --- a/tests/queries/0_stateless/01825_new_type_json_6.sh +++ b/tests/queries/0_stateless/01825_new_type_json_6.sh @@ -54,6 +54,6 @@ EOF $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) as path FROM t_json_6 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(data.out[]))) as path FROM t_json_6 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(data.out[].outputs[])))) as path FROM t_json_6 order by path;" -$CLICKHOUSE_CLIENT -q "SELECT data.key, data.out[].type, data.out[].value, data.out[].outputs[].index, data.out[].outputs[].n FROM t_json_6 ORDER BY data.key" +$CLICKHOUSE_CLIENT -q "SELECT data.key, data.out[].type, data.out[].value, data.out[].outputs[].index, data.out[].outputs[].n FROM t_json_6 ORDER BY data.key" --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "DROP TABLE t_json_6;" diff --git a/tests/queries/0_stateless/01825_new_type_json_7.sh b/tests/queries/0_stateless/01825_new_type_json_7.sh index 36483175df6..b6ea46f5ff8 100755 --- a/tests/queries/0_stateless/01825_new_type_json_7.sh +++ b/tests/queries/0_stateless/01825_new_type_json_7.sh @@ -25,6 +25,6 @@ cat < Date: Tue, 1 Oct 2024 14:02:17 +0000 Subject: [PATCH 018/353] add timeout for every fuzzer --- tests/fuzz/runner.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index e6eff430d1b..cfd60d8f259 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -62,7 +62,7 @@ def process_error(error: str): report(error_source, error_reason, call_stack, test_unit) -def run_fuzzer(fuzzer: str): +def run_fuzzer(fuzzer: str, timeout: int): logging.info("Running fuzzer %s...", fuzzer) seed_corpus_dir = f"{fuzzer}.in" @@ -134,6 +134,7 @@ def run_fuzzer(fuzzer: str): check=True, shell=True, errors="replace", + timeout=timeout, ) except subprocess.CalledProcessError as e: # print("Command failed with error:", e) @@ -148,10 +149,16 @@ def main(): subprocess.check_call("ls -al", shell=True) + timeout = 30 + + match = re.search(r"(^|\s+)-max_total_time=(\d+)($|\s)", FUZZER_ARGS) + if match: + timeout += match.group(2) + with Path() as current: for fuzzer in current.iterdir(): if (current / fuzzer).is_file() and os.access(current / fuzzer, os.X_OK): - run_fuzzer(fuzzer) + run_fuzzer(fuzzer, timeout) if 
__name__ == "__main__": From 77e13544d6d5641a68a765c7e15f7af4b9bfec00 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 1 Oct 2024 14:03:05 +0000 Subject: [PATCH 019/353] Parallel replicas: use local plan for local replica by default --- src/Core/Settings.cpp | 2 +- src/Core/SettingsChangesHistory.cpp | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index d0ce90e6fdd..dfba3b128bb 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -965,7 +965,7 @@ namespace ErrorCodes M(Bool, parallel_replicas_prefer_local_join, true, "If true, and JOIN can be executed with parallel replicas algorithm, and all storages of right JOIN part are *MergeTree, local JOIN will be used instead of GLOBAL JOIN.", 0) \ M(UInt64, parallel_replicas_mark_segment_size, 0, "Parts virtually divided into segments to be distributed between replicas for parallel reading. This setting controls the size of these segments. Not recommended to change until you're absolutely sure in what you're doing. Value should be in range [128; 16384]", 0) \ M(Bool, allow_archive_path_syntax, true, "File/S3 engines/table function will parse paths with '::' as ' :: ' if archive has correct extension", 0) \ - M(Bool, parallel_replicas_local_plan, false, "Build local plan for local replica", 0) \ + M(Bool, parallel_replicas_local_plan, true, "If true, use local plan for local replica in a query with parallel replicas, otherwise all replicas in a used cluster considered as remote", 0) \ \ M(Bool, allow_experimental_inverted_index, false, "If it is set to true, allow to use experimental inverted index.", 0) \ M(Bool, allow_experimental_full_text_index, false, "If it is set to true, allow to use experimental full-text index.", 0) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 560f144866b..92cf586b9c6 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -67,6 +67,7 @@ static std::initializer_list Date: Tue, 1 Oct 2024 15:49:26 +0000 Subject: [PATCH 020/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index cfd60d8f259..ccc5a4b7465 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -153,7 +153,7 @@ def main(): match = re.search(r"(^|\s+)-max_total_time=(\d+)($|\s)", FUZZER_ARGS) if match: - timeout += match.group(2) + timeout += int(match.group(2)) with Path() as current: for fuzzer in current.iterdir(): if (current / fuzzer).is_file() and os.access(current / fuzzer, os.X_OK): run_fuzzer(fuzzer, timeout) From da525b6ab5b752c5029433e3513007e6b5e8759b Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 1 Oct 2024 18:25:22 +0000 Subject: [PATCH 021/353] process timeout --- tests/fuzz/runner.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index ccc5a4b7465..f4a6a67e1f8 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -138,8 +138,11 @@ def run_fuzzer(fuzzer: str, timeout: int): ) except subprocess.CalledProcessError as e: # print("Command failed with error:", e) - print("Stderr output:", e.stderr) + print("Stderr output: ", e.stderr) process_error(e.stderr) + except subprocess.TimeoutExpired as e: + print("Timeout: ", e.stderr) + process_fuzzer_output(e.stderr) else: process_fuzzer_output(result.stderr) From fec1b32a79987767618e44dc06a04ac8f6762a09 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 2 Oct 2024 14:01:02 +0000 Subject: [PATCH 022/353] fix parser --- tests/fuzz/runner.py | 
34 +++++++++++++--------------------- 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index f4a6a67e1f8..4099ff940e8 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -24,42 +24,34 @@ def process_fuzzer_output(output: str): def process_error(error: str): - ERROR = r"^==\d+== ERROR: (\S+): (.*)" + ERROR = r"^==\d+==\s?ERROR: (\S+): (.*)" error_source = "" error_reason = "" - SUMMARY = r"^SUMMARY: " TEST_UNIT_LINE = r"artifact_prefix='.*/'; Test unit written to (.*)" - test_unit = "" - CALL_STACK_LINE = r"^\s+(#\d+.*)" call_stack = [] is_call_stack = False # pylint: disable=unused-variable for line_num, line in enumerate(error.splitlines(), 1): - if is_call_stack: - match = re.search(CALL_STACK_LINE, line) - if match: - call_stack.append(match.group(1)) - continue - - if re.search(SUMMARY, line): + if re.search(r"^==\d+==", line): is_call_stack = False + continue + call_stack.append(line) continue - if not call_stack and not is_call_stack: - match = re.search(ERROR, line) + if call_stack: + match = re.search(TEST_UNIT_LINE, line) if match: - error_source = match.group(1) - error_reason = match.group(2) - is_call_stack = True - continue + report(error_source, error_reason, call_stack, match.group(1)) + call_stack.clear() + continue - match = re.search(TEST_UNIT_LINE, line) + match = re.search(ERROR, line) if match: - test_unit = match.group(1) - - report(error_source, error_reason, call_stack, test_unit) + error_source = match.group(1) + error_reason = match.group(2) + is_call_stack = True def run_fuzzer(fuzzer: str, timeout: int): From 28b4c8cba32fe57840529f3e2d3298c27564cafe Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 2 Oct 2024 15:16:38 +0000 Subject: [PATCH 023/353] Fix tests --- tests/queries/0_stateless/01825_new_type_json_12.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_13.sh | 2 +- tests/queries/0_stateless/01825_new_type_json_in_array.sql | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_12.sh b/tests/queries/0_stateless/01825_new_type_json_12.sh index e3909787690..fd5b9fddd75 100755 --- a/tests/queries/0_stateless/01825_new_type_json_12.sh +++ b/tests/queries/0_stateless/01825_new_type_json_12.sh @@ -47,7 +47,7 @@ $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(obj)) as $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(obj.key_0[]))) as path FROM t_json_12 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(obj.key_0[].key_1[])))) as path FROM t_json_12 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(arrayJoin(obj.key_0[].key_1[].key_3[]))))) as path FROM t_json_12 order by path;" -$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_12 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 +$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_12 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "SELECT obj.key_0[].key_1[].key_3[].key_4, obj.key_0[].key_1[].key_3[].key_5, \ obj.key_0[].key_1[].key_3[].key_6, obj.key_0[].key_1[].key_3[].key_7 FROM t_json_12 ORDER BY obj.id" --allow_suspicious_types_in_order_by 1 diff --git a/tests/queries/0_stateless/01825_new_type_json_13.sh b/tests/queries/0_stateless/01825_new_type_json_13.sh index e7d9f556be7..116665e58e3 
100755 --- a/tests/queries/0_stateless/01825_new_type_json_13.sh +++ b/tests/queries/0_stateless/01825_new_type_json_13.sh @@ -39,7 +39,7 @@ EOF $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(obj)) as path FROM t_json_13 order by path;" $CLICKHOUSE_CLIENT -q "SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(obj.key1[]))) as path FROM t_json_13 order by path;" -$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_13 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 +$CLICKHOUSE_CLIENT -q "SELECT obj FROM t_json_13 ORDER BY obj.id FORMAT JSONEachRow" --output_format_json_named_tuples_as_objects 1 --allow_suspicious_types_in_order_by 1 $CLICKHOUSE_CLIENT -q "SELECT \ obj.key_1.key_2.key_3.key_8, \ obj.key_1.key_2.key_3.key_4.key_5, \ diff --git a/tests/queries/0_stateless/01825_new_type_json_in_array.sql b/tests/queries/0_stateless/01825_new_type_json_in_array.sql index 3d2e04a1bfd..ef15061e6c8 100644 --- a/tests/queries/0_stateless/01825_new_type_json_in_array.sql +++ b/tests/queries/0_stateless/01825_new_type_json_in_array.sql @@ -3,7 +3,7 @@ SET allow_experimental_json_type = 1; SET allow_experimental_analyzer = 1; SET allow_suspicious_types_in_order_by = 1; -SET allow_suspicious_types_in_order_by = 1; +SET allow_suspicious_types_in_group_by = 1; DROP TABLE IF EXISTS t_json_array; From ab89e4daa0fe9cf6035c030b1863d64c4c2d8ce0 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 2 Oct 2024 15:51:41 +0000 Subject: [PATCH 024/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 4099ff940e8..d752fce1bd0 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -133,7 +133,7 @@ def run_fuzzer(fuzzer: str, timeout: int): print("Stderr output: ", e.stderr) process_error(e.stderr) except subprocess.TimeoutExpired as e: - print("Timeout: ", e.stderr) + print("Timeout") process_fuzzer_output(e.stderr) else: process_fuzzer_output(result.stderr) From 674ccf939e2312fd91095133b5900081ebcc5638 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 2 Oct 2024 17:14:11 +0000 Subject: [PATCH 025/353] debugging timeouts --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index d752fce1bd0..05b8faa96a2 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -133,7 +133,7 @@ def run_fuzzer(fuzzer: str, timeout: int): print("Stderr output: ", e.stderr) process_error(e.stderr) except subprocess.TimeoutExpired as e: - print("Timeout") + print("Timeout for %s", cmd_line) process_fuzzer_output(e.stderr) else: process_fuzzer_output(result.stderr) From 0f8fed3d83bac3f9a91225c5c190fa1d6624ebe3 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 2 Oct 2024 20:07:02 +0000 Subject: [PATCH 026/353] add s3 corpus --- tests/fuzz/runner.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 05b8faa96a2..3b916145e0c 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -6,6 +6,8 @@ import os import re import subprocess from pathlib import Path +from tests.ci.env_helper import S3_BUILDS_BUCKET +from tests.ci.s3_helper import S3Helper DEBUGGER = os.getenv("DEBUGGER", "") FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") @@ -55,6 +57,8 @@ def process_error(error: str): def run_fuzzer(fuzzer: str, timeout: int): + s3 = S3Helper() + logging.info("Running fuzzer %s...", 
fuzzer) seed_corpus_dir = f"{fuzzer}.in" @@ -63,8 +67,14 @@ def run_fuzzer(fuzzer: str, timeout: int): seed_corpus_dir = "" active_corpus_dir = f"{fuzzer}.corpus" - if not os.path.exists(active_corpus_dir): - os.makedirs(active_corpus_dir) + s3.download_files(bucket=S3_BUILDS_BUCKET, + s3_path=f"fuzzer/corpus/{fuzzer}/", + file_suffix="", + local_directory=active_corpus_dir,) + + new_corpus_dir = f"{fuzzer}.corpus_new" + if not os.path.exists(new_corpus_dir): + os.makedirs(new_corpus_dir) options_file = f"{fuzzer}.options" custom_libfuzzer_options = "" @@ -102,7 +112,7 @@ def run_fuzzer(fuzzer: str, timeout: int): ) cmd_line = ( - f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {active_corpus_dir} {seed_corpus_dir}" + f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {new_corpus_dir} {active_corpus_dir} {seed_corpus_dir}" ) if custom_libfuzzer_options: cmd_line += f" {custom_libfuzzer_options}" @@ -133,11 +143,17 @@ def run_fuzzer(fuzzer: str, timeout: int): print("Stderr output: ", e.stderr) process_error(e.stderr) except subprocess.TimeoutExpired as e: - print("Timeout for %s", cmd_line) + print("Timeout for ", cmd_line) process_fuzzer_output(e.stderr) else: process_fuzzer_output(result.stderr) + f = open(f"{new_corpus_dir}/testfile", "a") + f.write("Now the file has more content!") + f.close() + + s3.upload_build_directory_to_s3(new_corpus_dir, "fuzzer/corpus/") + def main(): logging.basicConfig(level=logging.INFO) From f43ebf004f334ec782fdccd2aa38c1846288fe4a Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 2 Oct 2024 20:24:13 +0000 Subject: [PATCH 027/353] fix style --- tests/fuzz/runner.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 3b916145e0c..8e1de7ca38d 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -6,6 +6,7 @@ import os import re import subprocess from pathlib import Path + from tests.ci.env_helper import S3_BUILDS_BUCKET from tests.ci.s3_helper import S3Helper @@ -68,9 +69,10 @@ def run_fuzzer(fuzzer: str, timeout: int): active_corpus_dir = f"{fuzzer}.corpus" s3.download_files(bucket=S3_BUILDS_BUCKET, - s3_path=f"fuzzer/corpus/{fuzzer}/", - file_suffix="", - local_directory=active_corpus_dir,) + s3_path=f"fuzzer/corpus/{fuzzer}/", + file_suffix="", + local_directory=active_corpus_dir, + ) new_corpus_dir = f"{fuzzer}.corpus_new" if not os.path.exists(new_corpus_dir): @@ -111,9 +113,8 @@ def run_fuzzer(fuzzer: str, timeout: int): for key, value in parser["fuzzer_arguments"].items() ) - cmd_line = ( - f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {new_corpus_dir} {active_corpus_dir} {seed_corpus_dir}" - ) + cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {new_corpus_dir} {active_corpus_dir} {seed_corpus_dir}" + if custom_libfuzzer_options: cmd_line += f" {custom_libfuzzer_options}" if fuzzer_arguments: From 245e76a5d3be2dd78cf072ef9c4810da4a497d29 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 2 Oct 2024 20:36:31 +0000 Subject: [PATCH 028/353] fix style --- tests/fuzz/runner.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 8e1de7ca38d..7f398d2124a 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -68,7 +68,8 @@ def run_fuzzer(fuzzer: str, timeout: int): seed_corpus_dir = "" active_corpus_dir = f"{fuzzer}.corpus" - s3.download_files(bucket=S3_BUILDS_BUCKET, + s3.download_files( + bucket=S3_BUILDS_BUCKET, s3_path=f"fuzzer/corpus/{fuzzer}/", file_suffix="", local_directory=active_corpus_dir, From 
55fd44935d70195fa969941ee3d98b636bdcfe42 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 2 Oct 2024 20:57:16 +0000 Subject: [PATCH 029/353] fix style --- tests/fuzz/runner.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 7f398d2124a..dbe9511b85c 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -7,8 +7,8 @@ import re import subprocess from pathlib import Path -from tests.ci.env_helper import S3_BUILDS_BUCKET -from tests.ci.s3_helper import S3Helper +from ci.env_helper import S3_BUILDS_BUCKET +from ci.s3_helper import S3Helper DEBUGGER = os.getenv("DEBUGGER", "") FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") @@ -150,9 +150,8 @@ def run_fuzzer(fuzzer: str, timeout: int): else: process_fuzzer_output(result.stderr) - f = open(f"{new_corpus_dir}/testfile", "a") - f.write("Now the file has more content!") - f.close() + with open(f"{new_corpus_dir}/testfile", "a", encoding='ascii') as f: + f.write("Now the file has more content!") s3.upload_build_directory_to_s3(new_corpus_dir, "fuzzer/corpus/") From f490d835136e0e28557ffc654e6cb87e13bde65e Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 2 Oct 2024 21:09:31 +0000 Subject: [PATCH 030/353] fix style --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index dbe9511b85c..ac6cbc56a7e 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -150,7 +150,7 @@ def run_fuzzer(fuzzer: str, timeout: int): else: process_fuzzer_output(result.stderr) - with open(f"{new_corpus_dir}/testfile", "a", encoding='ascii') as f: + with open(f"{new_corpus_dir}/testfile", "a", encoding="ascii") as f: f.write("Now the file has more content!") s3.upload_build_directory_to_s3(new_corpus_dir, "fuzzer/corpus/") From 4f23f16417c62057f721273492a0d60441588477 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 2 Oct 2024 22:39:20 +0000 Subject: [PATCH 031/353] fix --- tests/fuzz/runner.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index ac6cbc56a7e..d85bc018739 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -7,9 +7,6 @@ import re import subprocess from pathlib import Path -from ci.env_helper import S3_BUILDS_BUCKET -from ci.s3_helper import S3Helper - DEBUGGER = os.getenv("DEBUGGER", "") FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") @@ -174,4 +171,9 @@ def main(): if __name__ == "__main__": + from os import sys, path + ACTIVE_DIR = path.dirname(path.abspath(__file__)) + sys.path.append(path.dirname(ACTIVE_DIR)) + from ci.env_helper import S3_BUILDS_BUCKET + from ci.s3_helper import S3Helper main() From 5e95ce8a485f1497af06b144c3754941fb1fba93 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 2 Oct 2024 23:03:08 +0000 Subject: [PATCH 032/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index d85bc018739..fc93c7437ca 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -171,7 +171,7 @@ def main(): if __name__ == "__main__": - from os import sys, path + from os import path, sys ACTIVE_DIR = path.dirname(path.abspath(__file__)) sys.path.append(path.dirname(ACTIVE_DIR)) from ci.env_helper import S3_BUILDS_BUCKET From dff243a132c5014c1485133d92812bfb3750e67d Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 2 Oct 2024 23:19:06 +0000 Subject: [PATCH 033/353] fix --- 
tests/fuzz/runner.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index fc93c7437ca..d03bc6f5bed 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -172,8 +172,10 @@ def main(): if __name__ == "__main__": from os import path, sys + ACTIVE_DIR = path.dirname(path.abspath(__file__)) sys.path.append(path.dirname(ACTIVE_DIR)) from ci.env_helper import S3_BUILDS_BUCKET from ci.s3_helper import S3Helper + main() From d022c4615b851b58aaa0f5dbdb1ab3b05b22ab83 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 00:10:59 +0000 Subject: [PATCH 034/353] fix --- tests/fuzz/runner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index d03bc6f5bed..ffd319cf16c 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -175,7 +175,7 @@ if __name__ == "__main__": ACTIVE_DIR = path.dirname(path.abspath(__file__)) sys.path.append(path.dirname(ACTIVE_DIR)) - from ci.env_helper import S3_BUILDS_BUCKET - from ci.s3_helper import S3Helper + from ci.env_helper import S3_BUILDS_BUCKET # pylint: disable=import-error + from ci.s3_helper import S3Helper # pylint: disable=import-error main() From f009d1e7d5c7c605874c637977e1639455086b67 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 00:28:15 +0000 Subject: [PATCH 035/353] fix --- tests/fuzz/runner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index ffd319cf16c..171c99698a7 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -175,7 +175,7 @@ if __name__ == "__main__": ACTIVE_DIR = path.dirname(path.abspath(__file__)) sys.path.append(path.dirname(ACTIVE_DIR)) - from ci.env_helper import S3_BUILDS_BUCKET # pylint: disable=import-error - from ci.s3_helper import S3Helper # pylint: disable=import-error + from ci.env_helper import S3_BUILDS_BUCKET # pylint: disable=import-error + from ci.s3_helper import S3Helper # pylint: disable=import-error main() From 4a7de86089ac2bdcad31791d1db717f25c656b5d Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 00:42:53 +0000 Subject: [PATCH 036/353] fix --- tests/fuzz/runner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 171c99698a7..af3f2ff6040 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -175,7 +175,7 @@ if __name__ == "__main__": ACTIVE_DIR = path.dirname(path.abspath(__file__)) sys.path.append(path.dirname(ACTIVE_DIR)) - from ci.env_helper import S3_BUILDS_BUCKET # pylint: disable=import-error - from ci.s3_helper import S3Helper # pylint: disable=import-error + from ci.env_helper import S3_BUILDS_BUCKET # pylint: disable=import-error,no-name-in-module + from ci.s3_helper import S3Helper # pylint: disable=import-error,no-name-in-module main() From bf292bcc45a131a589bbb0ba113bcc80db380b07 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 3 Oct 2024 00:52:51 +0000 Subject: [PATCH 037/353] Automatic style fix --- tests/fuzz/runner.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index af3f2ff6040..718799a7f63 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -175,7 +175,9 @@ if __name__ == "__main__": ACTIVE_DIR = path.dirname(path.abspath(__file__)) sys.path.append(path.dirname(ACTIVE_DIR)) - from ci.env_helper import S3_BUILDS_BUCKET # pylint: 
disable=import-error,no-name-in-module + from ci.env_helper import ( # pylint: disable=import-error,no-name-in-module + S3_BUILDS_BUCKET, + ) from ci.s3_helper import S3Helper # pylint: disable=import-error,no-name-in-module main() From d279be6ac2683cfebe56399b8d1e60cca085eb1e Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 02:10:07 +0000 Subject: [PATCH 038/353] add boto3 to requirements --- docker/test/fuzzer/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/fuzzer/requirements.txt b/docker/test/fuzzer/requirements.txt index 3dce93e023b..74147513e76 100644 --- a/docker/test/fuzzer/requirements.txt +++ b/docker/test/fuzzer/requirements.txt @@ -25,3 +25,4 @@ six==1.16.0 wadllib==1.3.6 wheel==0.37.1 zipp==1.0.0 +boto3 From ce3983d757e032cdcbd3af81f0a79a959bf036bc Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 02:20:14 +0000 Subject: [PATCH 039/353] fix --- docker/test/fuzzer/requirements.txt | 1 - docker/test/libfuzzer/requirements.txt | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/fuzzer/requirements.txt b/docker/test/fuzzer/requirements.txt index 74147513e76..3dce93e023b 100644 --- a/docker/test/fuzzer/requirements.txt +++ b/docker/test/fuzzer/requirements.txt @@ -25,4 +25,3 @@ six==1.16.0 wadllib==1.3.6 wheel==0.37.1 zipp==1.0.0 -boto3 diff --git a/docker/test/libfuzzer/requirements.txt b/docker/test/libfuzzer/requirements.txt index 3dce93e023b..74147513e76 100644 --- a/docker/test/libfuzzer/requirements.txt +++ b/docker/test/libfuzzer/requirements.txt @@ -25,3 +25,4 @@ six==1.16.0 wadllib==1.3.6 wheel==0.37.1 zipp==1.0.0 +boto3 From c7b8a98fa6a2d0c914112562834c52f4acd04b9a Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 03:12:58 +0000 Subject: [PATCH 040/353] fix --- tests/fuzz/runner.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 718799a7f63..6c4c2930a90 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -174,10 +174,10 @@ if __name__ == "__main__": from os import path, sys ACTIVE_DIR = path.dirname(path.abspath(__file__)) - sys.path.append(path.dirname(ACTIVE_DIR)) - from ci.env_helper import ( # pylint: disable=import-error,no-name-in-module + sys.path.append(path.dirname(ACTIVE_DIR) / "ci") + from env_helper import ( # pylint: disable=import-error,no-name-in-module S3_BUILDS_BUCKET, ) - from ci.s3_helper import S3Helper # pylint: disable=import-error,no-name-in-module + from s3_helper import S3Helper # pylint: disable=import-error,no-name-in-module main() From 2bb3dd7cbc6c860849add0adcd32c296a00d349c Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 04:09:00 +0000 Subject: [PATCH 041/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 6c4c2930a90..a64af5bab66 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -174,7 +174,7 @@ if __name__ == "__main__": from os import path, sys ACTIVE_DIR = path.dirname(path.abspath(__file__)) - sys.path.append(path.dirname(ACTIVE_DIR) / "ci") + sys.path.append(Path(path.dirname(ACTIVE_DIR)) / "ci") from env_helper import ( # pylint: disable=import-error,no-name-in-module S3_BUILDS_BUCKET, ) From 582e01ba57218480a2ef485ccc5f8c4ff440bfc3 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 05:39:42 +0000 Subject: [PATCH 042/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index a64af5bab66..51201e85224 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -174,7 +174,7 @@ if __name__ == "__main__": from os import path, sys ACTIVE_DIR = path.dirname(path.abspath(__file__)) - sys.path.append(Path(path.dirname(ACTIVE_DIR)) / "ci") + sys.path.append((Path(path.dirname(ACTIVE_DIR)) / "ci").as_posix()) from env_helper import ( # pylint: disable=import-error,no-name-in-module S3_BUILDS_BUCKET, ) From 1dc67425bdc084346bafa1264828e979b7909071 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 06:45:03 +0000 Subject: [PATCH 043/353] fix --- tests/fuzz/runner.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 51201e85224..e11a5415227 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 +import botocore import configparser import logging import os @@ -65,12 +66,15 @@ def run_fuzzer(fuzzer: str, timeout: int): seed_corpus_dir = "" active_corpus_dir = f"{fuzzer}.corpus" - s3.download_files( - bucket=S3_BUILDS_BUCKET, - s3_path=f"fuzzer/corpus/{fuzzer}/", - file_suffix="", - local_directory=active_corpus_dir, - ) + try: + s3.download_files( + bucket=S3_BUILDS_BUCKET, + s3_path=f"fuzzer/corpus/{fuzzer}/", + file_suffix="", + local_directory=active_corpus_dir, + ) + except botocore.errorfactory.NoSuchKey as e: + logging.debug("No active corpus exists for %s", fuzzer) new_corpus_dir = f"{fuzzer}.corpus_new" if not os.path.exists(new_corpus_dir): From 0a08ec018a1626a823d4496f57843e24816bf12c Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 07:02:11 +0000 Subject: [PATCH 044/353] fix --- tests/fuzz/runner.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index e11a5415227..06a232a0e5a 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 -import botocore import configparser import logging import os @@ -8,6 +7,8 @@ import re import subprocess from pathlib import Path +import botocore + DEBUGGER = os.getenv("DEBUGGER", "") FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") From 55ff81518f9a35dc3797b1c80acd6d4ef990c5d3 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 07:24:50 +0000 Subject: [PATCH 045/353] fix --- tests/fuzz/runner.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 06a232a0e5a..ccd7cbc475a 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -7,7 +7,7 @@ import re import subprocess from pathlib import Path -import botocore +from botocore.exceptions import ClientError DEBUGGER = os.getenv("DEBUGGER", "") FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") @@ -74,8 +74,11 @@ def run_fuzzer(fuzzer: str, timeout: int): file_suffix="", local_directory=active_corpus_dir, ) - except botocore.errorfactory.NoSuchKey as e: - logging.debug("No active corpus exists for %s", fuzzer) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchKey': + logging.debug("No active corpus exists for %s", fuzzer) + else: + raise new_corpus_dir = f"{fuzzer}.corpus_new" if not os.path.exists(new_corpus_dir): From 3008330afec6c45fd3badf335cca57cb173ecadc Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 3 Oct 2024 07:33:39 +0000 Subject: [PATCH 046/353] Automatic style fix --- tests/fuzz/runner.py | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index ccd7cbc475a..e1860d60081 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -75,7 +75,7 @@ def run_fuzzer(fuzzer: str, timeout: int): local_directory=active_corpus_dir, ) except ClientError as e: - if e.response['Error']['Code'] == 'NoSuchKey': + if e.response["Error"]["Code"] == "NoSuchKey": logging.debug("No active corpus exists for %s", fuzzer) else: raise From e60ae9c64a237d0a7c9fba5a1e83ff611e0f8c58 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 12:44:02 +0000 Subject: [PATCH 047/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index e1860d60081..2e779401e0b 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -158,7 +158,7 @@ def run_fuzzer(fuzzer: str, timeout: int): with open(f"{new_corpus_dir}/testfile", "a", encoding="ascii") as f: f.write("Now the file has more content!") - s3.upload_build_directory_to_s3(new_corpus_dir, "fuzzer/corpus/") + s3.upload_build_directory_to_s3(new_corpus_dir, Path("fuzzer/corpus/")) def main(): From de69aa8c946258ebd25fc4e0a131b0244f5cbac1 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 13:42:24 +0000 Subject: [PATCH 048/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 2e779401e0b..c6b1b2a623b 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -158,7 +158,7 @@ def run_fuzzer(fuzzer: str, timeout: int): with open(f"{new_corpus_dir}/testfile", "a", encoding="ascii") as f: f.write("Now the file has more content!") - s3.upload_build_directory_to_s3(new_corpus_dir, Path("fuzzer/corpus/")) + s3.upload_build_directory_to_s3(Path(new_corpus_dir), "fuzzer/corpus/") def main(): From da5ebde4d5db8d2838c4473fe21e69d3b5a9ae4e Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 16:16:39 +0000 Subject: [PATCH 049/353] add CI env --- tests/ci/libfuzzer_test_check.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 8f19dd7d023..46406dc3557 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -133,6 +133,8 @@ def main(): check_name, run_by_hash_num, run_by_hash_total ) + additional_envs.append("CI=1") + ci_logs_credentials = CiLogsCredentials(Path(temp_path) / "export-logs-config.sh") ci_logs_args = ci_logs_credentials.get_docker_arguments( pr_info, stopwatch.start_time_str, check_name From 4d917d80b42f8dedbd4ddbfecc1c6d9c5fa87c01 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 17:32:05 +0000 Subject: [PATCH 050/353] fix --- tests/ci/libfuzzer_test_check.py | 2 +- tests/fuzz/runner.py | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 46406dc3557..5de28d5641a 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -59,7 +59,7 @@ def get_run_command( envs = [ # a static link, don't use S3_URL or S3_DOWNLOAD - '-e S3_URL="https://s3.amazonaws.com/clickhouse-datasets"', + '-e S3_URL="https://s3.amazonaws.com"', ] envs += [f"-e {e}" for e in additional_envs] diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index c6b1b2a623b..5d0f2865422 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -155,10 
+155,7 @@ def run_fuzzer(fuzzer: str, timeout: int): else: process_fuzzer_output(result.stderr) - with open(f"{new_corpus_dir}/testfile", "a", encoding="ascii") as f: - f.write("Now the file has more content!") - - s3.upload_build_directory_to_s3(Path(new_corpus_dir), "fuzzer/corpus/") + s3.upload_build_directory_to_s3(Path(new_corpus_dir), f"fuzzer/corpus/{fuzzer}", False) def main(): From f66bc05c0188d5873696d01b2d80486c73625bb2 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 3 Oct 2024 17:39:14 +0000 Subject: [PATCH 051/353] Automatic style fix --- tests/fuzz/runner.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 5d0f2865422..2e7c1184bcc 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -155,7 +155,9 @@ def run_fuzzer(fuzzer: str, timeout: int): else: process_fuzzer_output(result.stderr) - s3.upload_build_directory_to_s3(Path(new_corpus_dir), f"fuzzer/corpus/{fuzzer}", False) + s3.upload_build_directory_to_s3( + Path(new_corpus_dir), f"fuzzer/corpus/{fuzzer}", False + ) def main(): From 6fa23c4b72747293d58aebb11d1bb7d2a15b4647 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 23:44:40 +0000 Subject: [PATCH 052/353] kill all fuzzers on timeout --- tests/fuzz/runner.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 2e7c1184bcc..42e54acfecc 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -4,6 +4,7 @@ import configparser import logging import os import re +import signal import subprocess from pathlib import Path @@ -56,6 +57,15 @@ def process_error(error: str): is_call_stack = True +def kill_fuzzer(fuzzer: str): + p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE) + out, err = p.communicate() + for line in out.splitlines(): + if fuzzer in line: + pid = int(line.split(None, 1)[0]) + os.kill(pid, signal.SIGKILL) + + def run_fuzzer(fuzzer: str, timeout: int): s3 = S3Helper() @@ -151,6 +161,7 @@ def run_fuzzer(fuzzer: str, timeout: int): process_error(e.stderr) except subprocess.TimeoutExpired as e: print("Timeout for ", cmd_line) + kill_fuzzer(fuzzer) process_fuzzer_output(e.stderr) else: process_fuzzer_output(result.stderr) From a0d2f2085d56252eb689a72909b567db9325fdc1 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 3 Oct 2024 23:57:05 +0000 Subject: [PATCH 053/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 42e54acfecc..512a20e58c5 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -58,7 +58,7 @@ def process_error(error: str): def kill_fuzzer(fuzzer: str): - p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE) + p = subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE) out, err = p.communicate() for line in out.splitlines(): if fuzzer in line: From 08d098a2f486fab845fa46459b5e842132028ea4 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 4 Oct 2024 00:15:36 +0000 Subject: [PATCH 054/353] fix --- tests/fuzz/runner.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 512a20e58c5..8e05625a6d9 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -58,12 +58,12 @@ def process_error(error: str): def kill_fuzzer(fuzzer: str): - p = subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE) - out, err = p.communicate() - for line in out.splitlines(): - if fuzzer in line: 
- pid = int(line.split(None, 1)[0]) - os.kill(pid, signal.SIGKILL) + with subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE) as p + out, _ = p.communicate() + for line in out.splitlines(): + if fuzzer in line: + pid = int(line.split(None, 1)[0]) + os.kill(pid, signal.SIGKILL) def run_fuzzer(fuzzer: str, timeout: int): From bfb2e7c04413f467e310231830f6701b39739e5e Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 4 Oct 2024 00:16:16 +0000 Subject: [PATCH 055/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 8e05625a6d9..81a76fbcdb9 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -58,7 +58,7 @@ def process_error(error: str): def kill_fuzzer(fuzzer: str): - with subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE) as p + with subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE) as p: out, _ = p.communicate() for line in out.splitlines(): if fuzzer in line: From 9d81ff0a8906ed5549ca3a75a0540b4fb0e13dfc Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 4 Oct 2024 01:22:26 +0000 Subject: [PATCH 056/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 81a76fbcdb9..702014ce04f 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -185,7 +185,7 @@ def main(): with Path() as current: for fuzzer in current.iterdir(): if (current / fuzzer).is_file() and os.access(current / fuzzer, os.X_OK): - run_fuzzer(fuzzer, timeout) + run_fuzzer(fuzzer.name, timeout) if __name__ == "__main__": From 5cf7a777a2b7bf80c9a3eba1d89a5a3bbfa2c86f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 4 Oct 2024 02:31:34 +0000 Subject: [PATCH 057/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 702014ce04f..b51b0f99abc 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -61,7 +61,7 @@ def kill_fuzzer(fuzzer: str): with subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE) as p: out, _ = p.communicate() for line in out.splitlines(): - if fuzzer in line: + if fuzzer.encode("utf-8") in line: pid = int(line.split(None, 1)[0]) os.kill(pid, signal.SIGKILL) From db69e018bf31acf0ec0c22e63bebe1448429e4fc Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 4 Oct 2024 03:18:01 +0000 Subject: [PATCH 058/353] fix --- tests/fuzz/runner.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index b51b0f99abc..bcfc7e6146f 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -63,6 +63,7 @@ def kill_fuzzer(fuzzer: str): for line in out.splitlines(): if fuzzer.encode("utf-8") in line: pid = int(line.split(None, 1)[0]) + logging.info("Killing fuzzer %s, pid %d", fuzzer, pid) os.kill(pid, signal.SIGKILL) From 530d034302720ec3c479e38ba18ac432e27f6ab3 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 4 Oct 2024 04:35:35 +0000 Subject: [PATCH 059/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index bcfc7e6146f..948bc9d48ed 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -161,7 +161,7 @@ def run_fuzzer(fuzzer: str, timeout: int): print("Stderr output: ", e.stderr) process_error(e.stderr) except subprocess.TimeoutExpired as e: - print("Timeout for ", cmd_line) + logging.info("Timeout for %s", cmd_line) 
kill_fuzzer(fuzzer) process_fuzzer_output(e.stderr) else: From e9e35eb118f35ecfa0b6d21fe4a9be7e87443a1f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 4 Oct 2024 05:31:17 +0000 Subject: [PATCH 060/353] fix --- tests/fuzz/runner.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 948bc9d48ed..ac2bb78b7f0 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -7,6 +7,7 @@ import re import signal import subprocess from pathlib import Path +from time import sleep from botocore.exceptions import ClientError @@ -163,6 +164,7 @@ def run_fuzzer(fuzzer: str, timeout: int): except subprocess.TimeoutExpired as e: logging.info("Timeout for %s", cmd_line) kill_fuzzer(fuzzer) + sleep(10) process_fuzzer_output(e.stderr) else: process_fuzzer_output(result.stderr) From eb8ae504db5b7d04ff1d9f04f6068e91472153eb Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 4 Oct 2024 12:03:21 +0000 Subject: [PATCH 061/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index ac2bb78b7f0..e842f40f8d8 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -59,7 +59,7 @@ def process_error(error: str): def kill_fuzzer(fuzzer: str): - with subprocess.Popen(["ps", "-A"], stdout=subprocess.PIPE) as p: + with subprocess.Popen(["ps", "-A", "u"], stdout=subprocess.PIPE) as p: out, _ = p.communicate() for line in out.splitlines(): if fuzzer.encode("utf-8") in line: From c7902255ba868af3903e075bb69e27381f062351 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 4 Oct 2024 12:54:13 +0000 Subject: [PATCH 062/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index e842f40f8d8..b3c19fbb0a4 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -63,7 +63,7 @@ def kill_fuzzer(fuzzer: str): out, _ = p.communicate() for line in out.splitlines(): if fuzzer.encode("utf-8") in line: - pid = int(line.split(None, 1)[0]) + pid = int(line.split(None, 2)[1]) logging.info("Killing fuzzer %s, pid %d", fuzzer, pid) os.kill(pid, signal.SIGKILL) From 2f923ee24278a22e2c78d957f76077dff21176a5 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 4 Oct 2024 14:36:28 +0000 Subject: [PATCH 063/353] Fix old analyzer --- src/Interpreters/ExpressionAnalyzer.cpp | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 12e769f249a..5913cf644d8 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1372,6 +1372,7 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain ExpressionActionsChain::Step & step = chain.lastStep(columns_after_join); ASTs asts = select_query->groupBy()->children; + NameSet group_by_keys; if (select_query->group_by_with_grouping_sets) { for (const auto & ast : asts) @@ -1379,6 +1380,7 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain for (const auto & ast_element : ast->children) { step.addRequiredOutput(ast_element->getColumnName()); + group_by_keys.insert(ast_element->getColumnName()); getRootActions(ast_element, only_types, step.actions()->dag); } } @@ -1388,12 +1390,16 @@ bool SelectQueryExpressionAnalyzer::appendGroupBy(ExpressionActionsChain & chain for (const auto & ast : asts) { step.addRequiredOutput(ast->getColumnName()); + 
group_by_keys.insert(ast->getColumnName()); getRootActions(ast, only_types, step.actions()->dag); } } for (const auto & result_column : step.getResultColumns()) - validateGroupByKeyType(result_column.type); + { + if (group_by_keys.contains(result_column.name)) + validateGroupByKeyType(result_column.type); + } if (optimize_aggregation_in_order) { @@ -1612,9 +1618,6 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy( getRootActions(select_query->orderBy(), only_types, step.actions()->dag); - for (const auto & result_column : step.getResultColumns()) - validateOrderByKeyType(result_column.type); - bool with_fill = false; for (auto & child : select_query->orderBy()->children) @@ -1629,6 +1632,12 @@ ActionsAndProjectInputsFlagPtr SelectQueryExpressionAnalyzer::appendOrderBy( with_fill = true; } + for (const auto & result_column : step.getResultColumns()) + { + if (order_by_keys.contains(result_column.name)) + validateOrderByKeyType(result_column.type); + } + if (auto interpolate_list = select_query->interpolate()) { From 52484cbfec0c168bb440d623673aeb321e1c0211 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Mon, 7 Oct 2024 12:45:23 +0800 Subject: [PATCH 064/353] Fix tests --- tests/queries/0_stateless/01825_new_type_json_ghdata.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh index 6a4fc7d5935..cabc3efcd8e 100755 --- a/tests/queries/0_stateless/01825_new_type_json_ghdata.sh +++ b/tests/queries/0_stateless/01825_new_type_json_ghdata.sh @@ -16,7 +16,7 @@ ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)" ${CLICKHOUSE_CLIENT} -q \ "SELECT data.repo.name, count() AS stars FROM ghdata \ - WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" + WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" --allow_suspicious_types_in_order_by 1 --allow_suspicious_types_in_group_by 1 ${CLICKHOUSE_CLIENT} --enable_analyzer=1 -q \ "SELECT data.payload.commits[].author.name AS name, count() AS c FROM ghdata \ From 7808f00857a157e2b49606df6de567a63462aa58 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 7 Oct 2024 06:53:12 +0000 Subject: [PATCH 065/353] Support alter from String to JSON --- src/Columns/ColumnArray.h | 7 + src/Columns/ColumnDynamic.cpp | 9 ++ src/Columns/ColumnDynamic.h | 1 + src/Columns/ColumnMap.cpp | 7 + src/Columns/ColumnMap.h | 1 + src/Columns/ColumnObject.cpp | 25 ++++ src/Columns/ColumnObject.h | 2 + src/Columns/ColumnTuple.cpp | 20 +++ src/Columns/ColumnTuple.h | 1 + src/Columns/ColumnVariant.cpp | 17 +++ src/Columns/ColumnVariant.h | 1 + src/Columns/IColumn.h | 3 + .../Serializations/SerializationDynamic.cpp | 45 +++--- .../Serializations/SerializationDynamic.h | 20 ++- .../Serializations/SerializationObject.cpp | 22 ++- .../Serializations/SerializationObject.h | 16 ++- src/Functions/FunctionsConversion.cpp | 5 +- src/Storages/AlterCommands.cpp | 14 +- .../MergeTreeDataPartWriterCompact.cpp | 31 ++-- .../MergeTreeDataPartWriterCompact.h | 6 +- .../MergeTreeDataPartWriterOnDisk.cpp | 39 +++++ .../MergeTree/MergeTreeDataPartWriterOnDisk.h | 12 ++ .../MergeTree/MergeTreeDataPartWriterWide.cpp | 32 ++--- .../MergeTree/MergeTreeDataPartWriterWide.h | 8 +- .../03246_alter_from_string_to_json.reference | 134 ++++++++++++++++++ .../03246_alter_from_string_to_json.sql.j2 | 32 +++++ 
...3247_ghdata_string_to_json_alter.reference | 12 ++ .../03247_ghdata_string_to_json_alter.sh | 30 ++++ .../03248_string_to_json_alter_fuzz.reference | 0 .../03248_string_to_json_alter_fuzz.sql | 17 +++ 30 files changed, 459 insertions(+), 110 deletions(-) create mode 100644 tests/queries/0_stateless/03246_alter_from_string_to_json.reference create mode 100644 tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 create mode 100644 tests/queries/0_stateless/03247_ghdata_string_to_json_alter.reference create mode 100755 tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh create mode 100644 tests/queries/0_stateless/03248_string_to_json_alter_fuzz.reference create mode 100644 tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql diff --git a/src/Columns/ColumnArray.h b/src/Columns/ColumnArray.h index f77268a8be6..df52880d6e4 100644 --- a/src/Columns/ColumnArray.h +++ b/src/Columns/ColumnArray.h @@ -192,6 +192,13 @@ public: bool hasDynamicStructure() const override { return getData().hasDynamicStructure(); } void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; + bool dynamicStructureEquals(const IColumn & rhs) const override + { + if (const auto * rhs_concrete = typeid_cast(&rhs)) + return data->dynamicStructureEquals(*rhs_concrete->data); + return false; + } + private: WrappedPtr data; WrappedPtr offsets; diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index 5a837a62761..09a05e52c90 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -1153,6 +1153,15 @@ void ColumnDynamic::prepareVariantsForSquashing(const Columns & source_columns) } } +bool ColumnDynamic::dynamicStructureEquals(const IColumn & rhs) const +{ + if (const auto * rhs_concrete = typeid_cast(&rhs)) + return max_dynamic_types == rhs_concrete->max_dynamic_types && global_max_dynamic_types == rhs_concrete->global_max_dynamic_types + && variant_info.variant_name == rhs_concrete->variant_info.variant_name + && variant_column->dynamicStructureEquals(*rhs_concrete->variant_column); + return false; +} + void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source_columns) { if (!empty()) diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index 17b0d80e5eb..9e8b1f79321 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -367,6 +367,7 @@ public: bool addNewVariant(const DataTypePtr & new_variant) { return addNewVariant(new_variant, new_variant->getName()); } bool hasDynamicStructure() const override { return true; } + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; const StatisticsPtr & getStatistics() const { return statistics; } diff --git a/src/Columns/ColumnMap.cpp b/src/Columns/ColumnMap.cpp index 536da4d06d0..4e81191939f 100644 --- a/src/Columns/ColumnMap.cpp +++ b/src/Columns/ColumnMap.cpp @@ -330,6 +330,13 @@ bool ColumnMap::structureEquals(const IColumn & rhs) const return false; } +bool ColumnMap::dynamicStructureEquals(const IColumn & rhs) const +{ + if (const auto * rhs_map = typeid_cast(&rhs)) + return nested->dynamicStructureEquals(*rhs_map->nested); + return false; +} + ColumnPtr ColumnMap::compress() const { auto compressed = nested->compress(); diff --git a/src/Columns/ColumnMap.h b/src/Columns/ColumnMap.h index 39d15a586b9..8cb0b1680a7 100644 --- a/src/Columns/ColumnMap.h +++ b/src/Columns/ColumnMap.h @@ -120,6 +120,7 @@ public: ColumnPtr 
compress() const override; bool hasDynamicStructure() const override { return nested->hasDynamicStructure(); } + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; }; diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index 3577ab1ec82..8e0182c7276 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -1299,6 +1299,31 @@ void ColumnObject::prepareForSquashing(const std::vector & source_col } } +bool ColumnObject::dynamicStructureEquals(const IColumn & rhs) const +{ + const auto * rhs_object = typeid_cast(&rhs); + if (!rhs_object || typed_paths.size() != rhs_object->typed_paths.size() + || global_max_dynamic_paths != rhs_object->global_max_dynamic_paths || max_dynamic_types != rhs_object->max_dynamic_types + || dynamic_paths.size() != rhs_object->dynamic_paths.size()) + return false; + + for (const auto & [path, column] : typed_paths) + { + auto it = rhs_object->typed_paths.find(path); + if (it == rhs_object->typed_paths.end() || !it->second->dynamicStructureEquals(*column)) + return false; + } + + for (const auto & [path, column] : dynamic_paths) + { + auto it = rhs_object->dynamic_paths.find(path); + if (it == rhs_object->dynamic_paths.end() || !it->second->dynamicStructureEquals(*column)) + return false; + } + + return true; +} + void ColumnObject::takeDynamicStructureFromSourceColumns(const DB::Columns & source_columns) { if (!empty()) diff --git a/src/Columns/ColumnObject.h b/src/Columns/ColumnObject.h index c7f282d9079..d5370625115 100644 --- a/src/Columns/ColumnObject.h +++ b/src/Columns/ColumnObject.h @@ -172,6 +172,7 @@ public: bool isFinalized() const override; bool hasDynamicStructure() const override { return true; } + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; const PathToColumnMap & getTypedPaths() const { return typed_paths; } @@ -221,6 +222,7 @@ public: void setDynamicPaths(const std::vector & paths); void setMaxDynamicPaths(size_t max_dynamic_paths_); + void setGlobalMaxDynamicPaths(size_t global_max_dynamic_paths_); void setStatistics(const StatisticsPtr & statistics_) { statistics = statistics_; } void serializePathAndValueIntoSharedData(ColumnString * shared_data_paths, ColumnString * shared_data_values, std::string_view path, const IColumn & column, size_t n); diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp index e741eb51c68..42acfdc85be 100644 --- a/src/Columns/ColumnTuple.cpp +++ b/src/Columns/ColumnTuple.cpp @@ -727,6 +727,26 @@ bool ColumnTuple::hasDynamicStructure() const return false; } +bool ColumnTuple::dynamicStructureEquals(const IColumn & rhs) const +{ + if (const auto * rhs_tuple = typeid_cast(&rhs)) + { + const size_t tuple_size = columns.size(); + if (tuple_size != rhs_tuple->columns.size()) + return false; + + for (size_t i = 0; i < tuple_size; ++i) + if (!columns[i]->dynamicStructureEquals(*rhs_tuple->columns[i])) + return false; + + return true; + } + else + { + return false; + } +} + void ColumnTuple::takeDynamicStructureFromSourceColumns(const Columns & source_columns) { std::vector nested_source_columns; diff --git a/src/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h index 6968294aef9..2539c27c441 100644 --- a/src/Columns/ColumnTuple.h +++ b/src/Columns/ColumnTuple.h @@ -138,6 +138,7 @@ public: ColumnPtr & getColumnPtr(size_t idx) { return columns[idx]; } bool 
hasDynamicStructure() const override; + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; /// Empty tuple needs a public method to manage its size. diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index c6511695f5c..a18dffd8360 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -1376,6 +1376,23 @@ bool ColumnVariant::structureEquals(const IColumn & rhs) const return true; } +bool ColumnVariant::dynamicStructureEquals(const IColumn & rhs) const +{ + const auto * rhs_variant = typeid_cast(&rhs); + if (!rhs_variant) + return false; + + const size_t num_variants = variants.size(); + if (num_variants != rhs_variant->variants.size()) + return false; + + for (size_t i = 0; i < num_variants; ++i) + if (!variants[i]->dynamicStructureEquals(rhs_variant->getVariantByGlobalDiscriminator(globalDiscriminatorByLocal(i)))) + return false; + + return true; +} + ColumnPtr ColumnVariant::compress() const { ColumnPtr local_discriminators_compressed = local_discriminators->compress(); diff --git a/src/Columns/ColumnVariant.h b/src/Columns/ColumnVariant.h index 925eab74af8..2084de4fae7 100644 --- a/src/Columns/ColumnVariant.h +++ b/src/Columns/ColumnVariant.h @@ -327,6 +327,7 @@ public: void extend(const std::vector & old_to_new_global_discriminators, std::vector> && new_variants_and_discriminators); bool hasDynamicStructure() const override; + bool dynamicStructureEquals(const IColumn & rhs) const override; void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; private: diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index e4fe233ffdf..7131765f99c 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -590,6 +590,9 @@ public: /// Checks if column has dynamic subcolumns. virtual bool hasDynamicStructure() const { return false; } + + /// For columns with dynamic subcolumns checks if columns have equal dynamic structure. + [[nodiscard]] virtual bool dynamicStructureEquals(const IColumn & rhs) const { return structureEquals(rhs); } /// For columns with dynamic subcolumns this method takes dynamic structure from source columns /// and creates proper resulting dynamic structure in advance for merge of these source columns. 
virtual void takeDynamicStructureFromSourceColumns(const std::vector & /*source_columns*/) {} diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index 18a75918499..b00668fa8a4 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -26,8 +26,8 @@ namespace ErrorCodes struct SerializeBinaryBulkStateDynamic : public ISerialization::SerializeBinaryBulkState { - SerializationDynamic::DynamicStructureSerializationVersion structure_version; - size_t max_dynamic_types; + SerializationDynamic::DynamicSerializationVersion structure_version; + size_t num_dynamic_types; DataTypePtr variant_type; Names variant_names; SerializationPtr variant_serialization; @@ -81,14 +81,14 @@ void SerializationDynamic::enumerateStreams( settings.path.pop_back(); } -SerializationDynamic::DynamicStructureSerializationVersion::DynamicStructureSerializationVersion(UInt64 version) : value(static_cast(version)) +SerializationDynamic::DynamicSerializationVersion::DynamicSerializationVersion(UInt64 version) : value(static_cast(version)) { checkVersion(version); } -void SerializationDynamic::DynamicStructureSerializationVersion::checkVersion(UInt64 version) +void SerializationDynamic::DynamicSerializationVersion::checkVersion(UInt64 version) { - if (version != VariantTypeName) + if (version != V1 && version != V2) throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Dynamic structure serialization."); } @@ -108,22 +108,17 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Dynamic column structure during serialization of binary bulk state prefix"); /// Write structure serialization version. - UInt64 structure_version = DynamicStructureSerializationVersion::Value::VariantTypeName; + UInt64 structure_version = DynamicSerializationVersion::Value::V2; writeBinaryLittleEndian(structure_version, *stream); auto dynamic_state = std::make_shared(structure_version); - dynamic_state->max_dynamic_types = column_dynamic.getMaxDynamicTypes(); - /// Write max_dynamic_types parameter, because it can differ from the max_dynamic_types - /// that is specified in the Dynamic type (we could decrease it before merge). - writeVarUInt(dynamic_state->max_dynamic_types, *stream); - dynamic_state->variant_type = variant_info.variant_type; dynamic_state->variant_names = variant_info.variant_names; const auto & variant_column = column_dynamic.getVariantColumn(); - /// Write information about variants. - size_t num_variants = dynamic_state->variant_names.size() - 1; /// Don't write shared variant, Dynamic column should always have it. - writeVarUInt(num_variants, *stream); + /// Write information about dynamic types. + dynamic_state->num_dynamic_types = dynamic_state->variant_names.size() - 1; /// -1 for SharedVariant + writeVarUInt(dynamic_state->num_dynamic_types, *stream); if (settings.data_types_binary_encoding) { const auto & variants = assert_cast(*dynamic_state->variant_type).getVariants(); @@ -251,22 +246,25 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD UInt64 structure_version; readBinaryLittleEndian(structure_version, *structure_stream); auto structure_state = std::make_shared(structure_version); - /// Read max_dynamic_types parameter. 
- readVarUInt(structure_state->max_dynamic_types, *structure_stream); + if (structure_state->structure_version.value == DynamicSerializationVersion::Value::V1) + { + /// Skip max_dynamic_types parameter in V1 serialization version. + size_t max_dynamic_types; + readVarUInt(max_dynamic_types, *structure_stream); + } /// Read information about variants. DataTypes variants; - size_t num_variants; - readVarUInt(num_variants, *structure_stream); - variants.reserve(num_variants + 1); /// +1 for shared variant. + readVarUInt(structure_state->num_dynamic_types, *structure_stream); + variants.reserve(structure_state->num_dynamic_types + 1); /// +1 for shared variant. if (settings.data_types_binary_encoding) { - for (size_t i = 0; i != num_variants; ++i) + for (size_t i = 0; i != structure_state->num_dynamic_types; ++i) variants.push_back(decodeDataType(*structure_stream)); } else { String data_type_name; - for (size_t i = 0; i != num_variants; ++i) + for (size_t i = 0; i != structure_state->num_dynamic_types; ++i) { readStringBinary(data_type_name, *structure_stream); variants.push_back(DataTypeFactory::instance().get(data_type_name)); @@ -364,9 +362,6 @@ void SerializationDynamic::serializeBinaryBulkWithMultipleStreamsAndCountTotalSi if (!variant_info.variant_type->equals(*dynamic_state->variant_type)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of internal columns of Dynamic. Expected: {}, Got: {}", dynamic_state->variant_type->getName(), variant_info.variant_type->getName()); - if (column_dynamic.getMaxDynamicTypes() != dynamic_state->max_dynamic_types) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of max_dynamic_types parameter of Dynamic. Expected: {}, Got: {}", dynamic_state->max_dynamic_types, column_dynamic.getMaxDynamicTypes()); - settings.path.push_back(Substream::DynamicData); assert_cast(*dynamic_state->variant_serialization) .serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics( @@ -424,7 +419,7 @@ void SerializationDynamic::deserializeBinaryBulkWithMultipleStreams( if (mutable_column->empty()) { - column_dynamic.setMaxDynamicPaths(structure_state->max_dynamic_types); + column_dynamic.setMaxDynamicPaths(structure_state->num_dynamic_types); column_dynamic.setVariantType(structure_state->variant_type); column_dynamic.setStatistics(structure_state->statistics); } diff --git a/src/DataTypes/Serializations/SerializationDynamic.h b/src/DataTypes/Serializations/SerializationDynamic.h index f34b5d0e770..ac98bbbc8b5 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.h +++ b/src/DataTypes/Serializations/SerializationDynamic.h @@ -16,18 +16,28 @@ public: { } - struct DynamicStructureSerializationVersion + struct DynamicSerializationVersion { enum Value { - VariantTypeName = 1, + /// V1 serialization: + /// - DynamicStructure stream: + /// + /// + /// + /// (only in MergeTree serialization) + /// (only in MergeTree serialization) + /// - DynamicData stream: contains the data of nested Variant column. + V1 = 1, + /// V2 serialization: the same as V1 but without max_dynamic_types parameter in DynamicStructure stream. 
+ V2 = 2, }; Value value; static void checkVersion(UInt64 version); - explicit DynamicStructureSerializationVersion(UInt64 version); + explicit DynamicSerializationVersion(UInt64 version); }; void enumerateStreams( @@ -113,9 +123,9 @@ private: struct DeserializeBinaryBulkStateDynamicStructure : public ISerialization::DeserializeBinaryBulkState { - DynamicStructureSerializationVersion structure_version; + DynamicSerializationVersion structure_version; DataTypePtr variant_type; - size_t max_dynamic_types; + size_t num_dynamic_types; ColumnDynamic::StatisticsPtr statistics; explicit DeserializeBinaryBulkStateDynamicStructure(UInt64 structure_version_) diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 760f6ce750d..b3ac2c52d70 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -68,14 +68,13 @@ SerializationObject::ObjectSerializationVersion::ObjectSerializationVersion(UInt void SerializationObject::ObjectSerializationVersion::checkVersion(UInt64 version) { - if (version != BASIC) + if (version != V1 && version != V2) throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Object structure serialization."); } struct SerializeBinaryBulkStateObject: public ISerialization::SerializeBinaryBulkState { SerializationObject::ObjectSerializationVersion serialization_version; - size_t max_dynamic_paths; std::vector sorted_dynamic_paths; std::unordered_map typed_path_states; std::unordered_map dynamic_path_states; @@ -193,13 +192,10 @@ void SerializationObject::serializeBinaryBulkStatePrefix( throw Exception(ErrorCodes::LOGICAL_ERROR, "Missing stream for Object column structure during serialization of binary bulk state prefix"); /// Write serialization version. - UInt64 serialization_version = ObjectSerializationVersion::Value::BASIC; + UInt64 serialization_version = ObjectSerializationVersion::Value::V2; writeBinaryLittleEndian(serialization_version, *stream); auto object_state = std::make_shared(serialization_version); - object_state->max_dynamic_paths = column_object.getMaxDynamicPaths(); - /// Write max_dynamic_paths parameter. - writeVarUInt(object_state->max_dynamic_paths, *stream); /// Write all dynamic paths in sorted order. object_state->sorted_dynamic_paths.reserve(dynamic_paths.size()); for (const auto & [path, _] : dynamic_paths) @@ -353,8 +349,13 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationObject::deserializeOb UInt64 serialization_version; readBinaryLittleEndian(serialization_version, *structure_stream); auto structure_state = std::make_shared(serialization_version); - /// Read max_dynamic_paths parameter. - readVarUInt(structure_state->max_dynamic_paths, *structure_stream); + if (structure_state->structure_version.value == ObjectSerializationVersion::Value::V1) + { + /// Skip max_dynamic_paths parameter in V1 serialization version. + size_t max_dynamic_paths; + readVarUInt(max_dynamic_paths, *structure_stream); + } + /// Read the sorted list of dynamic paths. size_t dynamic_paths_size; readVarUInt(dynamic_paths_size, *structure_stream); @@ -411,9 +412,6 @@ void SerializationObject::serializeBinaryBulkWithMultipleStreams( const auto & shared_data = column_object.getSharedDataPtr(); auto * object_state = checkAndGetState(state); - if (column_object.getMaxDynamicPaths() != object_state->max_dynamic_paths) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of max_dynamic_paths parameter of Object. 
Expected: {}, Got: {}", object_state->max_dynamic_paths, column_object.getMaxDynamicPaths()); - if (column_object.getDynamicPaths().size() != object_state->sorted_dynamic_paths.size()) throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of number of dynamic paths in Object. Expected: {}, Got: {}", object_state->sorted_dynamic_paths.size(), column_object.getDynamicPaths().size()); @@ -538,7 +536,7 @@ void SerializationObject::deserializeBinaryBulkWithMultipleStreams( /// If it's a new object column, set dynamic paths and statistics. if (column_object.empty()) { - column_object.setMaxDynamicPaths(structure_state->max_dynamic_paths); + column_object.setMaxDynamicPaths(structure_state->sorted_dynamic_paths.size()); column_object.setDynamicPaths(structure_state->sorted_dynamic_paths); column_object.setStatistics(structure_state->statistics); } diff --git a/src/DataTypes/Serializations/SerializationObject.h b/src/DataTypes/Serializations/SerializationObject.h index 62ff9849f45..ba66dd6470e 100644 --- a/src/DataTypes/Serializations/SerializationObject.h +++ b/src/DataTypes/Serializations/SerializationObject.h @@ -19,7 +19,20 @@ public: { enum Value { - BASIC = 0, + /// V1 serialization: + /// - ObjectStructure stream: + /// + /// + /// + /// (only in MergeTree serialization) + /// (only in MergeTree serialization) + /// - ObjectData stream: + /// - ObjectTypedPath stream for each column in typed paths + /// - ObjectDynamicPath stream for each column in dynamic paths + /// - ObjectSharedData stream shared data column. + V1 = 0, + /// V2 serialization: the same as V1 but without max_dynamic_paths parameter in ObjectStructure stream. + V2 = 2, }; Value value; @@ -82,7 +95,6 @@ private: struct DeserializeBinaryBulkStateObjectStructure : public ISerialization::DeserializeBinaryBulkState { ObjectSerializationVersion structure_version; - size_t max_dynamic_paths; std::vector sorted_dynamic_paths; std::unordered_set dynamic_paths; /// Paths statistics. Map (dynamic path) -> (number of non-null values in this path). diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index ed13e581759..a7098e85ea0 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -83,6 +83,7 @@ namespace Setting extern const SettingsBool input_format_ipv4_default_on_conversion_error; extern const SettingsBool input_format_ipv6_default_on_conversion_error; extern const SettingsBool precise_float_parsing; + extern const SettingsBool cast_to_json_disable_dynamic_subcolumns; } namespace ErrorCodes @@ -4056,9 +4057,7 @@ private: { return [this](ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count) { - auto res = ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count, context)->assumeMutable(); - res->finalize(); - return res; + return ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count, context)->assumeMutable(); }; } diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 460d74e68bf..0d7d3295e0a 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1466,13 +1466,13 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const "The change of data type {} of column {} to {} is not allowed. 
It has known bugs", old_data_type->getName(), backQuote(column_name), command.data_type->getName()); - bool has_object_type = isObject(command.data_type); - command.data_type->forEachChild([&](const IDataType & type){ has_object_type |= isObject(type); }); - if (has_object_type) - throw Exception( - ErrorCodes::BAD_ARGUMENTS, - "The change of data type {} of column {} to {} is not supported.", - old_data_type->getName(), backQuote(column_name), command.data_type->getName()); +// bool has_object_type = isObject(command.data_type); +// command.data_type->forEachChild([&](const IDataType & type){ has_object_type |= isObject(type); }); +// if (has_object_type) +// throw Exception( +// ErrorCodes::BAD_ARGUMENTS, +// "The change of data type {} of column {} to {} is not supported.", +// old_data_type->getName(), backQuote(column_name), command.data_type->getName()); } if (command.isRemovingProperty()) diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index a859172023f..96623307c8f 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -61,22 +61,6 @@ MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( } } -void MergeTreeDataPartWriterCompact::initDynamicStreamsIfNeeded(const Block & block) -{ - if (is_dynamic_streams_initialized) - return; - - is_dynamic_streams_initialized = true; - for (const auto & column : columns_list) - { - if (column.type->hasDynamicSubcolumns()) - { - auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, block.getByName(column.name).column, compression); - } - } -} - void MergeTreeDataPartWriterCompact::addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc) { ISerialization::StreamCallback callback = [&](const auto & substream_path) @@ -175,20 +159,25 @@ void writeColumnSingleGranule( void MergeTreeDataPartWriterCompact::write(const Block & block, const IColumn::Permutation * permutation) { - /// On first block of data initialize streams for dynamic subcolumns. - initDynamicStreamsIfNeeded(block); + Block result_block = block; + + /// During serialization columns with dynamic subcolumns (like JSON/Dynamic) must have the same dynamic structure. + /// But it may happen that they don't (for example during ALTER MODIFY COLUMN from some type to JSON/Dynamic). + /// In this case we use dynamic structure of the column from the first written block and adjust columns from + /// the next blocks so they match this dynamic structure. 
+ initOrAdjustDynamicStructureIfNeeded(result_block); /// Fill index granularity for this block /// if it's unknown (in case of insert data or horizontal merge, /// but not in case of vertical merge) if (compute_granularity) { - size_t index_granularity_for_block = computeIndexGranularity(block); + size_t index_granularity_for_block = computeIndexGranularity(result_block); assert(index_granularity_for_block >= 1); - fillIndexGranularity(index_granularity_for_block, block.rows()); + fillIndexGranularity(index_granularity_for_block, result_block.rows()); } - Block result_block = permuteBlockIfNeeded(block, permutation); + result_block = permuteBlockIfNeeded(result_block, permutation); if (!header) header = result_block.cloneEmpty(); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h index b440a37222d..03da9c5f754 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h @@ -48,9 +48,7 @@ private: void addToChecksums(MergeTreeDataPartChecksums & checksums); - void addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc); - - void initDynamicStreamsIfNeeded(const Block & block); + void addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc) override; Block header; @@ -104,8 +102,6 @@ private: /// then finally to 'marks_file'. std::unique_ptr marks_compressor; std::unique_ptr marks_source_hashing; - - bool is_dynamic_streams_initialized = false; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp index 35914d8c50a..fbf6ac769a0 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.cpp @@ -557,6 +557,45 @@ Names MergeTreeDataPartWriterOnDisk::getSkipIndicesColumns() const return Names(skip_indexes_column_names_set.begin(), skip_indexes_column_names_set.end()); } +void MergeTreeDataPartWriterOnDisk::initOrAdjustDynamicStructureIfNeeded(Block & block) +{ + if (!is_dynamic_streams_initialized) + { + for (const auto & column : columns_list) + { + if (column.type->hasDynamicSubcolumns()) + { + /// Create all streams for dynamic subcolumns using dynamic structure from block. + auto compression = getCodecDescOrDefault(column.name, default_codec); + addStreams(column, block.getByName(column.name).column, compression); + } + } + is_dynamic_streams_initialized = true; + block_sample = block.cloneEmpty(); + } + else + { + size_t size = block.columns(); + for (size_t i = 0; i != size; ++i) + { + auto & column = block.getByPosition(i); + const auto & sample_column = block_sample.getByPosition(i); + /// Check if the dynamic structure of this column is different from the sample column. + if (column.type->hasDynamicSubcolumns() && !column.column->dynamicStructureEquals(*sample_column.column)) + { + /// We need to change the dynamic structure of the column so it matches the sample column. + /// To do it, we create empty column of this type, take dynamic structure from sample column + /// and insert data into it. Resulting column will have required dynamic structure and the content + /// of the column in current block. 
+ auto new_column = sample_column.type->createColumn(); + new_column->takeDynamicStructureFromSourceColumns({sample_column.column}); + new_column->insertRangeFrom(*column.column, 0, column.column->size()); + column.column = std::move(new_column); + } + } + } +} + template struct MergeTreeDataPartWriterOnDisk::Stream; template struct MergeTreeDataPartWriterOnDisk::Stream; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h index 8d84442981e..69a089eda1b 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterOnDisk.h @@ -153,6 +153,14 @@ protected: /// Get unique non ordered skip indices column. Names getSkipIndicesColumns() const; + virtual void addStreams(const NameAndTypePair & name_and_type, const ColumnPtr & column, const ASTPtr & effective_codec_desc) = 0; + + /// On first block create all required streams for columns with dynamic subcolumns and remember the block sample. + /// On each next block check if dynamic structure of the columns equals to the dynamic structure of the same + /// columns in the sample block. If for some column dynamic structure is different, adjust it so it matches + /// the structure from the sample. + void initOrAdjustDynamicStructureIfNeeded(Block & block); + const MergeTreeIndices skip_indices; const ColumnsStatistics stats; @@ -187,6 +195,10 @@ protected: size_t current_mark = 0; GinIndexStoreFactory::GinIndexStores gin_index_stores; + + bool is_dynamic_streams_initialized = false; + Block block_sample; + private: void initSkipIndices(); void initPrimaryIndex(); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index 04e07a0588a..ba9d82fd097 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -106,23 +106,6 @@ MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide( } } -void MergeTreeDataPartWriterWide::initDynamicStreamsIfNeeded(const DB::Block & block) -{ - if (is_dynamic_streams_initialized) - return; - - is_dynamic_streams_initialized = true; - block_sample = block.cloneEmpty(); - for (const auto & column : columns_list) - { - if (column.type->hasDynamicSubcolumns()) - { - auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, block_sample.getByName(column.name).column, compression); - } - } -} - void MergeTreeDataPartWriterWide::addStreams( const NameAndTypePair & name_and_type, const ColumnPtr & column, @@ -260,15 +243,20 @@ void MergeTreeDataPartWriterWide::shiftCurrentMark(const Granules & granules_wri void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Permutation * permutation) { - /// On first block of data initialize streams for dynamic subcolumns. - initDynamicStreamsIfNeeded(block); + Block block_to_write = block; + + /// During serialization columns with dynamic subcolumns (like JSON/Dynamic) must have the same dynamic structure. + /// But it may happen that they don't (for example during ALTER MODIFY COLUMN from some type to JSON/Dynamic). + /// In this case we use dynamic structure of the column from the first written block and adjust columns from + /// the next blocks so they match this dynamic structure. 
+ initOrAdjustDynamicStructureIfNeeded(block_to_write); /// Fill index granularity for this block /// if it's unknown (in case of insert data or horizontal merge, /// but not in case of vertical part of vertical merge) if (compute_granularity) { - size_t index_granularity_for_block = computeIndexGranularity(block); + size_t index_granularity_for_block = computeIndexGranularity(block_to_write); if (rows_written_in_last_mark > 0) { size_t rows_left_in_last_mark = index_granularity.getMarkRows(getCurrentMark()) - rows_written_in_last_mark; @@ -286,11 +274,9 @@ void MergeTreeDataPartWriterWide::write(const Block & block, const IColumn::Perm } } - fillIndexGranularity(index_granularity_for_block, block.rows()); + fillIndexGranularity(index_granularity_for_block, block_to_write.rows()); } - Block block_to_write = block; - auto granules_to_write = getGranulesToWrite(index_granularity, block_to_write.rows(), getCurrentMark(), rows_written_in_last_mark); auto offset_columns = written_offset_columns ? *written_offset_columns : WrittenOffsetColumns{}; diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h index ab86ed27c7e..78dfc93c4d2 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h @@ -91,9 +91,7 @@ private: void addStreams( const NameAndTypePair & name_and_type, const ColumnPtr & column, - const ASTPtr & effective_codec_desc); - - void initDynamicStreamsIfNeeded(const Block & block); + const ASTPtr & effective_codec_desc) override; /// Method for self check (used in debug-build only). Checks that written /// data and corresponding marks are consistent. Otherwise throws logical @@ -139,10 +137,6 @@ private: /// How many rows we have already written in the current mark. /// More than zero when incoming blocks are smaller then their granularity. 
size_t rows_written_in_last_mark = 0; - - Block block_sample; - - bool is_dynamic_streams_initialized = false; }; } diff --git a/tests/queries/0_stateless/03246_alter_from_string_to_json.reference b/tests/queries/0_stateless/03246_alter_from_string_to_json.reference new file mode 100644 index 00000000000..a2d3a799fff --- /dev/null +++ b/tests/queries/0_stateless/03246_alter_from_string_to_json.reference @@ -0,0 +1,134 @@ +All paths: +['key0','key1','key2','key3','key4','key5'] +Shared data paths: +key2 +key3 +key4 +key5 +{"key0":"value0"} +{"key1":"value1"} +{"key0":"value2"} +{"key1":"value3"} +{"key0":"value4"} +{"key1":"value5"} +{"key0":"value6"} +{"key1":"value7"} +{"key0":"value8"} +{"key1":"value9"} +{"key2":"value300000"} +{"key3":"value300001"} +{"key2":"value300002"} +{"key3":"value300003"} +{"key2":"value300004"} +{"key3":"value300005"} +{"key2":"value300006"} +{"key3":"value300007"} +{"key2":"value300008"} +{"key3":"value300009"} +{"key4":"value600000"} +{"key5":"value600001"} +{"key4":"value600002"} +{"key5":"value600003"} +{"key4":"value600004"} +{"key5":"value600005"} +{"key4":"value600006"} +{"key5":"value600007"} +{"key4":"value600008"} +{"key5":"value600009"} +value0 \N \N \N \N \N +\N value1 \N \N \N \N +value2 \N \N \N \N \N +\N value3 \N \N \N \N +value4 \N \N \N \N \N +\N value5 \N \N \N \N +value6 \N \N \N \N \N +\N value7 \N \N \N \N +value8 \N \N \N \N \N +\N value9 \N \N \N \N +\N \N value300000 \N \N \N +\N \N \N value300001 \N \N +\N \N value300002 \N \N \N +\N \N \N value300003 \N \N +\N \N value300004 \N \N \N +\N \N \N value300005 \N \N +\N \N value300006 \N \N \N +\N \N \N value300007 \N \N +\N \N value300008 \N \N \N +\N \N \N value300009 \N \N +\N \N \N \N value600000 \N +\N \N \N \N \N value600001 +\N \N \N \N value600002 \N +\N \N \N \N \N value600003 +\N \N \N \N value600004 \N +\N \N \N \N \N value600005 +\N \N \N \N value600006 \N +\N \N \N \N \N value600007 +\N \N \N \N value600008 \N +\N \N \N \N \N value600009 +All paths: +['key0','key1','key2','key3','key4','key5'] +Shared data paths: +key2 +key3 +key4 +key5 +{"key0":"value0"} +{"key1":"value1"} +{"key0":"value2"} +{"key1":"value3"} +{"key0":"value4"} +{"key1":"value5"} +{"key0":"value6"} +{"key1":"value7"} +{"key0":"value8"} +{"key1":"value9"} +{"key2":"value300000"} +{"key3":"value300001"} +{"key2":"value300002"} +{"key3":"value300003"} +{"key2":"value300004"} +{"key3":"value300005"} +{"key2":"value300006"} +{"key3":"value300007"} +{"key2":"value300008"} +{"key3":"value300009"} +{"key4":"value600000"} +{"key5":"value600001"} +{"key4":"value600002"} +{"key5":"value600003"} +{"key4":"value600004"} +{"key5":"value600005"} +{"key4":"value600006"} +{"key5":"value600007"} +{"key4":"value600008"} +{"key5":"value600009"} +value0 \N \N \N \N \N +\N value1 \N \N \N \N +value2 \N \N \N \N \N +\N value3 \N \N \N \N +value4 \N \N \N \N \N +\N value5 \N \N \N \N +value6 \N \N \N \N \N +\N value7 \N \N \N \N +value8 \N \N \N \N \N +\N value9 \N \N \N \N +\N \N value300000 \N \N \N +\N \N \N value300001 \N \N +\N \N value300002 \N \N \N +\N \N \N value300003 \N \N +\N \N value300004 \N \N \N +\N \N \N value300005 \N \N +\N \N value300006 \N \N \N +\N \N \N value300007 \N \N +\N \N value300008 \N \N \N +\N \N \N value300009 \N \N +\N \N \N \N value600000 \N +\N \N \N \N \N value600001 +\N \N \N \N value600002 \N +\N \N \N \N \N value600003 +\N \N \N \N value600004 \N +\N \N \N \N \N value600005 +\N \N \N \N value600006 \N +\N \N \N \N \N value600007 +\N \N \N \N value600008 \N +\N \N \N \N \N value600009 diff 
--git a/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 new file mode 100644 index 00000000000..a13867b145d --- /dev/null +++ b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 @@ -0,0 +1,32 @@ +set allow_experimental_json_type = 1; + +drop table if exists test; + +{% for create_command in ['create table test (x UInt64, json String) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;', + 'create table test (x UInt64, json String) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;'] -%} + +{{ create_command }} + +insert into test select number, toJSONString(map('key' || multiIf(number < 300000, number % 2, number < 600000, number % 2 + 2, number % 2 + 4), 'value' || number)) from numbers(1000000); + +alter table test modify column json JSON settings mutations_sync=1; + +select 'All paths:'; +select distinctJSONPaths(json) from test; +select 'Shared data paths:'; +select distinct (arrayJoin(JSONSharedDataPaths(json))) as path from test order by path; +select json from test order by x limit 10; +select json from test order by x limit 10 offset 300000; +select json from test order by x limit 10 offset 600000; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 300000; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 600000; + +select json from test format Null; +select json from test order by x format Null; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test format Null; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x format Null; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.reference b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.reference new file mode 100644 index 00000000000..ca2fb7e8ff9 --- /dev/null +++ b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.reference @@ -0,0 +1,12 @@ +5000 +leonardomso/33-js-concepts 3 +ytdl-org/youtube-dl 3 +Bogdanp/neko 2 +bminossi/AllVideoPocsFromHackerOne 2 +disclose/diodata 2 +Commit 182 +chipeo345 119 +phanwi346 114 +Nicholas Piggin 95 +direwolf-github 49 +2 diff --git a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh new file mode 100755 index 00000000000..931d106120c --- /dev/null +++ b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-s3-storage, long +# ^ no-s3-storage: too memory hungry + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata" +${CLICKHOUSE_CLIENT} -q "CREATE TABLE ghdata (data String) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'" + +cat $CUR_DIR/data_json/ghdata_sample.json | ${CLICKHOUSE_CLIENT} \ + --max_memory_usage 10G --query "INSERT INTO ghdata FORMAT JSONAsString" + +${CLICKHOUSE_CLIENT} -q "ALTER TABLE ghdata MODIFY column data JSON SETTINGS mutations_sync=1" --allow_experimental_json_type 1 + +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)" + +${CLICKHOUSE_CLIENT} -q \ +"SELECT data.repo.name, count() AS stars FROM ghdata \ + WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" + +${CLICKHOUSE_CLIENT} --enable_analyzer=1 -q \ +"SELECT data.payload.commits[].author.name AS name, count() AS c FROM ghdata \ + ARRAY JOIN data.payload.commits[].author.name \ + GROUP BY name ORDER BY c DESC, name LIMIT 5" + +${CLICKHOUSE_CLIENT} -q "SELECT max(data.payload.pull_request.assignees[].size0) FROM ghdata" + +${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS ghdata" diff --git a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.reference b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql new file mode 100644 index 00000000000..87e10df9cc8 --- /dev/null +++ b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql @@ -0,0 +1,17 @@ +set allow_experimental_json_type=1; +set max_insert_block_size=10000; +set max_block_size=10000; + +drop table if exists test; +drop named collection if exists json_alter_fuzzer; + +create table test (json String) engine=MergeTree order by tuple(); +create named collection json_alter_fuzzer AS json_str='{}'; +insert into test select * from fuzzJSON(json_alter_fuzzer, reuse_output=true, max_output_length=128) limit 200000; +alter table test modify column json JSON settings mutations_sync=1; +select json from test format Null; +optimize table test final; +select json from test format Null; +drop named collection json_alter_fuzzer; +drop table test; + From a9fc07d9af728f56b7b43c53403e278ae69e8096 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 7 Oct 2024 07:06:10 +0000 Subject: [PATCH 066/353] Remove unneded changes --- src/Storages/AlterCommands.cpp | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index 0d7d3295e0a..9972b34ecc4 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1465,14 +1465,6 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const ErrorCodes::BAD_ARGUMENTS, "The change of data type {} of column {} to {} is not allowed. 
It has known bugs", old_data_type->getName(), backQuote(column_name), command.data_type->getName());
-
-// bool has_object_type = isObject(command.data_type);
-// command.data_type->forEachChild([&](const IDataType & type){ has_object_type |= isObject(type); });
-// if (has_object_type)
-// throw Exception(
-// ErrorCodes::BAD_ARGUMENTS,
-// "The change of data type {} of column {} to {} is not supported.",
-// old_data_type->getName(), backQuote(column_name), command.data_type->getName());
 }
 
 if (command.isRemovingProperty())

From a10c2674fe15c977a51c1ae7054f9f8e9bc4f7a3 Mon Sep 17 00:00:00 2001
From: avogar 
Date: Mon, 7 Oct 2024 07:20:10 +0000
Subject: [PATCH 067/353] Add example in docs

---
 docs/en/sql-reference/data-types/newjson.md | 22 +++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/docs/en/sql-reference/data-types/newjson.md b/docs/en/sql-reference/data-types/newjson.md
index 68952590eb9..f799072a02f 100644
--- a/docs/en/sql-reference/data-types/newjson.md
+++ b/docs/en/sql-reference/data-types/newjson.md
@@ -630,6 +630,28 @@ SELECT arrayJoin(distinctJSONPathsAndTypes(json)) FROM s3('s3://clickhouse-publi
 └─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┘
 ```
 
+## ALTER MODIFY COLUMN to JSON type
+
+It's possible to alter an existing table and change the type of the column to the new `JSON` type. Currently, only altering from the `String` type is supported.
+
+**Example**
+
+```sql
+CREATE TABLE test (json String) ENGINE=MergeTree ORDER BY tuple();
+INSERT INTO test VALUES ('{"a" : 42}'), ('{"a" : 43, "b" : "Hello"}'), ('{"a" : 44, "b" : [1, 2, 3]}'), ('{"c" : "2020-01-01"}');
+ALTER TABLE test MODIFY COLUMN json JSON;
+SELECT json, json.a, json.b, json.c FROM test;
+```
+
+```text
+ ┌─json─────────────────────────┬─json.a─┬─json.b──┬─json.c─────┐
+1. │ {"a":"42"} │ 42 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │
+2. │ {"a":"43","b":"Hello"} │ 43 │ Hello │ ᴺᵁᴸᴸ │
+3. │ {"a":"44","b":["1","2","3"]} │ 44 │ [1,2,3] │ ᴺᵁᴸᴸ │
+4. 
│ {"c":"2020-01-01"} │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 2020-01-01 │ + └──────────────────────────────┴────────┴─────────┴────────────┘ +``` + ## Tips for better usage of the JSON type Before creating `JSON` column and loading data into it, consider the following tips: From 07da0c99b8318cd368c52a0d573e598599207196 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 8 Oct 2024 05:52:25 +0000 Subject: [PATCH 068/353] Fix tests --- .../03225_alter_to_json_not_supported.reference | 0 .../03225_alter_to_json_not_supported.sql | 15 --------------- .../03248_string_to_json_alter_fuzz.sql | 4 ++-- 3 files changed, 2 insertions(+), 17 deletions(-) delete mode 100644 tests/queries/0_stateless/03225_alter_to_json_not_supported.reference delete mode 100644 tests/queries/0_stateless/03225_alter_to_json_not_supported.sql diff --git a/tests/queries/0_stateless/03225_alter_to_json_not_supported.reference b/tests/queries/0_stateless/03225_alter_to_json_not_supported.reference deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql b/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql deleted file mode 100644 index 398494d56de..00000000000 --- a/tests/queries/0_stateless/03225_alter_to_json_not_supported.sql +++ /dev/null @@ -1,15 +0,0 @@ -set allow_experimental_json_type = 1; - -drop table if exists test; -create table test (s String) engine=MergeTree order by tuple(); -alter table test modify column s JSON; -- { serverError BAD_ARGUMENTS } -drop table test; - -create table test (s Array(String)) engine=MergeTree order by tuple(); -alter table test modify column s Array(JSON); -- { serverError BAD_ARGUMENTS } -drop table test; - -create table test (s Tuple(String, String)) engine=MergeTree order by tuple(); -alter table test modify column s Tuple(JSON, String); -- { serverError BAD_ARGUMENTS } -drop table test; - diff --git a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql index 87e10df9cc8..d4d775732e8 100644 --- a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql +++ b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql @@ -7,8 +7,8 @@ drop named collection if exists json_alter_fuzzer; create table test (json String) engine=MergeTree order by tuple(); create named collection json_alter_fuzzer AS json_str='{}'; -insert into test select * from fuzzJSON(json_alter_fuzzer, reuse_output=true, max_output_length=128) limit 200000; -alter table test modify column json JSON settings mutations_sync=1; +insert into test select * from fuzzJSON(json_alter_fuzzer, reuse_output=true, max_output_length=64) limit 200000; +alter table test modify column json JSON(max_dynamic_paths=100) settings mutations_sync=1; select json from test format Null; optimize table test final; select json from test format Null; From c6b58f4db2461bcdc09929b67a84b9d061ddefd5 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 8 Oct 2024 08:01:45 +0000 Subject: [PATCH 069/353] Better docs --- docs/en/sql-reference/data-types/newjson.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/en/sql-reference/data-types/newjson.md b/docs/en/sql-reference/data-types/newjson.md index f799072a02f..8e9eeb43c72 100644 --- a/docs/en/sql-reference/data-types/newjson.md +++ b/docs/en/sql-reference/data-types/newjson.md @@ -644,12 +644,12 @@ SELECT json, json.a, json.b, json.c FROM test; ``` ```text - ┌─json─────────────────────────┬─json.a─┬─json.b──┬─json.c─────┐ 
-1. │ {"a":"42"} │ 42 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ -2. │ {"a":"43","b":"Hello"} │ 43 │ Hello │ ᴺᵁᴸᴸ │ -3. │ {"a":"44","b":["1","2","3"]} │ 44 │ [1,2,3] │ ᴺᵁᴸᴸ │ -4. │ {"c":"2020-01-01"} │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 2020-01-01 │ - └──────────────────────────────┴────────┴─────────┴────────────┘ +┌─json─────────────────────────┬─json.a─┬─json.b──┬─json.c─────┐ +│ {"a":"42"} │ 42 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ +│ {"a":"43","b":"Hello"} │ 43 │ Hello │ ᴺᵁᴸᴸ │ +│ {"a":"44","b":["1","2","3"]} │ 44 │ [1,2,3] │ ᴺᵁᴸᴸ │ +│ {"c":"2020-01-01"} │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 2020-01-01 │ +└──────────────────────────────┴────────┴─────────┴────────────┘ ``` ## Tips for better usage of the JSON type From 41588b05cf1c8104a1e2e344b043a4eec5db5f10 Mon Sep 17 00:00:00 2001 From: avogar Date: Tue, 8 Oct 2024 08:10:21 +0000 Subject: [PATCH 070/353] Fix test --- ...mic_variant_in_order_by_group_by.reference | 188 +++++++++--------- ...1_dynamic_variant_in_order_by_group_by.sql | 32 +-- 2 files changed, 110 insertions(+), 110 deletions(-) diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference index 5c7b4cb0bea..5983dd15f5b 100644 --- a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.reference @@ -20,98 +20,6 @@ 4 0 1 -4 -3 -2 -0 -1 -4 -3 -2 -[4] -[3] -[2] -[0] -[1] -{'str':0} -{'str':1} -{'str':4} -{'str':3} -{'str':2} -0 -1 -2 -3 -4 -\N -0 -1 -2 -3 -4 -0 -1 -2 -3 -4 -0 -1 -2 -3 -4 -0 -1 -2 -3 -4 -0 -1 -4 -3 -2 -0 -1 -4 -3 -2 -[4] -[3] -[2] -[0] -[1] -{'str':0} -{'str':1} -{'str':4} -{'str':3} -{'str':2} -0 -1 -2 -3 -4 -\N -0 -1 -2 -3 -4 -0 -1 -2 -3 -4 -0 -1 -2 -3 -4 -0 -1 -2 -3 -4 -0 -1 2 3 4 @@ -120,11 +28,11 @@ 2 3 4 -[4] [0] [1] [2] [3] +[4] {'str':0} {'str':1} {'str':2} @@ -166,11 +74,103 @@ 2 3 4 -[4] [0] [1] [2] [3] +[4] +{'str':0} +{'str':1} +{'str':2} +{'str':3} +{'str':4} +0 +1 +2 +3 +4 +\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +[0] +[1] +[2] +[3] +[4] +{'str':0} +{'str':1} +{'str':2} +{'str':3} +{'str':4} +0 +1 +2 +3 +4 +\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 +[0] +[1] +[2] +[3] +[4] {'str':0} {'str':1} {'str':2} diff --git a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql index 6e4a39c7234..a53b02e8e41 100644 --- a/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql +++ b/tests/queries/0_stateless/03231_dynamic_variant_in_order_by_group_by.sql @@ -53,10 +53,10 @@ select * from test order by tuple(d); select * from test order by array(d); select * from test order by map('str', d); -select * from test group by d; -select * from test group by tuple(d); -select array(d) from test group by array(d); -select map('str', d) from test group by map('str', d); +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; select * from test group by grouping sets ((d), ('str')) order by all; set allow_experimental_analyzer=0; @@ -86,10 +86,10 @@ select * from test order by tuple(d); select * from test order by array(d); select * from test order by map('str', d); -select * from test group by d; -select * from test group by tuple(d); -select array(d) from test group 
by array(d); -select map('str', d) from test group by map('str', d); +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; select * from test group by grouping sets ((d), ('str')) order by all; drop table test; @@ -124,10 +124,10 @@ select * from test order by tuple(d); select * from test order by array(d); select * from test order by map('str', d); -select * from test group by d; -select * from test group by tuple(d); -select array(d) from test group by array(d); -select map('str', d) from test group by map('str', d); +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; select * from test group by grouping sets ((d), ('str')) order by all; set allow_experimental_analyzer=0; @@ -157,10 +157,10 @@ select * from test order by tuple(d); select * from test order by array(d); select * from test order by map('str', d); -select * from test group by d; -select * from test group by tuple(d); -select array(d) from test group by array(d); -select map('str', d) from test group by map('str', d); +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; select * from test group by grouping sets ((d), ('str')) order by all; drop table test; From c4cc4cca91ee5191cdc37ef3de14ea3cd70514d6 Mon Sep 17 00:00:00 2001 From: avogar Date: Wed, 9 Oct 2024 03:14:48 +0000 Subject: [PATCH 071/353] Fix tests and builds --- .../MergeTreeDataPartWriterCompact.cpp | 2 +- .../MergeTree/MergeTreeDataPartWriterWide.cpp | 2 +- .../03246_alter_from_string_to_json.reference | 160 +++++++++--------- .../03246_alter_from_string_to_json.sql.j2 | 11 +- .../03248_string_to_json_alter_fuzz.reference | 0 .../03248_string_to_json_alter_fuzz.sql | 17 -- 6 files changed, 88 insertions(+), 104 deletions(-) delete mode 100644 tests/queries/0_stateless/03248_string_to_json_alter_fuzz.reference delete mode 100644 tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp index 96623307c8f..377677c5244 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.cpp @@ -57,7 +57,7 @@ MergeTreeDataPartWriterCompact::MergeTreeDataPartWriterCompact( for (const auto & column : columns_list) { auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, nullptr, compression); + MergeTreeDataPartWriterCompact::addStreams(column, nullptr, compression); } } diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp index ba9d82fd097..f015fcb0d10 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.cpp @@ -102,7 +102,7 @@ MergeTreeDataPartWriterWide::MergeTreeDataPartWriterWide( for (const auto & column : columns_list) { auto compression = getCodecDescOrDefault(column.name, default_codec); - addStreams(column, nullptr, compression); + MergeTreeDataPartWriterWide::addStreams(column, 
nullptr, compression); } } diff --git a/tests/queries/0_stateless/03246_alter_from_string_to_json.reference b/tests/queries/0_stateless/03246_alter_from_string_to_json.reference index a2d3a799fff..8253c4fef48 100644 --- a/tests/queries/0_stateless/03246_alter_from_string_to_json.reference +++ b/tests/queries/0_stateless/03246_alter_from_string_to_json.reference @@ -15,26 +15,26 @@ key5 {"key1":"value7"} {"key0":"value8"} {"key1":"value9"} -{"key2":"value300000"} -{"key3":"value300001"} -{"key2":"value300002"} -{"key3":"value300003"} -{"key2":"value300004"} -{"key3":"value300005"} -{"key2":"value300006"} -{"key3":"value300007"} -{"key2":"value300008"} -{"key3":"value300009"} -{"key4":"value600000"} -{"key5":"value600001"} -{"key4":"value600002"} -{"key5":"value600003"} -{"key4":"value600004"} -{"key5":"value600005"} -{"key4":"value600006"} -{"key5":"value600007"} -{"key4":"value600008"} -{"key5":"value600009"} +{"key2":"value60000"} +{"key3":"value60001"} +{"key2":"value60002"} +{"key3":"value60003"} +{"key2":"value60004"} +{"key3":"value60005"} +{"key2":"value60006"} +{"key3":"value60007"} +{"key2":"value60008"} +{"key3":"value60009"} +{"key4":"value120000"} +{"key5":"value120001"} +{"key4":"value120002"} +{"key5":"value120003"} +{"key4":"value120004"} +{"key5":"value120005"} +{"key4":"value120006"} +{"key5":"value120007"} +{"key4":"value120008"} +{"key5":"value120009"} value0 \N \N \N \N \N \N value1 \N \N \N \N value2 \N \N \N \N \N @@ -45,26 +45,26 @@ value6 \N \N \N \N \N \N value7 \N \N \N \N value8 \N \N \N \N \N \N value9 \N \N \N \N -\N \N value300000 \N \N \N -\N \N \N value300001 \N \N -\N \N value300002 \N \N \N -\N \N \N value300003 \N \N -\N \N value300004 \N \N \N -\N \N \N value300005 \N \N -\N \N value300006 \N \N \N -\N \N \N value300007 \N \N -\N \N value300008 \N \N \N -\N \N \N value300009 \N \N -\N \N \N \N value600000 \N -\N \N \N \N \N value600001 -\N \N \N \N value600002 \N -\N \N \N \N \N value600003 -\N \N \N \N value600004 \N -\N \N \N \N \N value600005 -\N \N \N \N value600006 \N -\N \N \N \N \N value600007 -\N \N \N \N value600008 \N -\N \N \N \N \N value600009 +\N \N value60000 \N \N \N +\N \N \N value60001 \N \N +\N \N value60002 \N \N \N +\N \N \N value60003 \N \N +\N \N value60004 \N \N \N +\N \N \N value60005 \N \N +\N \N value60006 \N \N \N +\N \N \N value60007 \N \N +\N \N value60008 \N \N \N +\N \N \N value60009 \N \N +\N \N \N \N value120000 \N +\N \N \N \N \N value120001 +\N \N \N \N value120002 \N +\N \N \N \N \N value120003 +\N \N \N \N value120004 \N +\N \N \N \N \N value120005 +\N \N \N \N value120006 \N +\N \N \N \N \N value120007 +\N \N \N \N value120008 \N +\N \N \N \N \N value120009 All paths: ['key0','key1','key2','key3','key4','key5'] Shared data paths: @@ -82,26 +82,26 @@ key5 {"key1":"value7"} {"key0":"value8"} {"key1":"value9"} -{"key2":"value300000"} -{"key3":"value300001"} -{"key2":"value300002"} -{"key3":"value300003"} -{"key2":"value300004"} -{"key3":"value300005"} -{"key2":"value300006"} -{"key3":"value300007"} -{"key2":"value300008"} -{"key3":"value300009"} -{"key4":"value600000"} -{"key5":"value600001"} -{"key4":"value600002"} -{"key5":"value600003"} -{"key4":"value600004"} -{"key5":"value600005"} -{"key4":"value600006"} -{"key5":"value600007"} -{"key4":"value600008"} -{"key5":"value600009"} +{"key2":"value60000"} +{"key3":"value60001"} +{"key2":"value60002"} +{"key3":"value60003"} +{"key2":"value60004"} +{"key3":"value60005"} +{"key2":"value60006"} +{"key3":"value60007"} +{"key2":"value60008"} +{"key3":"value60009"} 
+{"key4":"value120000"} +{"key5":"value120001"} +{"key4":"value120002"} +{"key5":"value120003"} +{"key4":"value120004"} +{"key5":"value120005"} +{"key4":"value120006"} +{"key5":"value120007"} +{"key4":"value120008"} +{"key5":"value120009"} value0 \N \N \N \N \N \N value1 \N \N \N \N value2 \N \N \N \N \N @@ -112,23 +112,23 @@ value6 \N \N \N \N \N \N value7 \N \N \N \N value8 \N \N \N \N \N \N value9 \N \N \N \N -\N \N value300000 \N \N \N -\N \N \N value300001 \N \N -\N \N value300002 \N \N \N -\N \N \N value300003 \N \N -\N \N value300004 \N \N \N -\N \N \N value300005 \N \N -\N \N value300006 \N \N \N -\N \N \N value300007 \N \N -\N \N value300008 \N \N \N -\N \N \N value300009 \N \N -\N \N \N \N value600000 \N -\N \N \N \N \N value600001 -\N \N \N \N value600002 \N -\N \N \N \N \N value600003 -\N \N \N \N value600004 \N -\N \N \N \N \N value600005 -\N \N \N \N value600006 \N -\N \N \N \N \N value600007 -\N \N \N \N value600008 \N -\N \N \N \N \N value600009 +\N \N value60000 \N \N \N +\N \N \N value60001 \N \N +\N \N value60002 \N \N \N +\N \N \N value60003 \N \N +\N \N value60004 \N \N \N +\N \N \N value60005 \N \N +\N \N value60006 \N \N \N +\N \N \N value60007 \N \N +\N \N value60008 \N \N \N +\N \N \N value60009 \N \N +\N \N \N \N value120000 \N +\N \N \N \N \N value120001 +\N \N \N \N value120002 \N +\N \N \N \N \N value120003 +\N \N \N \N value120004 \N +\N \N \N \N \N value120005 +\N \N \N \N value120006 \N +\N \N \N \N \N value120007 +\N \N \N \N value120008 \N +\N \N \N \N \N value120009 diff --git a/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 index a13867b145d..e8760b659dc 100644 --- a/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 +++ b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 @@ -1,4 +1,5 @@ set allow_experimental_json_type = 1; +set max_block_size = 20000; drop table if exists test; @@ -7,7 +8,7 @@ drop table if exists test; {{ create_command }} -insert into test select number, toJSONString(map('key' || multiIf(number < 300000, number % 2, number < 600000, number % 2 + 2, number % 2 + 4), 'value' || number)) from numbers(1000000); +insert into test select number, toJSONString(map('key' || multiIf(number < 60000, number % 2, number < 120000, number % 2 + 2, number % 2 + 4), 'value' || number)) from numbers(200000); alter table test modify column json JSON settings mutations_sync=1; @@ -16,11 +17,11 @@ select distinctJSONPaths(json) from test; select 'Shared data paths:'; select distinct (arrayJoin(JSONSharedDataPaths(json))) as path from test order by path; select json from test order by x limit 10; -select json from test order by x limit 10 offset 300000; -select json from test order by x limit 10 offset 600000; +select json from test order by x limit 10 offset 60000; +select json from test order by x limit 10 offset 120000; select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10; -select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 300000; -select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 600000; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 60000; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 120000; select json from test format Null; select json from test 
order by x format Null; diff --git a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.reference b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.reference deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql b/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql deleted file mode 100644 index d4d775732e8..00000000000 --- a/tests/queries/0_stateless/03248_string_to_json_alter_fuzz.sql +++ /dev/null @@ -1,17 +0,0 @@ -set allow_experimental_json_type=1; -set max_insert_block_size=10000; -set max_block_size=10000; - -drop table if exists test; -drop named collection if exists json_alter_fuzzer; - -create table test (json String) engine=MergeTree order by tuple(); -create named collection json_alter_fuzzer AS json_str='{}'; -insert into test select * from fuzzJSON(json_alter_fuzzer, reuse_output=true, max_output_length=64) limit 200000; -alter table test modify column json JSON(max_dynamic_paths=100) settings mutations_sync=1; -select json from test format Null; -optimize table test final; -select json from test format Null; -drop named collection json_alter_fuzzer; -drop table test; - From b86f3481d1ebf82601b38a12343fb4b055765cda Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 10 Oct 2024 00:45:45 +0000 Subject: [PATCH 072/353] exclude jobs option for fuzzers --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index b3c19fbb0a4..e4a8c691ded 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -122,7 +122,7 @@ def run_fuzzer(fuzzer: str, timeout: int): if parser.has_section("libfuzzer"): custom_libfuzzer_options = " ".join( - f"-{key}={value}" for key, value in parser["libfuzzer"].items() + f"-{key}={value}" for key, value in parser["libfuzzer"].items() if key != "jobs" ) if parser.has_section("fuzzer_arguments"): From c6d6ee27f4e7feaa2dbcedcf2a3c98faef041345 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 10 Oct 2024 00:52:58 +0000 Subject: [PATCH 073/353] Automatic style fix --- tests/fuzz/runner.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index e4a8c691ded..f398b33308e 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -122,7 +122,9 @@ def run_fuzzer(fuzzer: str, timeout: int): if parser.has_section("libfuzzer"): custom_libfuzzer_options = " ".join( - f"-{key}={value}" for key, value in parser["libfuzzer"].items() if key != "jobs" + f"-{key}={value}" + for key, value in parser["libfuzzer"].items() + if key != "jobs" ) if parser.has_section("fuzzer_arguments"): From df77c6f120beddfe97ff4c8c247473db56c587d7 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:24:47 +0800 Subject: [PATCH 074/353] Print invalid version in exception message --- src/DataTypes/Serializations/SerializationDynamic.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index b00668fa8a4..0e6e866e454 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -89,7 +89,7 @@ SerializationDynamic::DynamicSerializationVersion::DynamicSerializationVersion(U void SerializationDynamic::DynamicSerializationVersion::checkVersion(UInt64 version) { if (version != V1 && 
version != V2) - throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Dynamic structure serialization."); + throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid version for Dynamic structure serialization: {}", version); } void SerializationDynamic::serializeBinaryBulkStatePrefix( From 845c4a543c091f5951b5e5b2063531ad264da6d1 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 10 Oct 2024 18:59:48 +0000 Subject: [PATCH 075/353] add test for libfuzzer --- utils/CMakeLists.txt | 4 ++++ utils/libfuzzer-test/CMakeLists.txt | 1 + utils/libfuzzer-test/README.md | 1 + utils/libfuzzer-test/test_basic_fuzzer/CMakeLists.txt | 1 + utils/libfuzzer-test/test_basic_fuzzer/main.cpp | 11 +++++++++++ 5 files changed, 18 insertions(+) create mode 100644 utils/libfuzzer-test/CMakeLists.txt create mode 100644 utils/libfuzzer-test/README.md create mode 100644 utils/libfuzzer-test/test_basic_fuzzer/CMakeLists.txt create mode 100644 utils/libfuzzer-test/test_basic_fuzzer/main.cpp diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index ec44a1e1de9..8c706ee6b67 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -23,3 +23,7 @@ if (ENABLE_UTILS) add_subdirectory (keeper-data-dumper) add_subdirectory (memcpy-bench) endif () + +if (ENABLE_FUZZING) + add_subdirectory (libfuzzer-test) +endif () diff --git a/utils/libfuzzer-test/CMakeLists.txt b/utils/libfuzzer-test/CMakeLists.txt new file mode 100644 index 00000000000..8765787ff8a --- /dev/null +++ b/utils/libfuzzer-test/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory (test_basic_fuzzer) diff --git a/utils/libfuzzer-test/README.md b/utils/libfuzzer-test/README.md new file mode 100644 index 00000000000..5598cbdb961 --- /dev/null +++ b/utils/libfuzzer-test/README.md @@ -0,0 +1 @@ +This folder contains various stuff intended to test libfuzzer functionality. 
diff --git a/utils/libfuzzer-test/test_basic_fuzzer/CMakeLists.txt b/utils/libfuzzer-test/test_basic_fuzzer/CMakeLists.txt new file mode 100644 index 00000000000..dc927f35a4b --- /dev/null +++ b/utils/libfuzzer-test/test_basic_fuzzer/CMakeLists.txt @@ -0,0 +1 @@ +add_executable (test_basic_fuzzer main.cpp) diff --git a/utils/libfuzzer-test/test_basic_fuzzer/main.cpp b/utils/libfuzzer-test/test_basic_fuzzer/main.cpp new file mode 100644 index 00000000000..7ccad63273d --- /dev/null +++ b/utils/libfuzzer-test/test_basic_fuzzer/main.cpp @@ -0,0 +1,11 @@ +#include +#include + +extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) +{ + if (size > 0 && data[0] == 'H') + if (size > 1 && data[1] == 'I') + if (size > 2 && data[2] == '!') + __builtin_trap(); + return 0; +} From 6d8125d520a1c00efde8377f27a096aec56a41db Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Thu, 10 Oct 2024 15:38:22 -0400 Subject: [PATCH 076/353] trigger build From b064d757ca0af321e1a4929d6be1fe3b12dd200f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Thu, 10 Oct 2024 15:48:33 -0400 Subject: [PATCH 077/353] trigger build From ca5f3c50d2e9a74a0a5a7cf9b5ef7f42e171fba7 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Thu, 10 Oct 2024 16:10:02 -0400 Subject: [PATCH 078/353] trigger build --- src/DataTypes/fuzzers/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/DataTypes/fuzzers/CMakeLists.txt b/src/DataTypes/fuzzers/CMakeLists.txt index 8dedd3470e2..8940586fc70 100644 --- a/src/DataTypes/fuzzers/CMakeLists.txt +++ b/src/DataTypes/fuzzers/CMakeLists.txt @@ -1,2 +1,3 @@ clickhouse_add_executable(data_type_deserialization_fuzzer data_type_deserialization_fuzzer.cpp ${SRCS}) + target_link_libraries(data_type_deserialization_fuzzer PRIVATE clickhouse_aggregate_functions dbms) From 8f9ccdf69c983440d698deb0497250a92dcf76ec Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 10 Oct 2024 23:08:52 +0000 Subject: [PATCH 079/353] fix parser --- tests/fuzz/runner.py | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index f398b33308e..c6c978c3508 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -17,8 +17,7 @@ FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") def report(source: str, reason: str, call_stack: list, test_unit: str): print(f"########### REPORT: {source} {reason} {test_unit}") - for line in call_stack: - print(f" {line}") + print("".join(call_stack)) print("########### END OF REPORT ###########") @@ -31,31 +30,28 @@ def process_error(error: str): ERROR = r"^==\d+==\s?ERROR: (\S+): (.*)" error_source = "" error_reason = "" - TEST_UNIT_LINE = r"artifact_prefix='.*/'; Test unit written to (.*)" - call_stack = [] - is_call_stack = False + test_unit = "" + TEST_UNIT_LINE = r"artifact_prefix='.*\/'; Test unit written to (.*)" + error_info = [] + is_error = False # pylint: disable=unused-variable - for line_num, line in enumerate(error.splitlines(), 1): - if is_call_stack: - if re.search(r"^==\d+==", line): - is_call_stack = False - continue - call_stack.append(line) - continue - - if call_stack: + for line_num, line in enumerate(sys.stdin, 1): + if is_error: + error_info.append(line) match = re.search(TEST_UNIT_LINE, line) if match: - report(error_source, error_reason, call_stack, match.group(1)) - call_stack.clear() + 
test_unit = match.group(1) continue match = re.search(ERROR, line) if match: + error_info.append(line) error_source = match.group(1) error_reason = match.group(2) - is_call_stack = True + is_error = True + + report(error_source, error_reason, error_info, test_unit) def kill_fuzzer(fuzzer: str): From 85a6bb1d1fc4024d57139008953fb35b5be51288 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 11 Oct 2024 03:11:39 +0000 Subject: [PATCH 080/353] fix parser --- tests/fuzz/runner.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index c6c978c3508..3a462d11172 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -16,9 +16,9 @@ FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") def report(source: str, reason: str, call_stack: list, test_unit: str): - print(f"########### REPORT: {source} {reason} {test_unit}") - print("".join(call_stack)) - print("########### END OF REPORT ###########") + logging.info("########### REPORT: %s %s %s", source, reason, test_unit) + logging.info("".join(call_stack)) + logging.info("########### END OF REPORT ###########") # pylint: disable=unused-argument @@ -157,7 +157,7 @@ def run_fuzzer(fuzzer: str, timeout: int): ) except subprocess.CalledProcessError as e: # print("Command failed with error:", e) - print("Stderr output: ", e.stderr) + logging.info("Stderr output: %s", e.stderr) process_error(e.stderr) except subprocess.TimeoutExpired as e: logging.info("Timeout for %s", cmd_line) From 5e99f63e7e5b825813f01ac56a0094d6c95c276a Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 11 Oct 2024 04:05:08 +0000 Subject: [PATCH 081/353] fix parser --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 3a462d11172..1d3829598c3 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -36,7 +36,7 @@ def process_error(error: str): is_error = False # pylint: disable=unused-variable - for line_num, line in enumerate(sys.stdin, 1): + for line_num, line in enumerate(error.splitlines(), 1): if is_error: error_info.append(line) match = re.search(TEST_UNIT_LINE, line) From 1bd4be3df127fdc42e4df01dd3c3da938ce6d327 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 16 Oct 2024 01:10:57 +0000 Subject: [PATCH 082/353] prepare for database upload --- tests/fuzz/runner.py | 44 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 1d3829598c3..bc6d3864810 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -8,6 +8,7 @@ import signal import subprocess from pathlib import Path from time import sleep +from typing import List from botocore.exceptions import ClientError @@ -26,7 +27,7 @@ def process_fuzzer_output(output: str): pass -def process_error(error: str): +def process_error(error: str) -> list: ERROR = r"^==\d+==\s?ERROR: (\S+): (.*)" error_source = "" error_reason = "" @@ -52,6 +53,7 @@ def process_error(error: str): is_error = True report(error_source, error_reason, error_info, test_unit) + return error_info def kill_fuzzer(fuzzer: str): @@ -64,7 +66,7 @@ def kill_fuzzer(fuzzer: str): os.kill(pid, signal.SIGKILL) -def run_fuzzer(fuzzer: str, timeout: int): +def run_fuzzer(fuzzer: str, timeout: int) -> TestResult: s3 = S3Helper() logging.info("Running fuzzer %s...", fuzzer) @@ -142,8 +144,9 @@ def run_fuzzer(fuzzer: str, timeout: int): cmd_line += " < /dev/null" logging.info("...will 
execute: %s", cmd_line) - # subprocess.check_call(cmd_line, shell=True) + test_result = TestResult(fuzzer, "OK") + stopwatch = Stopwatch() try: result = subprocess.run( cmd_line, @@ -158,19 +161,36 @@ def run_fuzzer(fuzzer: str, timeout: int): except subprocess.CalledProcessError as e: # print("Command failed with error:", e) logging.info("Stderr output: %s", e.stderr) - process_error(e.stderr) + test_result = TestResult( + fuzzer, + "FAIL", + stopwatch.duration_seconds, + "", + "\n".join(process_error(e.stderr)), + ) except subprocess.TimeoutExpired as e: logging.info("Timeout for %s", cmd_line) kill_fuzzer(fuzzer) sleep(10) process_fuzzer_output(e.stderr) + test_result = TestResult( + fuzzer, + "Timeout", + stopwatch.duration_seconds, + "", + "", + ) else: process_fuzzer_output(result.stderr) + test_result.time = stopwatch.duration_seconds s3.upload_build_directory_to_s3( Path(new_corpus_dir), f"fuzzer/corpus/{fuzzer}", False ) + logging.info("test_result: %s", test_result) + return test_result + def main(): logging.basicConfig(level=logging.INFO) @@ -183,10 +203,17 @@ def main(): if match: timeout += int(match.group(2)) + test_results = [] + stopwatch = Stopwatch() with Path() as current: for fuzzer in current.iterdir(): if (current / fuzzer).is_file() and os.access(current / fuzzer, os.X_OK): - run_fuzzer(fuzzer.name, timeout) + test_results.append(run_fuzzer(fuzzer.name, timeout)) + + prepared_results = prepare_tests_results_for_clickhouse(PRInfo(), test_results, "failure", stopwatch.duration_seconds, stopwatch.start_time_str, "", "libFuzzer") + # ch_helper = ClickHouseHelper() + # ch_helper.insert_events_into(db="default", table="checks", events=prepared_results) + logging.info("prepared_results: %s", prepared_results) if __name__ == "__main__": @@ -198,5 +225,12 @@ if __name__ == "__main__": S3_BUILDS_BUCKET, ) from s3_helper import S3Helper # pylint: disable=import-error,no-name-in-module + from clickhouse_helper import ( # pylint: disable=import-error,no-name-in-module + ClickHouseHelper, + prepare_tests_results_for_clickhouse, + ) + from pr_info import PRInfo # pylint: disable=import-error,no-name-in-module + from stopwatch import Stopwatch # pylint: disable=import-error,no-name-in-module + from report import TestResult # pylint: disable=import-error,no-name-in-module main() From e590d036fed24a126a63c226d4ee6e01d7a66957 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 16 Oct 2024 01:26:24 +0000 Subject: [PATCH 083/353] fix style --- tests/fuzz/runner.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index bc6d3864810..313b38d2d86 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -8,7 +8,6 @@ import signal import subprocess from pathlib import Path from time import sleep -from typing import List from botocore.exceptions import ClientError @@ -210,7 +209,15 @@ def main(): if (current / fuzzer).is_file() and os.access(current / fuzzer, os.X_OK): test_results.append(run_fuzzer(fuzzer.name, timeout)) - prepared_results = prepare_tests_results_for_clickhouse(PRInfo(), test_results, "failure", stopwatch.duration_seconds, stopwatch.start_time_str, "", "libFuzzer") + prepared_results = prepare_tests_results_for_clickhouse( + PRInfo(), + test_results, + "failure", + stopwatch.duration_seconds, + stopwatch.start_time_str, + "", + "libFuzzer", + ) # ch_helper = ClickHouseHelper() # ch_helper.insert_events_into(db="default", table="checks", events=prepared_results) 
logging.info("prepared_results: %s", prepared_results) @@ -221,16 +228,16 @@ if __name__ == "__main__": ACTIVE_DIR = path.dirname(path.abspath(__file__)) sys.path.append((Path(path.dirname(ACTIVE_DIR)) / "ci").as_posix()) - from env_helper import ( # pylint: disable=import-error,no-name-in-module - S3_BUILDS_BUCKET, - ) - from s3_helper import S3Helper # pylint: disable=import-error,no-name-in-module - from clickhouse_helper import ( # pylint: disable=import-error,no-name-in-module + from clickhouse_helper import ( # pylint: disable=import-error,no-name-in-module,unused-import ClickHouseHelper, prepare_tests_results_for_clickhouse, ) + from env_helper import ( # pylint: disable=import-error,no-name-in-module + S3_BUILDS_BUCKET, + ) from pr_info import PRInfo # pylint: disable=import-error,no-name-in-module - from stopwatch import Stopwatch # pylint: disable=import-error,no-name-in-module from report import TestResult # pylint: disable=import-error,no-name-in-module + from s3_helper import S3Helper # pylint: disable=import-error,no-name-in-module + from stopwatch import Stopwatch # pylint: disable=import-error,no-name-in-module main() From 9c790785d63695e16773192c4cdad3ddd27f2a3e Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 16 Oct 2024 02:15:04 +0000 Subject: [PATCH 084/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 313b38d2d86..8dd510a8f6e 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -65,7 +65,7 @@ def kill_fuzzer(fuzzer: str): os.kill(pid, signal.SIGKILL) -def run_fuzzer(fuzzer: str, timeout: int) -> TestResult: +def run_fuzzer(fuzzer: str, timeout: int): s3 = S3Helper() logging.info("Running fuzzer %s...", fuzzer) From fbbac87299ed8a6cec447786eed5afb628c48b66 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 16 Oct 2024 02:57:58 +0000 Subject: [PATCH 085/353] add requests --- docker/test/libfuzzer/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/libfuzzer/requirements.txt b/docker/test/libfuzzer/requirements.txt index 74147513e76..fd19ad04d8f 100644 --- a/docker/test/libfuzzer/requirements.txt +++ b/docker/test/libfuzzer/requirements.txt @@ -26,3 +26,4 @@ wadllib==1.3.6 wheel==0.37.1 zipp==1.0.0 boto3 +requests From 7ed274559330501da9f3d570cc7460ec22926e79 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 16 Oct 2024 03:57:15 +0000 Subject: [PATCH 086/353] add github --- docker/test/libfuzzer/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/libfuzzer/requirements.txt b/docker/test/libfuzzer/requirements.txt index fd19ad04d8f..bebf26db0bf 100644 --- a/docker/test/libfuzzer/requirements.txt +++ b/docker/test/libfuzzer/requirements.txt @@ -27,3 +27,4 @@ wheel==0.37.1 zipp==1.0.0 boto3 requests +github From c1956d4458b9722371610047fda01cccc7278fbb Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 16 Oct 2024 04:49:53 +0000 Subject: [PATCH 087/353] add pygithub --- docker/test/libfuzzer/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/test/libfuzzer/requirements.txt b/docker/test/libfuzzer/requirements.txt index bebf26db0bf..d73af2861e6 100644 --- a/docker/test/libfuzzer/requirements.txt +++ b/docker/test/libfuzzer/requirements.txt @@ -27,4 +27,4 @@ wheel==0.37.1 zipp==1.0.0 boto3 requests -github +pygithub From 9ebd2fc4dbd3c6407b9bfb1cc9ce9b0c4708cb0f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 16 Oct 2024 05:42:19 
+0000 Subject: [PATCH 088/353] add unidiff --- docker/test/libfuzzer/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/test/libfuzzer/requirements.txt b/docker/test/libfuzzer/requirements.txt index d73af2861e6..3fd33058a6b 100644 --- a/docker/test/libfuzzer/requirements.txt +++ b/docker/test/libfuzzer/requirements.txt @@ -28,3 +28,4 @@ zipp==1.0.0 boto3 requests pygithub +unidiff From 7981e99bee1c0f4a6f79ddcace1c53183c883d18 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 16 Oct 2024 14:18:19 +0000 Subject: [PATCH 089/353] use func-tester --- tests/ci/ci_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index a34ef624ce3..7637c096474 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -523,7 +523,7 @@ class CI: run_by_labels=[Tags.libFuzzer], timeout=10800, run_command='libfuzzer_test_check.py "$CHECK_NAME"', - runner_type=Runners.STYLE_CHECKER, + runner_type=Runners.FUNC_TESTER, ), JobNames.DOCKER_SERVER: CommonJobConfigs.DOCKER_SERVER.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE, BuildNames.PACKAGE_AARCH64] From f5a99dde8651fcbdcfdc8ba26c7cde4fa86d37c1 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 16 Oct 2024 21:23:04 +0000 Subject: [PATCH 090/353] test results to output directory --- docker/test/libfuzzer/requirements.txt | 3 -- tests/fuzz/runner.py | 57 ++++++++------------------ 2 files changed, 18 insertions(+), 42 deletions(-) diff --git a/docker/test/libfuzzer/requirements.txt b/docker/test/libfuzzer/requirements.txt index 3fd33058a6b..74147513e76 100644 --- a/docker/test/libfuzzer/requirements.txt +++ b/docker/test/libfuzzer/requirements.txt @@ -26,6 +26,3 @@ wadllib==1.3.6 wheel==0.37.1 zipp==1.0.0 boto3 -requests -pygithub -unidiff diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 8dd510a8f6e..a8d48d7c5f3 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -13,6 +13,7 @@ from botocore.exceptions import ClientError DEBUGGER = os.getenv("DEBUGGER", "") FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") +OUTPUT = "/test_output" def report(source: str, reason: str, call_stack: list, test_unit: str): @@ -121,7 +122,7 @@ def run_fuzzer(fuzzer: str, timeout: int): custom_libfuzzer_options = " ".join( f"-{key}={value}" for key, value in parser["libfuzzer"].items() - if key != "jobs" + if key != "jobs" and key != "exact_artifact_path" ) if parser.has_section("fuzzer_arguments"): @@ -130,8 +131,14 @@ def run_fuzzer(fuzzer: str, timeout: int): for key, value in parser["fuzzer_arguments"].items() ) + exact_artifact_path = f"{OUTPUT}/{fuzzer}.unit" + status_path = f"{OUTPUT}/{fuzzer}.status" + out_path = f"{OUTPUT}/{fuzzer}.out" + cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {new_corpus_dir} {active_corpus_dir} {seed_corpus_dir}" + cmd_line += f" -exact_artifact_path={exact_artifact_path}" + if custom_libfuzzer_options: cmd_line += f" {custom_libfuzzer_options}" if fuzzer_arguments: @@ -144,12 +151,11 @@ def run_fuzzer(fuzzer: str, timeout: int): logging.info("...will execute: %s", cmd_line) - test_result = TestResult(fuzzer, "OK") stopwatch = Stopwatch() try: result = subprocess.run( cmd_line, - stderr=subprocess.PIPE, + stderr=open(out_path, "w"), stdout=subprocess.DEVNULL, text=True, check=True, @@ -160,36 +166,24 @@ def run_fuzzer(fuzzer: str, timeout: int): except subprocess.CalledProcessError as e: # print("Command failed with error:", e) logging.info("Stderr output: %s", e.stderr) - test_result = TestResult( - 
fuzzer, - "FAIL", - stopwatch.duration_seconds, - "", - "\n".join(process_error(e.stderr)), - ) + with open(status_path, "w") as status: + status.write(f"FAIL\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n") except subprocess.TimeoutExpired as e: logging.info("Timeout for %s", cmd_line) kill_fuzzer(fuzzer) sleep(10) process_fuzzer_output(e.stderr) - test_result = TestResult( - fuzzer, - "Timeout", - stopwatch.duration_seconds, - "", - "", - ) + with open(status_path,"w") as status: + status.write(f"Timeout\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n") else: process_fuzzer_output(result.stderr) - test_result.time = stopwatch.duration_seconds + with open(status_path,"w") as status: + status.write(f"OK\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n") s3.upload_build_directory_to_s3( Path(new_corpus_dir), f"fuzzer/corpus/{fuzzer}", False ) - logging.info("test_result: %s", test_result) - return test_result - def main(): logging.basicConfig(level=logging.INFO) @@ -202,25 +196,16 @@ def main(): if match: timeout += int(match.group(2)) - test_results = [] stopwatch = Stopwatch() with Path() as current: for fuzzer in current.iterdir(): if (current / fuzzer).is_file() and os.access(current / fuzzer, os.X_OK): - test_results.append(run_fuzzer(fuzzer.name, timeout)) + run_fuzzer(fuzzer.name, timeout) + + subprocess.check_call(f"ls -al {OUTPUT}", shell=True) - prepared_results = prepare_tests_results_for_clickhouse( - PRInfo(), - test_results, - "failure", - stopwatch.duration_seconds, - stopwatch.start_time_str, - "", - "libFuzzer", - ) # ch_helper = ClickHouseHelper() # ch_helper.insert_events_into(db="default", table="checks", events=prepared_results) - logging.info("prepared_results: %s", prepared_results) if __name__ == "__main__": @@ -228,15 +213,9 @@ if __name__ == "__main__": ACTIVE_DIR = path.dirname(path.abspath(__file__)) sys.path.append((Path(path.dirname(ACTIVE_DIR)) / "ci").as_posix()) - from clickhouse_helper import ( # pylint: disable=import-error,no-name-in-module,unused-import - ClickHouseHelper, - prepare_tests_results_for_clickhouse, - ) from env_helper import ( # pylint: disable=import-error,no-name-in-module S3_BUILDS_BUCKET, ) - from pr_info import PRInfo # pylint: disable=import-error,no-name-in-module - from report import TestResult # pylint: disable=import-error,no-name-in-module from s3_helper import S3Helper # pylint: disable=import-error,no-name-in-module from stopwatch import Stopwatch # pylint: disable=import-error,no-name-in-module From ae71f1070fdc459809553a38b289f83b16dcc71f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 16 Oct 2024 21:39:03 +0000 Subject: [PATCH 091/353] fix style --- tests/fuzz/runner.py | 41 ++++++++++++++++++++++++----------------- 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index a8d48d7c5f3..f8d318b174a 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -122,7 +122,7 @@ def run_fuzzer(fuzzer: str, timeout: int): custom_libfuzzer_options = " ".join( f"-{key}={value}" for key, value in parser["libfuzzer"].items() - if key != "jobs" and key != "exact_artifact_path" + if key not in ('jobs', 'exact_artifact_path') ) if parser.has_section("fuzzer_arguments"): @@ -153,32 +153,39 @@ def run_fuzzer(fuzzer: str, timeout: int): stopwatch = Stopwatch() try: - result = subprocess.run( - cmd_line, - stderr=open(out_path, "w"), - stdout=subprocess.DEVNULL, - text=True, - check=True, - shell=True, - errors="replace", - 
timeout=timeout, - ) + with open(out_path, "wb") as out: + result = subprocess.run( + cmd_line, + stderr=out, + stdout=subprocess.DEVNULL, + text=True, + check=True, + shell=True, + errors="replace", + timeout=timeout, + ) except subprocess.CalledProcessError as e: # print("Command failed with error:", e) logging.info("Stderr output: %s", e.stderr) - with open(status_path, "w") as status: - status.write(f"FAIL\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n") + with open(status_path, "wb") as status: + status.write( + f"FAIL\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" + ) except subprocess.TimeoutExpired as e: logging.info("Timeout for %s", cmd_line) kill_fuzzer(fuzzer) sleep(10) process_fuzzer_output(e.stderr) - with open(status_path,"w") as status: - status.write(f"Timeout\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n") + with open(status_path,"wb") as status: + status.write( + f"Timeout\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" + ) else: process_fuzzer_output(result.stderr) - with open(status_path,"w") as status: - status.write(f"OK\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n") + with open(status_path,"wb") as status: + status.write( + f"OK\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" + ) s3.upload_build_directory_to_s3( Path(new_corpus_dir), f"fuzzer/corpus/{fuzzer}", False From 55a24facd29cb8cb68992334b6a68456fa966c19 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 16 Oct 2024 21:39:48 +0000 Subject: [PATCH 092/353] fix style --- tests/fuzz/runner.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index f8d318b174a..e933c94f2a8 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -203,7 +203,6 @@ def main(): if match: timeout += int(match.group(2)) - stopwatch = Stopwatch() with Path() as current: for fuzzer in current.iterdir(): if (current / fuzzer).is_file() and os.access(current / fuzzer, os.X_OK): From 7a096859a2c1056df602e5d0d4555bff5641a1db Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 16 Oct 2024 21:47:00 +0000 Subject: [PATCH 093/353] Automatic style fix --- tests/fuzz/runner.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index e933c94f2a8..a8ca8246ed2 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -122,7 +122,7 @@ def run_fuzzer(fuzzer: str, timeout: int): custom_libfuzzer_options = " ".join( f"-{key}={value}" for key, value in parser["libfuzzer"].items() - if key not in ('jobs', 'exact_artifact_path') + if key not in ("jobs", "exact_artifact_path") ) if parser.has_section("fuzzer_arguments"): @@ -176,13 +176,13 @@ def run_fuzzer(fuzzer: str, timeout: int): kill_fuzzer(fuzzer) sleep(10) process_fuzzer_output(e.stderr) - with open(status_path,"wb") as status: + with open(status_path, "wb") as status: status.write( f"Timeout\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) else: process_fuzzer_output(result.stderr) - with open(status_path,"wb") as status: + with open(status_path, "wb") as status: status.write( f"OK\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) From eb7bf08da5c3a693c8ebb8617eb35d36a029e01f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 16 Oct 2024 22:34:40 +0000 Subject: [PATCH 094/353] fix --- tests/fuzz/runner.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index a8ca8246ed2..f483608605b 
100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -167,7 +167,7 @@ def run_fuzzer(fuzzer: str, timeout: int): except subprocess.CalledProcessError as e: # print("Command failed with error:", e) logging.info("Stderr output: %s", e.stderr) - with open(status_path, "wb") as status: + with open(status_path, "w", encoding="utf-8") as status: status.write( f"FAIL\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) @@ -176,13 +176,13 @@ def run_fuzzer(fuzzer: str, timeout: int): kill_fuzzer(fuzzer) sleep(10) process_fuzzer_output(e.stderr) - with open(status_path, "wb") as status: + with open(status_path, "w", encoding="utf-8") as status: status.write( f"Timeout\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) else: process_fuzzer_output(result.stderr) - with open(status_path, "wb") as status: + with open(status_path, "w", encoding="utf-8") as status: status.write( f"OK\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) From 84c664dadaa5bac20fe3afac3e386befcc22fb6a Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 01:00:27 +0000 Subject: [PATCH 095/353] move all s3 stuff to check script --- docker/test/libfuzzer/requirements.txt | 1 - tests/ci/libfuzzer_test_check.py | 39 ++++++++++++++++++- tests/fuzz/runner.py | 53 ++++++++++++-------------- 3 files changed, 61 insertions(+), 32 deletions(-) diff --git a/docker/test/libfuzzer/requirements.txt b/docker/test/libfuzzer/requirements.txt index 74147513e76..3dce93e023b 100644 --- a/docker/test/libfuzzer/requirements.txt +++ b/docker/test/libfuzzer/requirements.txt @@ -25,4 +25,3 @@ six==1.16.0 wadllib==1.3.6 wheel==0.37.1 zipp==1.0.0 -boto3 diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 5de28d5641a..a4f31b1663d 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -11,12 +11,17 @@ from typing import List from build_download_helper import download_fuzzers from clickhouse_helper import CiLogsCredentials from docker_images_helper import DockerImage, get_docker_image, pull_image -from env_helper import REPO_COPY, REPORT_PATH, TEMP_PATH +from env_helper import REPO_COPY, REPORT_PATH, S3_BUILDS_BUCKET, TEMP_PATH from pr_info import PRInfo +from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen +from botocore.exceptions import ClientError + + NO_CHANGES_MSG = "Nothing to run" +s3 = S3Helper() def get_additional_envs(check_name, run_by_hash_num, run_by_hash_total): @@ -85,6 +90,34 @@ def parse_args(): return parser.parse_args() +def download_corpus(corpus_path: str, fuzzer_name: str): + logging.info("Download corpus for %s ...", fuzzer_name) + + units = [] + + try: + units = s3.download_files( + bucket=S3_BUILDS_BUCKET, + s3_path=f"fuzzer/corpus/{fuzzer_name}/", + file_suffix="", + local_directory=corpus_path, + ) + except ClientError as e: + if e.response["Error"]["Code"] == "NoSuchKey": + logging.debug("No active corpus exists for %s", fuzzer_name) + else: + raise + + logging.info("...downloaded %d units", len(units)) + + +def upload_corpus(fuzzers_path: str): + for file in os.listdir(f"{fuzzers_path}/corpus/"): + s3.upload_build_directory_to_s3( + Path(f"{fuzzers_path}/corpus/{file}"), f"fuzzer/corpus/{file}", False + ) + + def main(): logging.basicConfig(level=logging.INFO) @@ -119,6 +152,7 @@ def main(): for file in os.listdir(fuzzers_path): if file.endswith("_fuzzer"): os.chmod(fuzzers_path / file, 0o777) + download_corpus(f"{fuzzers_path}/{file}.corpus", file) elif 
file.endswith("_seed_corpus.zip"): corpus_path = fuzzers_path / (file.removesuffix("_seed_corpus.zip") + ".in") with zipfile.ZipFile(fuzzers_path / file, "r") as zfd: @@ -133,7 +167,7 @@ def main(): check_name, run_by_hash_num, run_by_hash_total ) - additional_envs.append("CI=1") + # additional_envs.append("CI=1") ci_logs_credentials = CiLogsCredentials(Path(temp_path) / "export-logs-config.sh") ci_logs_args = ci_logs_credentials.get_docker_arguments( @@ -154,6 +188,7 @@ def main(): retcode = process.wait() if retcode == 0: logging.info("Run successfully") + upload_corpus(fuzzers_path) else: logging.info("Run failed") diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index f483608605b..b4c174de6b1 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 import configparser +import datetime import logging import os import re @@ -16,6 +17,23 @@ FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") OUTPUT = "/test_output" +class Stopwatch: + def __init__(self): + self.reset() + + @property + def duration_seconds(self) -> float: + return (datetime.datetime.utcnow() - self.start_time).total_seconds() + + @property + def start_time_str(self) -> str: + return self.start_time_str_value + + def reset(self) -> None: + self.start_time = datetime.datetime.utcnow() + self.start_time_str_value = self.start_time.strftime("%Y-%m-%d %H:%M:%S") + + def report(source: str, reason: str, call_stack: list, test_unit: str): logging.info("########### REPORT: %s %s %s", source, reason, test_unit) logging.info("".join(call_stack)) @@ -67,8 +85,6 @@ def kill_fuzzer(fuzzer: str): def run_fuzzer(fuzzer: str, timeout: int): - s3 = S3Helper() - logging.info("Running fuzzer %s...", fuzzer) seed_corpus_dir = f"{fuzzer}.in" @@ -77,20 +93,7 @@ def run_fuzzer(fuzzer: str, timeout: int): seed_corpus_dir = "" active_corpus_dir = f"{fuzzer}.corpus" - try: - s3.download_files( - bucket=S3_BUILDS_BUCKET, - s3_path=f"fuzzer/corpus/{fuzzer}/", - file_suffix="", - local_directory=active_corpus_dir, - ) - except ClientError as e: - if e.response["Error"]["Code"] == "NoSuchKey": - logging.debug("No active corpus exists for %s", fuzzer) - else: - raise - - new_corpus_dir = f"{fuzzer}.corpus_new" + new_corpus_dir = f"{OUTPUT}/corpus/{fuzzer}" if not os.path.exists(new_corpus_dir): os.makedirs(new_corpus_dir) @@ -180,16 +183,18 @@ def run_fuzzer(fuzzer: str, timeout: int): status.write( f"Timeout\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) + os.remove(out_path) else: process_fuzzer_output(result.stderr) with open(status_path, "w", encoding="utf-8") as status: status.write( f"OK\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) + os.remove(out_path) - s3.upload_build_directory_to_s3( - Path(new_corpus_dir), f"fuzzer/corpus/{fuzzer}", False - ) + # s3.upload_build_directory_to_s3( + # Path(new_corpus_dir), f"fuzzer/corpus/{fuzzer}", False + # ) def main(): @@ -215,14 +220,4 @@ def main(): if __name__ == "__main__": - from os import path, sys - - ACTIVE_DIR = path.dirname(path.abspath(__file__)) - sys.path.append((Path(path.dirname(ACTIVE_DIR)) / "ci").as_posix()) - from env_helper import ( # pylint: disable=import-error,no-name-in-module - S3_BUILDS_BUCKET, - ) - from s3_helper import S3Helper # pylint: disable=import-error,no-name-in-module - from stopwatch import Stopwatch # pylint: disable=import-error,no-name-in-module - main() From 0b82913507801367caa147627c8b97f99d3871df Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 01:11:45 +0000 Subject: 
[PATCH 096/353] fix style --- tests/ci/libfuzzer_test_check.py | 5 ++--- tests/fuzz/runner.py | 2 -- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index a4f31b1663d..b0cb375bc56 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -8,6 +8,8 @@ import zipfile from pathlib import Path from typing import List +from botocore.exceptions import ClientError + from build_download_helper import download_fuzzers from clickhouse_helper import CiLogsCredentials from docker_images_helper import DockerImage, get_docker_image, pull_image @@ -17,9 +19,6 @@ from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen -from botocore.exceptions import ClientError - - NO_CHANGES_MSG = "Nothing to run" s3 = S3Helper() diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index b4c174de6b1..3a91d8f62f8 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -10,8 +10,6 @@ import subprocess from pathlib import Path from time import sleep -from botocore.exceptions import ClientError - DEBUGGER = os.getenv("DEBUGGER", "") FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") OUTPUT = "/test_output" From b8f095b6260d647ba50a15094f98651161f2358c Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 02:23:38 +0000 Subject: [PATCH 097/353] fix upload corpus, fix s3 helper to allow listing more than 1000 --- tests/ci/libfuzzer_test_check.py | 8 ++++---- tests/ci/s3_helper.py | 22 +++++++++++++--------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index b0cb375bc56..19e72b82712 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -110,10 +110,10 @@ def download_corpus(corpus_path: str, fuzzer_name: str): logging.info("...downloaded %d units", len(units)) -def upload_corpus(fuzzers_path: str): - for file in os.listdir(f"{fuzzers_path}/corpus/"): +def upload_corpus(result_path: str): + for file in os.listdir(f"{result_path}/corpus/"): s3.upload_build_directory_to_s3( - Path(f"{fuzzers_path}/corpus/{file}"), f"fuzzer/corpus/{file}", False + Path(f"{result_path}/corpus/{file}"), f"fuzzer/corpus/{file}", False ) @@ -187,7 +187,7 @@ def main(): retcode = process.wait() if retcode == 0: logging.info("Run successfully") - upload_corpus(fuzzers_path) + upload_corpus(result_path) else: logging.info("Run failed") diff --git a/tests/ci/s3_helper.py b/tests/ci/s3_helper.py index 9a40ad1277f..7d5b68f0222 100644 --- a/tests/ci/s3_helper.py +++ b/tests/ci/s3_helper.py @@ -311,23 +311,27 @@ class S3Helper: def list_prefix( self, s3_prefix_path: str, bucket: str = S3_BUILDS_BUCKET ) -> List[str]: - objects = self.client.list_objects_v2(Bucket=bucket, Prefix=s3_prefix_path) + paginator = self.client.get_paginator('list_objects_v2') + pages = paginator.paginate(Bucket=bucket, Prefix=s3_prefix_path) result = [] - if "Contents" in objects: - for obj in objects["Contents"]: - result.append(obj["Key"]) + for page in pages: + if "Contents" in page: + for obj in page["Contents"]: + result.append(obj["Key"]) return result def list_prefix_non_recursive( self, s3_prefix_path: str, bucket: str = S3_BUILDS_BUCKET ) -> List[str]: - objects = self.client.list_objects_v2(Bucket=bucket, Prefix=s3_prefix_path) + paginator = self.client.get_paginator('list_objects_v2') + pages = paginator.paginate(Bucket=bucket, Prefix=s3_prefix_path) result = [] - if "Contents" in 
objects: - for obj in objects["Contents"]: - if "/" not in obj["Key"][len(s3_prefix_path) + 1 :]: - result.append(obj["Key"]) + for page in pages: + if "Contents" in page: + for obj in page["Contents"]: + if "/" not in obj["Key"][len(s3_prefix_path) + 1 :]: + result.append(obj["Key"]) return result From 4ba099cd7dd06ef180d5cec57c40597bf69b7051 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 17 Oct 2024 02:29:36 +0000 Subject: [PATCH 098/353] Automatic style fix --- tests/ci/s3_helper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ci/s3_helper.py b/tests/ci/s3_helper.py index 7d5b68f0222..46c206f0540 100644 --- a/tests/ci/s3_helper.py +++ b/tests/ci/s3_helper.py @@ -311,7 +311,7 @@ class S3Helper: def list_prefix( self, s3_prefix_path: str, bucket: str = S3_BUILDS_BUCKET ) -> List[str]: - paginator = self.client.get_paginator('list_objects_v2') + paginator = self.client.get_paginator("list_objects_v2") pages = paginator.paginate(Bucket=bucket, Prefix=s3_prefix_path) result = [] for page in pages: @@ -324,7 +324,7 @@ class S3Helper: def list_prefix_non_recursive( self, s3_prefix_path: str, bucket: str = S3_BUILDS_BUCKET ) -> List[str]: - paginator = self.client.get_paginator('list_objects_v2') + paginator = self.client.get_paginator("list_objects_v2") pages = paginator.paginate(Bucket=bucket, Prefix=s3_prefix_path) result = [] for page in pages: From 55d7563c48d4ce467badeaf55796bf8e83cd8173 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 12:42:51 +0000 Subject: [PATCH 099/353] zip corpus --- tests/ci/libfuzzer_test_check.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 19e72b82712..bfd3e5c4373 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -23,6 +23,15 @@ NO_CHANGES_MSG = "Nothing to run" s3 = S3Helper() +def zipdir(path, ziph): + # ziph is zipfile handle + for root, dirs, files in os.walk(path): + for file in files: + ziph.write(os.path.join(root, file), + os.path.relpath(os.path.join(root, file), + os.path.join(path, '..'))) + + def get_additional_envs(check_name, run_by_hash_num, run_by_hash_total): result = [] if "DatabaseReplicated" in check_name: @@ -111,10 +120,15 @@ def download_corpus(corpus_path: str, fuzzer_name: str): def upload_corpus(result_path: str): - for file in os.listdir(f"{result_path}/corpus/"): - s3.upload_build_directory_to_s3( - Path(f"{result_path}/corpus/{file}"), f"fuzzer/corpus/{file}", False - ) + with zipfile.ZipFile(f"{result_path}/corpus.zip", "w", zipfile.ZIP_DEFLATED) as zipf: + zipdir(f"{result_path}/corpus/", zipf) + s3.upload_file( + bucket=S3_BUILDS_BUCKET, file_path=f"{result_path}/corpus.zip", s3_path="fuzzer/corpus.zip" + ) + # for file in os.listdir(f"{result_path}/corpus/"): + # s3.upload_build_directory_to_s3( + # Path(f"{result_path}/corpus/{file}"), f"fuzzer/corpus/{file}", False + # ) def main(): From 846d3835f6b6dd5a8f226e36e572f1dd05190669 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 13:00:31 +0000 Subject: [PATCH 100/353] fix style --- tests/ci/libfuzzer_test_check.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index bfd3e5c4373..5bf03f269cb 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -27,9 +27,10 @@ def zipdir(path, ziph): # ziph is zipfile 
handle for root, dirs, files in os.walk(path): for file in files: - ziph.write(os.path.join(root, file), - os.path.relpath(os.path.join(root, file), - os.path.join(path, '..'))) + ziph.write( + os.path.join(root, file), + os.path.relpath(os.path.join(root, file), os.path.join(path, '..')), + ) def get_additional_envs(check_name, run_by_hash_num, run_by_hash_total): @@ -120,10 +121,14 @@ def download_corpus(corpus_path: str, fuzzer_name: str): def upload_corpus(result_path: str): - with zipfile.ZipFile(f"{result_path}/corpus.zip", "w", zipfile.ZIP_DEFLATED) as zipf: + with zipfile.ZipFile( + f"{result_path}/corpus.zip", "w", zipfile.ZIP_DEFLATED + ) as zipf: zipdir(f"{result_path}/corpus/", zipf) s3.upload_file( - bucket=S3_BUILDS_BUCKET, file_path=f"{result_path}/corpus.zip", s3_path="fuzzer/corpus.zip" + bucket=S3_BUILDS_BUCKET, + file_path=f"{result_path}/corpus.zip", + s3_path="fuzzer/corpus.zip", ) # for file in os.listdir(f"{result_path}/corpus/"): # s3.upload_build_directory_to_s3( From 8016e92ccce3306c5aea036594eaa8df9fa03487 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 13:12:04 +0000 Subject: [PATCH 101/353] fix style --- tests/ci/libfuzzer_test_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 5bf03f269cb..2e1a540b6a9 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -29,7 +29,7 @@ def zipdir(path, ziph): for file in files: ziph.write( os.path.join(root, file), - os.path.relpath(os.path.join(root, file), os.path.join(path, '..')), + os.path.relpath(os.path.join(root, file), os.path.join(path, "..")), ) From 034c5456a0764bc5b14ca149b1f98b9d57635520 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 13:23:23 +0000 Subject: [PATCH 102/353] fix style --- tests/ci/libfuzzer_test_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 2e1a540b6a9..513a1cfa353 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -28,7 +28,7 @@ def zipdir(path, ziph): for root, dirs, files in os.walk(path): for file in files: ziph.write( - os.path.join(root, file), + os.path.join(root, file), os.path.relpath(os.path.join(root, file), os.path.join(path, "..")), ) From 794c38ac4da3bed0d85131f31f3548c2a5ca0ea0 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 13:35:41 +0000 Subject: [PATCH 103/353] fix style --- tests/ci/libfuzzer_test_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 513a1cfa353..df46bb0daad 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -25,7 +25,7 @@ s3 = S3Helper() def zipdir(path, ziph): # ziph is zipfile handle - for root, dirs, files in os.walk(path): + for root, _, files in os.walk(path): for file in files: ziph.write( os.path.join(root, file), From ac3ee0477bcb3f0c42f11eccad78919ae5df22e9 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 16:29:19 +0000 Subject: [PATCH 104/353] fix --- tests/ci/libfuzzer_test_check.py | 14 ++++++++------ tests/fuzz/runner.py | 11 ++++++----- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index df46bb0daad..bed52d2a608 100644 --- a/tests/ci/libfuzzer_test_check.py +++ 
b/tests/ci/libfuzzer_test_check.py @@ -120,14 +120,14 @@ def download_corpus(corpus_path: str, fuzzer_name: str): logging.info("...downloaded %d units", len(units)) -def upload_corpus(result_path: str): +def upload_corpus(path: str): with zipfile.ZipFile( - f"{result_path}/corpus.zip", "w", zipfile.ZIP_DEFLATED + f"{path}/corpus.zip", "w", zipfile.ZIP_DEFLATED ) as zipf: - zipdir(f"{result_path}/corpus/", zipf) + zipdir(f"{path}/corpus/", zipf) s3.upload_file( bucket=S3_BUILDS_BUCKET, - file_path=f"{result_path}/corpus.zip", + file_path=f"{path}/corpus.zip", s3_path="fuzzer/corpus.zip", ) # for file in os.listdir(f"{result_path}/corpus/"): @@ -164,13 +164,15 @@ def main(): fuzzers_path = temp_path / "fuzzers" fuzzers_path.mkdir(parents=True, exist_ok=True) + corpus_path = fuzzers_path / "corpus" + corpus_path.mkdir(parents=True, exist_ok=True) download_fuzzers(check_name, reports_path, fuzzers_path) for file in os.listdir(fuzzers_path): if file.endswith("_fuzzer"): os.chmod(fuzzers_path / file, 0o777) - download_corpus(f"{fuzzers_path}/{file}.corpus", file) + download_corpus(f"{corpus_path}/{file}", file) elif file.endswith("_seed_corpus.zip"): corpus_path = fuzzers_path / (file.removesuffix("_seed_corpus.zip") + ".in") with zipfile.ZipFile(fuzzers_path / file, "r") as zfd: @@ -206,7 +208,7 @@ def main(): retcode = process.wait() if retcode == 0: logging.info("Run successfully") - upload_corpus(result_path) + upload_corpus(fuzzers_path) else: logging.info("Run failed") diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 3a91d8f62f8..1b2ae7b98d1 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -90,10 +90,10 @@ def run_fuzzer(fuzzer: str, timeout: int): if not path.exists() or not path.is_dir(): seed_corpus_dir = "" - active_corpus_dir = f"{fuzzer}.corpus" - new_corpus_dir = f"{OUTPUT}/corpus/{fuzzer}" - if not os.path.exists(new_corpus_dir): - os.makedirs(new_corpus_dir) + active_corpus_dir = f"corpus/{fuzzer}" + # new_corpus_dir = f"{OUTPUT}/corpus/{fuzzer}" + # if not os.path.exists(new_corpus_dir): + # os.makedirs(new_corpus_dir) options_file = f"{fuzzer}.options" custom_libfuzzer_options = "" @@ -136,7 +136,8 @@ def run_fuzzer(fuzzer: str, timeout: int): status_path = f"{OUTPUT}/{fuzzer}.status" out_path = f"{OUTPUT}/{fuzzer}.out" - cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {new_corpus_dir} {active_corpus_dir} {seed_corpus_dir}" + cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {active_corpus_dir} {seed_corpus_dir}" + # cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {new_corpus_dir} {active_corpus_dir} {seed_corpus_dir}" cmd_line += f" -exact_artifact_path={exact_artifact_path}" From 73438587f280911aec6f91662aa523419a2d710d Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 17 Oct 2024 16:35:58 +0000 Subject: [PATCH 105/353] Automatic style fix --- tests/ci/libfuzzer_test_check.py | 4 +--- tests/fuzz/runner.py | 4 +++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index bed52d2a608..c2ceea872a7 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -121,9 +121,7 @@ def download_corpus(corpus_path: str, fuzzer_name: str): def upload_corpus(path: str): - with zipfile.ZipFile( - f"{path}/corpus.zip", "w", zipfile.ZIP_DEFLATED - ) as zipf: + with zipfile.ZipFile(f"{path}/corpus.zip", "w", zipfile.ZIP_DEFLATED) as zipf: zipdir(f"{path}/corpus/", zipf) s3.upload_file( bucket=S3_BUILDS_BUCKET, diff --git a/tests/fuzz/runner.py 
b/tests/fuzz/runner.py index 1b2ae7b98d1..c23f4cbc31c 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -136,7 +136,9 @@ def run_fuzzer(fuzzer: str, timeout: int): status_path = f"{OUTPUT}/{fuzzer}.status" out_path = f"{OUTPUT}/{fuzzer}.out" - cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {active_corpus_dir} {seed_corpus_dir}" + cmd_line = ( + f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {active_corpus_dir} {seed_corpus_dir}" + ) # cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {new_corpus_dir} {active_corpus_dir} {seed_corpus_dir}" cmd_line += f" -exact_artifact_path={exact_artifact_path}" From 66bbf11e074855f9e758be76ec45eb002fe67505 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 17 Oct 2024 13:51:08 +0200 Subject: [PATCH 106/353] Allow to disable background cache download for reading metadata files --- src/Common/ProfileEvents.cpp | 1 + src/Core/Settings.cpp | 3 +++ src/Core/SettingsChangesHistory.cpp | 1 + src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp | 2 +- src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp | 4 ++-- src/IO/ReadSettings.h | 1 + src/Interpreters/Cache/FileSegment.cpp | 10 +++++----- src/Interpreters/Cache/FileSegment.h | 6 +++--- src/Interpreters/Context.cpp | 2 ++ 9 files changed, 19 insertions(+), 11 deletions(-) diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index ec10e25f74e..b6b669943e2 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -546,6 +546,7 @@ The server successfully detected this situation and will download merged part fr M(FilesystemCacheLoadMetadataMicroseconds, "Time spent loading filesystem cache metadata", ValueType::Microseconds) \ M(FilesystemCacheEvictedBytes, "Number of bytes evicted from filesystem cache", ValueType::Bytes) \ M(FilesystemCacheEvictedFileSegments, "Number of file segments evicted from filesystem cache", ValueType::Number) \ + M(FilesystemCacheBackgroundDownloadQueuePush, "Number of file segments sent for background download in filesystem cache", ValueType::Number) \ M(FilesystemCacheEvictionSkippedFileSegments, "Number of file segments skipped for eviction because of being in unreleasable state", ValueType::Number) \ M(FilesystemCacheEvictionSkippedEvictingFileSegments, "Number of file segments skipped for eviction because of being in evicting state", ValueType::Number) \ M(FilesystemCacheEvictionTries, "Number of filesystem cache eviction attempts", ValueType::Number) \ diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index cdaa305e804..b656c297288 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4842,6 +4842,9 @@ Limit on size of a single batch of file segments that a read buffer can request )", 0) \ M(UInt64, filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, 1000, R"( Wait time to lock cache for space reservation in filesystem cache +)", 0) \ + M(Bool, filesystem_cache_enable_background_download_for_metadata_files, true, R"( +Enable background download for metadata files in filesystem cache (related to background_download_threads cache settings) )", 0) \ M(UInt64, temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds, (10 * 60 * 1000), R"( Wait time to lock cache for space reservation for temporary data in filesystem cache diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index ad9499c6d86..46b491b3afc 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -104,6 +104,7 @@ static std::initializer_list completed_range.right); 
cache_file_reader.reset(); - file_segments->popFront(); + file_segments->completeAndPopFront(settings.filesystem_cache_allow_background_download); if (file_segments->empty() && !nextFileSegmentsBatch()) return false; diff --git a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp index 6aedc1f5d04..df6fb871772 100644 --- a/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskWriteBufferFromFile.cpp @@ -196,7 +196,7 @@ void FileSegmentRangeWriter::completeFileSegment() if (file_segment.isDetached() || file_segment.isCompleted()) return; - file_segment.complete(); + file_segment.complete(false); appendFilesystemCacheLog(file_segment); } @@ -210,7 +210,7 @@ void FileSegmentRangeWriter::jumpToPosition(size_t position) if (position < current_write_offset) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot jump backwards: {} < {}", position, current_write_offset); - file_segment.complete(); + file_segment.complete(false); file_segments.reset(); } expected_write_offset = position; diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index 7d6b9f10931..ac3d7fc9faf 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -106,6 +106,7 @@ struct ReadSettings bool enable_filesystem_cache_log = false; size_t filesystem_cache_segments_batch_size = 20; size_t filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = 1000; + bool filesystem_cache_allow_background_download = true; bool use_page_cache_for_disks_without_file_cache = false; bool read_from_page_cache_if_exists_otherwise_bypass_cache = false; diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index c356800fa57..944d685d2c1 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -627,7 +627,7 @@ void FileSegment::completePartAndResetDownloader() LOG_TEST(log, "Complete batch. ({})", getInfoForLogUnlocked(lk)); } -void FileSegment::complete() +void FileSegment::complete(bool allow_background_download) { ProfileEventTimeIncrement watch(ProfileEvents::FileSegmentCompleteMicroseconds); @@ -704,7 +704,7 @@ void FileSegment::complete() if (is_last_holder) { bool added_to_download_queue = false; - if (background_download_enabled && remote_file_reader) + if (allow_background_download && background_download_enabled && remote_file_reader) { added_to_download_queue = locked_key->addToDownloadQueue(offset(), segment_lock); /// Finish download in background. 
} @@ -1001,7 +1001,7 @@ void FileSegmentsHolder::reset() ProfileEvents::increment(ProfileEvents::FilesystemCacheUnusedHoldFileSegments, file_segments.size()); for (auto file_segment_it = file_segments.begin(); file_segment_it != file_segments.end();) - file_segment_it = completeAndPopFrontImpl(); + file_segment_it = completeAndPopFrontImpl(false); file_segments.clear(); } @@ -1010,9 +1010,9 @@ FileSegmentsHolder::~FileSegmentsHolder() reset(); } -FileSegments::iterator FileSegmentsHolder::completeAndPopFrontImpl() +FileSegments::iterator FileSegmentsHolder::completeAndPopFrontImpl(bool allow_background_download) { - front().complete(); + front().complete(allow_background_download); CurrentMetrics::sub(CurrentMetrics::FilesystemCacheHoldFileSegments); return file_segments.erase(file_segments.begin()); } diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index ee9aee1e354..9d796111659 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -189,7 +189,7 @@ public: * ========== Methods that must do cv.notify() ================== */ - void complete(); + void complete(bool allow_background_download); void completePartAndResetDownloader(); @@ -297,7 +297,7 @@ struct FileSegmentsHolder final : private boost::noncopyable String toString(bool with_state = false) const; - void popFront() { completeAndPopFrontImpl(); } + void completeAndPopFront(bool allow_background_download) { completeAndPopFrontImpl(allow_background_download); } FileSegment & front() { return *file_segments.front(); } const FileSegment & front() const { return *file_segments.front(); } @@ -319,7 +319,7 @@ struct FileSegmentsHolder final : private boost::noncopyable private: FileSegments file_segments{}; - FileSegments::iterator completeAndPopFrontImpl(); + FileSegments::iterator completeAndPopFrontImpl(bool allow_background_download); }; using FileSegmentsHolderPtr = std::unique_ptr; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 8962be59f86..edffa6cc469 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -239,6 +239,7 @@ namespace Setting extern const SettingsUInt64 use_structure_from_insertion_table_in_table_functions; extern const SettingsString workload; extern const SettingsString compatibility; + extern const SettingsBool filesystem_cache_enable_background_download_for_metadata_files; } namespace MergeTreeSetting @@ -5687,6 +5688,7 @@ ReadSettings Context::getReadSettings() const res.filesystem_cache_segments_batch_size = settings_ref[Setting::filesystem_cache_segments_batch_size]; res.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = settings_ref[Setting::filesystem_cache_reserve_space_wait_lock_timeout_milliseconds]; + res.filesystem_cache_allow_background_download = settings_ref[Setting::filesystem_cache_enable_background_download_for_metadata_files]; res.filesystem_cache_max_download_size = settings_ref[Setting::filesystem_cache_max_download_size]; res.skip_download_if_exceeds_query_cache = settings_ref[Setting::skip_download_if_exceeds_query_cache]; From 8b1608ee21c2c92a16d627e9917317599b4664f2 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 19:01:41 +0000 Subject: [PATCH 107/353] test --- tests/ci/build_download_helper.py | 3 ++- tests/ci/libfuzzer_test_check.py | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/ci/build_download_helper.py b/tests/ci/build_download_helper.py index 8482abb26e0..1d95aa3f547 100644 --- 
a/tests/ci/build_download_helper.py +++ b/tests/ci/build_download_helper.py @@ -275,5 +275,6 @@ def download_fuzzers( check_name, reports_path, result_path, - lambda x: x.endswith(("_fuzzer", ".dict", ".options", "_seed_corpus.zip")), + lambda x: x.endswith(("double_delta_decompress_fuzzer", ".dict", ".options", "_seed_corpus.zip")), + # lambda x: x.endswith(("_fuzzer", ".dict", ".options", "_seed_corpus.zip")), ) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index c2ceea872a7..4d9291ffc57 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -3,6 +3,7 @@ import argparse import logging import os +import subprocess import sys import zipfile from pathlib import Path @@ -121,6 +122,8 @@ def download_corpus(corpus_path: str, fuzzer_name: str): def upload_corpus(path: str): + logging.info("Upload corpus from path %s", path) + subprocess.check_call(f"ls -al {path}", shell=True) with zipfile.ZipFile(f"{path}/corpus.zip", "w", zipfile.ZIP_DEFLATED) as zipf: zipdir(f"{path}/corpus/", zipf) s3.upload_file( From 7ad42664da11c0af82469b26369e47357e9a4e54 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 17 Oct 2024 19:11:34 +0000 Subject: [PATCH 108/353] Automatic style fix --- tests/ci/build_download_helper.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/ci/build_download_helper.py b/tests/ci/build_download_helper.py index 1d95aa3f547..2532ad5e64e 100644 --- a/tests/ci/build_download_helper.py +++ b/tests/ci/build_download_helper.py @@ -275,6 +275,8 @@ def download_fuzzers( check_name, reports_path, result_path, - lambda x: x.endswith(("double_delta_decompress_fuzzer", ".dict", ".options", "_seed_corpus.zip")), + lambda x: x.endswith( + ("double_delta_decompress_fuzzer", ".dict", ".options", "_seed_corpus.zip") + ), # lambda x: x.endswith(("_fuzzer", ".dict", ".options", "_seed_corpus.zip")), ) From 10d346a1d4627857ecb61f3e3d913aa7ab0fafcd Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 19:35:06 +0000 Subject: [PATCH 109/353] test --- tests/ci/libfuzzer_test_check.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 4d9291ffc57..a559ba9ad6a 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -124,6 +124,7 @@ def download_corpus(corpus_path: str, fuzzer_name: str): def upload_corpus(path: str): logging.info("Upload corpus from path %s", path) subprocess.check_call(f"ls -al {path}", shell=True) + subprocess.check_call(f"ls -Ral {path}/corpus/", shell=True) with zipfile.ZipFile(f"{path}/corpus.zip", "w", zipfile.ZIP_DEFLATED) as zipf: zipdir(f"{path}/corpus/", zipf) s3.upload_file( From 6e334a2d635d8f26626c95cd60e12cb0489d3ed6 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 19:55:25 +0000 Subject: [PATCH 110/353] test --- tests/ci/libfuzzer_test_check.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index a559ba9ad6a..a78c33e0f72 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -174,7 +174,9 @@ def main(): for file in os.listdir(fuzzers_path): if file.endswith("_fuzzer"): os.chmod(fuzzers_path / file, 0o777) - download_corpus(f"{corpus_path}/{file}", file) + fuzzer_corpus_path = corpus_path / file + fuzzer_corpus_path.mkdir(parents=True, exist_ok=True) + download_corpus(fuzzer_corpus_path, file) elif 
file.endswith("_seed_corpus.zip"): corpus_path = fuzzers_path / (file.removesuffix("_seed_corpus.zip") + ".in") with zipfile.ZipFile(fuzzers_path / file, "r") as zfd: From 9f55730b6f3bd962e4f55fbe1c17bb451382d69f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 20:15:34 +0000 Subject: [PATCH 111/353] test --- tests/ci/libfuzzer_test_check.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index a78c33e0f72..3dcf36fdaa9 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -176,7 +176,9 @@ def main(): os.chmod(fuzzers_path / file, 0o777) fuzzer_corpus_path = corpus_path / file fuzzer_corpus_path.mkdir(parents=True, exist_ok=True) + subprocess.check_call(f"ls -Ral {corpus_path}", shell=True) download_corpus(fuzzer_corpus_path, file) + subprocess.check_call(f"ls -Ral {fuzzer_corpus_path}", shell=True) elif file.endswith("_seed_corpus.zip"): corpus_path = fuzzers_path / (file.removesuffix("_seed_corpus.zip") + ".in") with zipfile.ZipFile(fuzzers_path / file, "r") as zfd: From debc90d3f0dab0d71bf7c995322509ace394626f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 20:50:21 +0000 Subject: [PATCH 112/353] test --- tests/ci/libfuzzer_test_check.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 3dcf36fdaa9..fa0103deba0 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -180,9 +180,9 @@ def main(): download_corpus(fuzzer_corpus_path, file) subprocess.check_call(f"ls -Ral {fuzzer_corpus_path}", shell=True) elif file.endswith("_seed_corpus.zip"): - corpus_path = fuzzers_path / (file.removesuffix("_seed_corpus.zip") + ".in") + seed_corpus_path = fuzzers_path / (file.removesuffix("_seed_corpus.zip") + ".in") with zipfile.ZipFile(fuzzers_path / file, "r") as zfd: - zfd.extractall(corpus_path) + zfd.extractall(seed_corpus_path) result_path = temp_path / "result_path" result_path.mkdir(parents=True, exist_ok=True) From 01d147eadad27c2ab3e112f4d4f0d166e54cb67f Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Thu, 17 Oct 2024 20:56:26 +0000 Subject: [PATCH 113/353] Automatic style fix --- tests/ci/libfuzzer_test_check.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index fa0103deba0..1603e540f00 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -180,7 +180,9 @@ def main(): download_corpus(fuzzer_corpus_path, file) subprocess.check_call(f"ls -Ral {fuzzer_corpus_path}", shell=True) elif file.endswith("_seed_corpus.zip"): - seed_corpus_path = fuzzers_path / (file.removesuffix("_seed_corpus.zip") + ".in") + seed_corpus_path = fuzzers_path / ( + file.removesuffix("_seed_corpus.zip") + ".in" + ) with zipfile.ZipFile(fuzzers_path / file, "r") as zfd: zfd.extractall(seed_corpus_path) From e85ce99262db93753e59e636c69709770e38b3ed Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 21:09:56 +0000 Subject: [PATCH 114/353] test --- tests/ci/libfuzzer_test_check.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 1603e540f00..e8c43070e4f 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -175,7 +175,6 @@ def main(): if file.endswith("_fuzzer"): os.chmod(fuzzers_path / file, 0o777) 
fuzzer_corpus_path = corpus_path / file - fuzzer_corpus_path.mkdir(parents=True, exist_ok=True) subprocess.check_call(f"ls -Ral {corpus_path}", shell=True) download_corpus(fuzzer_corpus_path, file) subprocess.check_call(f"ls -Ral {fuzzer_corpus_path}", shell=True) From 1624dc3e677d8613ff93c227399ba39c7fbb2407 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 21:25:08 +0000 Subject: [PATCH 115/353] zip corpus --- tests/ci/build_download_helper.py | 5 +---- tests/ci/libfuzzer_test_check.py | 5 ----- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/tests/ci/build_download_helper.py b/tests/ci/build_download_helper.py index 2532ad5e64e..8482abb26e0 100644 --- a/tests/ci/build_download_helper.py +++ b/tests/ci/build_download_helper.py @@ -275,8 +275,5 @@ def download_fuzzers( check_name, reports_path, result_path, - lambda x: x.endswith( - ("double_delta_decompress_fuzzer", ".dict", ".options", "_seed_corpus.zip") - ), - # lambda x: x.endswith(("_fuzzer", ".dict", ".options", "_seed_corpus.zip")), + lambda x: x.endswith(("_fuzzer", ".dict", ".options", "_seed_corpus.zip")), ) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index e8c43070e4f..fbf0bd87fd7 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -122,9 +122,6 @@ def download_corpus(corpus_path: str, fuzzer_name: str): def upload_corpus(path: str): - logging.info("Upload corpus from path %s", path) - subprocess.check_call(f"ls -al {path}", shell=True) - subprocess.check_call(f"ls -Ral {path}/corpus/", shell=True) with zipfile.ZipFile(f"{path}/corpus.zip", "w", zipfile.ZIP_DEFLATED) as zipf: zipdir(f"{path}/corpus/", zipf) s3.upload_file( @@ -175,9 +172,7 @@ def main(): if file.endswith("_fuzzer"): os.chmod(fuzzers_path / file, 0o777) fuzzer_corpus_path = corpus_path / file - subprocess.check_call(f"ls -Ral {corpus_path}", shell=True) download_corpus(fuzzer_corpus_path, file) - subprocess.check_call(f"ls -Ral {fuzzer_corpus_path}", shell=True) elif file.endswith("_seed_corpus.zip"): seed_corpus_path = fuzzers_path / ( file.removesuffix("_seed_corpus.zip") + ".in" From c67b20b80a55e1c678c7e699f26172326c30d58e Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 17 Oct 2024 21:35:48 +0000 Subject: [PATCH 116/353] fix style --- tests/ci/libfuzzer_test_check.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index fbf0bd87fd7..c4c6ca0cdf2 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -3,7 +3,6 @@ import argparse import logging import os -import subprocess import sys import zipfile from pathlib import Path From 5ee699d0597804a6d66c161ab3bba9d282fe7519 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 18 Oct 2024 00:42:37 +0000 Subject: [PATCH 117/353] download corpus zip --- tests/ci/libfuzzer_test_check.py | 36 ++++++++++++++------------------ 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index c4c6ca0cdf2..bb2eb726341 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -99,25 +99,30 @@ def parse_args(): return parser.parse_args() -def download_corpus(corpus_path: str, fuzzer_name: str): - logging.info("Download corpus for %s ...", fuzzer_name) - - units = [] +def download_corpus(path: str): + logging.info("Download corpus...") try: - units = s3.download_files( + s3.download_file( 
bucket=S3_BUILDS_BUCKET, - s3_path=f"fuzzer/corpus/{fuzzer_name}/", - file_suffix="", - local_directory=corpus_path, + s3_path=f"fuzzer/corpus.zip", + local_file_path=path, ) except ClientError as e: if e.response["Error"]["Code"] == "NoSuchKey": - logging.debug("No active corpus exists for %s", fuzzer_name) + logging.debug("No active corpus exists") else: raise - logging.info("...downloaded %d units", len(units)) + with zipfile.ZipFile(f"{path}/corpus.zip", "r") as zipf: + zipf.extractall(path) + os.remove(f"{path}/corpus.zip") + + units = 0 + for _, _, files in os.walk(path): + units += len(files) + + logging.info("...downloaded %d units", units) def upload_corpus(path: str): @@ -128,10 +133,6 @@ def upload_corpus(path: str): file_path=f"{path}/corpus.zip", s3_path="fuzzer/corpus.zip", ) - # for file in os.listdir(f"{result_path}/corpus/"): - # s3.upload_build_directory_to_s3( - # Path(f"{result_path}/corpus/{file}"), f"fuzzer/corpus/{file}", False - # ) def main(): @@ -162,16 +163,13 @@ def main(): fuzzers_path = temp_path / "fuzzers" fuzzers_path.mkdir(parents=True, exist_ok=True) - corpus_path = fuzzers_path / "corpus" - corpus_path.mkdir(parents=True, exist_ok=True) + download_corpus(fuzzers_path) download_fuzzers(check_name, reports_path, fuzzers_path) for file in os.listdir(fuzzers_path): if file.endswith("_fuzzer"): os.chmod(fuzzers_path / file, 0o777) - fuzzer_corpus_path = corpus_path / file - download_corpus(fuzzer_corpus_path, file) elif file.endswith("_seed_corpus.zip"): seed_corpus_path = fuzzers_path / ( file.removesuffix("_seed_corpus.zip") + ".in" @@ -188,8 +186,6 @@ def main(): check_name, run_by_hash_num, run_by_hash_total ) - # additional_envs.append("CI=1") - ci_logs_credentials = CiLogsCredentials(Path(temp_path) / "export-logs-config.sh") ci_logs_args = ci_logs_credentials.get_docker_arguments( pr_info, stopwatch.start_time_str, check_name From 105f673522eea74a58d833abd57666ca7f52c11f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Fri, 18 Oct 2024 00:54:20 +0000 Subject: [PATCH 118/353] fix --- tests/ci/libfuzzer_test_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index bb2eb726341..b7f62836dea 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -105,7 +105,7 @@ def download_corpus(path: str): try: s3.download_file( bucket=S3_BUILDS_BUCKET, - s3_path=f"fuzzer/corpus.zip", + s3_path="fuzzer/corpus.zip", local_file_path=path, ) except ClientError as e: From 5c422be620c9c05495f2dbecf7662804487c8492 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 18 Oct 2024 12:05:48 +0200 Subject: [PATCH 119/353] Remove part of the changes, to be moved to Sync --- src/Core/Settings.cpp | 3 --- src/Core/SettingsChangesHistory.cpp | 1 - src/Interpreters/Cache/FileSegment.cpp | 2 ++ src/Interpreters/Context.cpp | 2 -- 4 files changed, 2 insertions(+), 6 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index b656c297288..cdaa305e804 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4842,9 +4842,6 @@ Limit on size of a single batch of file segments that a read buffer can request )", 0) \ M(UInt64, filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, 1000, R"( Wait time to lock cache for space reservation in filesystem cache -)", 0) \ - M(Bool, filesystem_cache_enable_background_download_for_metadata_files, true, R"( -Enable background download for metadata files in filesystem cache (related to 
background_download_threads cache settings) )", 0) \ M(UInt64, temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds, (10 * 60 * 1000), R"( Wait time to lock cache for space reservation for temporary data in filesystem cache diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 46b491b3afc..ad9499c6d86 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -104,7 +104,6 @@ static std::initializer_listaddToDownloadQueue(offset(), segment_lock); /// Finish download in background. } diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index edffa6cc469..8962be59f86 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -239,7 +239,6 @@ namespace Setting extern const SettingsUInt64 use_structure_from_insertion_table_in_table_functions; extern const SettingsString workload; extern const SettingsString compatibility; - extern const SettingsBool filesystem_cache_enable_background_download_for_metadata_files; } namespace MergeTreeSetting @@ -5688,7 +5687,6 @@ ReadSettings Context::getReadSettings() const res.filesystem_cache_segments_batch_size = settings_ref[Setting::filesystem_cache_segments_batch_size]; res.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = settings_ref[Setting::filesystem_cache_reserve_space_wait_lock_timeout_milliseconds]; - res.filesystem_cache_allow_background_download = settings_ref[Setting::filesystem_cache_enable_background_download_for_metadata_files]; res.filesystem_cache_max_download_size = settings_ref[Setting::filesystem_cache_max_download_size]; res.skip_download_if_exceeds_query_cache = settings_ref[Setting::skip_download_if_exceeds_query_cache]; From c97c6250fcdcd6059753738bd12928a1e3fb2ac7 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 18 Oct 2024 14:06:33 +0200 Subject: [PATCH 120/353] Fix unit test --- src/Interpreters/tests/gtest_filecache.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Interpreters/tests/gtest_filecache.cpp b/src/Interpreters/tests/gtest_filecache.cpp index 007b31d9fdc..de767947428 100644 --- a/src/Interpreters/tests/gtest_filecache.cpp +++ b/src/Interpreters/tests/gtest_filecache.cpp @@ -253,7 +253,7 @@ void download(FileSegment & file_segment) download(cache_base_path, file_segment); ASSERT_EQ(file_segment.state(), State::DOWNLOADING); - file_segment.complete(); + file_segment.complete(false); ASSERT_EQ(file_segment.state(), State::DOWNLOADED); } @@ -263,7 +263,7 @@ void assertDownloadFails(FileSegment & file_segment) ASSERT_EQ(file_segment.getDownloadedSize(), 0); std::string failure_reason; ASSERT_FALSE(file_segment.reserve(file_segment.range().size(), 1000, failure_reason)); - file_segment.complete(); + file_segment.complete(false); } void download(const HolderPtr & holder) @@ -971,7 +971,7 @@ TEST_F(FileCacheTest, temporaryData) ASSERT_TRUE(segment->getOrSetDownloader() == DB::FileSegment::getCallerId()); ASSERT_TRUE(segment->reserve(segment->range().size(), 1000, failure_reason)); download(*segment); - segment->complete(); + segment->complete(false); } } From 25ab525c0906a4b6fd3c5cf83f29b1d53f327400 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 04:28:48 +0000 Subject: [PATCH 121/353] job report --- tests/ci/libfuzzer_test_check.py | 79 +++++++++++++++++++++++++++++++- 1 file changed, 78 insertions(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index b7f62836dea..bab624fb144 
100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -3,6 +3,7 @@ import argparse import logging import os +import re import sys import zipfile from pathlib import Path @@ -15,6 +16,7 @@ from clickhouse_helper import CiLogsCredentials from docker_images_helper import DockerImage, get_docker_image, pull_image from env_helper import REPO_COPY, REPORT_PATH, S3_BUILDS_BUCKET, TEMP_PATH from pr_info import PRInfo +from report import JobReport, TestResult from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen @@ -135,6 +137,67 @@ def upload_corpus(path: str): ) +def process_error(path: Path) -> list: + ERROR = r"^==\d+==\s?ERROR: (\S+): (.*)" + # error_source = "" + # error_reason = "" + # test_unit = "" + TEST_UNIT_LINE = r"artifact_prefix='.*\/'; Test unit written to (.*)" + error_info = [] + is_error = False + + with open(path, "r") as file: + for line in file: + if is_error: + error_info.append(line) + # match = re.search(TEST_UNIT_LINE, line) + # if match: + # test_unit = match.group(1) + continue + + match = re.search(ERROR, line) + if match: + error_info.append(line) + # error_source = match.group(1) + # error_reason = match.group(2) + is_error = True + + return error_info + + +def read_status(status_path: Path): + result = [] + with open(status_path, "r") as file: + for line in file: + result.append(line) + return result + + +def process_results(result_path: Path): + test_results = [] + oks = 0 + timeouts = 0 + fails = 0 + for file in result_path.glob("*.status"): + fuzzer = file.stem + file_path = file.parent.with_stem(fuzzer) + file_path_unit = file_path.with_suffix(".unit") + file_path_out = file_path.with_suffix(".out") + status = read_status(file) + if status[0] == "OK": + oks += 1 + elif status[0] == "Timeout": + timeouts += 1 + else: + fails += 1 + result = TestResult(fuzzer, status[0], status[2]) + if file_path_unit.exists: + result.set_raw_logs("\n".join(process_error(file_path_out))) + test_results.append(result) + + return [oks, timeouts, fails, test_results] + + def main(): logging.basicConfig(level=logging.INFO) @@ -209,7 +272,21 @@ def main(): else: logging.info("Run failed") - sys.exit(0) + results = process_results(reports_path) + + success = results[1] == 0 and results[2] == 0 + + JobReport( + description=f"OK: {results[0]}, Timeout: {results[1]}, FAIL: {results[2]}", + test_results=results[3], + status= "OK" if success else "FAILURE", + start_time=stopwatch.start_time_str, + duration=stopwatch.duration_seconds, + additional_files=[], + ).dump() + + if not success: + sys.exit(1) if __name__ == "__main__": From 14166b377035febb53c6e0054e1b3664c71cee58 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 04:41:36 +0000 Subject: [PATCH 122/353] fix style --- tests/ci/libfuzzer_test_check.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index bab624fb144..fc1e1f940f2 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -142,11 +142,11 @@ def process_error(path: Path) -> list: # error_source = "" # error_reason = "" # test_unit = "" - TEST_UNIT_LINE = r"artifact_prefix='.*\/'; Test unit written to (.*)" + # TEST_UNIT_LINE = r"artifact_prefix='.*\/'; Test unit written to (.*)" error_info = [] is_error = False - with open(path, "r") as file: + with open(path, "r", encoding="utf-8") as file: for line in file: if is_error: error_info.append(line) @@ -167,7 
+167,7 @@ def process_error(path: Path) -> list: def read_status(status_path: Path): result = [] - with open(status_path, "r") as file: + with open(status_path, "r", encoding="utf-8") as file: for line in file: result.append(line) return result @@ -279,7 +279,7 @@ def main(): JobReport( description=f"OK: {results[0]}, Timeout: {results[1]}, FAIL: {results[2]}", test_results=results[3], - status= "OK" if success else "FAILURE", + status="OK" if success else "FAILURE", start_time=stopwatch.start_time_str, duration=stopwatch.duration_seconds, additional_files=[], From 4edc84d262b3ae97b52f244d0376afa1f5e4c497 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 05:39:36 +0000 Subject: [PATCH 123/353] fix --- tests/ci/libfuzzer_test_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index fc1e1f940f2..fb91a4e50a2 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -279,7 +279,7 @@ def main(): JobReport( description=f"OK: {results[0]}, Timeout: {results[1]}, FAIL: {results[2]}", test_results=results[3], - status="OK" if success else "FAILURE", + status="SUCCESS" if success else "FAILURE", start_time=stopwatch.start_time_str, duration=stopwatch.duration_seconds, additional_files=[], From ca6ff66591055308319361b21fa2a5b3a0035463 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 06:36:10 +0000 Subject: [PATCH 124/353] fix --- tests/ci/libfuzzer_test_check.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index fb91a4e50a2..e0a985ac7b5 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -16,7 +16,7 @@ from clickhouse_helper import CiLogsCredentials from docker_images_helper import DockerImage, get_docker_image, pull_image from env_helper import REPO_COPY, REPORT_PATH, S3_BUILDS_BUCKET, TEMP_PATH from pr_info import PRInfo -from report import JobReport, TestResult +from report import FAILURE, SUCCESS, JobReport, TestResult from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen @@ -279,7 +279,7 @@ def main(): JobReport( description=f"OK: {results[0]}, Timeout: {results[1]}, FAIL: {results[2]}", test_results=results[3], - status="SUCCESS" if success else "FAILURE", + status=SUCCESS if success else FAILURE, start_time=stopwatch.start_time_str, duration=stopwatch.duration_seconds, additional_files=[], From ccbd9559adc0262c1ed5b1840350ecc047c61451 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 14:09:40 +0000 Subject: [PATCH 125/353] test --- tests/ci/build_download_helper.py | 3 ++- tests/ci/ci.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/ci/build_download_helper.py b/tests/ci/build_download_helper.py index 8482abb26e0..47ea772b502 100644 --- a/tests/ci/build_download_helper.py +++ b/tests/ci/build_download_helper.py @@ -275,5 +275,6 @@ def download_fuzzers( check_name, reports_path, result_path, - lambda x: x.endswith(("_fuzzer", ".dict", ".options", "_seed_corpus.zip")), + lambda x: x.endswith(("test_basic_fuzzer", ".dict", ".options", "_seed_corpus.zip")), + # lambda x: x.endswith(("_fuzzer", ".dict", ".options", "_seed_corpus.zip")), ) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 10431ce038f..e820f445e7a 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1284,6 +1284,7 @@ def main() -> int: dump_to_file=True, ) 
print(f"Job report url: [{check_url}]") + print(job_report) prepared_events = prepare_tests_results_for_clickhouse( pr_info, job_report.test_results, From 31bf93c58f8dd198b396a3b888beb1e9f7557890 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 14:21:19 +0000 Subject: [PATCH 126/353] test --- tests/ci/ci.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index e820f445e7a..10431ce038f 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1284,7 +1284,6 @@ def main() -> int: dump_to_file=True, ) print(f"Job report url: [{check_url}]") - print(job_report) prepared_events = prepare_tests_results_for_clickhouse( pr_info, job_report.test_results, From 3f0eacb47e80a5054f01374c00bd142303e679a7 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Sat, 19 Oct 2024 14:44:03 +0000 Subject: [PATCH 127/353] Automatic style fix --- tests/ci/build_download_helper.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/ci/build_download_helper.py b/tests/ci/build_download_helper.py index 47ea772b502..d7123564890 100644 --- a/tests/ci/build_download_helper.py +++ b/tests/ci/build_download_helper.py @@ -275,6 +275,8 @@ def download_fuzzers( check_name, reports_path, result_path, - lambda x: x.endswith(("test_basic_fuzzer", ".dict", ".options", "_seed_corpus.zip")), + lambda x: x.endswith( + ("test_basic_fuzzer", ".dict", ".options", "_seed_corpus.zip") + ), # lambda x: x.endswith(("_fuzzer", ".dict", ".options", "_seed_corpus.zip")), ) From daa32561c9a4f36352a9a65499568ee92d41eff9 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 16:32:36 +0000 Subject: [PATCH 128/353] test --- tests/ci/ci.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 10431ce038f..e820f445e7a 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1284,6 +1284,7 @@ def main() -> int: dump_to_file=True, ) print(f"Job report url: [{check_url}]") + print(job_report) prepared_events = prepare_tests_results_for_clickhouse( pr_info, job_report.test_results, From 8df6911a8375f072a0592b86c4015041d749f87c Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 17:13:47 +0000 Subject: [PATCH 129/353] fix --- tests/ci/libfuzzer_test_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index e0a985ac7b5..4a6f2875a4c 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -272,7 +272,7 @@ def main(): else: logging.info("Run failed") - results = process_results(reports_path) + results = process_results(result_path) success = results[1] == 0 and results[2] == 0 From 767daedd0d02adb36e3c9b8980a2d2effcb1f1ba Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 17:38:50 +0000 Subject: [PATCH 130/353] fix --- tests/ci/libfuzzer_test_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 4a6f2875a4c..e9f62c26cff 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -180,7 +180,7 @@ def process_results(result_path: Path): fails = 0 for file in result_path.glob("*.status"): fuzzer = file.stem - file_path = file.parent.with_stem(fuzzer) + file_path = file.parent / fuzzer file_path_unit = file_path.with_suffix(".unit") file_path_out = file_path.with_suffix(".out") status = read_status(file) From af8c50deeb3b93a753e8974960d06f57424f37fa Mon Sep 17 
00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 18:09:12 +0000 Subject: [PATCH 131/353] fix --- tests/ci/libfuzzer_test_check.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index e9f62c26cff..92f1336aa4b 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -148,6 +148,7 @@ def process_error(path: Path) -> list: with open(path, "r", encoding="utf-8") as file: for line in file: + line = line.rstrip("\n") if is_error: error_info.append(line) # match = re.search(TEST_UNIT_LINE, line) @@ -169,7 +170,7 @@ def read_status(status_path: Path): result = [] with open(status_path, "r", encoding="utf-8") as file: for line in file: - result.append(line) + result.append(line.rstrip("\n")) return result From 610630e20d2698a9ece08ec3d5bc94ec6b8ed735 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 18:51:25 +0000 Subject: [PATCH 132/353] fix --- tests/ci/libfuzzer_test_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 92f1336aa4b..6005a3bdc47 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -191,7 +191,7 @@ def process_results(result_path: Path): timeouts += 1 else: fails += 1 - result = TestResult(fuzzer, status[0], status[2]) + result = TestResult(fuzzer, status[0], float(status[2])) if file_path_unit.exists: result.set_raw_logs("\n".join(process_error(file_path_out))) test_results.append(result) From 8c14c33e5c1a2765049c4a9f21e2f1e1f671fc16 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 19:17:13 +0000 Subject: [PATCH 133/353] test --- tests/ci/build_download_helper.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/ci/build_download_helper.py b/tests/ci/build_download_helper.py index d7123564890..8482abb26e0 100644 --- a/tests/ci/build_download_helper.py +++ b/tests/ci/build_download_helper.py @@ -275,8 +275,5 @@ def download_fuzzers( check_name, reports_path, result_path, - lambda x: x.endswith( - ("test_basic_fuzzer", ".dict", ".options", "_seed_corpus.zip") - ), - # lambda x: x.endswith(("_fuzzer", ".dict", ".options", "_seed_corpus.zip")), + lambda x: x.endswith(("_fuzzer", ".dict", ".options", "_seed_corpus.zip")), ) From 0a1f24e364a2e22e7235472dd5ef9d2f47fddc87 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 21:51:59 +0000 Subject: [PATCH 134/353] fix --- tests/ci/libfuzzer_test_check.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 6005a3bdc47..33b598ef0a6 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -185,15 +185,19 @@ def process_results(result_path: Path): file_path_unit = file_path.with_suffix(".unit") file_path_out = file_path.with_suffix(".out") status = read_status(file) + result = TestResult(fuzzer, status[0], float(status[2])) if status[0] == "OK": oks += 1 elif status[0] == "Timeout": timeouts += 1 + if file_path_out.exists(): + result.set_log_files([file_path_out]) else: fails += 1 - result = TestResult(fuzzer, status[0], float(status[2])) - if file_path_unit.exists: - result.set_raw_logs("\n".join(process_error(file_path_out))) + if file_path_out.exists(): + result.set_raw_logs("\n".join(process_error(file_path_out))) + if file_path_unit.exists: + 
result.set_log_files([file_path_unit]) test_results.append(result) return [oks, timeouts, fails, test_results] From ee989751aa1ef8c0de1352c4257ccbf09b3afbf8 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sat, 19 Oct 2024 23:46:02 +0000 Subject: [PATCH 135/353] fix --- tests/ci/libfuzzer_test_check.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 33b598ef0a6..703ff861eb7 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -191,13 +191,13 @@ def process_results(result_path: Path): elif status[0] == "Timeout": timeouts += 1 if file_path_out.exists(): - result.set_log_files([file_path_out]) + result.set_log_files([str(file_path_out)]) else: fails += 1 if file_path_out.exists(): result.set_raw_logs("\n".join(process_error(file_path_out))) if file_path_unit.exists: - result.set_log_files([file_path_unit]) + result.set_log_files([str(file_path_unit)]) test_results.append(result) return [oks, timeouts, fails, test_results] From 157f7c0f471839942d9a34aa9fc3e5ea18939bed Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 20 Oct 2024 11:39:27 +0000 Subject: [PATCH 136/353] fix --- tests/ci/libfuzzer_test_check.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 703ff861eb7..cba3b3410db 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -191,13 +191,13 @@ def process_results(result_path: Path): elif status[0] == "Timeout": timeouts += 1 if file_path_out.exists(): - result.set_log_files([str(file_path_out)]) + result.set_log_files(f"[{file_path_unit}]") else: fails += 1 if file_path_out.exists(): result.set_raw_logs("\n".join(process_error(file_path_out))) if file_path_unit.exists: - result.set_log_files([str(file_path_unit)]) + result.set_log_files(f"[{file_path_unit}]") test_results.append(result) return [oks, timeouts, fails, test_results] From 59c8fe9a240f9aee3344d09d5e16b59cbb58b581 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 20 Oct 2024 12:38:28 +0000 Subject: [PATCH 137/353] fix --- tests/ci/libfuzzer_test_check.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index cba3b3410db..dbc2a2cc61b 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -191,13 +191,13 @@ def process_results(result_path: Path): elif status[0] == "Timeout": timeouts += 1 if file_path_out.exists(): - result.set_log_files(f"[{file_path_unit}]") + result.set_log_files(f"['{file_path_unit}']") else: fails += 1 if file_path_out.exists(): result.set_raw_logs("\n".join(process_error(file_path_out))) if file_path_unit.exists: - result.set_log_files(f"[{file_path_unit}]") + result.set_log_files(f"['{file_path_unit}']") test_results.append(result) return [oks, timeouts, fails, test_results] From 4b09224876c576af908ce95aa0ef295ce4820731 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 20 Oct 2024 14:05:18 +0000 Subject: [PATCH 138/353] fix --- tests/ci/libfuzzer_test_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index dbc2a2cc61b..7012bd08418 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -191,7 +191,7 @@ def process_results(result_path: Path): elif status[0] == "Timeout": 
timeouts += 1 if file_path_out.exists(): - result.set_log_files(f"['{file_path_unit}']") + result.set_log_files(f"['{file_path_out}']") else: fails += 1 if file_path_out.exists(): From 567d113697a29e06efefea9e0e3089fd1114622d Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 20 Oct 2024 15:19:22 +0000 Subject: [PATCH 139/353] fix --- tests/ci/libfuzzer_test_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 7012bd08418..6899083e837 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -196,7 +196,7 @@ def process_results(result_path: Path): fails += 1 if file_path_out.exists(): result.set_raw_logs("\n".join(process_error(file_path_out))) - if file_path_unit.exists: + if file_path_unit.exists(): result.set_log_files(f"['{file_path_unit}']") test_results.append(result) From 5c3e9efdafa0d99328154715b6e2755dbcbcc0a5 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 20 Oct 2024 18:23:34 +0000 Subject: [PATCH 140/353] fix, cleanup --- tests/ci/libfuzzer_test_check.py | 2 ++ tests/fuzz/runner.py | 58 -------------------------------- 2 files changed, 2 insertions(+), 58 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 6899083e837..d7e79cc26fe 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -198,6 +198,8 @@ def process_results(result_path: Path): result.set_raw_logs("\n".join(process_error(file_path_out))) if file_path_unit.exists(): result.set_log_files(f"['{file_path_unit}']") + elif file_path_out.exists(): + result.set_log_files(f"['{file_path_out}']") test_results.append(result) return [oks, timeouts, fails, test_results] diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index c23f4cbc31c..d3129a05b7c 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -32,46 +32,6 @@ class Stopwatch: self.start_time_str_value = self.start_time.strftime("%Y-%m-%d %H:%M:%S") -def report(source: str, reason: str, call_stack: list, test_unit: str): - logging.info("########### REPORT: %s %s %s", source, reason, test_unit) - logging.info("".join(call_stack)) - logging.info("########### END OF REPORT ###########") - - -# pylint: disable=unused-argument -def process_fuzzer_output(output: str): - pass - - -def process_error(error: str) -> list: - ERROR = r"^==\d+==\s?ERROR: (\S+): (.*)" - error_source = "" - error_reason = "" - test_unit = "" - TEST_UNIT_LINE = r"artifact_prefix='.*\/'; Test unit written to (.*)" - error_info = [] - is_error = False - - # pylint: disable=unused-variable - for line_num, line in enumerate(error.splitlines(), 1): - if is_error: - error_info.append(line) - match = re.search(TEST_UNIT_LINE, line) - if match: - test_unit = match.group(1) - continue - - match = re.search(ERROR, line) - if match: - error_info.append(line) - error_source = match.group(1) - error_reason = match.group(2) - is_error = True - - report(error_source, error_reason, error_info, test_unit) - return error_info - - def kill_fuzzer(fuzzer: str): with subprocess.Popen(["ps", "-A", "u"], stdout=subprocess.PIPE) as p: out, _ = p.communicate() @@ -91,10 +51,6 @@ def run_fuzzer(fuzzer: str, timeout: int): seed_corpus_dir = "" active_corpus_dir = f"corpus/{fuzzer}" - # new_corpus_dir = f"{OUTPUT}/corpus/{fuzzer}" - # if not os.path.exists(new_corpus_dir): - # os.makedirs(new_corpus_dir) - options_file = f"{fuzzer}.options" custom_libfuzzer_options = "" fuzzer_arguments = "" 
@@ -139,7 +95,6 @@ def run_fuzzer(fuzzer: str, timeout: int): cmd_line = ( f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {active_corpus_dir} {seed_corpus_dir}" ) - # cmd_line = f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {new_corpus_dir} {active_corpus_dir} {seed_corpus_dir}" cmd_line += f" -exact_artifact_path={exact_artifact_path}" @@ -169,34 +124,24 @@ def run_fuzzer(fuzzer: str, timeout: int): timeout=timeout, ) except subprocess.CalledProcessError as e: - # print("Command failed with error:", e) - logging.info("Stderr output: %s", e.stderr) with open(status_path, "w", encoding="utf-8") as status: status.write( f"FAIL\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) except subprocess.TimeoutExpired as e: - logging.info("Timeout for %s", cmd_line) kill_fuzzer(fuzzer) sleep(10) - process_fuzzer_output(e.stderr) with open(status_path, "w", encoding="utf-8") as status: status.write( f"Timeout\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) - os.remove(out_path) else: - process_fuzzer_output(result.stderr) with open(status_path, "w", encoding="utf-8") as status: status.write( f"OK\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) os.remove(out_path) - # s3.upload_build_directory_to_s3( - # Path(new_corpus_dir), f"fuzzer/corpus/{fuzzer}", False - # ) - def main(): logging.basicConfig(level=logging.INFO) @@ -216,9 +161,6 @@ def main(): subprocess.check_call(f"ls -al {OUTPUT}", shell=True) - # ch_helper = ClickHouseHelper() - # ch_helper.insert_events_into(db="default", table="checks", events=prepared_results) - if __name__ == "__main__": main() From f2b741202d432dce239302363bf81a607d6b5344 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 20 Oct 2024 18:38:35 +0000 Subject: [PATCH 141/353] rename to clickhouse_fuzzer, fix --- tests/fuzz/build.sh | 3 +++ tests/fuzz/runner.py | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/fuzz/build.sh b/tests/fuzz/build.sh index 12f41f6e079..f60336e6b53 100755 --- a/tests/fuzz/build.sh +++ b/tests/fuzz/build.sh @@ -1,5 +1,8 @@ #!/bin/bash -eu +# rename clickhouse +mv $OUT/clickhouse $OUT/clickhouse_fuzzer + # copy fuzzer options and dictionaries cp $SRC/tests/fuzz/*.dict $OUT/ cp $SRC/tests/fuzz/*.options $OUT/ diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index d3129a05b7c..c84e34ffdbd 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -113,7 +113,7 @@ def run_fuzzer(fuzzer: str, timeout: int): stopwatch = Stopwatch() try: with open(out_path, "wb") as out: - result = subprocess.run( + subprocess.run( cmd_line, stderr=out, stdout=subprocess.DEVNULL, @@ -123,12 +123,12 @@ def run_fuzzer(fuzzer: str, timeout: int): errors="replace", timeout=timeout, ) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: with open(status_path, "w", encoding="utf-8") as status: status.write( f"FAIL\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) - except subprocess.TimeoutExpired as e: + except subprocess.TimeoutExpired: kill_fuzzer(fuzzer) sleep(10) with open(status_path, "w", encoding="utf-8") as status: From a8c59df8d7da00440b4eb4e30c728c3744c43888 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Sun, 20 Oct 2024 15:38:28 -0400 Subject: [PATCH 142/353] trigger build --- src/DataTypes/fuzzers/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/src/DataTypes/fuzzers/CMakeLists.txt b/src/DataTypes/fuzzers/CMakeLists.txt index 8940586fc70..8dedd3470e2 100644 --- 
a/src/DataTypes/fuzzers/CMakeLists.txt +++ b/src/DataTypes/fuzzers/CMakeLists.txt @@ -1,3 +1,2 @@ clickhouse_add_executable(data_type_deserialization_fuzzer data_type_deserialization_fuzzer.cpp ${SRCS}) - target_link_libraries(data_type_deserialization_fuzzer PRIVATE clickhouse_aggregate_functions dbms) From 2995cf9d10da2814e9bf215fd1a8bca9f1ab5438 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 20 Oct 2024 21:24:10 +0000 Subject: [PATCH 143/353] fix --- tests/fuzz/runner.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index c84e34ffdbd..9eac0755d78 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -51,6 +51,8 @@ def run_fuzzer(fuzzer: str, timeout: int): seed_corpus_dir = "" active_corpus_dir = f"corpus/{fuzzer}" + if not os.path.exists(active_corpus_dir): + os.makedirs(active_corpus_dir) options_file = f"{fuzzer}.options" custom_libfuzzer_options = "" fuzzer_arguments = "" From f4bd651b9474e5862c78ba84db15826a87a4b6e0 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Sun, 20 Oct 2024 22:37:48 +0000 Subject: [PATCH 144/353] cleanup --- tests/ci/ci.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/ci/ci.py b/tests/ci/ci.py index e820f445e7a..10431ce038f 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -1284,7 +1284,6 @@ def main() -> int: dump_to_file=True, ) print(f"Job report url: [{check_url}]") - print(job_report) prepared_events = prepare_tests_results_for_clickhouse( pr_info, job_report.test_results, From d552f51dfed0e6e6869a3b3b8a4e026d5fd2ca62 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 21 Oct 2024 00:12:55 +0000 Subject: [PATCH 145/353] cleanup --- CMakeLists.txt | 1 + utils/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f0965530739..a165be799c0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -88,6 +88,7 @@ string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC) list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES) option (ENABLE_FUZZING "Fuzzy testing using libfuzzer" OFF) +option (ENABLE_FUZZER_TEST "Build testing fuzzers in order to test libFuzzer functionality" OFF) if (ENABLE_FUZZING) # Also set WITH_COVERAGE=1 for better fuzzing process diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index 8c706ee6b67..2373a98239a 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -24,6 +24,6 @@ if (ENABLE_UTILS) add_subdirectory (memcpy-bench) endif () -if (ENABLE_FUZZING) +if (ENABLE_FUZZING AND ENABLE_FUZZER_TEST) add_subdirectory (libfuzzer-test) endif () From 40029beaf9891bc250d1203e2cd96788b6650a7b Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 21 Oct 2024 13:49:53 +0000 Subject: [PATCH 146/353] Fix 02967_parallel_replicas_join_algo_and_analyzer_1.sh --- ...eplicas_join_algo_and_analyzer_1.reference | 16 ++++++++++++ ...allel_replicas_join_algo_and_analyzer_1.sh | 26 +++++++++++++++++-- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.reference b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.reference index e1bf9c27a81..7475cc7a97e 100644 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.reference +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.reference @@ -28,3 +28,19 @@ SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value` FROM `default`.` SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS 
`value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` GLOBAL ALL INNER JOIN `_data_` AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` GLOBAL ALL INNER JOIN `_data_` AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) DefaultCoordinator: Coordination done + +simple (global) join with analyzer and parallel replicas with local plan +4200000 4200000 4200000 -1400000 +4200006 4200006 4200006 -1400002 +4200012 4200012 4200012 -1400004 +4200018 4200018 4200018 -1400006 +4200024 4200024 4200024 -1400008 +4200030 4200030 4200030 -1400010 +4200036 4200036 4200036 -1400012 +4200042 4200042 4200042 -1400014 +4200048 4200048 4200048 -1400016 +4200054 4200054 4200054 -1400018 +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value` FROM `default`.`num_2` AS `__table1` (stage: WithMergeableState) + DefaultCoordinator: Coordination done +SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` GLOBAL ALL INNER JOIN `_data_` AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) + DefaultCoordinator: Coordination done diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh index b4271c3d29b..1d43f540138 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh @@ -37,7 +37,7 @@ inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" +cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0, parallel_replicas_local_plan=0" $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l @@ -45,7 +45,29 @@ inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, send_logs_level='trace', max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0" 2>&1 | +cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0, parallel_replicas_local_plan=0" 
2>&1 | +grep "executeQuery\|.*Coordinator: Coordination done" | +grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | +sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' + +echo +echo "simple (global) join with analyzer and parallel replicas with local plan" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2) r on l.key = r.key +order by l.key limit 10 offset 700000 +SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, +max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, +cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0, parallel_replicas_local_plan=0" + +$CLICKHOUSE_CLIENT -q " +select * from (select key, value from num_1) l +inner join (select key, value from num_2) r on l.key = r.key +order by l.key limit 10 offset 700000 +SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, send_logs_level='trace', +max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, +cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0, parallel_replicas_local_plan=1" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' From fc87cd4d52a2645174bfa1c5f85520ac3bc8a667 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Mon, 21 Oct 2024 20:19:08 +0000 Subject: [PATCH 147/353] Update 02967_parallel_replicas_join_algo_and_analyzer_2 --- ...allel_replicas_join_algo_and_analyzer_2.sh | 28 ++++++------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh index ed13bf3321b..f0118ac62df 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh @@ -17,6 +17,8 @@ insert into num_1 select number * 2, toString(number * 2) from numbers(1e7); insert into num_2 select number * 3, -number from numbers(1.5e6); " +PARALLEL_REPLICAS_SETTINGS="enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join = 1" + ############## echo echo "simple (local) join with analyzer and parallel replicas" @@ -25,17 +27,13 @@ $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" +SETTINGS enable_analyzer=1, $PARALLEL_REPLICAS_SETTINGS" $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, send_logs_level='trace', 
-allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | +SETTINGS enable_analyzer=1, send_logs_level='trace', $PARALLEL_REPLICAS_SETTINGS, " 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' @@ -49,17 +47,13 @@ $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" +SETTINGS enable_analyzer=1, join_algorithm='full_sorting_merge', $PARALLEL_REPLICAS_SETTINGS" $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | +SETTINGS enable_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', $PARALLEL_REPLICAS_SETTINGS" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' @@ -74,7 +68,7 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1" +SETTINGS enable_analyzer=1" ############## @@ -86,18 +80,14 @@ select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" +SETTINGS enable_analyzer=1, $PARALLEL_REPLICAS_SETTINGS" $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2 inner join (select number * 7 as key from numbers(1e5)) as nn on num_2.key = nn.key settings parallel_replicas_prefer_local_join=1) r on l.key = r.key order by l.key limit 10 offset 10000 -SETTINGS allow_experimental_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', -allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, 
parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=1" 2>&1 | +SETTINGS enable_analyzer=1, join_algorithm='full_sorting_merge', send_logs_level='trace', $PARALLEL_REPLICAS_SETTINGS" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' From ba11a188895d0ed123a6be62007222c1b0f0cfc1 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 22 Oct 2024 01:48:17 +0000 Subject: [PATCH 148/353] run fuzzers without shell --- tests/fuzz/runner.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 9eac0755d78..87495dff599 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -116,21 +116,23 @@ def run_fuzzer(fuzzer: str, timeout: int): try: with open(out_path, "wb") as out: subprocess.run( - cmd_line, + cmd_line.split(), stderr=out, stdout=subprocess.DEVNULL, text=True, check=True, - shell=True, + shell=False, errors="replace", timeout=timeout, ) except subprocess.CalledProcessError: + logging.info("Fail running %s", fuzzer) with open(status_path, "w", encoding="utf-8") as status: status.write( f"FAIL\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) except subprocess.TimeoutExpired: + logging.info("Timeout running %s", fuzzer) kill_fuzzer(fuzzer) sleep(10) with open(status_path, "w", encoding="utf-8") as status: @@ -138,6 +140,7 @@ def run_fuzzer(fuzzer: str, timeout: int): f"Timeout\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) else: + logging.info("Successful running %s", fuzzer) with open(status_path, "w", encoding="utf-8") as status: status.write( f"OK\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" From be77920fc8b91dc74edc01eaf3904eb383025752 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 22 Oct 2024 02:46:28 +0000 Subject: [PATCH 149/353] fix --- tests/fuzz/runner.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 87495dff599..ea4aef7d92b 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -108,8 +108,6 @@ def run_fuzzer(fuzzer: str, timeout: int): if not "-dict=" in cmd_line and Path(f"{fuzzer}.dict").exists(): cmd_line += f" -dict={fuzzer}.dict" - cmd_line += " < /dev/null" - logging.info("...will execute: %s", cmd_line) stopwatch = Stopwatch() @@ -117,8 +115,9 @@ def run_fuzzer(fuzzer: str, timeout: int): with open(out_path, "wb") as out: subprocess.run( cmd_line.split(), - stderr=out, + stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL, + stderr=out, text=True, check=True, shell=False, From b02ea90727fef66bef6a238a15058024a14029f2 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 22 Oct 2024 04:25:08 +0000 Subject: [PATCH 150/353] remove fuzzer args --- tests/fuzz/runner.py | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index ea4aef7d92b..7d1d6fe6c9e 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -32,16 +32,6 @@ class Stopwatch: self.start_time_str_value = self.start_time.strftime("%Y-%m-%d %H:%M:%S") -def kill_fuzzer(fuzzer: str): - with subprocess.Popen(["ps", "-A", "u"], stdout=subprocess.PIPE) as p: - out, _ = p.communicate() - for line in out.splitlines(): - if fuzzer.encode("utf-8") in line: - pid = 
int(line.split(None, 2)[1]) - logging.info("Killing fuzzer %s, pid %d", fuzzer, pid) - os.kill(pid, signal.SIGKILL) - - def run_fuzzer(fuzzer: str, timeout: int): logging.info("Running fuzzer %s...", fuzzer) @@ -95,7 +85,7 @@ def run_fuzzer(fuzzer: str, timeout: int): out_path = f"{OUTPUT}/{fuzzer}.out" cmd_line = ( - f"{DEBUGGER} ./{fuzzer} {FUZZER_ARGS} {active_corpus_dir} {seed_corpus_dir}" + f"{DEBUGGER} ./{fuzzer} {active_corpus_dir} {seed_corpus_dir}" ) cmd_line += f" -exact_artifact_path={exact_artifact_path}" @@ -132,8 +122,6 @@ def run_fuzzer(fuzzer: str, timeout: int): ) except subprocess.TimeoutExpired: logging.info("Timeout running %s", fuzzer) - kill_fuzzer(fuzzer) - sleep(10) with open(status_path, "w", encoding="utf-8") as status: status.write( f"Timeout\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" @@ -152,7 +140,7 @@ def main(): subprocess.check_call("ls -al", shell=True) - timeout = 30 + timeout = 60 match = re.search(r"(^|\s+)-max_total_time=(\d+)($|\s)", FUZZER_ARGS) if match: From a742ee863cbf74b3e108bd05564b5d7c0c270fcf Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 22 Oct 2024 04:25:53 +0000 Subject: [PATCH 151/353] fix --- tests/fuzz/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 7d1d6fe6c9e..b37ad81b73c 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -144,7 +144,7 @@ def main(): match = re.search(r"(^|\s+)-max_total_time=(\d+)($|\s)", FUZZER_ARGS) if match: - timeout += int(match.group(2)) + timeout = int(match.group(2)) with Path() as current: for fuzzer in current.iterdir(): From c52986bab761430cf24fe03f526da814bc339dc8 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 22 Oct 2024 04:40:34 +0000 Subject: [PATCH 152/353] fix --- tests/fuzz/runner.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index b37ad81b73c..00c3683e7c7 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -5,10 +5,8 @@ import datetime import logging import os import re -import signal import subprocess from pathlib import Path -from time import sleep DEBUGGER = os.getenv("DEBUGGER", "") FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") @@ -84,9 +82,7 @@ def run_fuzzer(fuzzer: str, timeout: int): status_path = f"{OUTPUT}/{fuzzer}.status" out_path = f"{OUTPUT}/{fuzzer}.out" - cmd_line = ( - f"{DEBUGGER} ./{fuzzer} {active_corpus_dir} {seed_corpus_dir}" - ) + cmd_line = f"{DEBUGGER} ./{fuzzer} {active_corpus_dir} {seed_corpus_dir}" cmd_line += f" -exact_artifact_path={exact_artifact_path}" From 9da2a68357b5c859e5fc05f46c6cfb787b12b066 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Tue, 22 Oct 2024 10:28:36 +0000 Subject: [PATCH 153/353] Fix 02967_parallel_replicas_join_algo_and_analyzer_2 --- ...02967_parallel_replicas_join_algo_and_analyzer_2.reference | 3 --- .../02967_parallel_replicas_join_algo_and_analyzer_2.sh | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.reference b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.reference index 297ec311f3e..f17d9aea3d5 100644 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.reference +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.reference @@ -11,7 +11,6 @@ simple (local) join with analyzer and parallel replicas 4200048 4200048 4200048 -1400016 4200054 4200054 
4200054 -1400018 SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) DefaultCoordinator: Coordination done simple (local) join with analyzer and parallel replicas and full sorting merge join @@ -26,7 +25,6 @@ simple (local) join with analyzer and parallel replicas and full sorting merge j 4200048 4200048 4200048 -1400016 4200054 4200054 4200054 -1400018 SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4`) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(700000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) WithOrderCoordinator: Coordination done nested join with analyzer @@ -53,5 +51,4 @@ nested join with analyzer and parallel replicas, both local 420336 420336 420336 -140112 420378 420378 420378 -140126 SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4` ALL INNER JOIN (SELECT `__table6`.`number` * 7 AS `key` FROM numbers(100000.) 
AS `__table6`) AS `__table5` ON `__table4`.`key` = `__table5`.`key` SETTINGS parallel_replicas_prefer_local_join = 1) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(10000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) -SELECT `__table1`.`key` AS `key`, `__table1`.`value` AS `value`, `__table3`.`key` AS `r.key`, `__table3`.`value` AS `r.value` FROM (SELECT `__table2`.`key` AS `key`, `__table2`.`value` AS `value` FROM `default`.`num_1` AS `__table2`) AS `__table1` ALL INNER JOIN (SELECT `__table4`.`key` AS `key`, `__table4`.`value` AS `value` FROM `default`.`num_2` AS `__table4` ALL INNER JOIN (SELECT `__table6`.`number` * 7 AS `key` FROM numbers(100000.) AS `__table6`) AS `__table5` ON `__table4`.`key` = `__table5`.`key` SETTINGS parallel_replicas_prefer_local_join = 1) AS `__table3` ON `__table1`.`key` = `__table3`.`key` ORDER BY `__table1`.`key` ASC LIMIT _CAST(10000, 'UInt64'), _CAST(10, 'UInt64') (stage: WithMergeableState) WithOrderCoordinator: Coordination done diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh index f0118ac62df..4768e308f1e 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_2.sh @@ -17,7 +17,7 @@ insert into num_1 select number * 2, toString(number * 2) from numbers(1e7); insert into num_2 select number * 3, -number from numbers(1.5e6); " -PARALLEL_REPLICAS_SETTINGS="enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join = 1" +PARALLEL_REPLICAS_SETTINGS="allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join = 1, parallel_replicas_local_plan=1" ############## echo @@ -33,7 +33,7 @@ $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS enable_analyzer=1, send_logs_level='trace', $PARALLEL_REPLICAS_SETTINGS, " 2>&1 | +SETTINGS enable_analyzer=1, send_logs_level='trace', $PARALLEL_REPLICAS_SETTINGS" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' From d1426886e3a7c6f2d3b4d2f81289a005324e6a5d Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 22 Oct 2024 12:25:15 +0000 Subject: [PATCH 154/353] timeout as OK run --- tests/ci/libfuzzer_test_check.py | 10 +++++----- tests/fuzz/runner.py | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index d7e79cc26fe..17cca9a47dc 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -177,7 +177,7 @@ def read_status(status_path: Path): def process_results(result_path: Path): test_results = [] oks = 0 - timeouts = 0 + errors = 0 fails = 0 for file in result_path.glob("*.status"): fuzzer = file.stem @@ -188,8 +188,8 @@ def process_results(result_path: Path): result = 
TestResult(fuzzer, status[0], float(status[2])) if status[0] == "OK": oks += 1 - elif status[0] == "Timeout": - timeouts += 1 + elif status[0] == "ERROR": + errors += 1 if file_path_out.exists(): result.set_log_files(f"['{file_path_out}']") else: @@ -202,7 +202,7 @@ def process_results(result_path: Path): result.set_log_files(f"['{file_path_out}']") test_results.append(result) - return [oks, timeouts, fails, test_results] + return [oks, errors, fails, test_results] def main(): @@ -284,7 +284,7 @@ def main(): success = results[1] == 0 and results[2] == 0 JobReport( - description=f"OK: {results[0]}, Timeout: {results[1]}, FAIL: {results[2]}", + description=f"OK: {results[0]}, ERROR: {results[1]}, FAIL: {results[2]}", test_results=results[3], status=SUCCESS if success else FAILURE, start_time=stopwatch.start_time_str, diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 00c3683e7c7..59cb9877adb 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -117,17 +117,17 @@ def run_fuzzer(fuzzer: str, timeout: int): f"FAIL\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) except subprocess.TimeoutExpired: - logging.info("Timeout running %s", fuzzer) - with open(status_path, "w", encoding="utf-8") as status: - status.write( - f"Timeout\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" - ) - else: logging.info("Successful running %s", fuzzer) with open(status_path, "w", encoding="utf-8") as status: status.write( f"OK\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) + else: + logging.info("Error running %s", fuzzer) + with open(status_path, "w", encoding="utf-8") as status: + status.write( + f"ERROR\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" + ) os.remove(out_path) From 32be533290f996f859eba842911cd0f0b017f52b Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 22 Oct 2024 15:22:59 +0000 Subject: [PATCH 155/353] better diagnostics --- tests/ci/libfuzzer_test_check.py | 5 +++++ tests/fuzz/runner.py | 12 +++++++++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 17cca9a47dc..45370b0cd00 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -184,6 +184,7 @@ def process_results(result_path: Path): file_path = file.parent / fuzzer file_path_unit = file_path.with_suffix(".unit") file_path_out = file_path.with_suffix(".out") + file_path_stdout = file_path.with_suffix(".stdout") status = read_status(file) result = TestResult(fuzzer, status[0], float(status[2])) if status[0] == "OK": @@ -192,6 +193,8 @@ def process_results(result_path: Path): errors += 1 if file_path_out.exists(): result.set_log_files(f"['{file_path_out}']") + elif file_path_stdout.exists(): + result.set_log_files(f"['{file_path_stdout}']") else: fails += 1 if file_path_out.exists(): @@ -200,6 +203,8 @@ def process_results(result_path: Path): result.set_log_files(f"['{file_path_unit}']") elif file_path_out.exists(): result.set_log_files(f"['{file_path_out}']") + elif file_path_stdout.exists(): + result.set_log_files(f"['{file_path_stdout}']") test_results.append(result) return [oks, errors, fails, test_results] diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 59cb9877adb..2c1d57ce5eb 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -81,6 +81,7 @@ def run_fuzzer(fuzzer: str, timeout: int): exact_artifact_path = f"{OUTPUT}/{fuzzer}.unit" status_path = f"{OUTPUT}/{fuzzer}.status" out_path = f"{OUTPUT}/{fuzzer}.out" 
+ stdout_path = f"{OUTPUT}/{fuzzer}.stdout" cmd_line = f"{DEBUGGER} ./{fuzzer} {active_corpus_dir} {seed_corpus_dir}" @@ -98,11 +99,11 @@ def run_fuzzer(fuzzer: str, timeout: int): stopwatch = Stopwatch() try: - with open(out_path, "wb") as out: + with open(out_path, "wb") as out, open(stdout_path, "wb") as stdout: subprocess.run( cmd_line.split(), stdin=subprocess.DEVNULL, - stdout=subprocess.DEVNULL, + stdout=stdout, stderr=out, text=True, check=True, @@ -122,13 +123,18 @@ def run_fuzzer(fuzzer: str, timeout: int): status.write( f"OK\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) + except Exception as e: + logging.info("Unexpected exception running %s: %s", fuzzer, e) + with open(status_path, "w", encoding="utf-8") as status: + status.write( + f"ERROR\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" + ) else: logging.info("Error running %s", fuzzer) with open(status_path, "w", encoding="utf-8") as status: status.write( f"ERROR\n{stopwatch.start_time_str}\n{stopwatch.duration_seconds}\n" ) - os.remove(out_path) def main(): From e3ebe51968acf6a43922f12a9443c8e17a9cabc2 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 23 Oct 2024 01:27:10 +0000 Subject: [PATCH 156/353] Make ParquetMetadata say whether bloom filter is present --- .../Impl/ParquetMetadataInputFormat.cpp | 5 +- .../02718_parquet_metadata_format.reference | 70 +++++++++++++++++-- .../02718_parquet_metadata_format.sh | 1 + 3 files changed, 69 insertions(+), 7 deletions(-) diff --git a/src/Processors/Formats/Impl/ParquetMetadataInputFormat.cpp b/src/Processors/Formats/Impl/ParquetMetadataInputFormat.cpp index 7fd6e93dd80..8264b565e39 100644 --- a/src/Processors/Formats/Impl/ParquetMetadataInputFormat.cpp +++ b/src/Processors/Formats/Impl/ParquetMetadataInputFormat.cpp @@ -92,8 +92,9 @@ static NamesAndTypesList getHeaderForParquetMetadata() std::make_shared(std::make_shared()), std::make_shared(std::make_shared())}, Names{"num_values", "null_count", "distinct_count", "min", "max"}), + DataTypeFactory::instance().get("Bool"), }, - Names{"name", "path", "total_compressed_size", "total_uncompressed_size", "have_statistics", "statistics"}))}, + Names{"name", "path", "total_compressed_size", "total_uncompressed_size", "have_statistics", "statistics", "have_bloom_filter"}))}, Names{"num_columns", "num_rows", "total_uncompressed_size", "total_compressed_size", "columns"}))}, }; return names_and_types; @@ -350,6 +351,8 @@ void ParquetMetadataInputFormat::fillColumnChunksMetadata(const std::unique_ptr< fillColumnStatistics(column_chunk_metadata->statistics(), tuple_column.getColumn(5), row_group_metadata->schema()->Column(column_i)->type_length()); else tuple_column.getColumn(5).insertDefault(); + bool have_bloom_filter = column_chunk_metadata->bloom_filter_offset().has_value(); + assert_cast(tuple_column.getColumn(6)).insertValue(have_bloom_filter); } array_column.getOffsets().push_back(tuple_column.size()); } diff --git a/tests/queries/0_stateless/02718_parquet_metadata_format.reference b/tests/queries/0_stateless/02718_parquet_metadata_format.reference index 1f55c29da56..815968aeba5 100644 --- a/tests/queries/0_stateless/02718_parquet_metadata_format.reference +++ b/tests/queries/0_stateless/02718_parquet_metadata_format.reference @@ -78,7 +78,8 @@ "distinct_count": null, "min": "0", "max": "999" - } + }, + "have_bloom_filter": false }, { "name": "str", @@ -92,7 +93,8 @@ "distinct_count": null, "min": "Hello0", "max": "Hello999" - } + }, + "have_bloom_filter": false }, { "name": "mod", @@ -106,7 
+108,8 @@ "distinct_count": null, "min": "0", "max": "8" - } + }, + "have_bloom_filter": false } ] }, @@ -128,7 +131,8 @@ "distinct_count": null, "min": "0", "max": "999" - } + }, + "have_bloom_filter": false }, { "name": "str", @@ -142,7 +146,8 @@ "distinct_count": null, "min": "Hello0", "max": "Hello999" - } + }, + "have_bloom_filter": false }, { "name": "mod", @@ -156,7 +161,8 @@ "distinct_count": null, "min": "0", "max": "8" - } + }, + "have_bloom_filter": false } ] } @@ -223,3 +229,55 @@ } 1 1 +{ + "num_columns": "1", + "num_rows": "5", + "num_row_groups": "1", + "format_version": "1.0", + "metadata_size": "267", + "total_uncompressed_size": "105", + "total_compressed_size": "128", + "columns": [ + { + "name": "ipv6", + "path": "ipv6", + "max_definition_level": "0", + "max_repetition_level": "0", + "physical_type": "FIXED_LEN_BYTE_ARRAY", + "logical_type": "None", + "compression": "GZIP", + "total_uncompressed_size": "105", + "total_compressed_size": "128", + "space_saved": "-21.9%", + "encodings": [ + "PLAIN", + "BIT_PACKED" + ] + } + ], + "row_groups": [ + { + "num_columns": "1", + "num_rows": "5", + "total_uncompressed_size": "105", + "total_compressed_size": "128", + "columns": [ + { + "name": "ipv6", + "path": "ipv6", + "total_compressed_size": "128", + "total_uncompressed_size": "105", + "have_statistics": true, + "statistics": { + "num_values": "5", + "null_count": "0", + "distinct_count": null, + "min": "27 32 150 125 17 250 66 31 157 44 75 218 51 50 19 144 ", + "max": "154 31 90 141 15 7 68 47 190 29 121 145 188 162 234 154 " + }, + "have_bloom_filter": true + } + ] + } + ] +} diff --git a/tests/queries/0_stateless/02718_parquet_metadata_format.sh b/tests/queries/0_stateless/02718_parquet_metadata_format.sh index 94d7f453850..c6371cff7a3 100755 --- a/tests/queries/0_stateless/02718_parquet_metadata_format.sh +++ b/tests/queries/0_stateless/02718_parquet_metadata_format.sh @@ -17,3 +17,4 @@ $CLICKHOUSE_LOCAL -q "select some_column from file('$CURDIR/data_parquet/02718_d $CLICKHOUSE_LOCAL -q "select num_columns from file('$CURDIR/data_parquet/02718_data.parquet', ParquetMetadata, 'num_columns Array(UInt32)')" 2>&1 | grep -c "BAD_ARGUMENTS" +$CLICKHOUSE_LOCAL -q "select * from file('$CURDIR/data_parquet/ipv6_bloom_filter.gz.parquet', ParquetMetadata) format JSONEachRow" | python3 -m json.tool From b958dcb50fb994f6375e04196df193ea5106c1d2 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 23 Oct 2024 14:36:27 +0000 Subject: [PATCH 157/353] reorganize command line, add CI.FUZZER_ARGS option --- tests/fuzz/clickhouse_fuzzer.options | 2 ++ tests/fuzz/runner.py | 24 +++++++++++++++--------- 2 files changed, 17 insertions(+), 9 deletions(-) create mode 100644 tests/fuzz/clickhouse_fuzzer.options diff --git a/tests/fuzz/clickhouse_fuzzer.options b/tests/fuzz/clickhouse_fuzzer.options new file mode 100644 index 00000000000..a22ba7b3b88 --- /dev/null +++ b/tests/fuzz/clickhouse_fuzzer.options @@ -0,0 +1,2 @@ +[CI] +FUZZER_ARGS = true diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 2c1d57ce5eb..40b55700623 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -44,6 +44,7 @@ def run_fuzzer(fuzzer: str, timeout: int): options_file = f"{fuzzer}.options" custom_libfuzzer_options = "" fuzzer_arguments = "" + use_fuzzer_args = False with Path(options_file) as path: if path.exists() and path.is_file(): @@ -78,24 +79,28 @@ def run_fuzzer(fuzzer: str, timeout: int): for key, value in parser["fuzzer_arguments"].items() ) + use_fuzzer_args = parser.getboolean("CI", 
"FUZZER_ARGS", fallback=False) + exact_artifact_path = f"{OUTPUT}/{fuzzer}.unit" status_path = f"{OUTPUT}/{fuzzer}.status" out_path = f"{OUTPUT}/{fuzzer}.out" stdout_path = f"{OUTPUT}/{fuzzer}.stdout" - cmd_line = f"{DEBUGGER} ./{fuzzer} {active_corpus_dir} {seed_corpus_dir}" + if not "-dict=" in custom_libfuzzer_options and Path(f"{fuzzer}.dict").exists(): + custom_libfuzzer_options += f" -dict={fuzzer}.dict" + custom_libfuzzer_options += f" -exact_artifact_path={exact_artifact_path}" - cmd_line += f" -exact_artifact_path={exact_artifact_path}" + libfuzzer_corpora = f"{active_corpus_dir} {seed_corpus_dir}" - if custom_libfuzzer_options: - cmd_line += f" {custom_libfuzzer_options}" - if fuzzer_arguments: - cmd_line += f" {fuzzer_arguments}" + cmd_line = f"{DEBUGGER} ./{fuzzer} {fuzzer_arguments}" - if not "-dict=" in cmd_line and Path(f"{fuzzer}.dict").exists(): - cmd_line += f" -dict={fuzzer}.dict" + env = None + if use_fuzzer_args: + env = {"FUZZER_ARGS": f"{custom_libfuzzer_options} {libfuzzer_corpora}"} + else: + cmd_line += f" {custom_libfuzzer_options} {libfuzzer_corpora}" - logging.info("...will execute: %s", cmd_line) + logging.info("...will execute: %s%s", cmd_line, f" with FUZZER_ARGS {env["FUZZER_ARGS"]}" if use_fuzzer_args else "") stopwatch = Stopwatch() try: @@ -110,6 +115,7 @@ def run_fuzzer(fuzzer: str, timeout: int): shell=False, errors="replace", timeout=timeout, + env=env, ) except subprocess.CalledProcessError: logging.info("Fail running %s", fuzzer) From 19cdbf62c53070085e49657e890b74e9f5979a9f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 23 Oct 2024 14:57:05 +0000 Subject: [PATCH 158/353] fix --- tests/fuzz/runner.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 40b55700623..62f1666e77f 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -95,12 +95,14 @@ def run_fuzzer(fuzzer: str, timeout: int): cmd_line = f"{DEBUGGER} ./{fuzzer} {fuzzer_arguments}" env = None + with_fuzzer_args = "" if use_fuzzer_args: env = {"FUZZER_ARGS": f"{custom_libfuzzer_options} {libfuzzer_corpora}"} + with_fuzzer_args = f" with FUZZER_ARGS '{custom_libfuzzer_options} {libfuzzer_corpora}'" else: cmd_line += f" {custom_libfuzzer_options} {libfuzzer_corpora}" - logging.info("...will execute: %s%s", cmd_line, f" with FUZZER_ARGS {env["FUZZER_ARGS"]}" if use_fuzzer_args else "") + logging.info("...will execute: '%s'%s", cmd_line, with_fuzzer_args) stopwatch = Stopwatch() try: From a5e3f7a213c3c830ddff7ba6b937909b174ce0a1 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 23 Oct 2024 15:13:04 +0000 Subject: [PATCH 159/353] Automatic style fix --- tests/fuzz/runner.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 62f1666e77f..63f53be3766 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -98,7 +98,9 @@ def run_fuzzer(fuzzer: str, timeout: int): with_fuzzer_args = "" if use_fuzzer_args: env = {"FUZZER_ARGS": f"{custom_libfuzzer_options} {libfuzzer_corpora}"} - with_fuzzer_args = f" with FUZZER_ARGS '{custom_libfuzzer_options} {libfuzzer_corpora}'" + with_fuzzer_args = ( + f" with FUZZER_ARGS '{custom_libfuzzer_options} {libfuzzer_corpora}'" + ) else: cmd_line += f" {custom_libfuzzer_options} {libfuzzer_corpora}" From b17c6ba73ea18e0e86782966080ebd4841b893cf Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Wed, 23 Oct 2024 14:01:05 -0400 Subject: [PATCH 
160/353] trigger build --- src/DataTypes/fuzzers/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/src/DataTypes/fuzzers/CMakeLists.txt b/src/DataTypes/fuzzers/CMakeLists.txt index 8dedd3470e2..8940586fc70 100644 --- a/src/DataTypes/fuzzers/CMakeLists.txt +++ b/src/DataTypes/fuzzers/CMakeLists.txt @@ -1,2 +1,3 @@ clickhouse_add_executable(data_type_deserialization_fuzzer data_type_deserialization_fuzzer.cpp ${SRCS}) + target_link_libraries(data_type_deserialization_fuzzer PRIVATE clickhouse_aggregate_functions dbms) From dc1d1f080a3ecf97424138f3d6eb203a34ec3b1b Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 23 Oct 2024 20:24:54 +0000 Subject: [PATCH 161/353] fix --- tests/fuzz/runner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 63f53be3766..5fb40173e0c 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -97,9 +97,9 @@ def run_fuzzer(fuzzer: str, timeout: int): env = None with_fuzzer_args = "" if use_fuzzer_args: - env = {"FUZZER_ARGS": f"{custom_libfuzzer_options} {libfuzzer_corpora}"} + env = {"FUZZER_ARGS": f"{custom_libfuzzer_options} {libfuzzer_corpora}".strip()} with_fuzzer_args = ( - f" with FUZZER_ARGS '{custom_libfuzzer_options} {libfuzzer_corpora}'" + f" with FUZZER_ARGS '{env['FUZZER_ARGS']}'" ) else: cmd_line += f" {custom_libfuzzer_options} {libfuzzer_corpora}" From 4c9743ca42b2806bc981d393931f05ed8ade0c99 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 23 Oct 2024 20:38:00 +0000 Subject: [PATCH 162/353] Automatic style fix --- tests/fuzz/runner.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 5fb40173e0c..af73a989ec3 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -98,9 +98,7 @@ def run_fuzzer(fuzzer: str, timeout: int): with_fuzzer_args = "" if use_fuzzer_args: env = {"FUZZER_ARGS": f"{custom_libfuzzer_options} {libfuzzer_corpora}".strip()} - with_fuzzer_args = ( - f" with FUZZER_ARGS '{env['FUZZER_ARGS']}'" - ) + with_fuzzer_args = f" with FUZZER_ARGS '{env['FUZZER_ARGS']}'" else: cmd_line += f" {custom_libfuzzer_options} {libfuzzer_corpora}" From f93ac138f109c6a30354231240c47324dc51541f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 23 Oct 2024 22:21:37 +0000 Subject: [PATCH 163/353] chown clickhouse data path to root --- tests/ci/libfuzzer_test_check.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 45370b0cd00..2a307d07231 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -215,6 +215,8 @@ def main(): stopwatch = Stopwatch() + os.chown("/var/lib/clickhouse", 0, 0) + temp_path = Path(TEMP_PATH) reports_path = Path(REPORT_PATH) temp_path.mkdir(parents=True, exist_ok=True) From 77c2b9e5fc2e19483a1c0f675e32757eec202a1f Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Wed, 23 Oct 2024 22:44:10 +0000 Subject: [PATCH 164/353] create clickhouse data dir --- tests/ci/libfuzzer_test_check.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 2a307d07231..7091f076b99 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -215,7 +215,8 @@ def main(): stopwatch = Stopwatch() - os.chown("/var/lib/clickhouse", 0, 0) + data_path = "/var/lib/clickhouse" + os.makedirs(data_path, exist_ok=True) temp_path = Path(TEMP_PATH) 
reports_path = Path(REPORT_PATH) From efd8ea7757deb9326abbe91c12e9b58629fd236c Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Thu, 24 Oct 2024 03:59:03 +0000 Subject: [PATCH 165/353] set uid gid --- tests/ci/libfuzzer_test_check.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 7091f076b99..379d681cb3e 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -81,10 +81,13 @@ def get_run_command( envs += [f"-e {e}" for e in additional_envs] env_str = " ".join(envs) + uid = os.getuid() + gid = os.getgid() return ( f"docker run " f"{ci_logs_args} " + f"--user {uid}:{gid} " f"--workdir=/fuzzers " f"--volume={fuzzers_path}:/fuzzers " f"--volume={repo_path}/tests:/usr/share/clickhouse-test " @@ -215,9 +218,6 @@ def main(): stopwatch = Stopwatch() - data_path = "/var/lib/clickhouse" - os.makedirs(data_path, exist_ok=True) - temp_path = Path(TEMP_PATH) reports_path = Path(REPORT_PATH) temp_path.mkdir(parents=True, exist_ok=True) From a228e4fa895979ee1d5bf6de71242ece82bc21e6 Mon Sep 17 00:00:00 2001 From: divanik Date: Thu, 24 Oct 2024 13:28:32 +0000 Subject: [PATCH 166/353] Fix issues with tests --- .../DataLakes/DataLakeConfiguration.h | 14 +++++++ .../ObjectStorage/StorageObjectStorage.cpp | 39 +++++++++++++++---- .../ObjectStorage/StorageObjectStorage.h | 11 ++++-- .../registerStorageObjectStorage.cpp | 22 ++++++++++- .../TableFunctionObjectStorage.cpp | 25 ++++-------- .../TableFunctionObjectStorage.h | 9 +++++ 6 files changed, 89 insertions(+), 31 deletions(-) diff --git a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h index d19b7f65640..c01e615acd9 100644 --- a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h +++ b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h @@ -10,6 +10,7 @@ # include # include # include +# include # include # include # include @@ -46,6 +47,18 @@ public: BaseStorageConfiguration::setPartitionColumns(current_metadata->getPartitionColumns()); } + std::optional tryGetTableStructureFromMetadata() const override + { + if (!current_metadata) + return std::nullopt; + auto schema_from_metadata = current_metadata->getTableSchema(); + if (!schema_from_metadata.empty()) + { + return ColumnsDescription(std::move(schema_from_metadata)); + } + return std::nullopt; + } + private: DataLakeMetadataPtr current_metadata; @@ -77,6 +90,7 @@ private: using StorageS3IcebergConfiguration = DataLakeConfiguration; using StorageAzureIcebergConfiguration = DataLakeConfiguration; using StorageLocalIcebergConfiguration = DataLakeConfiguration; +using StorageHDFSIcebergConfiguration = DataLakeConfiguration; using StorageS3DeltaLakeConfiguration = DataLakeConfiguration; using StorageS3HudiConfiguration = DataLakeConfiguration; diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index 86630b897d0..f24f152ecb4 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -14,14 +14,15 @@ #include #include -#include #include -#include -#include #include +#include #include #include -#include +#include +#include +#include +#include "Storages/ColumnsDescription.h" namespace DB @@ -252,6 +253,11 @@ ReadFromFormatInfo StorageObjectStorage::Configuration::prepareReadingFromFormat return DB::prepareReadingFromFormat(requested_columns, 
storage_snapshot, local_context, supports_subset_of_columns); } +std::optional StorageObjectStorage::Configuration::tryGetTableStructureFromMetadata() const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method tryGetTableStructureFromMetadata is not implemented for basic configuration"); +} + void StorageObjectStorage::read( QueryPlan & query_plan, const Names & column_names, @@ -409,6 +415,16 @@ ColumnsDescription StorageObjectStorage::resolveSchemaFromData( std::string & sample_path, const ContextPtr & context) { + if (configuration->isDataLakeConfiguration()) + { + configuration->update(object_storage, context); + auto table_structure = configuration->tryGetTableStructureFromMetadata(); + if (table_structure) + { + return table_structure.value(); + } + } + ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); auto schema = readSchemaFromFormat(configuration->format, format_settings, *iterator, context); @@ -489,10 +505,17 @@ void StorageObjectStorage::Configuration::initialize( if (configuration.format == "auto") { - configuration.format = FormatFactory::instance().tryGetFormatFromFileName( - configuration.isArchive() - ? configuration.getPathInArchive() - : configuration.getPath()).value_or("auto"); + if (configuration.isDataLakeConfiguration()) + { + configuration.format = "Parquet"; + } + else + { + configuration.format + = FormatFactory::instance() + .tryGetFormatFromFileName(configuration.isArchive() ? configuration.getPathInArchive() : configuration.getPath()) + .value_or("auto"); + } } else FormatFactory::instance().checkFormatName(configuration.format); diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.h b/src/Storages/ObjectStorage/StorageObjectStorage.h index 9781d5dbe6e..21a6cdeba6f 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.h +++ b/src/Storages/ObjectStorage/StorageObjectStorage.h @@ -1,12 +1,13 @@ #pragma once -#include -#include #include -#include +#include #include -#include #include +#include #include +#include +#include +#include "Storages/ColumnsDescription.h" namespace DB { @@ -208,6 +209,8 @@ public: bool supports_subset_of_columns, ContextPtr local_context); + virtual std::optional tryGetTableStructureFromMetadata() const; + String format = "auto"; String compression_method = "auto"; String structure = "auto"; diff --git a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp index 570e888da91..1e231a8e3e4 100644 --- a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp @@ -153,6 +153,7 @@ void registerStorageObjectStorage(StorageFactory & factory) void registerStorageIceberg(StorageFactory & factory) { +#if USE_AWS_S3 factory.registerStorage( "Iceberg", [&](const StorageFactory::Arguments & args) @@ -182,7 +183,8 @@ void registerStorageIceberg(StorageFactory & factory) .supports_schema_inference = true, .source_access_type = AccessType::S3, }); - +#endif +#if USE_AZURE_BLOB_STORAGE factory.registerStorage( "IcebergAzure", [&](const StorageFactory::Arguments & args) @@ -197,7 +199,7 @@ void registerStorageIceberg(StorageFactory & factory) .supports_schema_inference = true, .source_access_type = AccessType::AZURE, }); - +#endif factory.registerStorage( "IcebergLocal", [&](const StorageFactory::Arguments & args) @@ -212,6 +214,22 @@ void registerStorageIceberg(StorageFactory & factory) .supports_schema_inference = 
true, .source_access_type = AccessType::FILE, }); +#if USE_HDFS + factory.registerStorage( + "IcebergHDFS", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::HDFS, + }); +#endif } #endif diff --git a/src/TableFunctions/TableFunctionObjectStorage.cpp b/src/TableFunctions/TableFunctionObjectStorage.cpp index ecfc1e462f0..509ef92e8b2 100644 --- a/src/TableFunctions/TableFunctionObjectStorage.cpp +++ b/src/TableFunctions/TableFunctionObjectStorage.cpp @@ -251,6 +251,14 @@ void registerTableFunctionIceberg(TableFunctionFactory & factory) .categories{"DataLake"}}, .allow_readonly = false}); # endif +# if USE_HDFS + factory.registerFunction( + {.documentation + = {.description = R"(The table function can be used to read the Iceberg table stored on HDFS virtual filesystem.)", + .examples{{"icebergHDFS", "SELECT * FROM icebergHDFS(url)", ""}}, + .categories{"DataLake"}}, + .allow_readonly = false}); +# endif factory.registerFunction( {.documentation = {.description = R"(The table function can be used to read the Iceberg table stored locally.)", @@ -297,21 +305,4 @@ void registerDataLakeTableFunctions(TableFunctionFactory & factory) registerTableFunctionHudi(factory); #endif } - -#if USE_AVRO -# if USE_AWS_S3 -template class TableFunctionObjectStorage; -template class TableFunctionObjectStorage; -# endif -# if USE_AZURE_BLOB_STORAGE -template class TableFunctionObjectStorage; -# endif -template class TableFunctionObjectStorage; -#endif -#if USE_AWS_S3 -# if USE_PARQUET -template class TableFunctionObjectStorage; -# endif -template class TableFunctionObjectStorage; -#endif } diff --git a/src/TableFunctions/TableFunctionObjectStorage.h b/src/TableFunctions/TableFunctionObjectStorage.h index 3cf86f982d1..19cd637bd80 100644 --- a/src/TableFunctions/TableFunctionObjectStorage.h +++ b/src/TableFunctions/TableFunctionObjectStorage.h @@ -86,6 +86,12 @@ struct IcebergLocalDefinition static constexpr auto storage_type_name = "Local"; }; +struct IcebergHDFSDefinition +{ + static constexpr auto name = "icebergHDFS"; + static constexpr auto storage_type_name = "HDFS"; +}; + struct DeltaLakeDefinition { static constexpr auto name = "deltaLake"; @@ -184,6 +190,9 @@ using TableFunctionIcebergS3 = TableFunctionObjectStorage; # endif +# if USE_HDFS +using TableFunctionIcebergHDFS = TableFunctionObjectStorage; +# endif using TableFunctionIcebergLocal = TableFunctionObjectStorage; #endif #if USE_AWS_S3 From a3f0d27d23ebf0776304d82be1765cdcb4a122e8 Mon Sep 17 00:00:00 2001 From: divanik Date: Thu, 24 Oct 2024 13:56:26 +0000 Subject: [PATCH 167/353] Resolve some issues --- .../DataLakes/DataLakeConfiguration.h | 32 ++++++++----------- .../DataLakes/DeltaLakeMetadata.cpp | 8 ++--- .../DataLakes/DeltaLakeMetadata.h | 6 ++-- .../ObjectStorage/DataLakes/HudiMetadata.cpp | 2 +- .../ObjectStorage/DataLakes/HudiMetadata.h | 14 +++----- .../DataLakes/IcebergMetadata.cpp | 4 +-- .../ObjectStorage/DataLakes/IcebergMetadata.h | 13 ++++---- .../ObjectStorage/StorageObjectStorage.cpp | 1 + .../ObjectStorage/StorageObjectStorage.h | 2 +- .../registerStorageObjectStorage.cpp | 1 + 10 files changed, 36 insertions(+), 47 deletions(-) diff --git 
a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h index c01e615acd9..27599452a59 100644 --- a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h +++ b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h @@ -1,23 +1,19 @@ #pragma once -#include "config.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include -#if USE_AVRO - -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include - -# include +#include namespace DB @@ -96,5 +92,3 @@ using StorageS3HudiConfiguration = DataLakeConfigurationdata_files; } - static DataLakeMetadataPtr create(ObjectStoragePtr object_storage, ConfigurationObservePtr configuration, ContextPtr local_context) + static DataLakeMetadataPtr create(ObjectStoragePtr object_storage, ConfigurationObserverPtr configuration, ContextPtr local_context) { return std::make_unique(object_storage, configuration, local_context); } diff --git a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp index 8a93a0ea6d3..40730f6d057 100644 --- a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp @@ -87,7 +87,7 @@ Strings HudiMetadata::getDataFilesImpl() const return result; } -HudiMetadata::HudiMetadata(ObjectStoragePtr object_storage_, ConfigurationObservePtr configuration_, ContextPtr context_) +HudiMetadata::HudiMetadata(ObjectStoragePtr object_storage_, ConfigurationObserverPtr configuration_, ContextPtr context_) : WithContext(context_), object_storage(object_storage_), configuration(configuration_) { } diff --git a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.h b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.h index b22dfacb0ad..cdab11c4277 100644 --- a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.h +++ b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.h @@ -13,14 +13,11 @@ namespace DB class HudiMetadata final : public IDataLakeMetadata, private WithContext { public: - using ConfigurationObservePtr = StorageObjectStorage::ConfigurationObservePtr; + using ConfigurationObserverPtr = StorageObjectStorage::ConfigurationObserverPtr; static constexpr auto name = "Hudi"; - HudiMetadata( - ObjectStoragePtr object_storage_, - ConfigurationObservePtr configuration_, - ContextPtr context_); + HudiMetadata(ObjectStoragePtr object_storage_, ConfigurationObserverPtr configuration_, ContextPtr context_); Strings getDataFiles() const override; @@ -38,17 +35,14 @@ public: && data_files == hudi_metadata->data_files; } - static DataLakeMetadataPtr create( - ObjectStoragePtr object_storage, - ConfigurationObservePtr configuration, - ContextPtr local_context) + static DataLakeMetadataPtr create(ObjectStoragePtr object_storage, ConfigurationObserverPtr configuration, ContextPtr local_context) { return std::make_unique(object_storage, configuration, local_context); } private: const ObjectStoragePtr object_storage; - const ConfigurationObservePtr configuration; + const ConfigurationObserverPtr configuration; mutable Strings data_files; std::unordered_map column_name_to_physical_name; DataLakePartitionColumns partition_columns; diff --git a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp index 379b20ea636..f0a80a41d4e 100644 --- 
a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.cpp @@ -51,7 +51,7 @@ extern const int UNSUPPORTED_METHOD; IcebergMetadata::IcebergMetadata( ObjectStoragePtr object_storage_, - ConfigurationObservePtr configuration_, + ConfigurationObserverPtr configuration_, DB::ContextPtr context_, Int32 metadata_version_, Int32 format_version_, @@ -383,7 +383,7 @@ std::pair getMetadataFileAndVersion( } DataLakeMetadataPtr -IcebergMetadata::create(ObjectStoragePtr object_storage, ConfigurationObservePtr configuration, ContextPtr local_context) +IcebergMetadata::create(ObjectStoragePtr object_storage, ConfigurationObserverPtr configuration, ContextPtr local_context) { auto configuration_ptr = configuration.lock(); diff --git a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.h b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.h index 7811bcd8b4b..eb5cac591f2 100644 --- a/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.h +++ b/src/Storages/ObjectStorage/DataLakes/IcebergMetadata.h @@ -1,5 +1,7 @@ #pragma once +#include "config.h" + #if USE_AVRO /// StorageIceberg depending on Avro to parse metadata with Avro format. #include @@ -61,13 +63,13 @@ namespace DB class IcebergMetadata : public IDataLakeMetadata, private WithContext { public: - using ConfigurationObservePtr = StorageObjectStorage::ConfigurationObservePtr; + using ConfigurationObserverPtr = StorageObjectStorage::ConfigurationObserverPtr; static constexpr auto name = "Iceberg"; IcebergMetadata( ObjectStoragePtr object_storage_, - ConfigurationObservePtr configuration_, + ConfigurationObserverPtr configuration_, ContextPtr context_, Int32 metadata_version_, Int32 format_version_, @@ -92,16 +94,13 @@ public: return iceberg_metadata && getVersion() == iceberg_metadata->getVersion(); } - static DataLakeMetadataPtr create( - ObjectStoragePtr object_storage, - ConfigurationObservePtr configuration, - ContextPtr local_context); + static DataLakeMetadataPtr create(ObjectStoragePtr object_storage, ConfigurationObserverPtr configuration, ContextPtr local_context); private: size_t getVersion() const { return metadata_version; } const ObjectStoragePtr object_storage; - const ConfigurationObservePtr configuration; + const ConfigurationObserverPtr configuration; Int32 metadata_version; Int32 format_version; String manifest_list_file; diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index f24f152ecb4..a67c1628b6d 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -87,6 +87,7 @@ StorageObjectStorage::StorageObjectStorage( , distributed_processing(distributed_processing_) , log(getLogger(fmt::format("Storage{}({})", configuration->getEngineName(), table_id_.getFullTableName()))) { + configuration_->update(object_storage_, context); ColumnsDescription columns{columns_}; std::string sample_path; diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.h b/src/Storages/ObjectStorage/StorageObjectStorage.h index 21a6cdeba6f..dc461e5861d 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.h +++ b/src/Storages/ObjectStorage/StorageObjectStorage.h @@ -26,7 +26,7 @@ class StorageObjectStorage : public IStorage public: class Configuration; using ConfigurationPtr = std::shared_ptr; - using ConfigurationObservePtr = std::weak_ptr; + using ConfigurationObserverPtr = std::weak_ptr; using ObjectInfo = RelativePathWithMetadata; using 
ObjectInfoPtr = std::shared_ptr; using ObjectInfos = std::vector; diff --git a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp index 1e231a8e3e4..823556470b0 100644 --- a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp @@ -29,6 +29,7 @@ static std::shared_ptr createStorageObjectStorage( StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, context, false); + // Use format settings from global server context + settings from // the SETTINGS clause of the create query. Settings from current // session and user are ignored. From 1b6979c5cd80666ba6c5164dae23c54b762a0d58 Mon Sep 17 00:00:00 2001 From: divanik Date: Thu, 24 Oct 2024 15:28:57 +0000 Subject: [PATCH 168/353] Correct ifdefs --- .../DataLakes/DataLakeConfiguration.h | 23 ++++++++++++++++--- .../DataLakes/DeltaLakeMetadata.h | 6 +++++ .../ObjectStorage/DataLakes/HudiMetadata.cpp | 9 ++++---- 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h index 27599452a59..69968dff942 100644 --- a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h +++ b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h @@ -83,12 +83,29 @@ private: } }; +#if USE_AVRO +# if USE_AWS_S3 using StorageS3IcebergConfiguration = DataLakeConfiguration; +# endif + +# if USE_AZURE_BLOB_STORAGE using StorageAzureIcebergConfiguration = DataLakeConfiguration; -using StorageLocalIcebergConfiguration = DataLakeConfiguration; +# endif + +# if USE_HDFS using StorageHDFSIcebergConfiguration = DataLakeConfiguration; +# endif + +using StorageLocalIcebergConfiguration = DataLakeConfiguration; +#endif + +#if USE_PARQUET +# if USE_AWS_S3 using StorageS3DeltaLakeConfiguration = DataLakeConfiguration; +# endif +#endif + +#if USE_AWS_S3 using StorageS3HudiConfiguration = DataLakeConfiguration; - - +#endif } diff --git a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.h b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.h index caa637cec75..031d1fb9e96 100644 --- a/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.h +++ b/src/Storages/ObjectStorage/DataLakes/DeltaLakeMetadata.h @@ -1,5 +1,9 @@ #pragma once +#include "config.h" + +#if USE_PARQUET + #include #include #include @@ -46,3 +50,5 @@ private: }; } + +#endif diff --git a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp index 40730f6d057..77ef769ed0e 100644 --- a/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp +++ b/src/Storages/ObjectStorage/DataLakes/HudiMetadata.cpp @@ -1,11 +1,10 @@ -#include -#include #include -#include +#include +#include +#include #include #include -#include "config.h" -#include +#include namespace DB { From 8a0c6897f8c349d4a63d1330c226ffcce849df9e Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Thu, 24 Oct 2024 16:21:58 -0400 Subject: [PATCH 169/353] enable enable_job_stack_trace by default --- src/Core/Settings.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 1790697d03e..d3c993250fb 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -2830,7 +2830,7 @@ Limit on size of multipart/form-data content. 
This setting cannot be parsed from DECLARE(Bool, calculate_text_stack_trace, true, R"( Calculate text stack trace in case of exceptions during query execution. This is the default. It requires symbol lookups that may slow down fuzzing tests when a huge amount of wrong queries are executed. In normal cases, you should not disable this option. )", 0) \ - DECLARE(Bool, enable_job_stack_trace, false, R"( + DECLARE(Bool, enable_job_stack_trace, true, R"( Output stack trace of a job creator when job results in exception )", 0) \ DECLARE(Bool, allow_ddl, true, R"( From 31490438d95f514e8ff285b80345c55872b2b485 Mon Sep 17 00:00:00 2001 From: divanik Date: Fri, 25 Oct 2024 11:09:03 +0000 Subject: [PATCH 170/353] Corrected smoe ifdef issues --- .../registerStorageObjectStorage.cpp | 31 ++++++++++--------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp index 823556470b0..b0122de3bf7 100644 --- a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp @@ -201,20 +201,6 @@ void registerStorageIceberg(StorageFactory & factory) .source_access_type = AccessType::AZURE, }); #endif - factory.registerStorage( - "IcebergLocal", - [&](const StorageFactory::Arguments & args) - { - auto configuration = std::make_shared(); - StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); - - return createStorageObjectStorage(args, configuration, args.getLocalContext()); - }, - { - .supports_settings = false, - .supports_schema_inference = true, - .source_access_type = AccessType::FILE, - }); #if USE_HDFS factory.registerStorage( "IcebergHDFS", @@ -231,10 +217,26 @@ void registerStorageIceberg(StorageFactory & factory) .source_access_type = AccessType::HDFS, }); #endif + factory.registerStorage( + "IcebergLocal", + [&](const StorageFactory::Arguments & args) + { + auto configuration = std::make_shared(); + StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, args.getLocalContext(), false); + + return createStorageObjectStorage(args, configuration, args.getLocalContext()); + }, + { + .supports_settings = false, + .supports_schema_inference = true, + .source_access_type = AccessType::FILE, + }); } #endif + +#if USE_AWS_S3 #if USE_PARQUET void registerStorageDeltaLake(StorageFactory & factory) { @@ -272,4 +274,5 @@ void registerStorageHudi(StorageFactory & factory) .source_access_type = AccessType::S3, }); } +#endif } From ca040906c3bca0e283fc5df57451d4d0805336b3 Mon Sep 17 00:00:00 2001 From: divanik Date: Fri, 25 Oct 2024 13:37:12 +0000 Subject: [PATCH 171/353] Fix some ifdef issues --- .../DataLakes/DataLakeConfiguration.h | 8 +++--- .../registerStorageObjectStorage.cpp | 10 +++---- src/Storages/registerStorages.cpp | 3 +- .../TableFunctionObjectStorage.cpp | 28 +++++++++---------- 4 files changed, 24 insertions(+), 25 deletions(-) diff --git a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h index 69968dff942..866ef24aa91 100644 --- a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h +++ b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h @@ -84,15 +84,15 @@ private: }; #if USE_AVRO -# if USE_AWS_S3 +#if USE_AWS_S3 using StorageS3IcebergConfiguration = DataLakeConfiguration; # endif -# if USE_AZURE_BLOB_STORAGE +#if 
USE_AZURE_BLOB_STORAGE using StorageAzureIcebergConfiguration = DataLakeConfiguration; # endif -# if USE_HDFS +#if USE_HDFS using StorageHDFSIcebergConfiguration = DataLakeConfiguration; # endif @@ -100,7 +100,7 @@ using StorageLocalIcebergConfiguration = DataLakeConfiguration; # endif #endif diff --git a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp index b0122de3bf7..cb1826b2976 100644 --- a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp @@ -11,8 +11,6 @@ namespace DB { -#if USE_AWS_S3 || USE_AZURE_BLOB_STORAGE || USE_HDFS - namespace ErrorCodes { extern const int BAD_ARGUMENTS; @@ -65,8 +63,6 @@ static std::shared_ptr createStorageObjectStorage( partition_by); } -#endif - #if USE_AZURE_BLOB_STORAGE void registerStorageAzure(StorageFactory & factory) { @@ -236,10 +232,10 @@ void registerStorageIceberg(StorageFactory & factory) #endif -#if USE_AWS_S3 #if USE_PARQUET void registerStorageDeltaLake(StorageFactory & factory) { +#if USE_AWS_S3 factory.registerStorage( "DeltaLake", [&](const StorageFactory::Arguments & args) @@ -254,11 +250,13 @@ void registerStorageDeltaLake(StorageFactory & factory) .supports_schema_inference = true, .source_access_type = AccessType::S3, }); +#endif } #endif void registerStorageHudi(StorageFactory & factory) { +#if USE_AWS_S3 factory.registerStorage( "Hudi", [&](const StorageFactory::Arguments & args) @@ -273,6 +271,6 @@ void registerStorageHudi(StorageFactory & factory) .supports_schema_inference = true, .source_access_type = AccessType::S3, }); -} #endif } +} diff --git a/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp index cfd406ccbe2..4eb90955a6c 100644 --- a/src/Storages/registerStorages.cpp +++ b/src/Storages/registerStorages.cpp @@ -41,10 +41,11 @@ void registerStorageS3Queue(StorageFactory & factory); #if USE_PARQUET void registerStorageDeltaLake(StorageFactory & factory); #endif +#endif + #if USE_AVRO void registerStorageIceberg(StorageFactory & factory); #endif -#endif #if USE_AZURE_BLOB_STORAGE void registerStorageAzureQueue(StorageFactory & factory); diff --git a/src/TableFunctions/TableFunctionObjectStorage.cpp b/src/TableFunctions/TableFunctionObjectStorage.cpp index 509ef92e8b2..66c90b15c0b 100644 --- a/src/TableFunctions/TableFunctionObjectStorage.cpp +++ b/src/TableFunctions/TableFunctionObjectStorage.cpp @@ -228,7 +228,7 @@ template class TableFunctionObjectStorage( {.documentation = {.description = R"(The table function can be used to read the Iceberg table stored on S3 object store. 
Alias to icebergS3)", @@ -242,23 +242,23 @@ void registerTableFunctionIceberg(TableFunctionFactory & factory) .categories{"DataLake"}}, .allow_readonly = false}); -# endif -# if USE_AZURE_BLOB_STORAGE +#endif +#if USE_AZURE_BLOB_STORAGE factory.registerFunction( {.documentation = {.description = R"(The table function can be used to read the Iceberg table stored on Azure object store.)", .examples{{"icebergAzure", "SELECT * FROM icebergAzure(url, access_key_id, secret_access_key)", ""}}, .categories{"DataLake"}}, .allow_readonly = false}); -# endif -# if USE_HDFS +#endif +#if USE_HDFS factory.registerFunction( {.documentation = {.description = R"(The table function can be used to read the Iceberg table stored on HDFS virtual filesystem.)", .examples{{"icebergHDFS", "SELECT * FROM icebergHDFS(url)", ""}}, .categories{"DataLake"}}, .allow_readonly = false}); -# endif +#endif factory.registerFunction( {.documentation = {.description = R"(The table function can be used to read the Iceberg table stored locally.)", @@ -268,29 +268,31 @@ void registerTableFunctionIceberg(TableFunctionFactory & factory) } #endif -#if USE_AWS_S3 -# if USE_PARQUET +#if USE_PARQUET void registerTableFunctionDeltaLake(TableFunctionFactory & factory) { +#if USE_AWS_S3 factory.registerFunction( {.documentation = {.description = R"(The table function can be used to read the DeltaLake table stored on object store.)", .examples{{"deltaLake", "SELECT * FROM deltaLake(url, access_key_id, secret_access_key)", ""}}, .categories{"DataLake"}}, .allow_readonly = false}); +#endif } -# endif +#endif void registerTableFunctionHudi(TableFunctionFactory & factory) { +#if USE_AWS_S3 factory.registerFunction( {.documentation = {.description = R"(The table function can be used to read the Hudi table stored on object store.)", .examples{{"hudi", "SELECT * FROM hudi(url, access_key_id, secret_access_key)", ""}}, .categories{"DataLake"}}, .allow_readonly = false}); -} #endif +} void registerDataLakeTableFunctions(TableFunctionFactory & factory) { @@ -298,11 +300,9 @@ void registerDataLakeTableFunctions(TableFunctionFactory & factory) #if USE_AVRO registerTableFunctionIceberg(factory); #endif -#if USE_AWS_S3 -# if USE_PARQUET +#if USE_PARQUET registerTableFunctionDeltaLake(factory); -# endif - registerTableFunctionHudi(factory); #endif + registerTableFunctionHudi(factory); } } From a7b23292f962eada087b2b7518c231b57ca71493 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Fri, 25 Oct 2024 17:58:43 +0000 Subject: [PATCH 172/353] add staleness to sql --- src/Analyzer/QueryTreeBuilder.cpp | 2 ++ src/Analyzer/Resolve/QueryAnalyzer.cpp | 43 ++++++++++++++++++++++-- src/Analyzer/Resolve/QueryAnalyzer.h | 3 +- src/Analyzer/SortNode.cpp | 8 +++++ src/Analyzer/SortNode.h | 21 +++++++++++- src/Parsers/ASTOrderByElement.cpp | 5 +++ src/Parsers/ASTOrderByElement.h | 3 ++ src/Parsers/CommonParsers.h | 1 + src/Parsers/ExpressionElementParsers.cpp | 6 ++++ src/Planner/Planner.cpp | 3 ++ src/Planner/PlannerActionsVisitor.cpp | 3 ++ src/Planner/PlannerSorting.cpp | 24 +++++++++++-- 12 files changed, 115 insertions(+), 7 deletions(-) diff --git a/src/Analyzer/QueryTreeBuilder.cpp b/src/Analyzer/QueryTreeBuilder.cpp index 39c59d27e2c..d3c88d39213 100644 --- a/src/Analyzer/QueryTreeBuilder.cpp +++ b/src/Analyzer/QueryTreeBuilder.cpp @@ -498,6 +498,8 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express sort_node->getFillTo() = buildExpression(order_by_element.getFillTo(), context); if (order_by_element.getFillStep()) 
sort_node->getFillStep() = buildExpression(order_by_element.getFillStep(), context); + if (order_by_element.getFillStaleness()) + sort_node->getFillStaleness() = buildExpression(order_by_element.getFillStaleness(), context); list_node->getNodes().push_back(std::move(sort_node)); } diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index 381edee607d..ab29373f5fb 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -432,8 +432,13 @@ ProjectionName QueryAnalyzer::calculateWindowProjectionName(const QueryTreeNodeP return buffer.str(); } -ProjectionName QueryAnalyzer::calculateSortColumnProjectionName(const QueryTreeNodePtr & sort_column_node, const ProjectionName & sort_expression_projection_name, - const ProjectionName & fill_from_expression_projection_name, const ProjectionName & fill_to_expression_projection_name, const ProjectionName & fill_step_expression_projection_name) +ProjectionName QueryAnalyzer::calculateSortColumnProjectionName( + const QueryTreeNodePtr & sort_column_node, + const ProjectionName & sort_expression_projection_name, + const ProjectionName & fill_from_expression_projection_name, + const ProjectionName & fill_to_expression_projection_name, + const ProjectionName & fill_step_expression_projection_name, + const ProjectionName & fill_staleness_expression_projection_name) { auto & sort_node_typed = sort_column_node->as(); @@ -463,6 +468,9 @@ ProjectionName QueryAnalyzer::calculateSortColumnProjectionName(const QueryTreeN if (sort_node_typed.hasFillStep()) sort_column_projection_name_buffer << " STEP " << fill_step_expression_projection_name; + + if (sort_node_typed.hasFillStaleness()) + sort_column_projection_name_buffer << " STALENESS " << fill_staleness_expression_projection_name; } return sort_column_projection_name_buffer.str(); @@ -3993,6 +4001,7 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ ProjectionNames fill_from_expression_projection_names; ProjectionNames fill_to_expression_projection_names; ProjectionNames fill_step_expression_projection_names; + ProjectionNames fill_staleness_expression_projection_names; auto & sort_node_list_typed = sort_node_list->as(); for (auto & node : sort_node_list_typed.getNodes()) @@ -4083,11 +4092,38 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_ fill_step_expression_projection_names_size); } + if (sort_node.hasFillStaleness()) + { + fill_staleness_expression_projection_names = resolveExpressionNode(sort_node.getFillStaleness(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/); + + const auto * constant_node = sort_node.getFillStaleness()->as(); + if (!constant_node) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Sort FILL STALENESS expression must be constant with numeric or interval type. Actual {}. In scope {}", + sort_node.getFillStaleness()->formatASTForErrorMessage(), + scope.scope_node->formatASTForErrorMessage()); + + bool is_number = isColumnedAsNumber(constant_node->getResultType()); + bool is_interval = WhichDataType(constant_node->getResultType()).isInterval(); + if (!is_number && !is_interval) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Sort FILL STALENESS expression must be constant with numeric or interval type. Actual {}. 
In scope {}",
+                    sort_node.getFillStaleness()->formatASTForErrorMessage(),
+                    scope.scope_node->formatASTForErrorMessage());
+
+            size_t fill_staleness_expression_projection_names_size = fill_staleness_expression_projection_names.size();
+            if (fill_staleness_expression_projection_names_size != 1)
+                throw Exception(ErrorCodes::LOGICAL_ERROR,
+                    "Sort FILL STALENESS expression expected 1 projection name. Actual {}",
+                    fill_staleness_expression_projection_names_size);
+        }
+
         auto sort_column_projection_name = calculateSortColumnProjectionName(node,
             sort_expression_projection_names[0],
             fill_from_expression_projection_names.empty() ? "" : fill_from_expression_projection_names.front(),
             fill_to_expression_projection_names.empty() ? "" : fill_to_expression_projection_names.front(),
-            fill_step_expression_projection_names.empty() ? "" : fill_step_expression_projection_names.front());
+            fill_step_expression_projection_names.empty() ? "" : fill_step_expression_projection_names.front(),
+            fill_staleness_expression_projection_names.empty() ? "" : fill_staleness_expression_projection_names.front());

         result_projection_names.push_back(std::move(sort_column_projection_name));

@@ -4095,6 +4131,7 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
         fill_from_expression_projection_names.clear();
         fill_to_expression_projection_names.clear();
         fill_step_expression_projection_names.clear();
+        fill_staleness_expression_projection_names.clear();
     }

     return result_projection_names;
diff --git a/src/Analyzer/Resolve/QueryAnalyzer.h b/src/Analyzer/Resolve/QueryAnalyzer.h
index 0d4309843e6..d24bede561e 100644
--- a/src/Analyzer/Resolve/QueryAnalyzer.h
+++ b/src/Analyzer/Resolve/QueryAnalyzer.h
@@ -140,7 +140,8 @@ private:
         const ProjectionName & sort_expression_projection_name,
         const ProjectionName & fill_from_expression_projection_name,
         const ProjectionName & fill_to_expression_projection_name,
-        const ProjectionName & fill_step_expression_projection_name);
+        const ProjectionName & fill_step_expression_projection_name,
+        const ProjectionName & fill_staleness_expression_projection_name);

     QueryTreeNodePtr tryGetLambdaFromSQLUserDefinedFunctions(const std::string & function_name, ContextPtr context);

diff --git a/src/Analyzer/SortNode.cpp b/src/Analyzer/SortNode.cpp
index e891046626a..42c010e4784 100644
--- a/src/Analyzer/SortNode.cpp
+++ b/src/Analyzer/SortNode.cpp
@@ -69,6 +69,12 @@ void SortNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, si
         buffer << '\n' << std::string(indent + 2, ' ') << "FILL STEP\n";
         getFillStep()->dumpTreeImpl(buffer, format_state, indent + 4);
     }
+
+    if (hasFillStaleness())
+    {
+        buffer << '\n' << std::string(indent + 2, ' ') << "FILL STALENESS\n";
+        getFillStaleness()->dumpTreeImpl(buffer, format_state, indent + 4);
+    }
 }

 bool SortNode::isEqualImpl(const IQueryTreeNode & rhs, CompareOptions) const
@@ -132,6 +138,8 @@ ASTPtr SortNode::toASTImpl(const ConvertToASTOptions & options) const
         result->setFillTo(getFillTo()->toAST(options));
     if (hasFillStep())
         result->setFillStep(getFillStep()->toAST(options));
+    if (hasFillStaleness())
+        result->setFillStaleness(getFillStaleness()->toAST(options));

     return result;
 }
diff --git a/src/Analyzer/SortNode.h b/src/Analyzer/SortNode.h
index 0ebdde61912..d9086dc9ed7 100644
--- a/src/Analyzer/SortNode.h
+++ b/src/Analyzer/SortNode.h
@@ -105,6 +105,24 @@ public:
         return children[fill_step_child_index];
     }

+    /// Returns true if sort node has fill staleness, false otherwise
+    bool hasFillStaleness() const
+    {
+        return children[fill_staleness_child_index] != nullptr;
+    }
+
+    /// Get fill staleness
+    const QueryTreeNodePtr & getFillStaleness() const
+    {
+        return children[fill_staleness_child_index];
+    }
+
+    /// Get fill staleness
+    QueryTreeNodePtr & getFillStaleness()
+    {
+        return children[fill_staleness_child_index];
+    }
+
     /// Get collator
     const std::shared_ptr<Collator> & getCollator() const
     {
@@ -144,7 +162,8 @@ private:
     static constexpr size_t fill_from_child_index = 1;
     static constexpr size_t fill_to_child_index = 2;
     static constexpr size_t fill_step_child_index = 3;
-    static constexpr size_t children_size = fill_step_child_index + 1;
+    static constexpr size_t fill_staleness_child_index = 4;
+    static constexpr size_t children_size = fill_staleness_child_index + 1;

     SortDirection sort_direction = SortDirection::ASCENDING;
     std::optional<SortDirection> nulls_sort_direction;
diff --git a/src/Parsers/ASTOrderByElement.cpp b/src/Parsers/ASTOrderByElement.cpp
index 09193a8b5e1..d87c296d398 100644
--- a/src/Parsers/ASTOrderByElement.cpp
+++ b/src/Parsers/ASTOrderByElement.cpp
@@ -54,6 +54,11 @@ void ASTOrderByElement::formatImpl(const FormatSettings & settings, FormatState
         settings.ostr << (settings.hilite ? hilite_keyword : "") << " STEP " << (settings.hilite ? hilite_none : "");
         fill_step->formatImpl(settings, state, frame);
     }
+    if (auto fill_staleness = getFillStaleness())
+    {
+        settings.ostr << (settings.hilite ? hilite_keyword : "") << " STALENESS " << (settings.hilite ? hilite_none : "");
+        fill_staleness->formatImpl(settings, state, frame);
+    }
 }

 }
diff --git a/src/Parsers/ASTOrderByElement.h b/src/Parsers/ASTOrderByElement.h
index 6edf84d7bde..4dc35dac217 100644
--- a/src/Parsers/ASTOrderByElement.h
+++ b/src/Parsers/ASTOrderByElement.h
@@ -18,6 +18,7 @@ private:
         FILL_FROM,
         FILL_TO,
         FILL_STEP,
+        FILL_STALENESS,
     };

 public:
@@ -32,12 +33,14 @@ public:
     void setFillFrom(ASTPtr node) { setChild(Child::FILL_FROM, node); }
     void setFillTo(ASTPtr node) { setChild(Child::FILL_TO, node); }
     void setFillStep(ASTPtr node) { setChild(Child::FILL_STEP, node); }
+    void setFillStaleness(ASTPtr node) { setChild(Child::FILL_STALENESS, node); }

     /** Collation for locale-specific string comparison. If empty, then sorting done by bytes.
*/ ASTPtr getCollation() const { return getChild(Child::COLLATION); } ASTPtr getFillFrom() const { return getChild(Child::FILL_FROM); } ASTPtr getFillTo() const { return getChild(Child::FILL_TO); } ASTPtr getFillStep() const { return getChild(Child::FILL_STEP); } + ASTPtr getFillStaleness() const { return getChild(Child::FILL_STALENESS); } String getID(char) const override { return "OrderByElement"; } diff --git a/src/Parsers/CommonParsers.h b/src/Parsers/CommonParsers.h index 8ea9fb12b86..c10e4879214 100644 --- a/src/Parsers/CommonParsers.h +++ b/src/Parsers/CommonParsers.h @@ -541,6 +541,7 @@ namespace DB MR_MACROS(YY, "YY") \ MR_MACROS(YYYY, "YYYY") \ MR_MACROS(ZKPATH, "ZKPATH") \ + MR_MACROS(STALENESS, "STALENESS") \ /// The list of keywords where underscore is intentional #define APPLY_FOR_PARSER_KEYWORDS_WITH_UNDERSCORES(MR_MACROS) \ diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 31efcb16f02..ad062d27a37 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -2178,6 +2178,7 @@ bool ParserOrderByElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expect ParserKeyword from(Keyword::FROM); ParserKeyword to(Keyword::TO); ParserKeyword step(Keyword::STEP); + ParserKeyword staleness(Keyword::STALENESS); ParserStringLiteral collate_locale_parser; ParserExpressionWithOptionalAlias exp_parser(false); @@ -2219,6 +2220,7 @@ bool ParserOrderByElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expect ASTPtr fill_from; ASTPtr fill_to; ASTPtr fill_step; + ASTPtr fill_staleness; if (with_fill.ignore(pos, expected)) { has_with_fill = true; @@ -2230,6 +2232,9 @@ bool ParserOrderByElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expect if (step.ignore(pos, expected) && !exp_parser.parse(pos, fill_step, expected)) return false; + + if (staleness.ignore(pos, expected) && !exp_parser.parse(pos, fill_staleness, expected)) + return false; } auto elem = std::make_shared(); @@ -2244,6 +2249,7 @@ bool ParserOrderByElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expect elem->setFillFrom(fill_from); elem->setFillTo(fill_to); elem->setFillStep(fill_step); + elem->setFillStaleness(fill_staleness); node = elem; diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index 8d3c75fdabb..f1c752aecd0 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -847,6 +847,9 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, interpolate_description = std::make_shared(std::move(interpolate_actions_dag), empty_aliases); } + if (interpolate_description) + LOG_DEBUG(getLogger("addWithFillStepIfNeeded"), "InterpolateDescription: {}", interpolate_description->actions.dumpDAG()); + const auto & query_context = planner_context->getQueryContext(); const Settings & settings = query_context->getSettingsRef(); auto filling_step = std::make_unique( diff --git a/src/Planner/PlannerActionsVisitor.cpp b/src/Planner/PlannerActionsVisitor.cpp index aea304e0ecc..aa233109fa9 100644 --- a/src/Planner/PlannerActionsVisitor.cpp +++ b/src/Planner/PlannerActionsVisitor.cpp @@ -391,6 +391,9 @@ public: if (sort_node.hasFillStep()) buffer << " STEP " << calculateActionNodeName(sort_node.getFillStep()); + + if (sort_node.hasFillStaleness()) + buffer << " STALENESS " << calculateActionNodeName(sort_node.getFillStaleness()); } if (i + 1 != order_by_nodes_size) diff --git a/src/Planner/PlannerSorting.cpp b/src/Planner/PlannerSorting.cpp index af51afdef13..0a33e2f0828 100644 --- 
a/src/Planner/PlannerSorting.cpp +++ b/src/Planner/PlannerSorting.cpp @@ -43,7 +43,7 @@ std::pair extractWithFillValue(const QueryTreeNodePtr & node return result; } -std::pair> extractWithFillStepValue(const QueryTreeNodePtr & node) +std::pair> extractWithFillValueWithIntervalKind(const QueryTreeNodePtr & node) { const auto & constant_node = node->as(); @@ -77,7 +77,7 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) if (sort_node.hasFillStep()) { - auto extract_result = extractWithFillStepValue(sort_node.getFillStep()); + auto extract_result = extractWithFillValueWithIntervalKind(sort_node.getFillStep()); fill_column_description.fill_step = std::move(extract_result.first); fill_column_description.step_kind = std::move(extract_result.second); } @@ -87,10 +87,30 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) fill_column_description.fill_step = Field(direction_value); } + if (sort_node.getFillStaleness()) + { + auto extract_result = extractWithFillValueWithIntervalKind(sort_node.getFillStaleness()); + fill_column_description.fill_staleness = std::move(extract_result.first); + fill_column_description.staleness_kind = std::move(extract_result.second); + } + + /////////////////////////////////// + if (applyVisitor(FieldVisitorAccurateEquals(), fill_column_description.fill_step, Field{0})) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL STEP value cannot be zero"); + if (sort_node.hasFillStaleness()) + { + if (sort_node.hasFillFrom()) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STALENESS cannot be used together with WITH FILL FROM"); + + if (applyVisitor(FieldVisitorAccurateLessOrEqual(), fill_column_description.fill_staleness, Field{0})) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STALENESS value cannot be less or equal zero"); + } + if (sort_node.getSortDirection() == SortDirection::ASCENDING) { if (applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_step, Field{0})) From 8807fe3bb5ff125e3a907354757552957e52b646 Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Mon, 28 Oct 2024 00:57:13 +0100 Subject: [PATCH 173/353] Better log messages --- src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 8b3c7bdf3fb..c0464946752 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -254,7 +254,8 @@ MergeTreeDataMergerMutator::PartitionIdsHint MergeTreeDataMergerMutator::getPart if (status == SelectPartsDecision::SELECTED) res.insert(all_partition_ids[i]); else - LOG_TEST(log, "Nothing to merge in partition {}: {}", all_partition_ids[i], out_disable_reason.text); + LOG_TEST(log, "Nothing to merge in partition {} with max_total_size_to_merge = {} (looked up {} ranges): {}", + all_partition_ids[i], ReadableSize(max_total_size_to_merge), ranges_per_partition[i].size(), out_disable_reason.text); } String best_partition_id_to_optimize = getBestPartitionToOptimizeEntire(info.partitions_info); From 07508cb3819a89fec7e63604e2de64ff1bd4904a Mon Sep 17 00:00:00 2001 From: divanik Date: Mon, 28 Oct 2024 11:47:01 +0000 Subject: [PATCH 174/353] Handle some problems with tests --- src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp | 3 +-- src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h | 3 
+++ src/Storages/ObjectStorage/StorageObjectStorage.cpp | 3 ++- src/Storages/ObjectStorage/registerStorageObjectStorage.cpp | 3 ++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index cd36429d0a2..4e6d0d985dd 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -500,8 +500,7 @@ void S3ObjectStorage::applyNewSettings( } auto current_settings = s3_settings.get(); - if (options.allow_client_change - && (current_settings->auth_settings.hasUpdates(modified_settings->auth_settings) || for_disk_s3)) + if (options.allow_client_change && (current_settings->auth_settings.hasUpdates(modified_settings->auth_settings) || for_disk_s3)) { auto new_client = getClient(uri, *modified_settings, context, for_disk_s3); client.set(std::move(new_client)); diff --git a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h index 866ef24aa91..18ff6d93c46 100644 --- a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h +++ b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h @@ -30,10 +30,13 @@ public: bool isDataLakeConfiguration() const override { return true; } + bool isStaticConfiguration() const override { return false; } + std::string getEngineName() const override { return DataLakeMetadata::name; } void update(ObjectStoragePtr object_storage, ContextPtr local_context) override { + BaseStorageConfiguration::update(object_storage, local_context); auto new_metadata = DataLakeMetadata::create(object_storage, weak_from_this(), local_context); if (current_metadata && *current_metadata == *new_metadata) return; diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index a67c1628b6d..ddc6276a8a1 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -87,8 +87,9 @@ StorageObjectStorage::StorageObjectStorage( , distributed_processing(distributed_processing_) , log(getLogger(fmt::format("Storage{}({})", configuration->getEngineName(), table_id_.getFullTableName()))) { - configuration_->update(object_storage_, context); ColumnsDescription columns{columns_}; + LOG_DEBUG(&Poco::Logger::get("StorageObjectStorage Creation"), "Columns size {}", columns.size()); + configuration->update(object_storage, context); std::string sample_path; resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, format_settings, sample_path, context); diff --git a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp index cb1826b2976..9a525b4e21a 100644 --- a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp @@ -27,7 +27,6 @@ static std::shared_ptr createStorageObjectStorage( StorageObjectStorage::Configuration::initialize(*configuration, args.engine_args, context, false); - // Use format settings from global server context + settings from // the SETTINGS clause of the create query. Settings from current // session and user are ignored. 
@@ -251,6 +250,7 @@ void registerStorageDeltaLake(StorageFactory & factory) .source_access_type = AccessType::S3, }); #endif + UNUSED(factory); } #endif @@ -272,5 +272,6 @@ void registerStorageHudi(StorageFactory & factory) .source_access_type = AccessType::S3, }); #endif + UNUSED(factory); } } From 7ff2d5c98114d5d364e33cc5d0db88f5a1a06b8e Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 14:01:37 +0000 Subject: [PATCH 175/353] add baseline --- src/Common/FieldVisitorMul.cpp | 50 ++++++ src/Common/FieldVisitorMul.h | 53 ++++++ src/Core/Field.h | 8 + src/Core/SortDescription.h | 5 +- src/Interpreters/FillingRow.cpp | 94 +++++++++-- src/Interpreters/FillingRow.h | 9 +- .../Transforms/FillingTransform.cpp | 159 +++++++++++------- 7 files changed, 306 insertions(+), 72 deletions(-) create mode 100644 src/Common/FieldVisitorMul.cpp create mode 100644 src/Common/FieldVisitorMul.h diff --git a/src/Common/FieldVisitorMul.cpp b/src/Common/FieldVisitorMul.cpp new file mode 100644 index 00000000000..36c32c40c05 --- /dev/null +++ b/src/Common/FieldVisitorMul.cpp @@ -0,0 +1,50 @@ +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + + +FieldVisitorMul::FieldVisitorMul(const Field & rhs_) : rhs(rhs_) {} + +// We can add all ints as unsigned regardless of their actual signedness. +bool FieldVisitorMul::operator() (Int64 & x) const { return this->operator()(reinterpret_cast(x)); } +bool FieldVisitorMul::operator() (UInt64 & x) const +{ + x *= applyVisitor(FieldVisitorConvertToNumber(), rhs); + return x != 0; +} + +bool FieldVisitorMul::operator() (Float64 & x) const { + x *= rhs.safeGet(); + return x != 0; +} + +bool FieldVisitorMul::operator() (Null &) const +{ + /// Do not add anything + return false; +} + +bool FieldVisitorMul::operator() (String &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Strings"); } +bool FieldVisitorMul::operator() (Array &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Arrays"); } +bool FieldVisitorMul::operator() (Tuple &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Tuples"); } +bool FieldVisitorMul::operator() (Map &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Maps"); } +bool FieldVisitorMul::operator() (Object &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Objects"); } +bool FieldVisitorMul::operator() (UUID &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply UUIDs"); } +bool FieldVisitorMul::operator() (IPv4 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv4s"); } +bool FieldVisitorMul::operator() (IPv6 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv6s"); } +bool FieldVisitorMul::operator() (CustomType & x) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply custom type {}", x.getTypeName()); } + +bool FieldVisitorMul::operator() (AggregateFunctionStateData &) const +{ + throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply AggregateFunctionStates"); +} + +bool FieldVisitorMul::operator() (bool &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Bools"); } + +} diff --git a/src/Common/FieldVisitorMul.h b/src/Common/FieldVisitorMul.h new file mode 100644 index 00000000000..5bce41f1e71 --- /dev/null +++ b/src/Common/FieldVisitorMul.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +/** Implements `*=` operation. + * Returns false if the result is zero. 
+ */ +class FieldVisitorMul : public StaticVisitor +{ +private: + const Field & rhs; +public: + explicit FieldVisitorMul(const Field & rhs_); + + // We can add all ints as unsigned regardless of their actual signedness. + bool operator() (Int64 & x) const; + bool operator() (UInt64 & x) const; + bool operator() (Float64 & x) const; + bool operator() (Null &) const; + bool operator() (String &) const; + bool operator() (Array &) const; + bool operator() (Tuple &) const; + bool operator() (Map &) const; + bool operator() (Object &) const; + bool operator() (UUID &) const; + bool operator() (IPv4 &) const; + bool operator() (IPv6 &) const; + bool operator() (AggregateFunctionStateData &) const; + bool operator() (CustomType &) const; + bool operator() (bool &) const; + + template + bool operator() (DecimalField & x) const + { + x *= rhs.safeGet>(); + return x.getValue() != T(0); + } + + template + requires is_big_int_v + bool operator() (T & x) const + { + x *= applyVisitor(FieldVisitorConvertToNumber(), rhs); + return x != T(0); + } +}; + +} diff --git a/src/Core/Field.h b/src/Core/Field.h index 7b916d30646..47df5c2907e 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -185,6 +185,14 @@ public: return *this; } + const DecimalField & operator *= (const DecimalField & r) + { + if (scale != r.getScale()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Multiply different decimal fields"); + dec *= r.getValue(); + return *this; + } + const DecimalField & operator -= (const DecimalField & r) { if (scale != r.getScale()) diff --git a/src/Core/SortDescription.h b/src/Core/SortDescription.h index 5c6f3e3150a..7a7c92f3b53 100644 --- a/src/Core/SortDescription.h +++ b/src/Core/SortDescription.h @@ -33,9 +33,12 @@ struct FillColumnDescription DataTypePtr fill_to_type; Field fill_step; /// Default = +1 or -1 according to direction std::optional step_kind; + Field fill_staleness; /// Default = Null - should not be considered + std::optional staleness_kind; - using StepFunction = std::function; + using StepFunction = std::function; StepFunction step_func; + StepFunction staleness_step_func; }; /// Description of the sorting rule by one column. 
diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 21b5b04bca3..1d3eae03ddd 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -28,6 +28,7 @@ FillingRow::FillingRow(const SortDescription & sort_description_) : sort_description(sort_description_) { row.resize(sort_description.size()); + staleness_base_row.resize(sort_description.size()); } bool FillingRow::operator<(const FillingRow & other) const @@ -63,7 +64,53 @@ bool FillingRow::isNull() const return true; } -std::pair FillingRow::next(const FillingRow & to_row) +std::optional FillingRow::doJump(const FillColumnDescription& descr, size_t column_ind) +{ + Field next_value = row[column_ind]; + descr.step_func(next_value, 1); + + if (!descr.fill_to.isNull() && less(descr.fill_to, next_value, getDirection(column_ind))) + return std::nullopt; + + if (!descr.fill_staleness.isNull()) { + Field staleness_border = staleness_base_row[column_ind]; + descr.staleness_step_func(staleness_border, 1); + + if (less(next_value, staleness_border, getDirection(column_ind))) + return next_value; + else + return std::nullopt; + } + + return next_value; +} + +std::optional FillingRow::doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to) +{ + Field shifted_value = row[column_ind]; + + if (less(to, shifted_value, getDirection(column_ind))) + return std::nullopt; + + for (int32_t step_len = 1, step_no = 0; step_no < 100; ++step_no) { + Field next_value = shifted_value; + descr.step_func(next_value, step_len); + + if (less(next_value, to, getDirection(0))) + { + shifted_value = std::move(next_value); + step_len *= 2; + } + else + { + step_len /= 2; + } + } + + return shifted_value; +} + +std::pair FillingRow::next(const FillingRow & to_row, bool long_jump) { const size_t row_size = size(); size_t pos = 0; @@ -85,23 +132,43 @@ std::pair FillingRow::next(const FillingRow & to_row) if (fill_column_desc.fill_to.isNull() || row[i].isNull()) continue; - Field next_value = row[i]; - fill_column_desc.step_func(next_value); - if (less(next_value, fill_column_desc.fill_to, getDirection(i))) + auto next_value = doJump(fill_column_desc, i); + if (next_value.has_value() && !equals(next_value.value(), fill_column_desc.fill_to)) { - row[i] = next_value; + row[i] = std::move(next_value.value()); initFromDefaults(i + 1); return {true, true}; } } - auto next_value = row[pos]; - getFillDescription(pos).step_func(next_value); + auto & fill_column_desc = getFillDescription(pos); + std::optional next_value; - if (less(to_row.row[pos], next_value, getDirection(pos)) || equals(next_value, getFillDescription(pos).fill_to)) - return {false, false}; + if (long_jump) + { + next_value = doLongJump(fill_column_desc, pos, to_row[pos]); - row[pos] = next_value; + if (!next_value.has_value()) + return {false, false}; + + Field calibration_jump_value = next_value.value(); + fill_column_desc.step_func(calibration_jump_value, 1); + + if (equals(calibration_jump_value, to_row[pos])) + next_value = calibration_jump_value; + + if (!next_value.has_value() || less(to_row.row[pos], next_value.value(), getDirection(pos)) || equals(next_value.value(), getFillDescription(pos).fill_to)) + return {false, false}; + } + else + { + next_value = doJump(fill_column_desc, pos); + + if (!next_value.has_value() || less(to_row.row[pos], next_value.value(), getDirection(pos)) || equals(next_value.value(), getFillDescription(pos).fill_to)) + return {false, false}; + } + + row[pos] = std::move(next_value.value()); if 
(equals(row[pos], to_row.row[pos])) { bool is_less = false; @@ -128,6 +195,13 @@ void FillingRow::initFromDefaults(size_t from_pos) row[i] = getFillDescription(i).fill_from; } +void FillingRow::initStalenessRow(const Columns& base_row, size_t row_ind) +{ + for (size_t i = 0; i < size(); ++i) { + staleness_base_row[i] = (*base_row[i])[row_ind]; + } +} + String FillingRow::dump() const { WriteBufferFromOwnString out; diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index 004b417542c..14b6034ce35 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -1,6 +1,6 @@ #pragma once -#include +#include namespace DB { @@ -15,6 +15,9 @@ bool equals(const Field & lhs, const Field & rhs); */ class FillingRow { + std::optional doJump(const FillColumnDescription & descr, size_t column_ind); + std::optional doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to); + public: explicit FillingRow(const SortDescription & sort_description); @@ -22,9 +25,10 @@ public: /// Return pair of boolean /// apply - true if filling values should be inserted into result set /// value_changed - true if filling row value was changed - std::pair next(const FillingRow & to_row); + std::pair next(const FillingRow & to_row, bool long_jump); void initFromDefaults(size_t from_pos = 0); + void initStalenessRow(const Columns& base_row, size_t row_ind); Field & operator[](size_t index) { return row[index]; } const Field & operator[](size_t index) const { return row[index]; } @@ -42,6 +46,7 @@ public: private: Row row; + Row staleness_base_row; SortDescription sort_description; }; diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 95f4a674ebb..1d68f73e8c2 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -7,15 +7,17 @@ #include #include #include +#include #include #include #include +#include namespace DB { -constexpr bool debug_logging_enabled = false; +constexpr bool debug_logging_enabled = true; template void logDebug(String key, const T & value, const char * separator = " : ") @@ -60,15 +62,78 @@ static FillColumnDescription::StepFunction getStepFunction( { #define DECLARE_CASE(NAME) \ case IntervalKind::Kind::NAME: \ - return [step, scale, &date_lut](Field & field) { \ + return [step, scale, &date_lut](Field & field, Int32 jumps_count) { \ field = Add##NAME##sImpl::execute(static_cast(\ - field.safeGet()), static_cast(step), date_lut, utc_time_zone, scale); }; + field.safeGet()), static_cast(step) * jumps_count, date_lut, utc_time_zone, scale); }; FOR_EACH_INTERVAL_KIND(DECLARE_CASE) #undef DECLARE_CASE } } +static FillColumnDescription::StepFunction getStepFunction(const Field & step, const std::optional & step_kind, const DataTypePtr & type) +{ + WhichDataType which(type); + + if (step_kind) + { + if (which.isDate() || which.isDate32()) + { + Int64 avg_seconds = step.safeGet() * step_kind->toAvgSeconds(); + if (std::abs(avg_seconds) < 86400) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "Value of step is to low ({} seconds). 
Must be >= 1 day", std::abs(avg_seconds)); + } + + if (which.isDate()) + return getStepFunction(step_kind.value(), step.safeGet(), DateLUT::instance()); + else if (which.isDate32()) + return getStepFunction(step_kind.value(), step.safeGet(), DateLUT::instance()); + else if (const auto * date_time = checkAndGetDataType(type.get())) + return getStepFunction(step_kind.value(), step.safeGet(), date_time->getTimeZone()); + else if (const auto * date_time64 = checkAndGetDataType(type.get())) + { + const auto & step_dec = step.safeGet &>(); + Int64 converted_step = DecimalUtils::convertTo(step_dec.getValue(), step_dec.getScale()); + static const DateLUTImpl & utc_time_zone = DateLUT::instance("UTC"); + + switch (step_kind.value()) // NOLINT(bugprone-switch-missing-default-case) + { +#define DECLARE_CASE(NAME) \ + case IntervalKind::Kind::NAME: \ + return [converted_step, &time_zone = date_time64->getTimeZone()](Field & field, Int32 jumps_count) \ + { \ + auto field_decimal = field.safeGet>(); \ + auto res = Add##NAME##sImpl::execute(field_decimal.getValue(), converted_step * jumps_count, time_zone, utc_time_zone, field_decimal.getScale()); \ + field = DecimalField(res, field_decimal.getScale()); \ + }; \ + break; + + FOR_EACH_INTERVAL_KIND(DECLARE_CASE) +#undef DECLARE_CASE + } + } + else + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "STEP of Interval type can be used only with Date/DateTime types, but got {}", type->getName()); + } + else + { + return [step](Field & field, Int32 jumps_count) + { + auto shifted_step = step; + if (jumps_count != 1) + applyVisitor(FieldVisitorMul(jumps_count), shifted_step); + + logDebug("field", field.dump()); + logDebug("step", step.dump()); + logDebug("shifted field", shifted_step.dump()); + + applyVisitor(FieldVisitorSum(shifted_step), field); + }; + } +} + static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & type) { auto max_type = Field::Types::Null; @@ -125,7 +190,8 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & if (descr.fill_from.getType() > max_type || descr.fill_to.getType() > max_type - || descr.fill_step.getType() > max_type) + || descr.fill_step.getType() > max_type + || descr.fill_staleness.getType() > max_type) return false; if (!descr.fill_from.isNull()) @@ -134,56 +200,11 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & descr.fill_to = convertFieldToTypeOrThrow(descr.fill_to, *to_type); if (!descr.fill_step.isNull()) descr.fill_step = convertFieldToTypeOrThrow(descr.fill_step, *to_type); + if (!descr.fill_staleness.isNull()) + descr.fill_staleness = convertFieldToTypeOrThrow(descr.fill_staleness, *to_type); - if (descr.step_kind) - { - if (which.isDate() || which.isDate32()) - { - Int64 avg_seconds = descr.fill_step.safeGet() * descr.step_kind->toAvgSeconds(); - if (std::abs(avg_seconds) < 86400) - throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, - "Value of step is to low ({} seconds). 
Must be >= 1 day", std::abs(avg_seconds)); - } - - if (which.isDate()) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), DateLUT::instance()); - else if (which.isDate32()) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), DateLUT::instance()); - else if (const auto * date_time = checkAndGetDataType(type.get())) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), date_time->getTimeZone()); - else if (const auto * date_time64 = checkAndGetDataType(type.get())) - { - const auto & step_dec = descr.fill_step.safeGet &>(); - Int64 step = DecimalUtils::convertTo(step_dec.getValue(), step_dec.getScale()); - static const DateLUTImpl & utc_time_zone = DateLUT::instance("UTC"); - - switch (*descr.step_kind) // NOLINT(bugprone-switch-missing-default-case) - { -#define DECLARE_CASE(NAME) \ - case IntervalKind::Kind::NAME: \ - descr.step_func = [step, &time_zone = date_time64->getTimeZone()](Field & field) \ - { \ - auto field_decimal = field.safeGet>(); \ - auto res = Add##NAME##sImpl::execute(field_decimal.getValue(), step, time_zone, utc_time_zone, field_decimal.getScale()); \ - field = DecimalField(res, field_decimal.getScale()); \ - }; \ - break; - - FOR_EACH_INTERVAL_KIND(DECLARE_CASE) -#undef DECLARE_CASE - } - } - else - throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, - "STEP of Interval type can be used only with Date/DateTime types, but got {}", type->getName()); - } - else - { - descr.step_func = [step = descr.fill_step](Field & field) - { - applyVisitor(FieldVisitorSum(step), field); - }; - } + descr.step_func = getStepFunction(descr.fill_step, descr.step_kind, type); + descr.staleness_step_func = getStepFunction(descr.fill_staleness, descr.staleness_kind, type); return true; } @@ -482,8 +503,8 @@ bool FillingTransform::generateSuffixIfNeeded( MutableColumnRawPtrs res_sort_prefix_columns, MutableColumnRawPtrs res_other_columns) { - logDebug("generateSuffixIfNeeded() filling_row", filling_row); - logDebug("generateSuffixIfNeeded() next_row", next_row); + logDebug("generateSuffixIfNeeded filling_row", filling_row); + logDebug("generateSuffixIfNeeded next_row", next_row); /// Determines if we should insert filling row before start generating next rows bool should_insert_first = (next_row < filling_row && !filling_row_inserted) || next_row.isNull(); @@ -492,11 +513,11 @@ bool FillingTransform::generateSuffixIfNeeded( for (size_t i = 0, size = filling_row.size(); i < size; ++i) next_row[i] = filling_row.getFillDescription(i).fill_to; - logDebug("generateSuffixIfNeeded() next_row updated", next_row); + logDebug("generateSuffixIfNeeded next_row updated", next_row); if (filling_row >= next_row) { - logDebug("generateSuffixIfNeeded()", "no need to generate suffix"); + logDebug("generateSuffixIfNeeded", "no need to generate suffix"); return false; } @@ -516,7 +537,7 @@ bool FillingTransform::generateSuffixIfNeeded( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row); + const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/false); filling_row_changed = changed; if (!apply) break; @@ -593,6 +614,9 @@ void FillingTransform::transformRange( const auto current_value = (*input_fill_columns[i])[range_begin]; const auto & fill_from = filling_row.getFillDescription(i).fill_from; + logDebug("current value", current_value.dump()); + logDebug("fill from", fill_from.dump()); + if (!fill_from.isNull() && !equals(current_value, fill_from)) { 
filling_row.initFromDefaults(i); @@ -609,6 +633,9 @@ void FillingTransform::transformRange( } } + /// Init staleness first interval + filling_row.initStalenessRow(input_fill_columns, range_begin); + for (size_t row_ind = range_begin; row_ind < range_end; ++row_ind) { logDebug("row", row_ind); @@ -623,6 +650,9 @@ void FillingTransform::transformRange( const auto current_value = (*input_fill_columns[i])[row_ind]; const auto & fill_to = filling_row.getFillDescription(i).fill_to; + logDebug("current value", current_value.dump()); + logDebug("fill to", fill_to.dump()); + if (fill_to.isNull() || less(current_value, fill_to, filling_row.getDirection(i))) next_row[i] = current_value; else @@ -643,7 +673,7 @@ void FillingTransform::transformRange( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row); + const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/false); filling_row_changed = changed; if (!apply) break; @@ -652,6 +682,14 @@ void FillingTransform::transformRange( insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); } + + const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/true); + logDebug("apply", apply); + logDebug("changed", changed); + + if (changed) + filling_row_changed = true; + /// new valid filling row was generated but not inserted, will use it during suffix generation if (filling_row_changed) filling_row_inserted = false; @@ -662,6 +700,9 @@ void FillingTransform::transformRange( copyRowFromColumns(res_interpolate_columns, input_interpolate_columns, row_ind); copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); copyRowFromColumns(res_other_columns, input_other_columns, row_ind); + + /// Init next staleness interval with current row, because we have already made the long jump to it + filling_row.initStalenessRow(input_fill_columns, row_ind); } /// save sort prefix of last row in the range, it's used to generate suffix From 8f9d577c453573d82a529186fde60697d509e6f2 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy <99031427+yakov-olkhovskiy@users.noreply.github.com> Date: Mon, 28 Oct 2024 10:12:59 -0400 Subject: [PATCH 176/353] add enable_job_stack_trace to change history --- src/Core/SettingsChangesHistory.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index d958d091975..02601f12d56 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -68,6 +68,7 @@ static std::initializer_list Date: Mon, 28 Oct 2024 15:13:33 +0000 Subject: [PATCH 177/353] change mul to scale --- src/Common/FieldVisitorMul.cpp | 50 ----------------- src/Common/FieldVisitorMul.h | 53 ------------------- src/Common/FieldVisitorScale.cpp | 30 +++++++++++ src/Common/FieldVisitorScale.h | 46 ++++++++++++++++ .../Transforms/FillingTransform.cpp | 4 +- 5 files changed, 78 insertions(+), 105 deletions(-) delete mode 100644 src/Common/FieldVisitorMul.cpp delete mode 100644 src/Common/FieldVisitorMul.h create mode 100644 src/Common/FieldVisitorScale.cpp create mode 100644 src/Common/FieldVisitorScale.h diff --git a/src/Common/FieldVisitorMul.cpp b/src/Common/FieldVisitorMul.cpp deleted file mode 100644 index 36c32c40c05..00000000000 --- a/src/Common/FieldVisitorMul.cpp +++ /dev/null @@ -1,50 +0,0 @@ -#include - -namespace DB -{ - -namespace ErrorCodes -{ - 
extern const int LOGICAL_ERROR; -} - - -FieldVisitorMul::FieldVisitorMul(const Field & rhs_) : rhs(rhs_) {} - -// We can add all ints as unsigned regardless of their actual signedness. -bool FieldVisitorMul::operator() (Int64 & x) const { return this->operator()(reinterpret_cast(x)); } -bool FieldVisitorMul::operator() (UInt64 & x) const -{ - x *= applyVisitor(FieldVisitorConvertToNumber(), rhs); - return x != 0; -} - -bool FieldVisitorMul::operator() (Float64 & x) const { - x *= rhs.safeGet(); - return x != 0; -} - -bool FieldVisitorMul::operator() (Null &) const -{ - /// Do not add anything - return false; -} - -bool FieldVisitorMul::operator() (String &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Strings"); } -bool FieldVisitorMul::operator() (Array &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Arrays"); } -bool FieldVisitorMul::operator() (Tuple &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Tuples"); } -bool FieldVisitorMul::operator() (Map &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Maps"); } -bool FieldVisitorMul::operator() (Object &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Objects"); } -bool FieldVisitorMul::operator() (UUID &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply UUIDs"); } -bool FieldVisitorMul::operator() (IPv4 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv4s"); } -bool FieldVisitorMul::operator() (IPv6 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv6s"); } -bool FieldVisitorMul::operator() (CustomType & x) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply custom type {}", x.getTypeName()); } - -bool FieldVisitorMul::operator() (AggregateFunctionStateData &) const -{ - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply AggregateFunctionStates"); -} - -bool FieldVisitorMul::operator() (bool &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Bools"); } - -} diff --git a/src/Common/FieldVisitorMul.h b/src/Common/FieldVisitorMul.h deleted file mode 100644 index 5bce41f1e71..00000000000 --- a/src/Common/FieldVisitorMul.h +++ /dev/null @@ -1,53 +0,0 @@ -#pragma once - -#include -#include - - -namespace DB -{ - -/** Implements `*=` operation. - * Returns false if the result is zero. - */ -class FieldVisitorMul : public StaticVisitor -{ -private: - const Field & rhs; -public: - explicit FieldVisitorMul(const Field & rhs_); - - // We can add all ints as unsigned regardless of their actual signedness. 
- bool operator() (Int64 & x) const; - bool operator() (UInt64 & x) const; - bool operator() (Float64 & x) const; - bool operator() (Null &) const; - bool operator() (String &) const; - bool operator() (Array &) const; - bool operator() (Tuple &) const; - bool operator() (Map &) const; - bool operator() (Object &) const; - bool operator() (UUID &) const; - bool operator() (IPv4 &) const; - bool operator() (IPv6 &) const; - bool operator() (AggregateFunctionStateData &) const; - bool operator() (CustomType &) const; - bool operator() (bool &) const; - - template - bool operator() (DecimalField & x) const - { - x *= rhs.safeGet>(); - return x.getValue() != T(0); - } - - template - requires is_big_int_v - bool operator() (T & x) const - { - x *= applyVisitor(FieldVisitorConvertToNumber(), rhs); - return x != T(0); - } -}; - -} diff --git a/src/Common/FieldVisitorScale.cpp b/src/Common/FieldVisitorScale.cpp new file mode 100644 index 00000000000..fdb566007c3 --- /dev/null +++ b/src/Common/FieldVisitorScale.cpp @@ -0,0 +1,30 @@ +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +FieldVisitorScale::FieldVisitorScale(Int32 rhs_) : rhs(rhs_) {} + +void FieldVisitorScale::operator() (Int64 & x) const { x *= rhs; } +void FieldVisitorScale::operator() (UInt64 & x) const { x *= rhs; } +void FieldVisitorScale::operator() (Float64 & x) const { x *= rhs; } +void FieldVisitorScale::operator() (Null &) const { /*Do not scale anything*/ } + +void FieldVisitorScale::operator() (String &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Strings"); } +void FieldVisitorScale::operator() (Array &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Arrays"); } +void FieldVisitorScale::operator() (Tuple &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Tuples"); } +void FieldVisitorScale::operator() (Map &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Maps"); } +void FieldVisitorScale::operator() (Object &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Objects"); } +void FieldVisitorScale::operator() (UUID &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply UUIDs"); } +void FieldVisitorScale::operator() (IPv4 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv4s"); } +void FieldVisitorScale::operator() (IPv6 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv6s"); } +void FieldVisitorScale::operator() (CustomType & x) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply custom type {}", x.getTypeName()); } +void FieldVisitorScale::operator() (AggregateFunctionStateData &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply AggregateFunctionStates"); } +void FieldVisitorScale::operator() (bool &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Bools"); } + +} diff --git a/src/Common/FieldVisitorScale.h b/src/Common/FieldVisitorScale.h new file mode 100644 index 00000000000..45bacdccc9c --- /dev/null +++ b/src/Common/FieldVisitorScale.h @@ -0,0 +1,46 @@ +#pragma once + +#include +#include +#include +#include "base/Decimal.h" +#include "base/extended_types.h" + +namespace DB +{ + +/** Implements `*=` operation by number + */ +class FieldVisitorScale : public StaticVisitor +{ +private: + Int32 rhs; + +public: + explicit FieldVisitorScale(Int32 rhs_); + + void operator() (Int64 & x) const; + void operator() (UInt64 & x) const; + void operator() (Float64 & x) 
const; + void operator() (Null &) const; + [[noreturn]] void operator() (String &) const; + [[noreturn]] void operator() (Array &) const; + [[noreturn]] void operator() (Tuple &) const; + [[noreturn]] void operator() (Map &) const; + [[noreturn]] void operator() (Object &) const; + [[noreturn]] void operator() (UUID &) const; + [[noreturn]] void operator() (IPv4 &) const; + [[noreturn]] void operator() (IPv6 &) const; + [[noreturn]] void operator() (AggregateFunctionStateData &) const; + [[noreturn]] void operator() (CustomType &) const; + [[noreturn]] void operator() (bool &) const; + + template + void operator() (DecimalField & x) const { x = DecimalField(x.getValue() * T(rhs), x.getScale()); } + + template + requires is_big_int_v + void operator() (T & x) const { x *= rhs; } +}; + +} diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 1d68f73e8c2..54331186302 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include #include @@ -123,7 +123,7 @@ static FillColumnDescription::StepFunction getStepFunction(const Field & step, c { auto shifted_step = step; if (jumps_count != 1) - applyVisitor(FieldVisitorMul(jumps_count), shifted_step); + applyVisitor(FieldVisitorScale(jumps_count), shifted_step); logDebug("field", field.dump()); logDebug("step", step.dump()); From 2d7de40ba70d6609f6fd79c5ef8534002803b707 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 17:24:03 +0000 Subject: [PATCH 178/353] fix sparse tables --- src/Processors/Transforms/FillingTransform.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 54331186302..635b46de3ee 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -458,7 +458,7 @@ void FillingTransform::initColumns( non_const_columns.reserve(input_columns.size()); for (const auto & column : input_columns) - non_const_columns.push_back(column->convertToFullColumnIfConst()); + non_const_columns.push_back(column->convertToFullColumnIfConst()->convertToFullColumnIfSparse()); for (const auto & column : non_const_columns) output_columns.push_back(column->cloneEmpty()->assumeMutable()); From 37f691bf9d1168431500c39c47432722a441a29e Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 17:42:52 +0000 Subject: [PATCH 179/353] add test --- .../03266_with_fill_staleness.reference | 28 +++++++++++++++++ .../0_stateless/03266_with_fill_staleness.sql | 31 +++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 tests/queries/0_stateless/03266_with_fill_staleness.reference create mode 100644 tests/queries/0_stateless/03266_with_fill_staleness.sql diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.reference b/tests/queries/0_stateless/03266_with_fill_staleness.reference new file mode 100644 index 00000000000..6061ecfe400 --- /dev/null +++ b/tests/queries/0_stateless/03266_with_fill_staleness.reference @@ -0,0 +1,28 @@ +add samples +regular with fill +2016-06-15 23:00:00 0 +2016-06-15 23:00:01 0 +2016-06-15 23:00:02 0 +2016-06-15 23:00:03 0 +2016-06-15 23:00:04 0 +2016-06-15 23:00:05 5 +2016-06-15 23:00:06 5 +2016-06-15 23:00:07 5 +2016-06-15 23:00:08 5 +2016-06-15 23:00:09 5 +2016-06-15 23:00:10 10 +2016-06-15 23:00:11 10 +2016-06-15 23:00:12 
10 +2016-06-15 23:00:13 10 +2016-06-15 23:00:14 10 +2016-06-15 23:00:15 15 +2016-06-15 23:00:16 15 +2016-06-15 23:00:17 15 +2016-06-15 23:00:18 15 +2016-06-15 23:00:19 15 +2016-06-15 23:00:20 20 +2016-06-15 23:00:21 20 +2016-06-15 23:00:22 20 +2016-06-15 23:00:23 20 +2016-06-15 23:00:24 20 +2016-06-15 23:00:25 25 diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.sql b/tests/queries/0_stateless/03266_with_fill_staleness.sql new file mode 100644 index 00000000000..3ab9be63a08 --- /dev/null +++ b/tests/queries/0_stateless/03266_with_fill_staleness.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS with_fill_staleness; +CREATE TABLE with_fill_staleness (a DateTime, b DateTime, c UInt64) ENGINE = MergeTree ORDER BY a; + +SELECT 'add samples'; + +INSERT INTO with_fill_staleness +SELECT + toDateTime('2016-06-15 23:00:00') + number AS a, a as b, number as c +FROM numbers(30) +WHERE (number % 5) == 0; + +SELECT 'regular with fill'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL INTERPOLATE (c); + +SELECT 'staleness 1 seconds'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 1 SECOND INTERPOLATE (c); + +SELECT 'staleness 3 seconds'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 3 SECOND INTERPOLATE (c); + +SELECT 'descending order'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a DESC WITH FILL STALENESS INTERVAL -2 SECOND INTERPOLATE (c); + +SELECT 'staleness with to and step'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL TO toDateTime('2016-06-15 23:00:40') STEP 3 STALENESS INTERVAL 7 SECOND INTERPOLATE (c); + +SELECT 'staleness with another regular with fill'; +SELECT a, b, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 2 SECOND, b ASC WITH FILL FROM 0 TO 3 INTERPOLATE (c); + +SELECT 'double staleness'; +SELECT a, b, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 2 SECOND, b ASC WITH FILL TO toDateTime('2016-06-15 23:01:00') STEP 2 STALENESS 5 INTERPOLATE (c); From 9760d39efe82339403de7a7177706c42c8d8c5a5 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 17:43:15 +0000 Subject: [PATCH 180/353] allow negative staleness for descending order --- src/Planner/PlannerSorting.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/Planner/PlannerSorting.cpp b/src/Planner/PlannerSorting.cpp index 0a33e2f0828..9476ae348c5 100644 --- a/src/Planner/PlannerSorting.cpp +++ b/src/Planner/PlannerSorting.cpp @@ -105,10 +105,6 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) if (sort_node.hasFillFrom()) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL STALENESS cannot be used together with WITH FILL FROM"); - - if (applyVisitor(FieldVisitorAccurateLessOrEqual(), fill_column_description.fill_staleness, Field{0})) - throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, - "WITH FILL STALENESS value cannot be less or equal zero"); } if (sort_node.getSortDirection() == SortDirection::ASCENDING) @@ -117,6 +113,10 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL STEP value cannot be negative for sorting in ascending direction"); + if (applyVisitor(FieldVisitorAccurateLess(), 
fill_column_description.fill_staleness, Field{0})) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STALENESS value cannot be negative for sorting in ascending direction"); + if (!fill_column_description.fill_from.isNull() && !fill_column_description.fill_to.isNull() && applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_to, fill_column_description.fill_from)) { @@ -130,6 +130,10 @@ FillColumnDescription extractWithFillDescription(const SortNode & sort_node) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "WITH FILL STEP value cannot be positive for sorting in descending direction"); + if (applyVisitor(FieldVisitorAccurateLess(), Field{0}, fill_column_description.fill_staleness)) + throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, + "WITH FILL STALENESS value cannot be positive for sorting in descending direction"); + if (!fill_column_description.fill_from.isNull() && !fill_column_description.fill_to.isNull() && applyVisitor(FieldVisitorAccurateLess(), fill_column_description.fill_from, fill_column_description.fill_to)) { From fc33593ff05ab3c5ca4271b79ba4eb39957fa057 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 17:45:02 +0000 Subject: [PATCH 181/353] fix style --- src/Interpreters/FillingRow.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 1d3eae03ddd..fdd3b55b66b 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -72,7 +72,8 @@ std::optional FillingRow::doJump(const FillColumnDescription& descr, size if (!descr.fill_to.isNull() && less(descr.fill_to, next_value, getDirection(column_ind))) return std::nullopt; - if (!descr.fill_staleness.isNull()) { + if (!descr.fill_staleness.isNull()) + { Field staleness_border = staleness_base_row[column_ind]; descr.staleness_step_func(staleness_border, 1); @@ -92,7 +93,8 @@ std::optional FillingRow::doLongJump(const FillColumnDescription & descr, if (less(to, shifted_value, getDirection(column_ind))) return std::nullopt; - for (int32_t step_len = 1, step_no = 0; step_no < 100; ++step_no) { + for (int32_t step_len = 1, step_no = 0; step_no < 100; ++step_no) + { Field next_value = shifted_value; descr.step_func(next_value, step_len); @@ -197,9 +199,8 @@ void FillingRow::initFromDefaults(size_t from_pos) void FillingRow::initStalenessRow(const Columns& base_row, size_t row_ind) { - for (size_t i = 0; i < size(); ++i) { + for (size_t i = 0; i < size(); ++i) staleness_base_row[i] = (*base_row[i])[row_ind]; - } } String FillingRow::dump() const From 4c9d865e7592985507accd7aa805647ef9335d72 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 17:45:27 +0000 Subject: [PATCH 182/353] disable debug logs --- src/Processors/Transforms/FillingTransform.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 635b46de3ee..7f81b86697c 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -17,7 +17,7 @@ namespace DB { -constexpr bool debug_logging_enabled = true; +constexpr bool debug_logging_enabled = false; template void logDebug(String key, const T & value, const char * separator = " : ") From 83844841b4f00a24a654ac7ce9f665c321b4df85 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 18:04:00 +0000 Subject: [PATCH 183/353] fix test 
timezone --- .../03266_with_fill_staleness.reference | 163 +++++++++++++++--- .../0_stateless/03266_with_fill_staleness.sql | 2 + 2 files changed, 139 insertions(+), 26 deletions(-) diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.reference b/tests/queries/0_stateless/03266_with_fill_staleness.reference index 6061ecfe400..6b090443359 100644 --- a/tests/queries/0_stateless/03266_with_fill_staleness.reference +++ b/tests/queries/0_stateless/03266_with_fill_staleness.reference @@ -1,28 +1,139 @@ add samples regular with fill -2016-06-15 23:00:00 0 -2016-06-15 23:00:01 0 -2016-06-15 23:00:02 0 -2016-06-15 23:00:03 0 -2016-06-15 23:00:04 0 -2016-06-15 23:00:05 5 -2016-06-15 23:00:06 5 -2016-06-15 23:00:07 5 -2016-06-15 23:00:08 5 -2016-06-15 23:00:09 5 -2016-06-15 23:00:10 10 -2016-06-15 23:00:11 10 -2016-06-15 23:00:12 10 -2016-06-15 23:00:13 10 -2016-06-15 23:00:14 10 -2016-06-15 23:00:15 15 -2016-06-15 23:00:16 15 -2016-06-15 23:00:17 15 -2016-06-15 23:00:18 15 -2016-06-15 23:00:19 15 -2016-06-15 23:00:20 20 -2016-06-15 23:00:21 20 -2016-06-15 23:00:22 20 -2016-06-15 23:00:23 20 -2016-06-15 23:00:24 20 -2016-06-15 23:00:25 25 +2016-06-15 23:00:00 0 original +2016-06-15 23:00:01 0 +2016-06-15 23:00:02 0 +2016-06-15 23:00:03 0 +2016-06-15 23:00:04 0 +2016-06-15 23:00:05 5 original +2016-06-15 23:00:06 5 +2016-06-15 23:00:07 5 +2016-06-15 23:00:08 5 +2016-06-15 23:00:09 5 +2016-06-15 23:00:10 10 original +2016-06-15 23:00:11 10 +2016-06-15 23:00:12 10 +2016-06-15 23:00:13 10 +2016-06-15 23:00:14 10 +2016-06-15 23:00:15 15 original +2016-06-15 23:00:16 15 +2016-06-15 23:00:17 15 +2016-06-15 23:00:18 15 +2016-06-15 23:00:19 15 +2016-06-15 23:00:20 20 original +2016-06-15 23:00:21 20 +2016-06-15 23:00:22 20 +2016-06-15 23:00:23 20 +2016-06-15 23:00:24 20 +2016-06-15 23:00:25 25 original +staleness 1 seconds +2016-06-15 23:00:00 0 original +2016-06-15 23:00:05 5 original +2016-06-15 23:00:10 10 original +2016-06-15 23:00:15 15 original +2016-06-15 23:00:20 20 original +2016-06-15 23:00:25 25 original +staleness 3 seconds +2016-06-15 23:00:00 0 original +2016-06-15 23:00:01 0 +2016-06-15 23:00:02 0 +2016-06-15 23:00:05 5 original +2016-06-15 23:00:06 5 +2016-06-15 23:00:07 5 +2016-06-15 23:00:10 10 original +2016-06-15 23:00:11 10 +2016-06-15 23:00:12 10 +2016-06-15 23:00:15 15 original +2016-06-15 23:00:16 15 +2016-06-15 23:00:17 15 +2016-06-15 23:00:20 20 original +2016-06-15 23:00:21 20 +2016-06-15 23:00:22 20 +2016-06-15 23:00:25 25 original +descending order +2016-06-15 23:00:25 25 original +2016-06-15 23:00:24 25 +2016-06-15 23:00:20 20 original +2016-06-15 23:00:19 20 +2016-06-15 23:00:15 15 original +2016-06-15 23:00:14 15 +2016-06-15 23:00:10 10 original +2016-06-15 23:00:09 10 +2016-06-15 23:00:05 5 original +2016-06-15 23:00:04 5 +2016-06-15 23:00:00 0 original +staleness with to and step +2016-06-15 23:00:00 0 original +2016-06-15 23:00:03 0 +2016-06-15 23:00:05 5 original +2016-06-15 23:00:06 5 +2016-06-15 23:00:09 5 +2016-06-15 23:00:10 10 original +2016-06-15 23:00:12 10 +2016-06-15 23:00:15 15 original +2016-06-15 23:00:18 15 +2016-06-15 23:00:20 20 original +2016-06-15 23:00:21 20 +2016-06-15 23:00:24 20 +2016-06-15 23:00:25 25 original +2016-06-15 23:00:27 25 +2016-06-15 23:00:30 25 +staleness with another regular with fill +2016-06-15 23:00:00 1970-01-01 01:00:00 0 +2016-06-15 23:00:00 1970-01-01 01:00:01 0 +2016-06-15 23:00:00 1970-01-01 01:00:02 0 +2016-06-15 23:00:00 2016-06-15 23:00:00 0 original +2016-06-15 23:00:01 1970-01-01 01:00:00 0 +2016-06-15 23:00:01 
1970-01-01 01:00:01 0 +2016-06-15 23:00:01 1970-01-01 01:00:02 0 +2016-06-15 23:00:05 2016-06-15 23:00:05 5 original +2016-06-15 23:00:05 1970-01-01 01:00:01 5 +2016-06-15 23:00:05 1970-01-01 01:00:02 5 +2016-06-15 23:00:06 1970-01-01 01:00:00 5 +2016-06-15 23:00:06 1970-01-01 01:00:01 5 +2016-06-15 23:00:06 1970-01-01 01:00:02 5 +2016-06-15 23:00:10 2016-06-15 23:00:10 10 original +2016-06-15 23:00:10 1970-01-01 01:00:01 10 +2016-06-15 23:00:10 1970-01-01 01:00:02 10 +2016-06-15 23:00:11 1970-01-01 01:00:00 10 +2016-06-15 23:00:11 1970-01-01 01:00:01 10 +2016-06-15 23:00:11 1970-01-01 01:00:02 10 +2016-06-15 23:00:15 2016-06-15 23:00:15 15 original +2016-06-15 23:00:15 1970-01-01 01:00:01 15 +2016-06-15 23:00:15 1970-01-01 01:00:02 15 +2016-06-15 23:00:16 1970-01-01 01:00:00 15 +2016-06-15 23:00:16 1970-01-01 01:00:01 15 +2016-06-15 23:00:16 1970-01-01 01:00:02 15 +2016-06-15 23:00:20 2016-06-15 23:00:20 20 original +2016-06-15 23:00:20 1970-01-01 01:00:01 20 +2016-06-15 23:00:20 1970-01-01 01:00:02 20 +2016-06-15 23:00:21 1970-01-01 01:00:00 20 +2016-06-15 23:00:21 1970-01-01 01:00:01 20 +2016-06-15 23:00:21 1970-01-01 01:00:02 20 +2016-06-15 23:00:25 2016-06-15 23:00:25 25 original +2016-06-15 23:00:25 1970-01-01 01:00:01 25 +2016-06-15 23:00:25 1970-01-01 01:00:02 25 +double staleness +2016-06-15 23:00:00 2016-06-15 23:00:00 0 original +2016-06-15 23:00:00 2016-06-15 23:00:02 0 +2016-06-15 23:00:00 2016-06-15 23:00:04 0 +2016-06-15 23:00:01 1970-01-01 01:00:00 0 +2016-06-15 23:00:05 2016-06-15 23:00:05 5 original +2016-06-15 23:00:05 2016-06-15 23:00:07 5 +2016-06-15 23:00:05 2016-06-15 23:00:09 5 +2016-06-15 23:00:06 1970-01-01 01:00:00 5 +2016-06-15 23:00:10 2016-06-15 23:00:10 10 original +2016-06-15 23:00:10 2016-06-15 23:00:12 10 +2016-06-15 23:00:10 2016-06-15 23:00:14 10 +2016-06-15 23:00:11 1970-01-01 01:00:00 10 +2016-06-15 23:00:15 2016-06-15 23:00:15 15 original +2016-06-15 23:00:15 2016-06-15 23:00:17 15 +2016-06-15 23:00:15 2016-06-15 23:00:19 15 +2016-06-15 23:00:16 1970-01-01 01:00:00 15 +2016-06-15 23:00:20 2016-06-15 23:00:20 20 original +2016-06-15 23:00:20 2016-06-15 23:00:22 20 +2016-06-15 23:00:20 2016-06-15 23:00:24 20 +2016-06-15 23:00:21 1970-01-01 01:00:00 20 +2016-06-15 23:00:25 2016-06-15 23:00:25 25 original +2016-06-15 23:00:25 2016-06-15 23:00:27 25 +2016-06-15 23:00:25 2016-06-15 23:00:29 25 diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.sql b/tests/queries/0_stateless/03266_with_fill_staleness.sql index 3ab9be63a08..fff702ffd83 100644 --- a/tests/queries/0_stateless/03266_with_fill_staleness.sql +++ b/tests/queries/0_stateless/03266_with_fill_staleness.sql @@ -1,3 +1,5 @@ +SET session_timezone='Europe/Amsterdam'; + DROP TABLE IF EXISTS with_fill_staleness; CREATE TABLE with_fill_staleness (a DateTime, b DateTime, c UInt64) ENGINE = MergeTree ORDER BY a; From 60f0efa67689c28bd5b155eefd3266f385822b94 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 18:08:25 +0000 Subject: [PATCH 184/353] remove debug log --- src/Planner/Planner.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/Planner/Planner.cpp b/src/Planner/Planner.cpp index f1c752aecd0..8d3c75fdabb 100644 --- a/src/Planner/Planner.cpp +++ b/src/Planner/Planner.cpp @@ -847,9 +847,6 @@ void addWithFillStepIfNeeded(QueryPlan & query_plan, interpolate_description = std::make_shared(std::move(interpolate_actions_dag), empty_aliases); } - if (interpolate_description) - LOG_DEBUG(getLogger("addWithFillStepIfNeeded"), "InterpolateDescription: {}", 
interpolate_description->actions.dumpDAG()); - const auto & query_context = planner_context->getQueryContext(); const Settings & settings = query_context->getSettingsRef(); auto filling_step = std::make_unique( From 64d038c4408f500ae58a6a3cdd68e99c2901faa0 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 18:14:56 +0000 Subject: [PATCH 185/353] cleanup --- src/Analyzer/SortNode.h | 6 ++--- src/Common/FieldVisitorScale.cpp | 22 +++++++++---------- src/Common/FieldVisitorScale.h | 3 --- src/Core/Field.h | 8 ------- .../Transforms/FillingTransform.cpp | 8 ++----- 5 files changed, 16 insertions(+), 31 deletions(-) diff --git a/src/Analyzer/SortNode.h b/src/Analyzer/SortNode.h index d9086dc9ed7..6f0010abdaa 100644 --- a/src/Analyzer/SortNode.h +++ b/src/Analyzer/SortNode.h @@ -105,19 +105,19 @@ public: return children[fill_step_child_index]; } - /// Returns true if sort node has fill step, false otherwise + /// Returns true if sort node has fill staleness, false otherwise bool hasFillStaleness() const { return children[fill_staleness_child_index] != nullptr; } - /// Get fill step + /// Get fill staleness const QueryTreeNodePtr & getFillStaleness() const { return children[fill_staleness_child_index]; } - /// Get fill step + /// Get fill staleness QueryTreeNodePtr & getFillStaleness() { return children[fill_staleness_child_index]; diff --git a/src/Common/FieldVisitorScale.cpp b/src/Common/FieldVisitorScale.cpp index fdb566007c3..a6c0f6d0c5b 100644 --- a/src/Common/FieldVisitorScale.cpp +++ b/src/Common/FieldVisitorScale.cpp @@ -15,16 +15,16 @@ void FieldVisitorScale::operator() (UInt64 & x) const { x *= rhs; } void FieldVisitorScale::operator() (Float64 & x) const { x *= rhs; } void FieldVisitorScale::operator() (Null &) const { /*Do not scale anything*/ } -void FieldVisitorScale::operator() (String &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Strings"); } -void FieldVisitorScale::operator() (Array &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Arrays"); } -void FieldVisitorScale::operator() (Tuple &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Tuples"); } -void FieldVisitorScale::operator() (Map &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Maps"); } -void FieldVisitorScale::operator() (Object &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Objects"); } -void FieldVisitorScale::operator() (UUID &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply UUIDs"); } -void FieldVisitorScale::operator() (IPv4 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv4s"); } -void FieldVisitorScale::operator() (IPv6 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply IPv6s"); } -void FieldVisitorScale::operator() (CustomType & x) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply custom type {}", x.getTypeName()); } -void FieldVisitorScale::operator() (AggregateFunctionStateData &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply AggregateFunctionStates"); } -void FieldVisitorScale::operator() (bool &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot multiply Bools"); } +void FieldVisitorScale::operator() (String &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Strings"); } +void FieldVisitorScale::operator() (Array &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Arrays"); } +void FieldVisitorScale::operator() (Tuple &) const { 
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Tuples"); } +void FieldVisitorScale::operator() (Map &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Maps"); } +void FieldVisitorScale::operator() (Object &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Objects"); } +void FieldVisitorScale::operator() (UUID &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale UUIDs"); } +void FieldVisitorScale::operator() (IPv4 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale IPv4s"); } +void FieldVisitorScale::operator() (IPv6 &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale IPv6s"); } +void FieldVisitorScale::operator() (CustomType & x) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale custom type {}", x.getTypeName()); } +void FieldVisitorScale::operator() (AggregateFunctionStateData &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale AggregateFunctionStates"); } +void FieldVisitorScale::operator() (bool &) const { throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot scale Bools"); } } diff --git a/src/Common/FieldVisitorScale.h b/src/Common/FieldVisitorScale.h index 45bacdccc9c..90d86cc53bd 100644 --- a/src/Common/FieldVisitorScale.h +++ b/src/Common/FieldVisitorScale.h @@ -1,10 +1,7 @@ #pragma once -#include #include #include -#include "base/Decimal.h" -#include "base/extended_types.h" namespace DB { diff --git a/src/Core/Field.h b/src/Core/Field.h index 47df5c2907e..7b916d30646 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -185,14 +185,6 @@ public: return *this; } - const DecimalField & operator *= (const DecimalField & r) - { - if (scale != r.getScale()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Multiply different decimal fields"); - dec *= r.getValue(); - return *this; - } - const DecimalField & operator -= (const DecimalField & r) { if (scale != r.getScale()) diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 7f81b86697c..46a670394a5 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -125,10 +125,6 @@ static FillColumnDescription::StepFunction getStepFunction(const Field & step, c if (jumps_count != 1) applyVisitor(FieldVisitorScale(jumps_count), shifted_step); - logDebug("field", field.dump()); - logDebug("step", step.dump()); - logDebug("shifted field", shifted_step.dump()); - applyVisitor(FieldVisitorSum(shifted_step), field); }; } @@ -684,8 +680,8 @@ void FillingTransform::transformRange( } const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/true); - logDebug("apply", apply); - logDebug("changed", changed); + logDebug("long jump apply", apply); + logDebug("long jump changed", changed); if (changed) filling_row_changed = true; From f905c804f5b5aa0c0b14e9aaab1034fa8fbbef03 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 19:58:53 +0000 Subject: [PATCH 186/353] fix calibration jump --- src/Interpreters/FillingRow.cpp | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index fdd3b55b66b..49ee558cb20 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -153,23 +153,17 @@ std::pair FillingRow::next(const FillingRow & to_row, bool long_jump if (!next_value.has_value()) return {false, false}; - Field calibration_jump_value = next_value.value(); - 
fill_column_desc.step_func(calibration_jump_value, 1); - - if (equals(calibration_jump_value, to_row[pos])) - next_value = calibration_jump_value; - - if (!next_value.has_value() || less(to_row.row[pos], next_value.value(), getDirection(pos)) || equals(next_value.value(), getFillDescription(pos).fill_to)) - return {false, false}; + /// We need value >= to_row[pos] + fill_column_desc.step_func(next_value.value(), 1); } else { next_value = doJump(fill_column_desc, pos); - - if (!next_value.has_value() || less(to_row.row[pos], next_value.value(), getDirection(pos)) || equals(next_value.value(), getFillDescription(pos).fill_to)) - return {false, false}; } + if (!next_value.has_value() || less(to_row.row[pos], next_value.value(), getDirection(pos)) || equals(next_value.value(), getFillDescription(pos).fill_to)) + return {false, false}; + row[pos] = std::move(next_value.value()); if (equals(row[pos], to_row.row[pos])) { From 6772d3fe6623f73edb4509a7d6e9cbdc5e9883f9 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Mon, 28 Oct 2024 22:08:38 +0000 Subject: [PATCH 187/353] little improvement --- src/Interpreters/FillingRow.cpp | 17 ++++++++++------- src/Interpreters/FillingRow.h | 2 +- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 49ee558cb20..8c5f102bcd6 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -28,7 +28,7 @@ FillingRow::FillingRow(const SortDescription & sort_description_) : sort_description(sort_description_) { row.resize(sort_description.size()); - staleness_base_row.resize(sort_description.size()); + staleness_border.resize(sort_description.size()); } bool FillingRow::operator<(const FillingRow & other) const @@ -74,10 +74,7 @@ std::optional FillingRow::doJump(const FillColumnDescription& descr, size if (!descr.fill_staleness.isNull()) { - Field staleness_border = staleness_base_row[column_ind]; - descr.staleness_step_func(staleness_border, 1); - - if (less(next_value, staleness_border, getDirection(column_ind))) + if (less(next_value, staleness_border[column_ind], getDirection(column_ind))) return next_value; else return std::nullopt; @@ -93,7 +90,7 @@ std::optional FillingRow::doLongJump(const FillColumnDescription & descr, if (less(to, shifted_value, getDirection(column_ind))) return std::nullopt; - for (int32_t step_len = 1, step_no = 0; step_no < 100; ++step_no) + for (int32_t step_len = 1, step_no = 0; step_no < 100 && step_len > 0; ++step_no) { Field next_value = shifted_value; descr.step_func(next_value, step_len); @@ -194,7 +191,13 @@ void FillingRow::initFromDefaults(size_t from_pos) void FillingRow::initStalenessRow(const Columns& base_row, size_t row_ind) { for (size_t i = 0; i < size(); ++i) - staleness_base_row[i] = (*base_row[i])[row_ind]; + { + staleness_border[i] = (*base_row[i])[row_ind]; + + const auto& descr = getFillDescription(i); + if (!descr.fill_staleness.isNull()) + descr.staleness_step_func(staleness_border[i], 1); + } } String FillingRow::dump() const diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index 14b6034ce35..dc787173191 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -46,7 +46,7 @@ public: private: Row row; - Row staleness_base_row; + Row staleness_border; SortDescription sort_description; }; From 219cc4e5d241201d8bb4838cc440735ec5c905ea Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Tue, 29 Oct 2024 12:15:13 +0800 Subject: [PATCH 188/353] 
fix mismatched aggreage function name of quantileExactWeightedInterpolated --- .../AggregateFunctionQuantileExactWeighted.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp b/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp index 58b3b75b056..116b04bf4ba 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp +++ b/src/AggregateFunctions/AggregateFunctionQuantileExactWeighted.cpp @@ -387,7 +387,7 @@ template using FuncQuantileExactWeighted = AggregateFunctionQuantile< Value, QuantileExactWeighted, - NameQuantileExactWeighted, + std::conditional_t, true, std::conditional_t, false, @@ -396,7 +396,7 @@ template using FuncQuantilesExactWeighted = AggregateFunctionQuantile< Value, QuantileExactWeighted, - NameQuantilesExactWeighted, + std::conditional_t, true, std::conditional_t, true, From 190703b603fe8bfef6d92cc883f9e0107fdce83c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Tue, 29 Oct 2024 05:32:52 +0100 Subject: [PATCH 189/353] Close #8687 --- .../03258_multiple_array_joins.reference | 8 +++++++ .../03258_multiple_array_joins.sql | 24 +++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 tests/queries/0_stateless/03258_multiple_array_joins.reference create mode 100644 tests/queries/0_stateless/03258_multiple_array_joins.sql diff --git a/tests/queries/0_stateless/03258_multiple_array_joins.reference b/tests/queries/0_stateless/03258_multiple_array_joins.reference new file mode 100644 index 00000000000..4d357c8ac80 --- /dev/null +++ b/tests/queries/0_stateless/03258_multiple_array_joins.reference @@ -0,0 +1,8 @@ +1 Michel Foucault alive no +1 Michel Foucault profession philosopher +1 Thomas Aquinas alive no +1 Thomas Aquinas profession philosopher +2 Nicola Tesla alive no +2 Nicola Tesla profession inventor +2 Thomas Edison alive no +2 Thomas Edison profession inventor diff --git a/tests/queries/0_stateless/03258_multiple_array_joins.sql b/tests/queries/0_stateless/03258_multiple_array_joins.sql new file mode 100644 index 00000000000..5afe7725d3f --- /dev/null +++ b/tests/queries/0_stateless/03258_multiple_array_joins.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS test_multiple_array_join; + +CREATE TABLE test_multiple_array_join ( + id UInt64, + person Nested ( + name String, + surname String + ), + properties Nested ( + key String, + value String + ) +) Engine=MergeTree ORDER BY id; + +INSERT INTO test_multiple_array_join VALUES (1, ['Thomas', 'Michel'], ['Aquinas', 'Foucault'], ['profession', 'alive'], ['philosopher', 'no']); +INSERT INTO test_multiple_array_join VALUES (2, ['Thomas', 'Nicola'], ['Edison', 'Tesla'], ['profession', 'alive'], ['inventor', 'no']); + +SELECT * +FROM test_multiple_array_join +ARRAY JOIN person +ARRAY JOIN properties +ORDER BY ALL; + +DROP TABLE test_multiple_array_join; From af7aa7de568063c53d849150be83ee625413dc7d Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 29 Oct 2024 10:03:02 +0000 Subject: [PATCH 190/353] Fix some bugs --- .../ObjectStorage/DataLakes/Common.cpp | 7 +++ .../DataLakes/DataLakeConfiguration.h | 2 +- .../ObjectStorage/StorageObjectStorage.cpp | 46 +++++++++++++++++-- .../ObjectStorage/StorageObjectStorage.h | 2 + .../registerStorageObjectStorage.cpp | 3 +- .../TableFunctionObjectStorage.cpp | 5 +- .../TableFunctionObjectStorageCluster.cpp | 7 +-- .../configs/config.d/filesystem_caches.xml | 1 + .../integration/test_storage_iceberg/test.py | 14 ++++-- 9 files changed, 74 insertions(+), 13 
deletions(-) diff --git a/src/Storages/ObjectStorage/DataLakes/Common.cpp b/src/Storages/ObjectStorage/DataLakes/Common.cpp index 4830cc52a90..c21c0486eca 100644 --- a/src/Storages/ObjectStorage/DataLakes/Common.cpp +++ b/src/Storages/ObjectStorage/DataLakes/Common.cpp @@ -1,6 +1,9 @@ #include "Common.h" #include #include +#include +#include +#include #include namespace DB @@ -13,6 +16,10 @@ std::vector listFiles( { auto key = std::filesystem::path(configuration.getPath()) / prefix; RelativePathsWithMetadata files_with_metadata; + // time_t now = time(nullptr); + Poco::DateTime now; + std::string formatted = Poco::DateTimeFormatter::format(now, Poco::DateTimeFormat::ISO8601_FORMAT); + LOG_ERROR(&Poco::Logger::get("Inside listFiles"), "Time of files listing: {}", formatted); object_storage.listObjects(key, files_with_metadata, 0); Strings res; for (const auto & file_with_metadata : files_with_metadata) diff --git a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h index 18ff6d93c46..8a4147308f3 100644 --- a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h +++ b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h @@ -36,7 +36,7 @@ public: void update(ObjectStoragePtr object_storage, ContextPtr local_context) override { - BaseStorageConfiguration::update(object_storage, local_context); + // BaseStorageConfiguration::update(object_storage, local_context); auto new_metadata = DataLakeMetadata::create(object_storage, weak_from_this(), local_context); if (current_metadata && *current_metadata == *new_metadata) return; diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index ddc6276a8a1..6f4c0787e81 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -22,6 +22,7 @@ #include #include #include +#include "Databases/LoadingStrictnessLevel.h" #include "Storages/ColumnsDescription.h" @@ -68,6 +69,27 @@ String StorageObjectStorage::getPathSample(StorageInMemoryMetadata metadata, Con return ""; } +void printConfiguration(const Poco::Util::AbstractConfiguration & config, std::string log_name, const std::string & prefix = "") +{ + Poco::Util::AbstractConfiguration::Keys keys; + config.keys(prefix, keys); + + for (const auto & key : keys) + { + std::string fullKey = prefix.empty() ? key : (prefix + "." 
+ key); + + if (config.hasProperty(fullKey)) + { + std::string value = config.getString(fullKey); + LOG_DEBUG(&Poco::Logger::get(log_name), "{} = {}", fullKey, value); + } + + // Recursively print sub-configurations + printConfiguration(config, fullKey, log_name); + } +} + + StorageObjectStorage::StorageObjectStorage( ConfigurationPtr configuration_, ObjectStoragePtr object_storage_, @@ -77,6 +99,7 @@ StorageObjectStorage::StorageObjectStorage( const ConstraintsDescription & constraints_, const String & comment, std::optional format_settings_, + LoadingStrictnessLevel mode, bool distributed_processing_, ASTPtr partition_by_) : IStorage(table_id_) @@ -87,11 +110,27 @@ StorageObjectStorage::StorageObjectStorage( , distributed_processing(distributed_processing_) , log(getLogger(fmt::format("Storage{}({})", configuration->getEngineName(), table_id_.getFullTableName()))) { - ColumnsDescription columns{columns_}; - LOG_DEBUG(&Poco::Logger::get("StorageObjectStorage Creation"), "Columns size {}", columns.size()); - configuration->update(object_storage, context); + // LOG_DEBUG(&Poco::Logger::get("StorageObjectStorage Creation"), "Columns size {}", columns.size()); + printConfiguration(context->getConfigRef(), "Storage create"); + try + { + // configuration->update(object_storage, context); + } + catch (...) + { + if (mode <= LoadingStrictnessLevel::CREATE) + { + throw; + } + else + { + tryLogCurrentException(__PRETTY_FUNCTION__); + return; + } + } std::string sample_path; + ColumnsDescription columns{columns_}; resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, format_settings, sample_path, context); configuration->check(context); @@ -271,6 +310,7 @@ void StorageObjectStorage::read( size_t num_streams) { configuration->update(object_storage, local_context); + printConfiguration(local_context->getConfigRef(), "Select query"); if (partition_by && configuration->withPartitionWildcard()) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.h b/src/Storages/ObjectStorage/StorageObjectStorage.h index dc461e5861d..6ca1613e65c 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.h +++ b/src/Storages/ObjectStorage/StorageObjectStorage.h @@ -57,6 +57,7 @@ public: const ConstraintsDescription & constraints_, const String & comment, std::optional format_settings_, + LoadingStrictnessLevel mode, bool distributed_processing_ = false, ASTPtr partition_by_ = nullptr); @@ -217,6 +218,7 @@ public: virtual void update(ObjectStoragePtr object_storage, ContextPtr local_context); + protected: virtual void fromNamedCollection(const NamedCollection & collection, ContextPtr context) = 0; virtual void fromAST(ASTs & args, ContextPtr context, bool with_structure) = 0; diff --git a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp index 9a525b4e21a..a0393ea3e6a 100644 --- a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp @@ -51,13 +51,14 @@ static std::shared_ptr createStorageObjectStorage( return std::make_shared( configuration, - configuration->createObjectStorage(context, /* is_readonly */false), + configuration->createObjectStorage(context, /* is_readonly */ false), args.getContext(), args.table_id, args.columns, args.constraints, args.comment, format_settings, + args.mode, /* distributed_processing */ false, partition_by); } diff --git 
a/src/TableFunctions/TableFunctionObjectStorage.cpp b/src/TableFunctions/TableFunctionObjectStorage.cpp index 66c90b15c0b..6d81269f2d7 100644 --- a/src/TableFunctions/TableFunctionObjectStorage.cpp +++ b/src/TableFunctions/TableFunctionObjectStorage.cpp @@ -117,8 +117,9 @@ StoragePtr TableFunctionObjectStorage::executeImpl( columns, ConstraintsDescription{}, String{}, - /* format_settings */std::nullopt, - /* distributed_processing */false, + /* format_settings */ std::nullopt, + /* mode */ LoadingStrictnessLevel::CREATE, + /* distributed_processing */ false, nullptr); storage->startup(); diff --git a/src/TableFunctions/TableFunctionObjectStorageCluster.cpp b/src/TableFunctions/TableFunctionObjectStorageCluster.cpp index 449bd2c8c49..5ca26aabe32 100644 --- a/src/TableFunctions/TableFunctionObjectStorageCluster.cpp +++ b/src/TableFunctions/TableFunctionObjectStorageCluster.cpp @@ -41,9 +41,10 @@ StoragePtr TableFunctionObjectStorageCluster::execute StorageID(Base::getDatabaseName(), table_name), columns, ConstraintsDescription{}, - /* comment */String{}, - /* format_settings */std::nullopt, /// No format_settings - /* distributed_processing */true, + /* comment */ String{}, + /* format_settings */ std::nullopt, /// No format_settings + /* mode */ LoadingStrictnessLevel::CREATE, + /* distributed_processing */ true, /*partition_by_=*/nullptr); } else diff --git a/tests/integration/test_storage_iceberg/configs/config.d/filesystem_caches.xml b/tests/integration/test_storage_iceberg/configs/config.d/filesystem_caches.xml index e91362640fe..3b1b2aeb37e 100644 --- a/tests/integration/test_storage_iceberg/configs/config.d/filesystem_caches.xml +++ b/tests/integration/test_storage_iceberg/configs/config.d/filesystem_caches.xml @@ -5,4 +5,5 @@ cache1 + diff --git a/tests/integration/test_storage_iceberg/test.py b/tests/integration/test_storage_iceberg/test.py index 36aba550dbd..ca78fbea667 100644 --- a/tests/integration/test_storage_iceberg/test.py +++ b/tests/integration/test_storage_iceberg/test.py @@ -6,6 +6,8 @@ import time import uuid from datetime import datetime +from logging import log + import pyspark import pytest from azure.storage.blob import BlobServiceClient @@ -856,14 +858,20 @@ def test_restart_broken_s3(started_cluster): ) minio_client.remove_bucket(bucket) + print("Before restart: ", datetime.now()) + instance.restart_clickhouse() - assert "NoSuchBucket" in instance.query_and_get_error( - f"SELECT count() FROM {TABLE_NAME}" - ) + # assert "NoSuchBucket" in instance.query_and_get_error( + # f"SELECT count() FROM {TABLE_NAME}" + # ) + + time.sleep(10) minio_client.make_bucket(bucket) + print("Before successful select: ", datetime.now()) + files = default_upload_directory( started_cluster, "s3", From b81e024c70cb27c41daacef6372846cd9478e654 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 29 Oct 2024 13:54:22 +0000 Subject: [PATCH 191/353] Debug prints --- .../DataLakes/DataLakeConfiguration.h | 7 +++++-- .../ObjectStorage/StorageObjectStorage.cpp | 16 ++++++++-------- .../ObjectStorage/StorageObjectStorage.h | 2 +- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h index 8a4147308f3..9bb02436df1 100644 --- a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h +++ b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h @@ -34,9 +34,12 @@ public: std::string getEngineName() const override { return DataLakeMetadata::name; } - void 
update(ObjectStoragePtr object_storage, ContextPtr local_context) override + void update(ObjectStoragePtr object_storage, ContextPtr local_context, bool update_base) override { - // BaseStorageConfiguration::update(object_storage, local_context); + if (update_base) + { + BaseStorageConfiguratixon::update(object_storage, local_context); + } auto new_metadata = DataLakeMetadata::create(object_storage, weak_from_this(), local_context); if (current_metadata && *current_metadata == *new_metadata) return; diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index 6f4c0787e81..de5a4a08358 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -76,16 +76,16 @@ void printConfiguration(const Poco::Util::AbstractConfiguration & config, std::s for (const auto & key : keys) { - std::string fullKey = prefix.empty() ? key : (prefix + "." + key); + std::string full_key = prefix.empty() ? key : (prefix + "." + key); - if (config.hasProperty(fullKey)) + if (config.hasProperty(full_key)) { - std::string value = config.getString(fullKey); - LOG_DEBUG(&Poco::Logger::get(log_name), "{} = {}", fullKey, value); + std::string value = config.getString(full_key); + LOG_DEBUG(&Poco::Logger::get(log_name), "{} = {}", full_key, value); } // Recursively print sub-configurations - printConfiguration(config, fullKey, log_name); + printConfiguration(config, full_key, log_name); } } @@ -114,7 +114,7 @@ StorageObjectStorage::StorageObjectStorage( printConfiguration(context->getConfigRef(), "Storage create"); try { - // configuration->update(object_storage, context); + configuration->update(object_storage, context); } catch (...) { @@ -166,7 +166,7 @@ bool StorageObjectStorage::supportsSubsetOfColumns(const ContextPtr & context) c return FormatFactory::instance().checkIfFormatSupportsSubsetOfColumns(configuration->format, context, format_settings); } -void StorageObjectStorage::Configuration::update(ObjectStoragePtr object_storage_ptr, ContextPtr context) +void StorageObjectStorage::Configuration::update(ObjectStoragePtr object_storage_ptr, ContextPtr context, [[maybe_unused]] bool update_base) { IObjectStorage::ApplyNewSettingsOptions options{.allow_client_change = !isStaticConfiguration()}; object_storage_ptr->applyNewSettings(context->getConfigRef(), getTypeName() + ".", context, options); @@ -309,7 +309,7 @@ void StorageObjectStorage::read( size_t max_block_size, size_t num_streams) { - configuration->update(object_storage, local_context); + configuration->update(object_storage, local_context, true); printConfiguration(local_context->getConfigRef(), "Select query"); if (partition_by && configuration->withPartitionWildcard()) { diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.h b/src/Storages/ObjectStorage/StorageObjectStorage.h index 6ca1613e65c..3a85a2532f2 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.h +++ b/src/Storages/ObjectStorage/StorageObjectStorage.h @@ -216,7 +216,7 @@ public: String compression_method = "auto"; String structure = "auto"; - virtual void update(ObjectStoragePtr object_storage, ContextPtr local_context); + virtual void update(ObjectStoragePtr object_storage, ContextPtr local_context, [[maybe_unused]] bool update_base = false); protected: From a54df544050633074e9680049ffc315a1b143f72 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 29 Oct 2024 15:04:30 +0000 Subject: [PATCH 192/353] Add changes --- 
src/Storages/ObjectStorage/StorageObjectStorage.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.h b/src/Storages/ObjectStorage/StorageObjectStorage.h index 3a85a2532f2..6ca1613e65c 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.h +++ b/src/Storages/ObjectStorage/StorageObjectStorage.h @@ -216,7 +216,7 @@ public: String compression_method = "auto"; String structure = "auto"; - virtual void update(ObjectStoragePtr object_storage, ContextPtr local_context, [[maybe_unused]] bool update_base = false); + virtual void update(ObjectStoragePtr object_storage, ContextPtr local_context); protected: From 886603d62541818f74d7e206209ef58f87c07e70 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 29 Oct 2024 15:18:05 +0000 Subject: [PATCH 193/353] Fixed some bugs --- .../ObjectStorage/DataLakes/DataLakeConfiguration.h | 9 ++------- src/Storages/ObjectStorage/StorageObjectStorage.cpp | 4 ++-- tests/integration/test_storage_iceberg/test.py | 6 +++--- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h index 9bb02436df1..1a694a25dff 100644 --- a/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h +++ b/src/Storages/ObjectStorage/DataLakes/DataLakeConfiguration.h @@ -30,16 +30,11 @@ public: bool isDataLakeConfiguration() const override { return true; } - bool isStaticConfiguration() const override { return false; } - std::string getEngineName() const override { return DataLakeMetadata::name; } - void update(ObjectStoragePtr object_storage, ContextPtr local_context, bool update_base) override + void update(ObjectStoragePtr object_storage, ContextPtr local_context) override { - if (update_base) - { - BaseStorageConfiguratixon::update(object_storage, local_context); - } + BaseStorageConfiguration::update(object_storage, local_context); auto new_metadata = DataLakeMetadata::create(object_storage, weak_from_this(), local_context); if (current_metadata && *current_metadata == *new_metadata) return; diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index de5a4a08358..89a5bfe9469 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -166,7 +166,7 @@ bool StorageObjectStorage::supportsSubsetOfColumns(const ContextPtr & context) c return FormatFactory::instance().checkIfFormatSupportsSubsetOfColumns(configuration->format, context, format_settings); } -void StorageObjectStorage::Configuration::update(ObjectStoragePtr object_storage_ptr, ContextPtr context, [[maybe_unused]] bool update_base) +void StorageObjectStorage::Configuration::update(ObjectStoragePtr object_storage_ptr, ContextPtr context) { IObjectStorage::ApplyNewSettingsOptions options{.allow_client_change = !isStaticConfiguration()}; object_storage_ptr->applyNewSettings(context->getConfigRef(), getTypeName() + ".", context, options); @@ -309,7 +309,7 @@ void StorageObjectStorage::read( size_t max_block_size, size_t num_streams) { - configuration->update(object_storage, local_context, true); + configuration->update(object_storage, local_context); printConfiguration(local_context->getConfigRef(), "Select query"); if (partition_by && configuration->withPartitionWildcard()) { diff --git a/tests/integration/test_storage_iceberg/test.py b/tests/integration/test_storage_iceberg/test.py index 
ca78fbea667..3d93c1b163c 100644 --- a/tests/integration/test_storage_iceberg/test.py +++ b/tests/integration/test_storage_iceberg/test.py @@ -862,9 +862,9 @@ def test_restart_broken_s3(started_cluster): instance.restart_clickhouse() - # assert "NoSuchBucket" in instance.query_and_get_error( - # f"SELECT count() FROM {TABLE_NAME}" - # ) + assert "NoSuchBucket" in instance.query_and_get_error( + f"SELECT count() FROM {TABLE_NAME}" + ) time.sleep(10) From 9425b19f848ace4c7c183d2c36e1660986ce394d Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Tue, 29 Oct 2024 15:26:35 +0000 Subject: [PATCH 194/353] Automatic style fix --- tests/integration/test_storage_iceberg/test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/integration/test_storage_iceberg/test.py b/tests/integration/test_storage_iceberg/test.py index 3d93c1b163c..690ebeeffbf 100644 --- a/tests/integration/test_storage_iceberg/test.py +++ b/tests/integration/test_storage_iceberg/test.py @@ -5,7 +5,6 @@ import os import time import uuid from datetime import datetime - from logging import log import pyspark From 7d2fc48b6d37c5120372349892f5382823cafa06 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 29 Oct 2024 17:02:43 +0000 Subject: [PATCH 195/353] Fixed restart broken --- src/Storages/ObjectStorage/StorageObjectStorage.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index 89a5bfe9469..9fa7b669b79 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -118,14 +118,15 @@ StorageObjectStorage::StorageObjectStorage( } catch (...) { - if (mode <= LoadingStrictnessLevel::CREATE) + if (mode <= LoadingStrictnessLevel::CREATE || columns_.empty() + || (configuration->format + == "auto")) // If we don't have format or schema yet, we can't ignore failed configuration update, because relevant configuration is crucial for format and schema inference { throw; } else { tryLogCurrentException(__PRETTY_FUNCTION__); - return; } } From 9b435388deb183edc2dfee520107391e6b96a2f4 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 29 Oct 2024 17:20:53 +0000 Subject: [PATCH 196/353] Remove useless stuff --- .../ObjectStorages/S3/S3ObjectStorage.cpp | 3 +- .../ObjectStorage/DataLakes/Common.cpp | 7 ----- .../ObjectStorage/StorageObjectStorage.cpp | 28 ++----------------- .../configs/config.d/filesystem_caches.xml | 1 - .../integration/test_storage_iceberg/test.py | 7 ----- 5 files changed, 4 insertions(+), 42 deletions(-) diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 44aeabc1c28..47ef97401f2 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -501,7 +501,8 @@ void S3ObjectStorage::applyNewSettings( } auto current_settings = s3_settings.get(); - if (options.allow_client_change && (current_settings->auth_settings.hasUpdates(modified_settings->auth_settings) || for_disk_s3)) + if (options.allow_client_change + && (current_settings->auth_settings.hasUpdates(modified_settings->auth_settings) || for_disk_s3)) { auto new_client = getClient(uri, *modified_settings, context, for_disk_s3); client.set(std::move(new_client)); diff --git a/src/Storages/ObjectStorage/DataLakes/Common.cpp b/src/Storages/ObjectStorage/DataLakes/Common.cpp index c21c0486eca..4830cc52a90 100644 --- 
a/src/Storages/ObjectStorage/DataLakes/Common.cpp +++ b/src/Storages/ObjectStorage/DataLakes/Common.cpp @@ -1,9 +1,6 @@ #include "Common.h" #include #include -#include -#include -#include #include namespace DB @@ -16,10 +13,6 @@ std::vector listFiles( { auto key = std::filesystem::path(configuration.getPath()) / prefix; RelativePathsWithMetadata files_with_metadata; - // time_t now = time(nullptr); - Poco::DateTime now; - std::string formatted = Poco::DateTimeFormatter::format(now, Poco::DateTimeFormat::ISO8601_FORMAT); - LOG_ERROR(&Poco::Logger::get("Inside listFiles"), "Time of files listing: {}", formatted); object_storage.listObjects(key, files_with_metadata, 0); Strings res; for (const auto & file_with_metadata : files_with_metadata) diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index 9fa7b669b79..1ed6e137a31 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -69,27 +69,6 @@ String StorageObjectStorage::getPathSample(StorageInMemoryMetadata metadata, Con return ""; } -void printConfiguration(const Poco::Util::AbstractConfiguration & config, std::string log_name, const std::string & prefix = "") -{ - Poco::Util::AbstractConfiguration::Keys keys; - config.keys(prefix, keys); - - for (const auto & key : keys) - { - std::string full_key = prefix.empty() ? key : (prefix + "." + key); - - if (config.hasProperty(full_key)) - { - std::string value = config.getString(full_key); - LOG_DEBUG(&Poco::Logger::get(log_name), "{} = {}", full_key, value); - } - - // Recursively print sub-configurations - printConfiguration(config, full_key, log_name); - } -} - - StorageObjectStorage::StorageObjectStorage( ConfigurationPtr configuration_, ObjectStoragePtr object_storage_, @@ -110,17 +89,14 @@ StorageObjectStorage::StorageObjectStorage( , distributed_processing(distributed_processing_) , log(getLogger(fmt::format("Storage{}({})", configuration->getEngineName(), table_id_.getFullTableName()))) { - // LOG_DEBUG(&Poco::Logger::get("StorageObjectStorage Creation"), "Columns size {}", columns.size()); - printConfiguration(context->getConfigRef(), "Storage create"); try { configuration->update(object_storage, context); } catch (...) 
{ - if (mode <= LoadingStrictnessLevel::CREATE || columns_.empty() - || (configuration->format - == "auto")) // If we don't have format or schema yet, we can't ignore failed configuration update, because relevant configuration is crucial for format and schema inference + // If we don't have format or schema yet, we can't ignore failed configuration update, because relevant configuration is crucial for format and schema inference + if (mode <= LoadingStrictnessLevel::CREATE || columns_.empty() || (configuration->format == "auto")) { throw; } diff --git a/tests/integration/test_storage_iceberg/configs/config.d/filesystem_caches.xml b/tests/integration/test_storage_iceberg/configs/config.d/filesystem_caches.xml index 3b1b2aeb37e..e91362640fe 100644 --- a/tests/integration/test_storage_iceberg/configs/config.d/filesystem_caches.xml +++ b/tests/integration/test_storage_iceberg/configs/config.d/filesystem_caches.xml @@ -5,5 +5,4 @@ cache1 - diff --git a/tests/integration/test_storage_iceberg/test.py b/tests/integration/test_storage_iceberg/test.py index 690ebeeffbf..36aba550dbd 100644 --- a/tests/integration/test_storage_iceberg/test.py +++ b/tests/integration/test_storage_iceberg/test.py @@ -5,7 +5,6 @@ import os import time import uuid from datetime import datetime -from logging import log import pyspark import pytest @@ -857,20 +856,14 @@ def test_restart_broken_s3(started_cluster): ) minio_client.remove_bucket(bucket) - print("Before restart: ", datetime.now()) - instance.restart_clickhouse() assert "NoSuchBucket" in instance.query_and_get_error( f"SELECT count() FROM {TABLE_NAME}" ) - time.sleep(10) - minio_client.make_bucket(bucket) - print("Before successful select: ", datetime.now()) - files = default_upload_directory( started_cluster, "s3", From 98c9afda2e48053877ec38a5dbe3eb48f0b5d8a4 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 29 Oct 2024 17:24:30 +0000 Subject: [PATCH 197/353] Remove build ifdef issue --- src/Storages/registerStorages.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/Storages/registerStorages.cpp b/src/Storages/registerStorages.cpp index 4eb90955a6c..6f6d9c3148f 100644 --- a/src/Storages/registerStorages.cpp +++ b/src/Storages/registerStorages.cpp @@ -145,6 +145,10 @@ void registerStorages(bool use_legacy_mongodb_integration [[maybe_unused]]) registerStorageAzureQueue(factory); #endif +#if USE_AVRO + registerStorageIceberg(factory); +#endif + #if USE_AWS_S3 registerStorageHudi(factory); registerStorageS3Queue(factory); @@ -153,14 +157,10 @@ void registerStorages(bool use_legacy_mongodb_integration [[maybe_unused]]) registerStorageDeltaLake(factory); #endif - #if USE_AVRO - registerStorageIceberg(factory); - #endif +#endif - #endif - - #if USE_HDFS - #if USE_HIVE +#if USE_HDFS +# if USE_HIVE registerStorageHive(factory); #endif #endif From 33d986927036bcef001f220092523fd256baa350 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Tue, 29 Oct 2024 19:42:43 +0100 Subject: [PATCH 198/353] Update settings.md --- docs/en/operations/settings/settings.md | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index 821d08cad7b..e1af24a0b8e 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -717,22 +717,6 @@ Default value: 0 In CREATE TABLE statement allows specifying Variant type with similar variant types (for example, with different numeric or 
date types). Enabling this setting may introduce some ambiguity when working with values with similar types. -## allow_suspicious_types_in_group_by {#allow_suspicious_types_in_group_by} - -Type: Bool - -Default value: 0 - -Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in GROUP BY keys. - -## allow_suspicious_types_in_order_by {#allow_suspicious_types_in_order_by} - -Type: Bool - -Default value: 0 - -Allows or restricts using [Variant](../../sql-reference/data-types/variant.md) and [Dynamic](../../sql-reference/data-types/dynamic.md) types in ORDER BY keys. - ## allow_unrestricted_reads_from_keeper {#allow_unrestricted_reads_from_keeper} Type: Bool From 170a24a4187bda9a5bc25fa8263222e502963b10 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Tue, 29 Oct 2024 19:43:13 +0100 Subject: [PATCH 199/353] Update SettingsChangesHistory.cpp --- src/Core/SettingsChangesHistory.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 169429d1c34..fc5066029e8 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -64,6 +64,8 @@ static std::initializer_list Date: Tue, 29 Oct 2024 19:44:00 +0100 Subject: [PATCH 200/353] Update settings.md --- docs/en/operations/settings/settings.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index e1af24a0b8e..b9b81022d4f 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -9746,3 +9746,5 @@ Type: Int64 Default value: 0 Allows you to select the max window log of ZSTD (it will not be used for MergeTree family) + + From bb9355b3d3fd2748ed1877d839ff555580f1be70 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 29 Oct 2024 22:52:36 +0100 Subject: [PATCH 201/353] stash --- src/Planner/findParallelReplicasQuery.cpp | 101 +++++++++++++++++++--- 1 file changed, 88 insertions(+), 13 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index b97a9a36381..91cbc492fdc 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -17,10 +17,12 @@ #include #include #include +#include #include #include #include #include +#include "Processors/QueryPlan/SortingStep.h" namespace DB { @@ -52,22 +54,30 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre { case QueryTreeNodeType::TABLE: { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); const auto & table_node = query_tree_node->as(); const auto & storage = table_node.getStorage(); /// Here we check StorageDummy as well, to support a query tree with replaced storages. 
if (std::dynamic_pointer_cast(storage) || typeid_cast(storage.get())) { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); /// parallel replicas is not supported with FINAL if (table_node.getTableExpressionModifiers() && table_node.getTableExpressionModifiers()->hasFinal()) + { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return {}; + } + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return res; } + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return {}; } case QueryTreeNodeType::TABLE_FUNCTION: { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return {}; } case QueryTreeNodeType::QUERY: @@ -75,6 +85,7 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre const auto & query_node_to_process = query_tree_node->as(); query_tree_node = query_node_to_process.getJoinTree().get(); res.push(&query_node_to_process); + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); break; } case QueryTreeNodeType::UNION: @@ -83,15 +94,20 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre const auto & union_queries = union_node.getQueries().getNodes(); if (union_queries.empty()) + { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return {}; + } query_tree_node = union_queries.front().get(); + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); break; } case QueryTreeNodeType::ARRAY_JOIN: { const auto & array_join_node = query_tree_node->as(); query_tree_node = array_join_node.getTableExpression().get(); + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); break; } case QueryTreeNodeType::JOIN: @@ -105,9 +121,13 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All); if (!can_parallelize_join) + { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return {}; + } query_tree_node = join_node.getLeftTableExpression().get(); + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); break; } default: @@ -173,75 +193,114 @@ const QueryNode * findQueryForParallelReplicas( const QueryPlan::Node * prev_checked_node = nullptr; const QueryNode * res = nullptr; + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); while (!stack.empty()) { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); const QueryNode * subquery_node = stack.top(); stack.pop(); auto it = mapping.find(subquery_node); /// This should not happen ideally. 
if (it == mapping.end()) - break; - - const QueryPlan::Node * curr_node = it->second; - const QueryPlan::Node * next_node_to_check = curr_node; - bool can_distribute_full_node = true; - - while (next_node_to_check && next_node_to_check != prev_checked_node) { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); + break; + } + + const QueryPlan::Node * const curr_node = it->second; + std::deque> nodes_to_check; + nodes_to_check.push_front(std::make_pair(curr_node, false)); + bool can_distribute_full_node = true; + bool in = false; + + while (!nodes_to_check.empty() /* && nodes_to_check.front() != prev_checked_node*/) + { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); + const auto & [next_node_to_check, digging_into_rabbit_hole] = nodes_to_check.front(); + LOG_DEBUG( + &Poco::Logger::get("debug"), + "next_node_to_check->step->getName()={}, next_node_to_check->step->getStepDescription());={}", + next_node_to_check->step->getName(), + next_node_to_check->step->getStepDescription()); + nodes_to_check.pop_front(); const auto & children = next_node_to_check->children; auto * step = next_node_to_check->step.get(); if (children.empty()) { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); /// Found a source step. This should be possible only in the first iteration. if (prev_checked_node) - return nullptr; + { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); + // return nullptr; + } - next_node_to_check = nullptr; + nodes_to_check = {}; } else if (children.size() == 1) { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); const auto * expression = typeid_cast(step); const auto * filter = typeid_cast(step); + const auto * sorting = typeid_cast(step); const auto * creating_sets = typeid_cast(step); bool allowed_creating_sets = settings[Setting::parallel_replicas_allow_in_with_subquery] && creating_sets; - if (!expression && !filter && !allowed_creating_sets) + if (!expression && !filter && !allowed_creating_sets && !(sorting && sorting->getStepDescription().contains("before JOIN"))) + { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); can_distribute_full_node = false; + in = digging_into_rabbit_hole; + } - next_node_to_check = children.front(); + nodes_to_check.push_front(std::pair(children.front(), digging_into_rabbit_hole)); } else { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); const auto * join = typeid_cast(step); /// We've checked that JOIN is INNER/LEFT in query tree. /// Don't distribute UNION node. if (!join) + { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return res; + } - next_node_to_check = children.front(); + for (const auto & child : children) + nodes_to_check.push_front(std::make_pair(child, true)); } } + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); + /// Current node contains steps like GROUP BY / DISTINCT /// Will try to execute query up to WithMergableStage if (!can_distribute_full_node) { /// Current query node does not contain subqueries. /// We can execute parallel replicas over storage::read. 
+ LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); if (!res) + { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return nullptr; + } - return subquery_node; + return in ? res : subquery_node; } + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); + /// Query is simple enough to be fully distributed. res = subquery_node; prev_checked_node = curr_node; } + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return res; } @@ -261,16 +320,26 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr auto context = query_node ? query_node->getContext() : union_node->getContext(); if (!context->canUseParallelReplicasOnInitiator()) + { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return nullptr; + } + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); auto stack = getSupportingParallelReplicasQuery(query_tree_node.get()); /// Empty stack means that storage does not support parallel replicas. if (stack.empty()) + { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return nullptr; + } /// We don't have any subquery and storage can process parallel replicas by itself. if (stack.top() == query_tree_node.get()) + { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return nullptr; + } /// This is needed to avoid infinite recursion. auto mutable_context = Context::createCopy(context); @@ -295,16 +364,22 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr /// Now, return a query from initial stack. 
if (res) { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); while (!new_stack.empty()) { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); if (res == new_stack.top()) + { + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return stack.top(); + } stack.pop(); new_stack.pop(); } } + LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return res; } From 41bd99510a3de0936ff6aab8c28f93a7f78107fb Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 29 Oct 2024 23:08:51 +0100 Subject: [PATCH 202/353] stash --- src/Planner/findParallelReplicasQuery.cpp | 74 +---------------------- 1 file changed, 2 insertions(+), 72 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 91cbc492fdc..a5d3e863521 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -17,12 +17,11 @@ #include #include #include -#include +#include #include #include #include #include -#include "Processors/QueryPlan/SortingStep.h" namespace DB { @@ -54,30 +53,22 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre { case QueryTreeNodeType::TABLE: { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); const auto & table_node = query_tree_node->as(); const auto & storage = table_node.getStorage(); /// Here we check StorageDummy as well, to support a query tree with replaced storages. if (std::dynamic_pointer_cast(storage) || typeid_cast(storage.get())) { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); /// parallel replicas is not supported with FINAL if (table_node.getTableExpressionModifiers() && table_node.getTableExpressionModifiers()->hasFinal()) - { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return {}; - } - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return res; } - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return {}; } case QueryTreeNodeType::TABLE_FUNCTION: { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return {}; } case QueryTreeNodeType::QUERY: @@ -85,7 +76,6 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre const auto & query_node_to_process = query_tree_node->as(); query_tree_node = query_node_to_process.getJoinTree().get(); res.push(&query_node_to_process); - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); break; } case QueryTreeNodeType::UNION: @@ -94,20 +84,15 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre const auto & union_queries = union_node.getQueries().getNodes(); if (union_queries.empty()) - { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return {}; - } query_tree_node = union_queries.front().get(); - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); break; } case QueryTreeNodeType::ARRAY_JOIN: { const auto & array_join_node = 
query_tree_node->as(); query_tree_node = array_join_node.getTableExpression().get(); - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); break; } case QueryTreeNodeType::JOIN: @@ -121,13 +106,9 @@ std::stack getSupportingParallelReplicasQuery(const IQueryTre || (join_kind == JoinKind::Inner && join_strictness == JoinStrictness::All); if (!can_parallelize_join) - { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return {}; - } query_tree_node = join_node.getLeftTableExpression().get(); - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); break; } default: @@ -190,23 +171,17 @@ const QueryNode * findQueryForParallelReplicas( const std::unordered_map & mapping, const Settings & settings) { - const QueryPlan::Node * prev_checked_node = nullptr; const QueryNode * res = nullptr; - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); while (!stack.empty()) { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); const QueryNode * subquery_node = stack.top(); stack.pop(); auto it = mapping.find(subquery_node); /// This should not happen ideally. if (it == mapping.end()) - { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); break; - } const QueryPlan::Node * const curr_node = it->second; std::deque> nodes_to_check; @@ -214,34 +189,20 @@ const QueryNode * findQueryForParallelReplicas( bool can_distribute_full_node = true; bool in = false; - while (!nodes_to_check.empty() /* && nodes_to_check.front() != prev_checked_node*/) + while (!nodes_to_check.empty()) { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); const auto & [next_node_to_check, digging_into_rabbit_hole] = nodes_to_check.front(); - LOG_DEBUG( - &Poco::Logger::get("debug"), - "next_node_to_check->step->getName()={}, next_node_to_check->step->getStepDescription());={}", - next_node_to_check->step->getName(), - next_node_to_check->step->getStepDescription()); nodes_to_check.pop_front(); const auto & children = next_node_to_check->children; auto * step = next_node_to_check->step.get(); if (children.empty()) { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); /// Found a source step. This should be possible only in the first iteration. 
- if (prev_checked_node) - { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); - // return nullptr; - } - nodes_to_check = {}; } else if (children.size() == 1) { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); const auto * expression = typeid_cast(step); const auto * filter = typeid_cast(step); const auto * sorting = typeid_cast(step); @@ -251,7 +212,6 @@ const QueryNode * findQueryForParallelReplicas( if (!expression && !filter && !allowed_creating_sets && !(sorting && sorting->getStepDescription().contains("before JOIN"))) { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); can_distribute_full_node = false; in = digging_into_rabbit_hole; } @@ -260,47 +220,33 @@ const QueryNode * findQueryForParallelReplicas( } else { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); const auto * join = typeid_cast(step); /// We've checked that JOIN is INNER/LEFT in query tree. /// Don't distribute UNION node. if (!join) - { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return res; - } for (const auto & child : children) nodes_to_check.push_front(std::make_pair(child, true)); } } - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); - /// Current node contains steps like GROUP BY / DISTINCT /// Will try to execute query up to WithMergableStage if (!can_distribute_full_node) { /// Current query node does not contain subqueries. /// We can execute parallel replicas over storage::read. - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); if (!res) - { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return nullptr; - } return in ? res : subquery_node; } - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); - /// Query is simple enough to be fully distributed. res = subquery_node; - prev_checked_node = curr_node; } - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return res; } @@ -320,26 +266,16 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr auto context = query_node ? query_node->getContext() : union_node->getContext(); if (!context->canUseParallelReplicasOnInitiator()) - { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return nullptr; - } - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); auto stack = getSupportingParallelReplicasQuery(query_tree_node.get()); /// Empty stack means that storage does not support parallel replicas. if (stack.empty()) - { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return nullptr; - } /// We don't have any subquery and storage can process parallel replicas by itself. if (stack.top() == query_tree_node.get()) - { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return nullptr; - } /// This is needed to avoid infinite recursion. 
auto mutable_context = Context::createCopy(context); @@ -364,22 +300,16 @@ const QueryNode * findQueryForParallelReplicas(const QueryTreeNodePtr & query_tr /// Now, return a query from initial stack. if (res) { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); while (!new_stack.empty()) { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); if (res == new_stack.top()) - { - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return stack.top(); - } stack.pop(); new_stack.pop(); } } - LOG_DEBUG(&Poco::Logger::get("debug"), "__PRETTY_FUNCTION__={}, __LINE__={}", __PRETTY_FUNCTION__, __LINE__); return res; } From 1ad1d372b2461101c1cf4d7180c1423b6424bdf0 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 29 Oct 2024 23:08:56 +0100 Subject: [PATCH 203/353] stash --- src/Planner/findParallelReplicasQuery.cpp | 30 ++++++++++++++--------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index a5d3e863521..fbcf5386620 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -171,11 +171,17 @@ const QueryNode * findQueryForParallelReplicas( const std::unordered_map & mapping, const Settings & settings) { + struct Frame + { + const QueryPlan::Node * node = nullptr; + bool inside_join = false; + }; + const QueryNode * res = nullptr; while (!stack.empty()) { - const QueryNode * subquery_node = stack.top(); + const QueryNode * const subquery_node = stack.top(); stack.pop(); auto it = mapping.find(subquery_node); @@ -183,23 +189,22 @@ const QueryNode * findQueryForParallelReplicas( if (it == mapping.end()) break; - const QueryPlan::Node * const curr_node = it->second; - std::deque> nodes_to_check; - nodes_to_check.push_front(std::make_pair(curr_node, false)); + std::stack nodes_to_check; + nodes_to_check.push({.node = it->second, .inside_join = false}); bool can_distribute_full_node = true; - bool in = false; + bool currently_inside_join = false; while (!nodes_to_check.empty()) { - const auto & [next_node_to_check, digging_into_rabbit_hole] = nodes_to_check.front(); - nodes_to_check.pop_front(); + const auto & [next_node_to_check, inside_join] = nodes_to_check.top(); + nodes_to_check.pop(); const auto & children = next_node_to_check->children; auto * step = next_node_to_check->step.get(); if (children.empty()) { /// Found a source step. This should be possible only in the first iteration. - nodes_to_check = {}; + break; } else if (children.size() == 1) { @@ -213,10 +218,10 @@ const QueryNode * findQueryForParallelReplicas( if (!expression && !filter && !allowed_creating_sets && !(sorting && sorting->getStepDescription().contains("before JOIN"))) { can_distribute_full_node = false; - in = digging_into_rabbit_hole; + currently_inside_join = inside_join; } - nodes_to_check.push_front(std::pair(children.front(), digging_into_rabbit_hole)); + nodes_to_check.push({.node = children.front(), .inside_join = inside_join}); } else { @@ -227,7 +232,7 @@ const QueryNode * findQueryForParallelReplicas( return res; for (const auto & child : children) - nodes_to_check.push_front(std::make_pair(child, true)); + nodes_to_check.push({.node = child, .inside_join = true}); } } @@ -240,7 +245,8 @@ const QueryNode * findQueryForParallelReplicas( if (!res) return nullptr; - return in ? 
res : subquery_node; + /// todo + return currently_inside_join ? res : subquery_node; } /// Query is simple enough to be fully distributed. From d9f427deba385b6ab708c8e57cb6caad14cfdfc4 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 29 Oct 2024 23:33:45 +0100 Subject: [PATCH 204/353] stash --- src/Planner/PlannerJoinTree.cpp | 5 +- src/Planner/findParallelReplicasQuery.cpp | 2 +- src/Processors/QueryPlan/SortingStep.cpp | 6 +-- src/Processors/QueryPlan/SortingStep.h | 6 ++- ...rallel_replicas_join_with_totals.reference | 10 ++++ ...3254_parallel_replicas_join_with_totals.sh | 46 +++++++++++++++++++ 6 files changed, 65 insertions(+), 10 deletions(-) create mode 100644 tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.reference create mode 100755 tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.sh diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index 39c1352c9cf..5c153f6db39 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -1555,10 +1555,7 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ SortingStep::Settings sort_settings(*query_context); auto sorting_step = std::make_unique( - plan.getCurrentHeader(), - std::move(sort_description), - 0 /*limit*/, - sort_settings); + plan.getCurrentHeader(), std::move(sort_description), 0 /*limit*/, sort_settings, true /*is_sorting_for_merge_join*/); sorting_step->setStepDescription(fmt::format("Sort {} before JOIN", join_table_side)); plan.addStep(std::move(sorting_step)); }; diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index fbcf5386620..66c7c6440c4 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -215,7 +215,7 @@ const QueryNode * findQueryForParallelReplicas( const auto * creating_sets = typeid_cast(step); bool allowed_creating_sets = settings[Setting::parallel_replicas_allow_in_with_subquery] && creating_sets; - if (!expression && !filter && !allowed_creating_sets && !(sorting && sorting->getStepDescription().contains("before JOIN"))) + if (!expression && !filter && !allowed_creating_sets && !(sorting && sorting->isSortingForMergeJoin())) { can_distribute_full_node = false; currently_inside_join = inside_join; diff --git a/src/Processors/QueryPlan/SortingStep.cpp b/src/Processors/QueryPlan/SortingStep.cpp index 5ad2f1f62d5..c15c45ee269 100644 --- a/src/Processors/QueryPlan/SortingStep.cpp +++ b/src/Processors/QueryPlan/SortingStep.cpp @@ -77,13 +77,11 @@ static ITransformingStep::Traits getTraits(size_t limit) } SortingStep::SortingStep( - const Header & input_header, - SortDescription description_, - UInt64 limit_, - const Settings & settings_) + const Header & input_header, SortDescription description_, UInt64 limit_, const Settings & settings_, bool is_sorting_for_merge_join_) : ITransformingStep(input_header, input_header, getTraits(limit_)) , type(Type::Full) , result_description(std::move(description_)) + , is_sorting_for_merge_join(is_sorting_for_merge_join_) , limit(limit_) , sort_settings(settings_) { diff --git a/src/Processors/QueryPlan/SortingStep.h b/src/Processors/QueryPlan/SortingStep.h index 6cdf626d4c8..9af591d603a 100644 --- a/src/Processors/QueryPlan/SortingStep.h +++ b/src/Processors/QueryPlan/SortingStep.h @@ -39,7 +39,8 @@ public: const Header & input_header, SortDescription description_, UInt64 limit_, - const Settings & settings_); + const Settings & settings_, + bool 
is_sorting_for_merge_join_ = false); /// Full with partitioning SortingStep( @@ -81,6 +82,8 @@ public: bool hasPartitions() const { return !partition_by_description.empty(); } + bool isSortingForMergeJoin() const { return is_sorting_for_merge_join; } + void convertToFinishSorting(SortDescription prefix_description, bool use_buffering_); Type getType() const { return type; } @@ -124,6 +127,7 @@ private: const SortDescription result_description; SortDescription partition_by_description; + bool is_sorting_for_merge_join = false; UInt64 limit; bool always_read_till_end = false; diff --git a/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.reference b/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.reference new file mode 100644 index 00000000000..f87bb786c46 --- /dev/null +++ b/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.reference @@ -0,0 +1,10 @@ +1 1 +1 1 + +0 0 +----- +1 1 +1 1 + +0 0 +----- diff --git a/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.sh b/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.sh new file mode 100755 index 00000000000..d3780d12ae0 --- /dev/null +++ b/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CUR_DIR"/../shell_config.sh + + +${CLICKHOUSE_CLIENT} --query=" +CREATE TABLE t +( + item_id UInt64, + price_sold Float32, + date Date +) +ENGINE = MergeTree +ORDER BY item_id; + +INSERT INTO t VALUES (1, 100, '1970-01-01'), (1, 200, '1970-01-02'); +" + +for enable_parallel_replicas in {0..1}; do + ${CLICKHOUSE_CLIENT} --query=" + set allow_experimental_parallel_reading_from_replicas=${enable_parallel_replicas}, cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1; + + SELECT * + FROM + ( + SELECT item_id + FROM t + ) AS l + LEFT JOIN + ( + SELECT item_id + FROM t + GROUP BY item_id + WITH TOTALS + ORDER BY item_id ASC + ) AS r ON l.item_id = r.item_id; + + SELECT '-----'; + " +done + +${CLICKHOUSE_CLIENT} --query=" +DROP TABLE t; +" From ba9587c728d7af72f01618e44e58dfe9cc156e06 Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 30 Oct 2024 10:34:12 +0000 Subject: [PATCH 205/353] Removed trash --- src/Storages/ObjectStorage/StorageObjectStorage.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index 1ed6e137a31..a72fd16abc2 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -287,7 +287,6 @@ void StorageObjectStorage::read( size_t num_streams) { configuration->update(object_storage, local_context); - printConfiguration(local_context->getConfigRef(), "Select query"); if (partition_by && configuration->withPartitionWildcard()) { throw Exception(ErrorCodes::NOT_IMPLEMENTED, From e7fe8fed22db3c8772f9b6fe1bd9eb233e50c36c Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 30 Oct 2024 11:13:03 +0000 Subject: [PATCH 206/353] Added flag for parquet files --- .../registerStorageObjectStorage.cpp | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp index a0393ea3e6a..e94f1860176 100644 --- 
a/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/registerStorageObjectStorage.cpp @@ -16,10 +16,14 @@ namespace ErrorCodes extern const int BAD_ARGUMENTS; } -static std::shared_ptr createStorageObjectStorage( - const StorageFactory::Arguments & args, - StorageObjectStorage::ConfigurationPtr configuration, - ContextPtr context) +namespace +{ + +// LocalObjectStorage is only supported for Iceberg Datalake operations where Avro format is required. For regular file access, use FileStorage instead. +#if USE_AWS_S3 || USE_AZURE_BLOB_STORAGE || USE_HDFS || USE_AVRO + +std::shared_ptr +createStorageObjectStorage(const StorageFactory::Arguments & args, StorageObjectStorage::ConfigurationPtr configuration, ContextPtr context) { auto & engine_args = args.engine_args; if (engine_args.empty()) @@ -63,6 +67,9 @@ static std::shared_ptr createStorageObjectStorage( partition_by); } +#endif +} + #if USE_AZURE_BLOB_STORAGE void registerStorageAzure(StorageFactory & factory) { From 5e2355b1231774c7f3525c296df0e56ecb3d9c9f Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 30 Oct 2024 13:01:20 +0100 Subject: [PATCH 207/353] better --- src/Planner/findParallelReplicasQuery.cpp | 23 ++++++++++++------- src/Processors/QueryPlan/SortingStep.h | 2 ++ ...3254_parallel_replicas_join_with_totals.sh | 2 ++ 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 66c7c6440c4..8a806045111 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -174,6 +174,14 @@ const QueryNode * findQueryForParallelReplicas( struct Frame { const QueryPlan::Node * node = nullptr; + /// Below we will check subqueries from `stack` to find outtermost subquery that could be executed remotely. + /// Currently traversal algorithm considers only steps with 0 or 1 children and JOIN specifically. + /// When we found some step that requires finalization on the initiator (e.g. GROUP BY) there are two options: + /// 1. If plan looks like a single path (e.g. AggregatingStep -> ExpressionStep -> Reading) we can execute + /// current subquery as a whole with replicas. + /// 2. If we were inside JOIN we cannot offload the whole subquery to replicas because at least one side + /// of the JOIN needs to be finalized on the initiator. + /// So this flag is used to track what subquery to return once we hit a step that needs finalization. bool inside_join = false; }; @@ -203,19 +211,21 @@ const QueryNode * findQueryForParallelReplicas( if (children.empty()) { - /// Found a source step. This should be possible only in the first iteration. - break; + /// Found a source step. } else if (children.size() == 1) { const auto * expression = typeid_cast(step); const auto * filter = typeid_cast(step); - const auto * sorting = typeid_cast(step); const auto * creating_sets = typeid_cast(step); - bool allowed_creating_sets = settings[Setting::parallel_replicas_allow_in_with_subquery] && creating_sets; + const bool allowed_creating_sets = settings[Setting::parallel_replicas_allow_in_with_subquery] && creating_sets; - if (!expression && !filter && !allowed_creating_sets && !(sorting && sorting->isSortingForMergeJoin())) + const auto * sorting = typeid_cast(step); + /// Sorting for merge join is supposed to be done locally before join itself, so it doesn't need finalization. 
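// ---------------------------------------------------------------------------------------------
// Editorial sketch, not part of the patch: a self-contained model of the traversal described in
// the comment above. PlanNode, StepKind and analyze() are simplified stand-ins for
// QueryPlan::Node and the typeid_cast checks in the real code; UNION handling is omitted. The
// point is only to show how the `inside_join` flag decides whether the whole subquery or just
// the innermost already-found subquery can be offloaded to replicas.
#include <stack>
#include <vector>

enum class StepKind { Read, Expression, Filter, SortingForMergeJoin, Aggregating, Join };

struct PlanNode
{
    StepKind kind;
    std::vector<const PlanNode *> children;
};

struct TraversalResult
{
    bool can_distribute_full_node = true;
    bool stopped_inside_join = false;
};

inline TraversalResult analyze(const PlanNode * root)
{
    struct Frame { const PlanNode * node; bool inside_join; };
    TraversalResult result;

    std::stack<Frame> frames;
    frames.push({root, false});

    while (!frames.empty())
    {
        const auto [node, inside_join] = frames.top();
        frames.pop();

        /// Steps that do not require finalization on the initiator.
        const bool harmless = node->kind == StepKind::Read || node->kind == StepKind::Expression
            || node->kind == StepKind::Filter || node->kind == StepKind::SortingForMergeJoin
            || node->kind == StepKind::Join;

        if (!harmless)
        {
            result.can_distribute_full_node = false;
            result.stopped_inside_join = inside_join;
        }

        for (const auto * child : node->children)
            frames.push({child, inside_join || node->kind == StepKind::Join});
    }

    return result;
}
// ---------------------------------------------------------------------------------------------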
+ const bool allowed_sorting = sorting && sorting->isSortingForMergeJoin(); + + if (!expression && !filter && !allowed_creating_sets && !allowed_sorting) { can_distribute_full_node = false; currently_inside_join = inside_join; @@ -236,8 +246,6 @@ const QueryNode * findQueryForParallelReplicas( } } - /// Current node contains steps like GROUP BY / DISTINCT - /// Will try to execute query up to WithMergableStage if (!can_distribute_full_node) { /// Current query node does not contain subqueries. @@ -245,7 +253,6 @@ const QueryNode * findQueryForParallelReplicas( if (!res) return nullptr; - /// todo return currently_inside_join ? res : subquery_node; } diff --git a/src/Processors/QueryPlan/SortingStep.h b/src/Processors/QueryPlan/SortingStep.h index 9af591d603a..be2e4b0149c 100644 --- a/src/Processors/QueryPlan/SortingStep.h +++ b/src/Processors/QueryPlan/SortingStep.h @@ -127,6 +127,8 @@ private: const SortDescription result_description; SortDescription partition_by_description; + + /// See `findQueryForParallelReplicas` bool is_sorting_for_merge_join = false; UInt64 limit; diff --git a/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.sh b/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.sh index d3780d12ae0..365d7abed7a 100755 --- a/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.sh +++ b/tests/queries/0_stateless/03254_parallel_replicas_join_with_totals.sh @@ -20,6 +20,8 @@ INSERT INTO t VALUES (1, 100, '1970-01-01'), (1, 200, '1970-01-02'); for enable_parallel_replicas in {0..1}; do ${CLICKHOUSE_CLIENT} --query=" + --- Old analyzer uses different code path and it produces wrong result in this case. + set enable_analyzer=1; set allow_experimental_parallel_reading_from_replicas=${enable_parallel_replicas}, cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1; SELECT * From 0dcb2b9c2c61674be298b706498763e8fcae7018 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 12:24:39 +0000 Subject: [PATCH 208/353] try another approach --- src/Interpreters/FillingRow.cpp | 315 +++++++++++++++--- src/Interpreters/FillingRow.h | 18 +- .../Transforms/FillingTransform.cpp | 92 +++-- 3 files changed, 348 insertions(+), 77 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 8c5f102bcd6..caf6ad9e3ba 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -1,4 +1,7 @@ +#include #include +#include "Common/Logger.h" +#include "Common/logger_useful.h" #include #include @@ -95,108 +98,326 @@ std::optional FillingRow::doLongJump(const FillColumnDescription & descr, Field next_value = shifted_value; descr.step_func(next_value, step_len); - if (less(next_value, to, getDirection(0))) + // if (less(next_value, to, getDirection(0))) + // { + // shifted_value = std::move(next_value); + // step_len *= 2; + // } + // else + // { + // step_len /= 2; + // } + + if (less(to, next_value, getDirection(0))) { - shifted_value = std::move(next_value); - step_len *= 2; + step_len /= 2; } else { - step_len /= 2; + shifted_value = std::move(next_value); + step_len *= 2; } } return shifted_value; } -std::pair FillingRow::next(const FillingRow & to_row, bool long_jump) +Field findMin(Field a, Field b, Field c, int dir) { + auto logger = getLogger("FillingRow"); + LOG_DEBUG(logger, "a: {} b: {} c: {}", a.dump(), b.dump(), c.dump()); + + if (a.isNull() || (!b.isNull() && less(b, a, dir))) + a = b; + + if (a.isNull() 
|| (!c.isNull() && less(c, a, dir))) + a = c; + + return a; +} + +std::pair FillingRow::next(const FillingRow & next_original_row) +{ + auto logger = getLogger("FillingRow"); + const size_t row_size = size(); size_t pos = 0; /// Find position we need to increment for generating next row. for (; pos < row_size; ++pos) - if (!row[pos].isNull() && !to_row.row[pos].isNull() && !equals(row[pos], to_row.row[pos])) - break; + { + if (row[pos].isNull()) + continue; - if (pos == row_size || less(to_row.row[pos], row[pos], getDirection(pos))) + const auto & descr = getFillDescription(pos); + auto min_constr = findMin(next_original_row[pos], staleness_border[pos], descr.fill_to, getDirection(pos)); + LOG_DEBUG(logger, "min_constr: {}", min_constr); + + if (!min_constr.isNull() && !equals(row[pos], min_constr)) + break; + } + + LOG_DEBUG(logger, "pos: {}", pos); + + if (pos == row_size) return {false, false}; - /// If we have any 'fill_to' value at position greater than 'pos', - /// we need to generate rows up to 'fill_to' value. + const auto & pos_descr = getFillDescription(pos); + + if (!next_original_row[pos].isNull() && less(next_original_row[pos], row[pos], getDirection(pos))) + return {false, false}; + + if (!staleness_border[pos].isNull() && !less(row[pos], staleness_border[pos], getDirection(pos))) + return {false, false}; + + if (!pos_descr.fill_to.isNull() && !less(row[pos], pos_descr.fill_to, getDirection(pos))) + return {false, false}; + + /// If we have any 'fill_to' value at position greater than 'pos' or configured staleness, + /// we need to generate rows up to one of this borders. for (size_t i = row_size - 1; i > pos; --i) { auto & fill_column_desc = getFillDescription(i); - if (fill_column_desc.fill_to.isNull() || row[i].isNull()) + if (row[i].isNull()) continue; - auto next_value = doJump(fill_column_desc, i); - if (next_value.has_value() && !equals(next_value.value(), fill_column_desc.fill_to)) - { - row[i] = std::move(next_value.value()); - initFromDefaults(i + 1); - return {true, true}; - } + if (fill_column_desc.fill_to.isNull() && staleness_border[i].isNull()) + continue; + + Field next_value = row[i]; + fill_column_desc.step_func(next_value, 1); + + if (!staleness_border[i].isNull() && !less(next_value, staleness_border[i], getDirection(i))) + continue; + + if (!fill_column_desc.fill_to.isNull() && !less(next_value, fill_column_desc.fill_to, getDirection(i))) + continue; + + row[i] = next_value; + initWithFrom(i + 1); + return {true, true}; } - auto & fill_column_desc = getFillDescription(pos); - std::optional next_value; + auto next_value = row[pos]; + getFillDescription(pos).step_func(next_value, 1); - if (long_jump) - { - next_value = doLongJump(fill_column_desc, pos, to_row[pos]); - - if (!next_value.has_value()) - return {false, false}; - - /// We need value >= to_row[pos] - fill_column_desc.step_func(next_value.value(), 1); - } - else - { - next_value = doJump(fill_column_desc, pos); - } - - if (!next_value.has_value() || less(to_row.row[pos], next_value.value(), getDirection(pos)) || equals(next_value.value(), getFillDescription(pos).fill_to)) + if (!next_original_row[pos].isNull() && less(next_original_row[pos], next_value, getDirection(pos))) return {false, false}; - row[pos] = std::move(next_value.value()); - if (equals(row[pos], to_row.row[pos])) + if (!staleness_border[pos].isNull() && !less(next_value, staleness_border[pos], getDirection(pos))) + return {false, false}; + + if (!pos_descr.fill_to.isNull() && !less(next_value, pos_descr.fill_to, getDirection(pos))) 
+ return {false, false}; + + row[pos] = next_value; + if (equals(row[pos], next_original_row[pos])) { bool is_less = false; for (size_t i = pos + 1; i < row_size; ++i) { - const auto & fill_from = getFillDescription(i).fill_from; - if (!fill_from.isNull()) - row[i] = fill_from; + const auto & descr = getFillDescription(i); + if (!descr.fill_from.isNull()) + row[i] = descr.fill_from; else - row[i] = to_row.row[i]; - is_less |= less(row[i], to_row.row[i], getDirection(i)); + row[i] = next_original_row[i]; + + is_less |= ( + (next_original_row[i].isNull() || less(row[i], next_original_row[i], getDirection(i))) && + (staleness_border[i].isNull() || less(row[i], staleness_border[i], getDirection(i))) && + (descr.fill_to.isNull() || less(row[i], descr.fill_to, getDirection(i))) + ); } return {is_less, true}; } - initFromDefaults(pos + 1); + initWithFrom(pos + 1); return {true, true}; } -void FillingRow::initFromDefaults(size_t from_pos) +bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed) +{ + auto logger = getLogger("FillingRow::shift"); + LOG_DEBUG(logger, "next_original_row: {}, current: {}", next_original_row.dump(), dump()); + + for (size_t pos = 0; pos < size(); ++pos) + { + if (row[pos].isNull() || next_original_row[pos].isNull() || equals(row[pos], next_original_row[pos])) + continue; + + if (less(next_original_row[pos], row[pos], getDirection(pos))) + return false; + + std::optional next_value = doLongJump(getFillDescription(pos), pos, next_original_row[pos]); + + if (!next_value.has_value()) + { + LOG_DEBUG(logger, "next value: {}", "None"); + continue; + } + else + { + LOG_DEBUG(logger, "next value: {}", next_value->dump()); + } + + row[pos] = std::move(next_value.value()); + + if (equals(row[pos], next_original_row[pos])) + { + bool is_less = false; + for (size_t i = pos + 1; i < size(); ++i) + { + const auto & descr = getFillDescription(i); + if (!descr.fill_from.isNull()) + row[i] = descr.fill_from; + else + row[i] = next_original_row[i]; + + is_less |= ( + (next_original_row[i].isNull() || less(row[i], next_original_row[i], getDirection(i))) && + (staleness_border[i].isNull() || less(row[i], staleness_border[i], getDirection(i))) && + (descr.fill_to.isNull() || less(row[i], descr.fill_to, getDirection(i))) + ); + } + + LOG_DEBUG(logger, "is less: {}", is_less); + + value_changed = true; + return is_less; + } + else + { + // getFillDescription(pos).step_func(row[pos], 1); + initWithTo(/*from_pos=*/pos + 1); + + value_changed = false; + return false; + } + } + + return false; +} + +bool FillingRow::isConstraintComplete(size_t pos) const +{ + auto logger = getLogger("FillingRow::isConstraintComplete"); + + if (row[pos].isNull()) + { + LOG_DEBUG(logger, "disabled"); + return true; /// disabled + } + + const auto & descr = getFillDescription(pos); + int direction = getDirection(pos); + + if (!descr.fill_to.isNull() && !less(row[pos], descr.fill_to, direction)) + { + LOG_DEBUG(logger, "fill to: {}, row: {}, direction: {}", descr.fill_to.dump(), row[pos].dump(), direction); + return false; + } + + if (!staleness_border[pos].isNull() && !less(row[pos], staleness_border[pos], direction)) + { + LOG_DEBUG(logger, "staleness border: {}, row: {}, direction: {}", staleness_border[pos].dump(), row[pos].dump(), direction); + return false; + } + + return true; +} + +bool FillingRow::isConstraintsComplete() const +{ + for (size_t pos = 0; pos < size(); ++pos) + { + if (isConstraintComplete(pos)) + return true; + } + + return false; +} + +bool 
FillingRow::isLessStaleness() const +{ + auto logger = getLogger("FillingRow::isLessStaleness"); + + for (size_t pos = 0; pos < size(); ++pos) + { + LOG_DEBUG(logger, "staleness border: {}, row: {}", staleness_border[pos].dump(), row[pos].dump()); + + if (row[pos].isNull() || staleness_border[pos].isNull()) + continue; + + if (less(row[pos], staleness_border[pos], getDirection(pos))) + return true; + } + + return false; +} + +bool FillingRow::isStalenessConfigured() const +{ + for (size_t pos = 0; pos < size(); ++pos) + if (!getFillDescription(pos).fill_staleness.isNull()) + return true; + + return false; +} + +bool FillingRow::isLessFillTo() const +{ + auto logger = getLogger("FillingRow::isLessFillTo"); + + for (size_t pos = 0; pos < size(); ++pos) + { + const auto & descr = getFillDescription(pos); + + LOG_DEBUG(logger, "fill to: {}, row: {}", descr.fill_to.dump(), row[pos].dump()); + + if (row[pos].isNull() || descr.fill_to.isNull()) + continue; + + if (less(row[pos], descr.fill_to, getDirection(pos))) + return true; + } + + return false; +} + +bool FillingRow::isFillToConfigured() const +{ + for (size_t pos = 0; pos < size(); ++pos) + if (!getFillDescription(pos).fill_to.isNull()) + return true; + + return false; +} + + +void FillingRow::initWithFrom(size_t from_pos) { for (size_t i = from_pos; i < sort_description.size(); ++i) row[i] = getFillDescription(i).fill_from; } +void FillingRow::initWithTo(size_t from_pos) +{ + for (size_t i = from_pos; i < sort_description.size(); ++i) + row[i] = getFillDescription(i).fill_to; +} + void FillingRow::initStalenessRow(const Columns& base_row, size_t row_ind) { for (size_t i = 0; i < size(); ++i) { - staleness_border[i] = (*base_row[i])[row_ind]; - const auto& descr = getFillDescription(i); if (!descr.fill_staleness.isNull()) + { + staleness_border[i] = (*base_row[i])[row_ind]; descr.staleness_step_func(staleness_border[i], 1); + } } } diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index dc787173191..a5e622e4c6e 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -25,9 +25,22 @@ public: /// Return pair of boolean /// apply - true if filling values should be inserted into result set /// value_changed - true if filling row value was changed - std::pair next(const FillingRow & to_row, bool long_jump); + std::pair next(const FillingRow & next_original_row); - void initFromDefaults(size_t from_pos = 0); + /// Returns true if need to generate some prefix for to_row + bool shift(const FillingRow & next_original_row, bool& value_changed); + + bool isConstraintComplete(size_t pos) const; + bool isConstraintsComplete() const; + + bool isLessStaleness() const; + bool isStalenessConfigured() const; + + bool isLessFillTo() const; + bool isFillToConfigured() const; + + void initWithFrom(size_t from_pos = 0); + void initWithTo(size_t from_pos = 0); void initStalenessRow(const Columns& base_row, size_t row_ind); Field & operator[](size_t index) { return row[index]; } @@ -39,6 +52,7 @@ public: bool isNull() const; int getDirection(size_t index) const { return sort_description[index].direction; } + Field getStalenessBorder(size_t index) const { return staleness_border[index]; } FillColumnDescription & getFillDescription(size_t index) { return sort_description[index].fill_description; } const FillColumnDescription & getFillDescription(size_t index) const { return sort_description[index].fill_description; } diff --git a/src/Processors/Transforms/FillingTransform.cpp 
b/src/Processors/Transforms/FillingTransform.cpp index 46a670394a5..a3a185929dc 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -11,13 +11,14 @@ #include #include #include +#include "Interpreters/FillingRow.h" #include namespace DB { -constexpr bool debug_logging_enabled = false; +constexpr bool debug_logging_enabled = true; template void logDebug(String key, const T & value, const char * separator = " : ") @@ -507,18 +508,39 @@ bool FillingTransform::generateSuffixIfNeeded( logDebug("should_insert_first", should_insert_first); for (size_t i = 0, size = filling_row.size(); i < size; ++i) - next_row[i] = filling_row.getFillDescription(i).fill_to; + next_row[i] = Field{}; logDebug("generateSuffixIfNeeded next_row updated", next_row); - if (filling_row >= next_row) + // if (!filling_row.isFillToConfigured() && !filling_row.isStalenessConfigured()) + // { + // logDebug("generateSuffixIfNeeded", "no other constraints, will not generate suffix"); + // return false; + // } + + // logDebug("filling_row.isLessFillTo()", filling_row.isLessFillTo()); + // logDebug("filling_row.isLessStaleness()", filling_row.isLessStaleness()); + + // if (filling_row.isFillToConfigured() && !filling_row.isLessFillTo()) + // { + // logDebug("generateSuffixIfNeeded", "not less than fill to, will not generate suffix"); + // return false; + // } + + // if (filling_row.isStalenessConfigured() && !filling_row.isLessStaleness()) + // { + // logDebug("generateSuffixIfNeeded", "not less than staleness border, will not generate suffix"); + // return false; + // } + + if (!filling_row.isConstraintsComplete()) { - logDebug("generateSuffixIfNeeded", "no need to generate suffix"); + logDebug("generateSuffixIfNeeded", "will not generate suffix"); return false; } Block interpolate_block; - if (should_insert_first && filling_row < next_row) + if (should_insert_first) { interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); @@ -533,7 +555,7 @@ bool FillingTransform::generateSuffixIfNeeded( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/false); + const auto [apply, changed] = filling_row.next(next_row); filling_row_changed = changed; if (!apply) break; @@ -615,7 +637,7 @@ void FillingTransform::transformRange( if (!fill_from.isNull() && !equals(current_value, fill_from)) { - filling_row.initFromDefaults(i); + filling_row.initWithFrom(i); filling_row_inserted = false; if (less(fill_from, current_value, filling_row.getDirection(i))) { @@ -642,24 +664,14 @@ void FillingTransform::transformRange( logDebug("should_insert_first", should_insert_first); for (size_t i = 0, size = filling_row.size(); i < size; ++i) - { - const auto current_value = (*input_fill_columns[i])[row_ind]; - const auto & fill_to = filling_row.getFillDescription(i).fill_to; + next_row[i] = (*input_fill_columns[i])[row_ind]; - logDebug("current value", current_value.dump()); - logDebug("fill to", fill_to.dump()); - - if (fill_to.isNull() || less(current_value, fill_to, filling_row.getDirection(i))) - next_row[i] = current_value; - else - next_row[i] = fill_to; - } logDebug("next_row updated", next_row); /// The condition is true when filling row is initialized by value(s) in FILL FROM, /// and there are row(s) in current range with value(s) < then in the filling row. /// It can happen only once for a range. 
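    /// (Editorial worked example, not part of the patch.) For `ORDER BY a WITH FILL FROM 10`,
    /// suppose the first original row of a range has a = 13. The filling row is initialized to 10
    /// from FILL FROM, which is less than 13, so this branch inserts the row for 10, the fill loop
    /// below then produces 11 and 12, and only afterwards the original row with 13 is copied.
    /// After that the filling row tracks the input, so the branch cannot fire again within the
    /// range. The extra isConstraintsComplete() check added here keeps that prefix from being
    /// emitted once the filling row has already crossed its FILL TO or STALENESS border.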
- if (should_insert_first && filling_row < next_row) + if (should_insert_first && filling_row < next_row && filling_row.isConstraintsComplete()) { interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); @@ -669,7 +681,7 @@ void FillingTransform::transformRange( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/false); + const auto [apply, changed] = filling_row.next(next_row); filling_row_changed = changed; if (!apply) break; @@ -679,12 +691,36 @@ void FillingTransform::transformRange( copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); } - const auto [apply, changed] = filling_row.next(next_row, /*long_jump=*/true); - logDebug("long jump apply", apply); - logDebug("long jump changed", changed); + { + filling_row.initStalenessRow(input_fill_columns, row_ind); - if (changed) - filling_row_changed = true; + bool shift_apply = filling_row.shift(next_row, filling_row_changed); + logDebug("shift_apply", shift_apply); + logDebug("filling_row_changed", filling_row_changed); + + while (shift_apply) + { + logDebug("after shift", filling_row); + + while (true) + { + logDebug("filling_row in prefix", filling_row); + + interpolate(result_columns, interpolate_block); + insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); + copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); + + const auto [apply, changed] = filling_row.next(next_row); + logDebug("filling_row in prefix", filling_row); + + filling_row_changed = changed; + if (!apply) + break; + } + + shift_apply = filling_row.shift(next_row, filling_row_changed); + } + } /// new valid filling row was generated but not inserted, will use it during suffix generation if (filling_row_changed) @@ -697,8 +733,8 @@ void FillingTransform::transformRange( copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); copyRowFromColumns(res_other_columns, input_other_columns, row_ind); - /// Init next staleness interval with current row, because we have already made the long jump to it - filling_row.initStalenessRow(input_fill_columns, row_ind); + // /// Init next staleness interval with current row, because we have already made the long jump to it + // filling_row.initStalenessRow(input_fill_columns, row_ind); } /// save sort prefix of last row in the range, it's used to generate suffix @@ -744,7 +780,7 @@ void FillingTransform::transform(Chunk & chunk) /// if no data was processed, then need to initialize filling_row if (last_row.empty()) { - filling_row.initFromDefaults(); + filling_row.initWithFrom(); filling_row_inserted = false; } From 98f358baa3cac9813ed071067686af56653792c5 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 30 Oct 2024 13:42:27 +0100 Subject: [PATCH 209/353] add test --- ...eplicas_join_algo_and_analyzer_4.reference | 29 ++++++ ...allel_replicas_join_algo_and_analyzer_4.sh | 93 +++++++++++++++++++ 2 files changed, 122 insertions(+) create mode 100644 tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference create mode 100755 tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh diff --git a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference new file mode 100644 index 
00000000000..9fc156b5fb0 --- /dev/null +++ b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference @@ -0,0 +1,29 @@ +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` GROUP BY `__table1`.`item_id` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` GROUP BY `__table1`.`item_id` +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` ALL LEFT JOIN (SELECT `__table4`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table4`) AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC diff --git a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh new file mode 100755 index 00000000000..a588fa47c2d --- /dev/null +++ b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh @@ -0,0 +1,93 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +${CLICKHOUSE_CLIENT} --query=" +CREATE TABLE t +( + item_id UInt64, + price_sold Float32, + date Date +) +ENGINE = MergeTree +ORDER BY item_id; + +CREATE TABLE t1 +( + item_id UInt64, + price_sold Float32, + date Date +) +ENGINE = MergeTree +ORDER BY item_id; + +INSERT INTO t SELECT number, number % 10, toDate(number) FROM numbers(100000); +INSERT INTO t1 SELECT number, number % 10, toDate(number) FROM numbers(100000); +" + +query1=" + SELECT sum(item_id) + FROM + ( + SELECT item_id + FROM t + GROUP BY item_id + ) AS l + LEFT JOIN + ( + SELECT item_id + FROM t1 + ) AS r ON l.item_id = r.item_id +" + +query2=" + SELECT sum(item_id) + FROM + ( + SELECT item_id + FROM t + ) AS l + LEFT JOIN + ( + SELECT item_id + FROM t1 + GROUP BY item_id + ) AS r ON l.item_id = r.item_id +" + +query3=" + SELECT sum(item_id) + FROM + ( + SELECT item_id, price_sold + FROM t + ) AS l + LEFT JOIN + ( + SELECT item_id + FROM t1 + ) AS r ON l.item_id = r.item_id + GROUP BY price_sold + ORDER BY price_sold +" + +for query in "${query1}" "${query2}" "${query3}"; do + for enable_parallel_replicas in {0..1}; do + ${CLICKHOUSE_CLIENT} --query=" + set enable_analyzer=1; + set allow_experimental_parallel_reading_from_replicas=${enable_parallel_replicas}, cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1; + + ${query}; + + SELECT replaceRegexpAll(explain, '.*Query: (.*) Replicas:.*', '\\1') + FROM + ( + EXPLAIN actions=1 ${query} + ) + WHERE explain LIKE '%ParallelReplicas%'; + " + done +done From e76f66d865540f86e32ac415974cfcd9b35c6b65 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 30 Oct 2024 13:58:33 +0100 Subject: [PATCH 210/353] fix typo --- src/Planner/findParallelReplicasQuery.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/Planner/findParallelReplicasQuery.cpp b/src/Planner/findParallelReplicasQuery.cpp index 8a806045111..fce86a6cda0 100644 --- a/src/Planner/findParallelReplicasQuery.cpp +++ b/src/Planner/findParallelReplicasQuery.cpp @@ -174,7 +174,7 @@ const QueryNode * findQueryForParallelReplicas( struct Frame { const QueryPlan::Node * node = nullptr; - /// Below we will check subqueries from `stack` to find outtermost subquery that could be executed remotely. + /// Below we will check subqueries from `stack` to find outermost subquery that could be executed remotely. /// Currently traversal algorithm considers only steps with 0 or 1 children and JOIN specifically. /// When we found some step that requires finalization on the initiator (e.g. GROUP BY) there are two options: /// 1. If plan looks like a single path (e.g. AggregatingStep -> ExpressionStep -> Reading) we can execute From 0840f7854c9ff286623d2165b79cec72254cdc67 Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 30 Oct 2024 13:40:27 +0000 Subject: [PATCH 211/353] Fix ifdefs in ObjectStorageObject table --- src/TableFunctions/TableFunctionObjectStorage.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/TableFunctions/TableFunctionObjectStorage.cpp b/src/TableFunctions/TableFunctionObjectStorage.cpp index 6d81269f2d7..12de08afad0 100644 --- a/src/TableFunctions/TableFunctionObjectStorage.cpp +++ b/src/TableFunctions/TableFunctionObjectStorage.cpp @@ -269,41 +269,43 @@ void registerTableFunctionIceberg(TableFunctionFactory & factory) } #endif + +#if USE_AWS_S3 #if USE_PARQUET void registerTableFunctionDeltaLake(TableFunctionFactory & factory) { -#if USE_AWS_S3 factory.registerFunction( {.documentation = {.description = R"(The table function can be used to read the DeltaLake table stored on object store.)", .examples{{"deltaLake", "SELECT * FROM deltaLake(url, access_key_id, secret_access_key)", ""}}, .categories{"DataLake"}}, .allow_readonly = false}); -#endif } #endif void registerTableFunctionHudi(TableFunctionFactory & factory) { -#if USE_AWS_S3 factory.registerFunction( {.documentation = {.description = R"(The table function can be used to read the Hudi table stored on object store.)", .examples{{"hudi", "SELECT * FROM hudi(url, access_key_id, secret_access_key)", ""}}, .categories{"DataLake"}}, .allow_readonly = false}); -#endif } +#endif + void registerDataLakeTableFunctions(TableFunctionFactory & factory) { UNUSED(factory); #if USE_AVRO registerTableFunctionIceberg(factory); #endif +#if USE_AWS_S3 #if USE_PARQUET registerTableFunctionDeltaLake(factory); #endif registerTableFunctionHudi(factory); +#endif } } From b9829c703fd4ceae38b5d195ae195c2321e17444 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 13:44:59 +0000 Subject: [PATCH 212/353] change constraints check --- src/Interpreters/FillingRow.cpp | 75 ++++++++++++------- src/Interpreters/FillingRow.h | 6 +- .../Transforms/FillingTransform.cpp | 2 +- 3 files changed, 53 insertions(+), 30 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index caf6ad9e3ba..825b0b1488a 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -3,6 +3,7 @@ #include "Common/Logger.h" #include "Common/logger_useful.h" #include +#include "base/defines.h" #include @@ -122,6 +123,43 @@ std::optional FillingRow::doLongJump(const FillColumnDescription & descr, return shifted_value; } +bool FillingRow::hasSomeConstraints(size_t pos) const +{ + const auto & descr = 
getFillDescription(pos); + + if (!descr.fill_to.isNull()) + return true; + + if (!descr.fill_staleness.isNull()) + return true; + + return false; +} + +bool FillingRow::isConstraintsComplete(size_t pos) const +{ + auto logger = getLogger("FillingRow::isConstraintComplete"); + chassert(!row[pos].isNull()); + chassert(hasSomeConstraints(pos)); + + const auto & descr = getFillDescription(pos); + int direction = getDirection(pos); + + if (!descr.fill_to.isNull() && !less(row[pos], descr.fill_to, direction)) + { + LOG_DEBUG(logger, "fill to: {}, row: {}, direction: {}", descr.fill_to.dump(), row[pos].dump(), direction); + return false; + } + + if (!descr.fill_staleness.isNull() && !less(row[pos], staleness_border[pos], direction)) + { + LOG_DEBUG(logger, "staleness border: {}, row: {}, direction: {}", staleness_border[pos].dump(), row[pos].dump(), direction); + return false; + } + + return true; +} + Field findMin(Field a, Field b, Field c, int dir) { auto logger = getLogger("FillingRow"); @@ -300,43 +338,26 @@ bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed return false; } -bool FillingRow::isConstraintComplete(size_t pos) const +bool FillingRow::hasSomeConstraints() const { - auto logger = getLogger("FillingRow::isConstraintComplete"); + for (size_t pos = 0; pos < size(); ++pos) + if (hasSomeConstraints(pos)) + return true; - if (row[pos].isNull()) - { - LOG_DEBUG(logger, "disabled"); - return true; /// disabled - } - - const auto & descr = getFillDescription(pos); - int direction = getDirection(pos); - - if (!descr.fill_to.isNull() && !less(row[pos], descr.fill_to, direction)) - { - LOG_DEBUG(logger, "fill to: {}, row: {}, direction: {}", descr.fill_to.dump(), row[pos].dump(), direction); - return false; - } - - if (!staleness_border[pos].isNull() && !less(row[pos], staleness_border[pos], direction)) - { - LOG_DEBUG(logger, "staleness border: {}, row: {}, direction: {}", staleness_border[pos].dump(), row[pos].dump(), direction); - return false; - } - - return true; + return false; } bool FillingRow::isConstraintsComplete() const { for (size_t pos = 0; pos < size(); ++pos) { - if (isConstraintComplete(pos)) - return true; + if (row[pos].isNull() || !hasSomeConstraints(pos)) + continue; + + return isConstraintsComplete(pos); } - return false; + return true; } bool FillingRow::isLessStaleness() const diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index a5e622e4c6e..bd5a1b877a5 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -18,6 +18,9 @@ class FillingRow std::optional doJump(const FillColumnDescription & descr, size_t column_ind); std::optional doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to); + bool hasSomeConstraints(size_t pos) const; + bool isConstraintsComplete(size_t pos) const; + public: explicit FillingRow(const SortDescription & sort_description); @@ -30,7 +33,7 @@ public: /// Returns true if need to generate some prefix for to_row bool shift(const FillingRow & next_original_row, bool& value_changed); - bool isConstraintComplete(size_t pos) const; + bool hasSomeConstraints() const; bool isConstraintsComplete() const; bool isLessStaleness() const; @@ -52,7 +55,6 @@ public: bool isNull() const; int getDirection(size_t index) const { return sort_description[index].direction; } - Field getStalenessBorder(size_t index) const { return staleness_border[index]; } FillColumnDescription & getFillDescription(size_t index) { return 
sort_description[index].fill_description; } const FillColumnDescription & getFillDescription(size_t index) const { return sort_description[index].fill_description; } diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index a3a185929dc..ce804c94d8e 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -533,7 +533,7 @@ bool FillingTransform::generateSuffixIfNeeded( // return false; // } - if (!filling_row.isConstraintsComplete()) + if (!filling_row.hasSomeConstraints() || !filling_row.isConstraintsComplete()) { logDebug("generateSuffixIfNeeded", "will not generate suffix"); return false; From 433523c6f29a55d28930ec86fe268edffc16738e Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 13:49:42 +0000 Subject: [PATCH 213/353] update test --- .../03266_with_fill_staleness.reference | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.reference b/tests/queries/0_stateless/03266_with_fill_staleness.reference index 6b090443359..25d7b7c3f24 100644 --- a/tests/queries/0_stateless/03266_with_fill_staleness.reference +++ b/tests/queries/0_stateless/03266_with_fill_staleness.reference @@ -50,6 +50,8 @@ staleness 3 seconds 2016-06-15 23:00:21 20 2016-06-15 23:00:22 20 2016-06-15 23:00:25 25 original +2016-06-15 23:00:26 25 +2016-06-15 23:00:27 25 descending order 2016-06-15 23:00:25 25 original 2016-06-15 23:00:24 25 @@ -62,6 +64,7 @@ descending order 2016-06-15 23:00:05 5 original 2016-06-15 23:00:04 5 2016-06-15 23:00:00 0 original +2016-06-15 22:59:59 0 staleness with to and step 2016-06-15 23:00:00 0 original 2016-06-15 23:00:03 0 @@ -86,33 +89,41 @@ staleness with another regular with fill 2016-06-15 23:00:01 1970-01-01 01:00:00 0 2016-06-15 23:00:01 1970-01-01 01:00:01 0 2016-06-15 23:00:01 1970-01-01 01:00:02 0 +2016-06-15 23:00:05 1970-01-01 01:00:00 0 +2016-06-15 23:00:05 1970-01-01 01:00:01 0 +2016-06-15 23:00:05 1970-01-01 01:00:02 0 2016-06-15 23:00:05 2016-06-15 23:00:05 5 original -2016-06-15 23:00:05 1970-01-01 01:00:01 5 -2016-06-15 23:00:05 1970-01-01 01:00:02 5 2016-06-15 23:00:06 1970-01-01 01:00:00 5 2016-06-15 23:00:06 1970-01-01 01:00:01 5 2016-06-15 23:00:06 1970-01-01 01:00:02 5 +2016-06-15 23:00:10 1970-01-01 01:00:00 5 +2016-06-15 23:00:10 1970-01-01 01:00:01 5 +2016-06-15 23:00:10 1970-01-01 01:00:02 5 2016-06-15 23:00:10 2016-06-15 23:00:10 10 original -2016-06-15 23:00:10 1970-01-01 01:00:01 10 -2016-06-15 23:00:10 1970-01-01 01:00:02 10 2016-06-15 23:00:11 1970-01-01 01:00:00 10 2016-06-15 23:00:11 1970-01-01 01:00:01 10 2016-06-15 23:00:11 1970-01-01 01:00:02 10 +2016-06-15 23:00:15 1970-01-01 01:00:00 10 +2016-06-15 23:00:15 1970-01-01 01:00:01 10 +2016-06-15 23:00:15 1970-01-01 01:00:02 10 2016-06-15 23:00:15 2016-06-15 23:00:15 15 original -2016-06-15 23:00:15 1970-01-01 01:00:01 15 -2016-06-15 23:00:15 1970-01-01 01:00:02 15 2016-06-15 23:00:16 1970-01-01 01:00:00 15 2016-06-15 23:00:16 1970-01-01 01:00:01 15 2016-06-15 23:00:16 1970-01-01 01:00:02 15 +2016-06-15 23:00:20 1970-01-01 01:00:00 15 +2016-06-15 23:00:20 1970-01-01 01:00:01 15 +2016-06-15 23:00:20 1970-01-01 01:00:02 15 2016-06-15 23:00:20 2016-06-15 23:00:20 20 original -2016-06-15 23:00:20 1970-01-01 01:00:01 20 -2016-06-15 23:00:20 1970-01-01 01:00:02 20 2016-06-15 23:00:21 1970-01-01 01:00:00 20 2016-06-15 23:00:21 1970-01-01 01:00:01 20 2016-06-15 23:00:21 1970-01-01 01:00:02 20 
+2016-06-15 23:00:25 1970-01-01 01:00:00 20 +2016-06-15 23:00:25 1970-01-01 01:00:01 20 +2016-06-15 23:00:25 1970-01-01 01:00:02 20 2016-06-15 23:00:25 2016-06-15 23:00:25 25 original -2016-06-15 23:00:25 1970-01-01 01:00:01 25 -2016-06-15 23:00:25 1970-01-01 01:00:02 25 +2016-06-15 23:00:26 1970-01-01 01:00:00 25 +2016-06-15 23:00:26 1970-01-01 01:00:01 25 +2016-06-15 23:00:26 1970-01-01 01:00:02 25 double staleness 2016-06-15 23:00:00 2016-06-15 23:00:00 0 original 2016-06-15 23:00:00 2016-06-15 23:00:02 0 @@ -137,3 +148,4 @@ double staleness 2016-06-15 23:00:25 2016-06-15 23:00:25 25 original 2016-06-15 23:00:25 2016-06-15 23:00:27 25 2016-06-15 23:00:25 2016-06-15 23:00:29 25 +2016-06-15 23:00:26 1970-01-01 01:00:00 25 From e5fe7a0f52625d3460ca04a21982a1af24e0adcd Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 14:35:29 +0000 Subject: [PATCH 214/353] add more tests --- .../0_stateless/03266_with_fill_staleness.sql | 1 + .../03266_with_fill_staleness_cases.reference | 67 +++++++++++++++++++ .../03266_with_fill_staleness_cases.sql | 25 +++++++ 3 files changed, 93 insertions(+) create mode 100644 tests/queries/0_stateless/03266_with_fill_staleness_cases.reference create mode 100644 tests/queries/0_stateless/03266_with_fill_staleness_cases.sql diff --git a/tests/queries/0_stateless/03266_with_fill_staleness.sql b/tests/queries/0_stateless/03266_with_fill_staleness.sql index fff702ffd83..de47d8287ad 100644 --- a/tests/queries/0_stateless/03266_with_fill_staleness.sql +++ b/tests/queries/0_stateless/03266_with_fill_staleness.sql @@ -1,4 +1,5 @@ SET session_timezone='Europe/Amsterdam'; +SET enable_analyzer=1; DROP TABLE IF EXISTS with_fill_staleness; CREATE TABLE with_fill_staleness (a DateTime, b DateTime, c UInt64) ENGINE = MergeTree ORDER BY a; diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_cases.reference b/tests/queries/0_stateless/03266_with_fill_staleness_cases.reference new file mode 100644 index 00000000000..bf8e5bbe331 --- /dev/null +++ b/tests/queries/0_stateless/03266_with_fill_staleness_cases.reference @@ -0,0 +1,67 @@ +test-1 +0 5 10 original +0 5 13 +0 5 16 +0 5 19 +0 5 22 +0 7 0 +7 8 15 original +7 8 18 +7 8 21 +7 8 24 +7 10 0 +14 10 20 original +14 10 23 +14 12 0 +test-2-1 +1 0 original +1 1 +1 2 +1 3 +1 4 original +1 5 +1 6 +1 7 +1 8 original +1 9 +1 10 +1 11 +1 12 original +test-2-2 +1 0 original +1 1 +1 2 +1 3 +1 4 original +1 5 +1 6 +1 7 +1 8 original +1 9 +1 10 +1 11 +1 12 original +1 13 +1 14 +2 0 +3 0 +4 0 +test-3-1 +25 -10 +25 -8 +25 -6 +25 -4 +25 -2 +25 0 +25 2 +25 4 +25 6 +25 8 +25 10 +25 12 +25 14 +25 16 +25 17 original +28 -10 +30 18 original +31 -10 diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_cases.sql b/tests/queries/0_stateless/03266_with_fill_staleness_cases.sql new file mode 100644 index 00000000000..9e28041c9a1 --- /dev/null +++ b/tests/queries/0_stateless/03266_with_fill_staleness_cases.sql @@ -0,0 +1,25 @@ +SET enable_analyzer=1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a Int64, b Int64, c Int64) Engine=MergeTree ORDER BY a; +INSERT INTO test(a, b, c) VALUES (0, 5, 10), (7, 8, 15), (14, 10, 20); + +SELECT 'test-1'; +SELECT *, 'original' AS orig FROM test ORDER BY a, b WITH FILL TO 20 STEP 2 STALENESS 3, c WITH FILL TO 25 step 3; + +DROP TABLE IF EXISTS test2; +CREATE TABLE test2 (a Int64, b Int64) Engine=MergeTree ORDER BY a; +INSERT INTO test2(a, b) values (1, 0), (1, 4), (1, 8), (1, 12); + +SELECT 'test-2-1'; +SELECT *, 'original' AS orig FROM test2 ORDER BY a, b WITH FILL; + +SELECT 
'test-2-2';
+SELECT *, 'original' AS orig FROM test2 ORDER BY a WITH FILL to 20 STALENESS 4, b WITH FILL TO 15 STALENESS 7;
+
+DROP TABLE IF EXISTS test2;
+CREATE TABLE test3 (a Int64, b Int64) Engine=MergeTree ORDER BY a;
+INSERT INTO test3(a, b) VALUES (25, 17), (30, 18);
+
+SELECT 'test-3-1';
+SELECT a, b, 'original' AS orig FROM test3 ORDER BY a WITH FILL TO 33 STEP 3, b WITH FILL FROM -10 STEP 2;

From 2cda4dd9012059b6c287df7c615cef8e310b2d8e Mon Sep 17 00:00:00 2001
From: Mikhail Artemenko
Date: Wed, 30 Oct 2024 14:46:56 +0000
Subject: [PATCH 215/353] cleanup

---
 src/Interpreters/FillingRow.cpp               | 97 +------------------
 src/Interpreters/FillingRow.h                 | 12 +--
 .../Transforms/FillingTransform.cpp           | 30 +-----
 3 files changed, 11 insertions(+), 128 deletions(-)

diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp
index 825b0b1488a..a87ca418b7b 100644
--- a/src/Interpreters/FillingRow.cpp
+++ b/src/Interpreters/FillingRow.cpp
@@ -68,25 +68,6 @@ bool FillingRow::isNull() const
     return true;
 }
 
-std::optional<Field> FillingRow::doJump(const FillColumnDescription& descr, size_t column_ind)
-{
-    Field next_value = row[column_ind];
-    descr.step_func(next_value, 1);
-
-    if (!descr.fill_to.isNull() && less(descr.fill_to, next_value, getDirection(column_ind)))
-        return std::nullopt;
-
-    if (!descr.fill_staleness.isNull())
-    {
-        if (less(next_value, staleness_border[column_ind], getDirection(column_ind)))
-            return next_value;
-        else
-            return std::nullopt;
-    }
-
-    return next_value;
-}
-
 std::optional<Field> FillingRow::doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to)
 {
     Field shifted_value = row[column_ind];
@@ -99,16 +80,6 @@ std::optional<Field> FillingRow::doLongJump(const FillColumnDescription & descr,
         Field next_value = shifted_value;
         descr.step_func(next_value, step_len);
 
-        // if (less(next_value, to, getDirection(0)))
-        // {
-        //     shifted_value = std::move(next_value);
-        //     step_len *= 2;
-        // }
-        // else
-        // {
-        //     step_len /= 2;
-        // }
-
         if (less(to, next_value, getDirection(0)))
         {
             step_len /= 2;
         }
@@ -233,7 +204,7 @@ std::pair<bool, bool> FillingRow::next(const FillingRow & next_original_row)
             continue;
 
         row[i] = next_value;
-        initWithFrom(i + 1);
+        initUsingFrom(i + 1);
         return {true, true};
     }
 
@@ -271,7 +242,7 @@ std::pair<bool, bool> FillingRow::next(const FillingRow & next_original_row)
         return {is_less, true};
     }
 
-    initWithFrom(pos + 1);
+    initUsingFrom(pos + 1);
     return {true, true};
 }
 
@@ -327,8 +298,7 @@ bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed
     }
     else
     {
-        // getFillDescription(pos).step_func(row[pos], 1);
-        initWithTo(/*from_pos=*/pos + 1);
+        initUsingTo(/*from_pos=*/pos + 1);
 
         value_changed = false;
         return false;
@@ -360,70 +330,13 @@ bool FillingRow::isConstraintsComplete() const
     return true;
 }
 
-bool FillingRow::isLessStaleness() const
-{
-    auto logger = getLogger("FillingRow::isLessStaleness");
-
-    for (size_t pos = 0; pos < size(); ++pos)
-    {
-        LOG_DEBUG(logger, "staleness border: {}, row: {}", staleness_border[pos].dump(), row[pos].dump());
-
-        if (row[pos].isNull() || staleness_border[pos].isNull())
-            continue;
-
-        if (less(row[pos], staleness_border[pos], getDirection(pos)))
-            return true;
-    }
-
-    return false;
-}
-
-bool FillingRow::isStalenessConfigured() const
-{
-    for (size_t pos = 0; pos < size(); ++pos)
-        if (!getFillDescription(pos).fill_staleness.isNull())
-            return true;
-
-    return false;
-}
-
-bool FillingRow::isLessFillTo() const
-{
-    auto logger = getLogger("FillingRow::isLessFillTo");
-
-    for (size_t pos = 0; pos < 
size(); ++pos) - { - const auto & descr = getFillDescription(pos); - - LOG_DEBUG(logger, "fill to: {}, row: {}", descr.fill_to.dump(), row[pos].dump()); - - if (row[pos].isNull() || descr.fill_to.isNull()) - continue; - - if (less(row[pos], descr.fill_to, getDirection(pos))) - return true; - } - - return false; -} - -bool FillingRow::isFillToConfigured() const -{ - for (size_t pos = 0; pos < size(); ++pos) - if (!getFillDescription(pos).fill_to.isNull()) - return true; - - return false; -} - - -void FillingRow::initWithFrom(size_t from_pos) +void FillingRow::initUsingFrom(size_t from_pos) { for (size_t i = from_pos; i < sort_description.size(); ++i) row[i] = getFillDescription(i).fill_from; } -void FillingRow::initWithTo(size_t from_pos) +void FillingRow::initUsingTo(size_t from_pos) { for (size_t i = from_pos; i < sort_description.size(); ++i) row[i] = getFillDescription(i).fill_to; diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index bd5a1b877a5..d33e3f95541 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -15,7 +15,7 @@ bool equals(const Field & lhs, const Field & rhs); */ class FillingRow { - std::optional doJump(const FillColumnDescription & descr, size_t column_ind); + /// finds last value <= to std::optional doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to); bool hasSomeConstraints(size_t pos) const; @@ -36,14 +36,8 @@ public: bool hasSomeConstraints() const; bool isConstraintsComplete() const; - bool isLessStaleness() const; - bool isStalenessConfigured() const; - - bool isLessFillTo() const; - bool isFillToConfigured() const; - - void initWithFrom(size_t from_pos = 0); - void initWithTo(size_t from_pos = 0); + void initUsingFrom(size_t from_pos = 0); + void initUsingTo(size_t from_pos = 0); void initStalenessRow(const Columns& base_row, size_t row_ind); Field & operator[](size_t index) { return row[index]; } diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index ce804c94d8e..40650b485f8 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -21,7 +21,7 @@ namespace DB constexpr bool debug_logging_enabled = true; template -void logDebug(String key, const T & value, const char * separator = " : ") +static void logDebug(String key, const T & value, const char * separator = " : ") { if constexpr (debug_logging_enabled) { @@ -512,27 +512,6 @@ bool FillingTransform::generateSuffixIfNeeded( logDebug("generateSuffixIfNeeded next_row updated", next_row); - // if (!filling_row.isFillToConfigured() && !filling_row.isStalenessConfigured()) - // { - // logDebug("generateSuffixIfNeeded", "no other constraints, will not generate suffix"); - // return false; - // } - - // logDebug("filling_row.isLessFillTo()", filling_row.isLessFillTo()); - // logDebug("filling_row.isLessStaleness()", filling_row.isLessStaleness()); - - // if (filling_row.isFillToConfigured() && !filling_row.isLessFillTo()) - // { - // logDebug("generateSuffixIfNeeded", "not less than fill to, will not generate suffix"); - // return false; - // } - - // if (filling_row.isStalenessConfigured() && !filling_row.isLessStaleness()) - // { - // logDebug("generateSuffixIfNeeded", "not less than staleness border, will not generate suffix"); - // return false; - // } - if (!filling_row.hasSomeConstraints() || !filling_row.isConstraintsComplete()) { logDebug("generateSuffixIfNeeded", "will not generate suffix"); @@ -637,7 
+616,7 @@ void FillingTransform::transformRange( if (!fill_from.isNull() && !equals(current_value, fill_from)) { - filling_row.initWithFrom(i); + filling_row.initUsingFrom(i); filling_row_inserted = false; if (less(fill_from, current_value, filling_row.getDirection(i))) { @@ -732,9 +711,6 @@ void FillingTransform::transformRange( copyRowFromColumns(res_interpolate_columns, input_interpolate_columns, row_ind); copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); copyRowFromColumns(res_other_columns, input_other_columns, row_ind); - - // /// Init next staleness interval with current row, because we have already made the long jump to it - // filling_row.initStalenessRow(input_fill_columns, row_ind); } /// save sort prefix of last row in the range, it's used to generate suffix @@ -780,7 +756,7 @@ void FillingTransform::transform(Chunk & chunk) /// if no data was processed, then need to initialize filling_row if (last_row.empty()) { - filling_row.initWithFrom(); + filling_row.initUsingFrom(); filling_row_inserted = false; } From 7af2e822e7eb486ae95319a09364ea36498bb49b Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 15:22:45 +0000 Subject: [PATCH 216/353] cleanup --- src/Interpreters/FillingRow.cpp | 37 +++++++++------- src/Interpreters/FillingRow.h | 6 +-- .../Transforms/FillingTransform.cpp | 44 ++++++------------- 3 files changed, 36 insertions(+), 51 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index a87ca418b7b..df93ece2af4 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -1,10 +1,10 @@ #include -#include -#include "Common/Logger.h" -#include "Common/logger_useful.h" -#include -#include "base/defines.h" + #include +#include +#include +#include +#include namespace DB @@ -145,7 +145,7 @@ Field findMin(Field a, Field b, Field c, int dir) return a; } -std::pair FillingRow::next(const FillingRow & next_original_row) +bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) { auto logger = getLogger("FillingRow"); @@ -169,18 +169,18 @@ std::pair FillingRow::next(const FillingRow & next_original_row) LOG_DEBUG(logger, "pos: {}", pos); if (pos == row_size) - return {false, false}; + return false; const auto & pos_descr = getFillDescription(pos); if (!next_original_row[pos].isNull() && less(next_original_row[pos], row[pos], getDirection(pos))) - return {false, false}; + return false; if (!staleness_border[pos].isNull() && !less(row[pos], staleness_border[pos], getDirection(pos))) - return {false, false}; + return false; if (!pos_descr.fill_to.isNull() && !less(row[pos], pos_descr.fill_to, getDirection(pos))) - return {false, false}; + return false; /// If we have any 'fill_to' value at position greater than 'pos' or configured staleness, /// we need to generate rows up to one of this borders. 
@@ -205,20 +205,22 @@ std::pair FillingRow::next(const FillingRow & next_original_row) row[i] = next_value; initUsingFrom(i + 1); - return {true, true}; + + value_changed = true; + return true; } auto next_value = row[pos]; getFillDescription(pos).step_func(next_value, 1); if (!next_original_row[pos].isNull() && less(next_original_row[pos], next_value, getDirection(pos))) - return {false, false}; + return false; if (!staleness_border[pos].isNull() && !less(next_value, staleness_border[pos], getDirection(pos))) - return {false, false}; + return false; if (!pos_descr.fill_to.isNull() && !less(next_value, pos_descr.fill_to, getDirection(pos))) - return {false, false}; + return false; row[pos] = next_value; if (equals(row[pos], next_original_row[pos])) @@ -239,11 +241,14 @@ std::pair FillingRow::next(const FillingRow & next_original_row) ); } - return {is_less, true}; + value_changed = true; + return is_less; } initUsingFrom(pos + 1); - return {true, true}; + + value_changed = true; + return true; } bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed) diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index d33e3f95541..d4590d7b81c 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -25,10 +25,8 @@ public: explicit FillingRow(const SortDescription & sort_description); /// Generates next row according to fill 'from', 'to' and 'step' values. - /// Return pair of boolean - /// apply - true if filling values should be inserted into result set - /// value_changed - true if filling row value was changed - std::pair next(const FillingRow & next_original_row); + /// Returns true if filling values should be inserted into result set + bool next(const FillingRow & next_original_row, bool& value_changed); /// Returns true if need to generate some prefix for to_row bool shift(const FillingRow & next_original_row, bool& value_changed); diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 40650b485f8..f23ffec43de 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -11,7 +11,6 @@ #include #include #include -#include "Interpreters/FillingRow.h" #include @@ -534,9 +533,7 @@ bool FillingTransform::generateSuffixIfNeeded( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row); - filling_row_changed = changed; - if (!apply) + if (!filling_row.next(next_row, filling_row_changed)) break; interpolate(result_columns, interpolate_block); @@ -660,9 +657,7 @@ void FillingTransform::transformRange( bool filling_row_changed = false; while (true) { - const auto [apply, changed] = filling_row.next(next_row); - filling_row_changed = changed; - if (!apply) + if (!filling_row.next(next_row, filling_row_changed)) break; interpolate(result_columns, interpolate_block); @@ -670,35 +665,22 @@ void FillingTransform::transformRange( copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); } + /// Initialize staleness border for current row to generate it's prefix + filling_row.initStalenessRow(input_fill_columns, row_ind); + + while (filling_row.shift(next_row, filling_row_changed)) { - filling_row.initStalenessRow(input_fill_columns, row_ind); + logDebug("filling_row after shift", filling_row); - bool shift_apply = filling_row.shift(next_row, filling_row_changed); - logDebug("shift_apply", shift_apply); - logDebug("filling_row_changed", 
filling_row_changed); - - while (shift_apply) + do { - logDebug("after shift", filling_row); + logDebug("inserting prefix filling_row", filling_row); - while (true) - { - logDebug("filling_row in prefix", filling_row); + interpolate(result_columns, interpolate_block); + insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); + copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); - interpolate(result_columns, interpolate_block); - insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); - copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); - - const auto [apply, changed] = filling_row.next(next_row); - logDebug("filling_row in prefix", filling_row); - - filling_row_changed = changed; - if (!apply) - break; - } - - shift_apply = filling_row.shift(next_row, filling_row_changed); - } + } while (filling_row.next(next_row, filling_row_changed)); } /// new valid filling row was generated but not inserted, will use it during suffix generation From ab5738b9f1e87cf8b49b3d74a3bbd05e53c39850 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 16:11:40 +0000 Subject: [PATCH 217/353] merge constraints --- src/Interpreters/FillingRow.cpp | 92 +++++++------------ src/Interpreters/FillingRow.h | 4 +- .../Transforms/FillingTransform.cpp | 4 +- 3 files changed, 37 insertions(+), 63 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index df93ece2af4..67827567e04 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -32,7 +32,10 @@ FillingRow::FillingRow(const SortDescription & sort_description_) : sort_description(sort_description_) { row.resize(sort_description.size()); - staleness_border.resize(sort_description.size()); + + constraints.reserve(sort_description.size()); + for (size_t i = 0; i < size(); ++i) + constraints.push_back(getFillDescription(i).fill_to); } bool FillingRow::operator<(const FillingRow & other) const @@ -96,53 +99,33 @@ std::optional FillingRow::doLongJump(const FillColumnDescription & descr, bool FillingRow::hasSomeConstraints(size_t pos) const { - const auto & descr = getFillDescription(pos); - - if (!descr.fill_to.isNull()) - return true; - - if (!descr.fill_staleness.isNull()) - return true; - - return false; + return !constraints[pos].isNull(); } bool FillingRow::isConstraintsComplete(size_t pos) const { - auto logger = getLogger("FillingRow::isConstraintComplete"); + auto logger = getLogger("FillingRow::isConstraintsComplete"); chassert(!row[pos].isNull()); chassert(hasSomeConstraints(pos)); - const auto & descr = getFillDescription(pos); int direction = getDirection(pos); + LOG_DEBUG(logger, "constraint: {}, row: {}, direction: {}", constraints[pos].dump(), row[pos].dump(), direction); - if (!descr.fill_to.isNull() && !less(row[pos], descr.fill_to, direction)) - { - LOG_DEBUG(logger, "fill to: {}, row: {}, direction: {}", descr.fill_to.dump(), row[pos].dump(), direction); - return false; - } - - if (!descr.fill_staleness.isNull() && !less(row[pos], staleness_border[pos], direction)) - { - LOG_DEBUG(logger, "staleness border: {}, row: {}, direction: {}", staleness_border[pos].dump(), row[pos].dump(), direction); - return false; - } - - return true; + return less(row[pos], constraints[pos], direction); } -Field findMin(Field a, Field b, Field c, int dir) +static const Field & findBorder(const Field & constraint, const Field & next_original, 
int direction) { - auto logger = getLogger("FillingRow"); - LOG_DEBUG(logger, "a: {} b: {} c: {}", a.dump(), b.dump(), c.dump()); + if (constraint.isNull()) + return next_original; - if (a.isNull() || (!b.isNull() && less(b, a, dir))) - a = b; + if (next_original.isNull()) + return constraint; - if (a.isNull() || (!c.isNull() && less(c, a, dir))) - a = c; + if (less(constraint, next_original, direction)) + return constraint; - return a; + return next_original; } bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) @@ -158,11 +141,10 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) if (row[pos].isNull()) continue; - const auto & descr = getFillDescription(pos); - auto min_constr = findMin(next_original_row[pos], staleness_border[pos], descr.fill_to, getDirection(pos)); - LOG_DEBUG(logger, "min_constr: {}", min_constr); + const Field & border = findBorder(constraints[pos], next_original_row[pos], getDirection(pos)); + LOG_DEBUG(logger, "border: {}", border); - if (!min_constr.isNull() && !equals(row[pos], min_constr)) + if (!border.isNull() && !equals(row[pos], border)) break; } @@ -171,15 +153,10 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) if (pos == row_size) return false; - const auto & pos_descr = getFillDescription(pos); - if (!next_original_row[pos].isNull() && less(next_original_row[pos], row[pos], getDirection(pos))) return false; - if (!staleness_border[pos].isNull() && !less(row[pos], staleness_border[pos], getDirection(pos))) - return false; - - if (!pos_descr.fill_to.isNull() && !less(row[pos], pos_descr.fill_to, getDirection(pos))) + if (!constraints[pos].isNull() && !less(row[pos], constraints[pos], getDirection(pos))) return false; /// If we have any 'fill_to' value at position greater than 'pos' or configured staleness, @@ -191,16 +168,13 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) if (row[i].isNull()) continue; - if (fill_column_desc.fill_to.isNull() && staleness_border[i].isNull()) + if (constraints[i].isNull()) continue; Field next_value = row[i]; fill_column_desc.step_func(next_value, 1); - if (!staleness_border[i].isNull() && !less(next_value, staleness_border[i], getDirection(i))) - continue; - - if (!fill_column_desc.fill_to.isNull() && !less(next_value, fill_column_desc.fill_to, getDirection(i))) + if (!less(next_value, constraints[i], getDirection(i))) continue; row[i] = next_value; @@ -216,10 +190,7 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) if (!next_original_row[pos].isNull() && less(next_original_row[pos], next_value, getDirection(pos))) return false; - if (!staleness_border[pos].isNull() && !less(next_value, staleness_border[pos], getDirection(pos))) - return false; - - if (!pos_descr.fill_to.isNull() && !less(next_value, pos_descr.fill_to, getDirection(pos))) + if (!constraints[pos].isNull() && !less(next_value, constraints[pos], getDirection(pos))) return false; row[pos] = next_value; @@ -236,8 +207,7 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) is_less |= ( (next_original_row[i].isNull() || less(row[i], next_original_row[i], getDirection(i))) && - (staleness_border[i].isNull() || less(row[i], staleness_border[i], getDirection(i))) && - (descr.fill_to.isNull() || less(row[i], descr.fill_to, getDirection(i))) + (constraints[i].isNull() || less(row[i], constraints[i], getDirection(i))) ); } @@ -291,8 +261,7 @@ bool FillingRow::shift(const 
FillingRow & next_original_row, bool& value_changed is_less |= ( (next_original_row[i].isNull() || less(row[i], next_original_row[i], getDirection(i))) && - (staleness_border[i].isNull() || less(row[i], staleness_border[i], getDirection(i))) && - (descr.fill_to.isNull() || less(row[i], descr.fill_to, getDirection(i))) + (constraints[i].isNull() || less(row[i], constraints[i], getDirection(i))) ); } @@ -347,15 +316,20 @@ void FillingRow::initUsingTo(size_t from_pos) row[i] = getFillDescription(i).fill_to; } -void FillingRow::initStalenessRow(const Columns& base_row, size_t row_ind) +void FillingRow::updateConstraintsWithStalenessRow(const Columns& base_row, size_t row_ind) { for (size_t i = 0; i < size(); ++i) { const auto& descr = getFillDescription(i); + constraints[i] = descr.fill_to; + if (!descr.fill_staleness.isNull()) { - staleness_border[i] = (*base_row[i])[row_ind]; - descr.staleness_step_func(staleness_border[i], 1); + Field staleness_border = (*base_row[i])[row_ind]; + descr.staleness_step_func(staleness_border, 1); + + if (constraints[i].isNull() || less(staleness_border, constraints[i], getDirection(i))) + constraints[i] = std::move(staleness_border); } } } diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index d4590d7b81c..edcaba02aa7 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -36,7 +36,7 @@ public: void initUsingFrom(size_t from_pos = 0); void initUsingTo(size_t from_pos = 0); - void initStalenessRow(const Columns& base_row, size_t row_ind); + void updateConstraintsWithStalenessRow(const Columns& base_row, size_t row_ind); Field & operator[](size_t index) { return row[index]; } const Field & operator[](size_t index) const { return row[index]; } @@ -54,7 +54,7 @@ public: private: Row row; - Row staleness_border; + Row constraints; SortDescription sort_description; }; diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index f23ffec43de..407a79efb93 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -628,7 +628,7 @@ void FillingTransform::transformRange( } /// Init staleness first interval - filling_row.initStalenessRow(input_fill_columns, range_begin); + filling_row.updateConstraintsWithStalenessRow(input_fill_columns, range_begin); for (size_t row_ind = range_begin; row_ind < range_end; ++row_ind) { @@ -666,7 +666,7 @@ void FillingTransform::transformRange( } /// Initialize staleness border for current row to generate it's prefix - filling_row.initStalenessRow(input_fill_columns, row_ind); + filling_row.updateConstraintsWithStalenessRow(input_fill_columns, row_ind); while (filling_row.shift(next_row, filling_row_changed)) { From 5b4d55dd3f0ff4393e81a7a36ad092eee46be2c6 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 16:41:02 +0000 Subject: [PATCH 218/353] move logs under flag --- src/Interpreters/FillingRow.cpp | 33 +++++++++---------- .../Transforms/FillingTransform.cpp | 2 +- 2 files changed, 16 insertions(+), 19 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 67827567e04..deb4c765d31 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -10,6 +10,15 @@ namespace DB { +constexpr static bool debug_logging_enabled = true; + +template +static void logDebug(String fmt_str, Args&&... 
args) +{ + if constexpr (debug_logging_enabled) + LOG_DEBUG(getLogger("FillingRow"), "{}", fmt::format(fmt::runtime(fmt_str), std::forward(args)...)); +} + bool less(const Field & lhs, const Field & rhs, int direction) { if (direction == -1) @@ -104,12 +113,11 @@ bool FillingRow::hasSomeConstraints(size_t pos) const bool FillingRow::isConstraintsComplete(size_t pos) const { - auto logger = getLogger("FillingRow::isConstraintsComplete"); chassert(!row[pos].isNull()); chassert(hasSomeConstraints(pos)); int direction = getDirection(pos); - LOG_DEBUG(logger, "constraint: {}, row: {}, direction: {}", constraints[pos].dump(), row[pos].dump(), direction); + logDebug("constraint: {}, row: {}, direction: {}", constraints[pos].dump(), row[pos].dump(), direction); return less(row[pos], constraints[pos], direction); } @@ -130,7 +138,6 @@ static const Field & findBorder(const Field & constraint, const Field & next_ori bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) { - auto logger = getLogger("FillingRow"); const size_t row_size = size(); size_t pos = 0; @@ -142,13 +149,13 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) continue; const Field & border = findBorder(constraints[pos], next_original_row[pos], getDirection(pos)); - LOG_DEBUG(logger, "border: {}", border); + logDebug("border: {}", border); if (!border.isNull() && !equals(row[pos], border)) break; } - LOG_DEBUG(logger, "pos: {}", pos); + logDebug("pos: {}", pos); if (pos == row_size) return false; @@ -223,8 +230,7 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed) { - auto logger = getLogger("FillingRow::shift"); - LOG_DEBUG(logger, "next_original_row: {}, current: {}", next_original_row.dump(), dump()); + logDebug("next_original_row: {}, current: {}", next_original_row.dump(), dump()); for (size_t pos = 0; pos < size(); ++pos) { @@ -235,16 +241,7 @@ bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed return false; std::optional next_value = doLongJump(getFillDescription(pos), pos, next_original_row[pos]); - - if (!next_value.has_value()) - { - LOG_DEBUG(logger, "next value: {}", "None"); - continue; - } - else - { - LOG_DEBUG(logger, "next value: {}", next_value->dump()); - } + logDebug("jumped to next value: {}", next_value.value_or("Did not complete")); row[pos] = std::move(next_value.value()); @@ -265,7 +262,7 @@ bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed ); } - LOG_DEBUG(logger, "is less: {}", is_less); + logDebug("is less: {}", is_less); value_changed = true; return is_less; diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 407a79efb93..81d93a6eadb 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -17,7 +17,7 @@ namespace DB { -constexpr bool debug_logging_enabled = true; +constexpr static bool debug_logging_enabled = true; template static void logDebug(String key, const T & value, const char * separator = " : ") From 82783fe020b83425590ab14949d5b5face7c9fd6 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 16:41:38 +0000 Subject: [PATCH 219/353] disable logs --- src/Interpreters/FillingRow.cpp | 2 +- src/Processors/Transforms/FillingTransform.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index deb4c765d31..3b40c2b6cdd 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -10,7 +10,7 @@ namespace DB { -constexpr static bool debug_logging_enabled = true; +constexpr static bool debug_logging_enabled = false; template static void logDebug(String fmt_str, Args&&... args) diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 81d93a6eadb..dc0bafba3e3 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -17,7 +17,7 @@ namespace DB { -constexpr static bool debug_logging_enabled = true; +constexpr static bool debug_logging_enabled = false; template static void logDebug(String key, const T & value, const char * separator = " : ") From b6bd776355171896abb3ef95d2dfdb204799a4b1 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 17:09:35 +0000 Subject: [PATCH 220/353] cleanup --- src/Interpreters/FillingRow.cpp | 8 ++++---- src/Interpreters/FillingRow.h | 4 ++-- src/Processors/Transforms/FillingTransform.cpp | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 3b40c2b6cdd..98c18e9b2ae 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -13,7 +13,7 @@ namespace DB constexpr static bool debug_logging_enabled = false; template -static void logDebug(String fmt_str, Args&&... args) +inline static void logDebug(String fmt_str, Args&&... args) { if constexpr (debug_logging_enabled) LOG_DEBUG(getLogger("FillingRow"), "{}", fmt::format(fmt::runtime(fmt_str), std::forward(args)...)); @@ -111,7 +111,7 @@ bool FillingRow::hasSomeConstraints(size_t pos) const return !constraints[pos].isNull(); } -bool FillingRow::isConstraintsComplete(size_t pos) const +bool FillingRow::isConstraintsSatisfied(size_t pos) const { chassert(!row[pos].isNull()); chassert(hasSomeConstraints(pos)); @@ -288,14 +288,14 @@ bool FillingRow::hasSomeConstraints() const return false; } -bool FillingRow::isConstraintsComplete() const +bool FillingRow::isConstraintsSatisfied() const { for (size_t pos = 0; pos < size(); ++pos) { if (row[pos].isNull() || !hasSomeConstraints(pos)) continue; - return isConstraintsComplete(pos); + return isConstraintsSatisfied(pos); } return true; diff --git a/src/Interpreters/FillingRow.h b/src/Interpreters/FillingRow.h index edcaba02aa7..08d624a2405 100644 --- a/src/Interpreters/FillingRow.h +++ b/src/Interpreters/FillingRow.h @@ -19,7 +19,7 @@ class FillingRow std::optional doLongJump(const FillColumnDescription & descr, size_t column_ind, const Field & to); bool hasSomeConstraints(size_t pos) const; - bool isConstraintsComplete(size_t pos) const; + bool isConstraintsSatisfied(size_t pos) const; public: explicit FillingRow(const SortDescription & sort_description); @@ -32,7 +32,7 @@ public: bool shift(const FillingRow & next_original_row, bool& value_changed); bool hasSomeConstraints() const; - bool isConstraintsComplete() const; + bool isConstraintsSatisfied() const; void initUsingFrom(size_t from_pos = 0); void initUsingTo(size_t from_pos = 0); diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index dc0bafba3e3..a5c6460db0a 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -20,7 +20,7 @@ namespace DB constexpr 
static bool debug_logging_enabled = false; template -static void logDebug(String key, const T & value, const char * separator = " : ") +inline static void logDebug(String key, const T & value, const char * separator = " : ") { if constexpr (debug_logging_enabled) { @@ -511,7 +511,7 @@ bool FillingTransform::generateSuffixIfNeeded( logDebug("generateSuffixIfNeeded next_row updated", next_row); - if (!filling_row.hasSomeConstraints() || !filling_row.isConstraintsComplete()) + if (!filling_row.hasSomeConstraints() || !filling_row.isConstraintsSatisfied()) { logDebug("generateSuffixIfNeeded", "will not generate suffix"); return false; @@ -647,7 +647,7 @@ void FillingTransform::transformRange( /// The condition is true when filling row is initialized by value(s) in FILL FROM, /// and there are row(s) in current range with value(s) < then in the filling row. /// It can happen only once for a range. - if (should_insert_first && filling_row < next_row && filling_row.isConstraintsComplete()) + if (should_insert_first && filling_row < next_row && filling_row.isConstraintsSatisfied()) { interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); From c8b94a3c61330fb0649ee92ec69ffe6e6059860b Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 17:21:29 +0000 Subject: [PATCH 221/353] fix empty stream filling --- src/Processors/Transforms/FillingTransform.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index a5c6460db0a..4a8965dcfaa 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -503,7 +503,7 @@ bool FillingTransform::generateSuffixIfNeeded( logDebug("generateSuffixIfNeeded next_row", next_row); /// Determines if we should insert filling row before start generating next rows - bool should_insert_first = (next_row < filling_row && !filling_row_inserted) || next_row.isNull(); + bool should_insert_first = (next_row < filling_row && !filling_row_inserted) || (next_row.isNull() && !filling_row.isNull()); logDebug("should_insert_first", should_insert_first); for (size_t i = 0, size = filling_row.size(); i < size; ++i) From a99428fcd9d10da6b6f6fea10d033b485e558b1c Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 17:25:06 +0000 Subject: [PATCH 222/353] add errors test --- .../0_stateless/03266_with_fill_staleness_errors.reference | 0 .../queries/0_stateless/03266_with_fill_staleness_errors.sql | 5 +++++ 2 files changed, 5 insertions(+) create mode 100644 tests/queries/0_stateless/03266_with_fill_staleness_errors.reference create mode 100644 tests/queries/0_stateless/03266_with_fill_staleness_errors.sql diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_errors.reference b/tests/queries/0_stateless/03266_with_fill_staleness_errors.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql b/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql new file mode 100644 index 00000000000..339747e4343 --- /dev/null +++ b/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql @@ -0,0 +1,5 @@ +SET enable_analyzer=1; + +SELECT 1 AS a, 2 AS b ORDER BY a, b WITH FILL FROM 0 TO 10 STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT 1 AS a, 2 AS b ORDER BY a, b DESC WITH FILL FROM 0 TO 10 
STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT 1 AS a, 2 AS b ORDER BY a, b ASC WITH FILL FROM 0 TO 10 STALENESS -3; -- { serverError INVALID_WITH_FILL_EXPRESSION } From 10088a0947aaf16a3ce1664c422d66daea3324d2 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 17:26:31 +0000 Subject: [PATCH 223/353] extend fuzzer dict with staleness --- tests/fuzz/dictionaries/keywords.dict | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/fuzz/dictionaries/keywords.dict b/tests/fuzz/dictionaries/keywords.dict index abaaf9e53b5..a37675ebcad 100644 --- a/tests/fuzz/dictionaries/keywords.dict +++ b/tests/fuzz/dictionaries/keywords.dict @@ -538,6 +538,7 @@ "WITH ADMIN OPTION" "WITH CHECK" "WITH FILL" +"STALENESS" "WITH GRANT OPTION" "WITH NAME" "WITH REPLACE OPTION" From e50176c62f18a95648c6b65627b17a095bdccbe5 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Wed, 30 Oct 2024 17:29:08 +0000 Subject: [PATCH 224/353] improve test --- .../queries/0_stateless/03266_with_fill_staleness_errors.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql b/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql index 339747e4343..fbfaf3743ca 100644 --- a/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql +++ b/tests/queries/0_stateless/03266_with_fill_staleness_errors.sql @@ -1,5 +1,5 @@ SET enable_analyzer=1; SELECT 1 AS a, 2 AS b ORDER BY a, b WITH FILL FROM 0 TO 10 STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION } -SELECT 1 AS a, 2 AS b ORDER BY a, b DESC WITH FILL FROM 0 TO 10 STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION } -SELECT 1 AS a, 2 AS b ORDER BY a, b ASC WITH FILL FROM 0 TO 10 STALENESS -3; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT 1 AS a, 2 AS b ORDER BY a, b DESC WITH FILL TO 10 STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT 1 AS a, 2 AS b ORDER BY a, b ASC WITH FILL TO 10 STALENESS -3; -- { serverError INVALID_WITH_FILL_EXPRESSION } From 0cfbe95ca69d0bb52578c83570b34f4f40de92df Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Wed, 30 Oct 2024 21:20:11 +0100 Subject: [PATCH 225/353] Update 03258_multiple_array_joins.sql --- tests/queries/0_stateless/03258_multiple_array_joins.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03258_multiple_array_joins.sql b/tests/queries/0_stateless/03258_multiple_array_joins.sql index 5afe7725d3f..ddfac1da080 100644 --- a/tests/queries/0_stateless/03258_multiple_array_joins.sql +++ b/tests/queries/0_stateless/03258_multiple_array_joins.sql @@ -1,3 +1,4 @@ +SET enable_analyzer = 1; DROP TABLE IF EXISTS test_multiple_array_join; CREATE TABLE test_multiple_array_join ( From fc1fd46686722c5bb13c95edf7051c4e21be7b68 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Wed, 30 Oct 2024 23:36:15 +0100 Subject: [PATCH 226/353] fix test --- ...eplicas_join_algo_and_analyzer_4.reference | 29 ++++++++++++++++++ ...allel_replicas_join_algo_and_analyzer_4.sh | 30 +++++++++++-------- 2 files changed, 46 insertions(+), 13 deletions(-) diff --git a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference index 9fc156b5fb0..8464317f7e6 100644 --- a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference +++ b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference @@ -27,3 
+27,32 @@ SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` GROUP 500030000 500040000 SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` ALL LEFT JOIN (SELECT `__table4`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table4`) AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` GROUP BY `__table1`.`item_id` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` GROUP BY `__table1`.`item_id` +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` ALL LEFT JOIN (SELECT `__table4`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table4`) AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC diff --git a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh index a588fa47c2d..0e1f07b6ac5 100755 --- a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh +++ b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# Tags: long, no-random-settings, no-random-merge-tree-settings CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -74,20 +75,23 @@ query3=" ORDER BY price_sold " -for query in "${query1}" "${query2}" "${query3}"; do - for enable_parallel_replicas in {0..1}; do - ${CLICKHOUSE_CLIENT} --query=" - set enable_analyzer=1; - set allow_experimental_parallel_reading_from_replicas=${enable_parallel_replicas}, cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1; +for prefer_local_plan in {0..1}; do + for query in "${query1}" "${query2}" "${query3}"; do + for enable_parallel_replicas in {0..1}; do + ${CLICKHOUSE_CLIENT} --query=" + set enable_analyzer=1; + set parallel_replicas_local_plan=${prefer_local_plan}; + set allow_experimental_parallel_reading_from_replicas=${enable_parallel_replicas}, cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1; - ${query}; + ${query}; - SELECT replaceRegexpAll(explain, '.*Query: (.*) Replicas:.*', '\\1') - FROM - ( - EXPLAIN actions=1 ${query} - ) - WHERE explain LIKE '%ParallelReplicas%'; - " + SELECT replaceRegexpAll(explain, '.*Query: (.*) Replicas:.*', '\\1') + FROM + ( + EXPLAIN actions=1 ${query} + ) + WHERE explain LIKE '%ParallelReplicas%'; + " + done done done From 1563689c034992866c2de6ede7776c41888395ac Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 31 Oct 2024 13:31:54 +0100 Subject: [PATCH 227/353] Transfer changes from 
sync --- src/Core/Settings.cpp | 6 +++++ src/Core/SettingsChangesHistory.cpp | 4 +++- .../IO/CachedOnDiskReadBufferFromFile.cpp | 6 +++++ src/IO/ReadSettings.h | 2 ++ src/Interpreters/Cache/FileSegment.cpp | 9 +++++++- src/Interpreters/Context.cpp | 5 +++++ src/Storages/MergeTree/DataPartsExchange.cpp | 2 +- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 2 +- src/Storages/MergeTree/MergeTask.cpp | 4 ++-- src/Storages/MergeTree/MergeTreeData.cpp | 22 +++++++++---------- src/Storages/MergeTree/MergeTreeData.h | 2 +- .../MergeTree/MergeTreeDataPartBuilder.cpp | 18 +++++++++------ .../MergeTree/MergeTreeDataPartBuilder.h | 12 ++++++---- .../MergeTree/MergeTreeDataWriter.cpp | 2 +- .../MergeTree/MergeTreePartsMover.cpp | 2 +- src/Storages/MergeTree/MutateTask.cpp | 2 +- src/Storages/StorageReplicatedMergeTree.cpp | 2 +- 17 files changed, 69 insertions(+), 33 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index cdaa305e804..6b16cc132bc 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4842,6 +4842,12 @@ Limit on size of a single batch of file segments that a read buffer can request )", 0) \ M(UInt64, filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, 1000, R"( Wait time to lock cache for space reservation in filesystem cache +)", 0) \ + M(Bool, filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage, true, R"( +Wait time to lock cache for space reservation in filesystem cache +)", 0) \ + M(Bool, filesystem_cache_enable_background_download_during_fetch, true, R"( +Wait time to lock cache for space reservation in filesystem cache )", 0) \ M(UInt64, temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds, (10 * 60 * 1000), R"( Wait time to lock cache for space reservation for temporary data in filesystem cache diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index ad9499c6d86..c36add485bb 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -103,7 +103,9 @@ static std::initializer_listfront(), read_type); } + + if (file_segments && !file_segments->empty() && !file_segments->front().isCompleted()) + { + file_segments->completeAndPopFront(settings.filesystem_cache_allow_background_download); + file_segments = {}; + } } void CachedOnDiskReadBufferFromFile::predownload(FileSegment & file_segment) diff --git a/src/IO/ReadSettings.h b/src/IO/ReadSettings.h index ac3d7fc9faf..24392891e72 100644 --- a/src/IO/ReadSettings.h +++ b/src/IO/ReadSettings.h @@ -107,6 +107,8 @@ struct ReadSettings size_t filesystem_cache_segments_batch_size = 20; size_t filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = 1000; bool filesystem_cache_allow_background_download = true; + bool filesystem_cache_allow_background_download_for_metadata_files_in_packed_storage = true; + bool filesystem_cache_allow_background_download_during_fetch = true; bool use_page_cache_for_disks_without_file_cache = false; bool read_from_page_cache_if_exists_otherwise_bypass_cache = false; diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index 7081ac81ae4..5e42bf0113a 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -1003,7 +1003,14 @@ void FileSegmentsHolder::reset() ProfileEvents::increment(ProfileEvents::FilesystemCacheUnusedHoldFileSegments, file_segments.size()); for (auto file_segment_it = file_segments.begin(); file_segment_it != file_segments.end();) - file_segment_it = 
completeAndPopFrontImpl(false); + { + /// One might think it would have been more correct to do `false` here, + /// not to allow background download for file segments that we actually did not start reading. + /// But actually we would only do that, if those file segments were already read partially by some other thread/query + /// but they were not put to the download queue, because current thread was holding them in Holder. + /// So as a culprit, we need to allow to happen what would have happened if we did not exist. + file_segment_it = completeAndPopFrontImpl(true); + } file_segments.clear(); } diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 8962be59f86..9b775b9eb61 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -193,6 +193,8 @@ namespace Setting extern const SettingsUInt64 filesystem_cache_max_download_size; extern const SettingsUInt64 filesystem_cache_reserve_space_wait_lock_timeout_milliseconds; extern const SettingsUInt64 filesystem_cache_segments_batch_size; + extern const SettingsBool filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage; + extern const SettingsBool filesystem_cache_enable_background_download_during_fetch; extern const SettingsBool http_make_head_request; extern const SettingsUInt64 http_max_fields; extern const SettingsUInt64 http_max_field_name_size; @@ -5687,6 +5689,9 @@ ReadSettings Context::getReadSettings() const res.filesystem_cache_segments_batch_size = settings_ref[Setting::filesystem_cache_segments_batch_size]; res.filesystem_cache_reserve_space_wait_lock_timeout_milliseconds = settings_ref[Setting::filesystem_cache_reserve_space_wait_lock_timeout_milliseconds]; + res.filesystem_cache_allow_background_download_for_metadata_files_in_packed_storage + = settings_ref[Setting::filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage]; + res.filesystem_cache_allow_background_download_during_fetch = settings_ref[Setting::filesystem_cache_enable_background_download_during_fetch]; res.filesystem_cache_max_download_size = settings_ref[Setting::filesystem_cache_max_download_size]; res.skip_download_if_exceeds_query_cache = settings_ref[Setting::skip_download_if_exceeds_query_cache]; diff --git a/src/Storages/MergeTree/DataPartsExchange.cpp b/src/Storages/MergeTree/DataPartsExchange.cpp index e13ec5a7515..1d79ae5aacb 100644 --- a/src/Storages/MergeTree/DataPartsExchange.cpp +++ b/src/Storages/MergeTree/DataPartsExchange.cpp @@ -908,7 +908,7 @@ MergeTreeData::MutableDataPartPtr Fetcher::downloadPartToDisk( { part_storage_for_loading->commitTransaction(); - MergeTreeDataPartBuilder builder(data, part_name, volume, part_relative_path, part_dir); + MergeTreeDataPartBuilder builder(data, part_name, volume, part_relative_path, part_dir, getReadSettings()); new_data_part = builder.withPartFormatFromDisk().build(); new_data_part->version.setCreationTID(Tx::PrehistoricTID, nullptr); diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 20d7528d38a..41783ffddb0 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -833,7 +833,7 @@ MergeTreeDataPartBuilder IMergeTreeDataPart::getProjectionPartBuilder(const Stri { const char * projection_extension = is_temp_projection ? 
".tmp_proj" : ".proj"; auto projection_storage = getDataPartStorage().getProjection(projection_name + projection_extension, !is_temp_projection); - MergeTreeDataPartBuilder builder(storage, projection_name, projection_storage); + MergeTreeDataPartBuilder builder(storage, projection_name, projection_storage, getReadSettings()); return builder.withPartInfo(MergeListElement::FAKE_RESULT_PART_FOR_PROJECTION).withParentPart(this); } diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index 74d6d60ba1b..06471bbe2ba 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -342,13 +342,13 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() const if (global_ctx->parent_part) { auto data_part_storage = global_ctx->parent_part->getDataPartStorage().getProjection(local_tmp_part_basename, /* use parent transaction */ false); - builder.emplace(*global_ctx->data, global_ctx->future_part->name, data_part_storage); + builder.emplace(*global_ctx->data, global_ctx->future_part->name, data_part_storage, getReadSettings()); builder->withParentPart(global_ctx->parent_part); } else { auto local_single_disk_volume = std::make_shared("volume_" + global_ctx->future_part->name, global_ctx->disk, 0); - builder.emplace(global_ctx->data->getDataPartBuilder(global_ctx->future_part->name, local_single_disk_volume, local_tmp_part_basename)); + builder.emplace(global_ctx->data->getDataPartBuilder(global_ctx->future_part->name, local_single_disk_volume, local_tmp_part_basename, getReadSettings())); builder->withPartStorageType(global_ctx->future_part->part_format.storage_type); } diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 0ebb082f399..1ed70f7dd4e 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1423,7 +1423,7 @@ void MergeTreeData::loadUnexpectedDataPart(UnexpectedPartLoadState & state) try { - state.part = getDataPartBuilder(part_name, single_disk_volume, part_name) + state.part = getDataPartBuilder(part_name, single_disk_volume, part_name, getReadSettings()) .withPartInfo(part_info) .withPartFormatFromDisk() .build(); @@ -1438,7 +1438,7 @@ void MergeTreeData::loadUnexpectedDataPart(UnexpectedPartLoadState & state) /// Build a fake part and mark it as broken in case of filesystem error. /// If the error impacts part directory instead of single files, /// an exception will be thrown during detach and silently ignored. - state.part = getDataPartBuilder(part_name, single_disk_volume, part_name) + state.part = getDataPartBuilder(part_name, single_disk_volume, part_name, getReadSettings()) .withPartStorageType(MergeTreeDataPartStorageType::Full) .withPartType(MergeTreeDataPartType::Wide) .build(); @@ -1472,7 +1472,7 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPart( /// Build a fake part and mark it as broken in case of filesystem error. /// If the error impacts part directory instead of single files, /// an exception will be thrown during detach and silently ignored. 
- res.part = getDataPartBuilder(part_name, single_disk_volume, part_name) + res.part = getDataPartBuilder(part_name, single_disk_volume, part_name, getReadSettings()) .withPartStorageType(MergeTreeDataPartStorageType::Full) .withPartType(MergeTreeDataPartType::Wide) .build(); @@ -1493,7 +1493,7 @@ MergeTreeData::LoadPartResult MergeTreeData::loadDataPart( try { - res.part = getDataPartBuilder(part_name, single_disk_volume, part_name) + res.part = getDataPartBuilder(part_name, single_disk_volume, part_name, getReadSettings()) .withPartInfo(part_info) .withPartFormatFromDisk() .build(); @@ -3732,9 +3732,9 @@ MergeTreeDataPartFormat MergeTreeData::choosePartFormatOnDisk(size_t bytes_uncom } MergeTreeDataPartBuilder MergeTreeData::getDataPartBuilder( - const String & name, const VolumePtr & volume, const String & part_dir) const + const String & name, const VolumePtr & volume, const String & part_dir, const ReadSettings & read_settings_) const { - return MergeTreeDataPartBuilder(*this, name, volume, relative_data_path, part_dir); + return MergeTreeDataPartBuilder(*this, name, volume, relative_data_path, part_dir, read_settings_); } void MergeTreeData::changeSettings( @@ -5812,7 +5812,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartRestoredFromBackup(cons /// Load this part from the directory `temp_part_dir`. auto load_part = [&] { - MergeTreeDataPartBuilder builder(*this, part_name, single_disk_volume, parent_part_dir, part_dir_name); + MergeTreeDataPartBuilder builder(*this, part_name, single_disk_volume, parent_part_dir, part_dir_name, getReadSettings()); builder.withPartFormatFromDisk(); part = std::move(builder).build(); part->version.setCreationTID(Tx::PrehistoricTID, nullptr); @@ -5827,7 +5827,7 @@ MergeTreeData::MutableDataPartPtr MergeTreeData::loadPartRestoredFromBackup(cons if (!part) { /// Make a fake data part only to copy its files to /detached/. 
- part = MergeTreeDataPartBuilder{*this, part_name, single_disk_volume, parent_part_dir, part_dir_name} + part = MergeTreeDataPartBuilder{*this, part_name, single_disk_volume, parent_part_dir, part_dir_name, getReadSettings()} .withPartStorageType(MergeTreeDataPartStorageType::Full) .withPartType(MergeTreeDataPartType::Wide) .build(); @@ -6473,7 +6473,7 @@ MergeTreeData::MutableDataPartsVector MergeTreeData::tryLoadPartsToAttach(const LOG_DEBUG(log, "Checking part {}", new_name); auto single_disk_volume = std::make_shared("volume_" + old_name, disk); - auto part = getDataPartBuilder(old_name, single_disk_volume, source_dir / new_name) + auto part = getDataPartBuilder(old_name, single_disk_volume, source_dir / new_name, getReadSettings()) .withPartFormatFromDisk() .build(); @@ -7528,7 +7528,7 @@ std::pair MergeTreeData::cloneAn std::string(fs::path(dst_part_storage->getFullRootPath()) / tmp_dst_part_name), with_copy); - auto dst_data_part = MergeTreeDataPartBuilder(*this, dst_part_name, dst_part_storage) + auto dst_data_part = MergeTreeDataPartBuilder(*this, dst_part_name, dst_part_storage, getReadSettings()) .withPartFormatFromDisk() .build(); @@ -8786,7 +8786,7 @@ std::pair MergeTreeData::createE VolumePtr data_part_volume = createVolumeFromReservation(reservation, volume); auto tmp_dir_holder = getTemporaryPartDirectoryHolder(EMPTY_PART_TMP_PREFIX + new_part_name); - auto new_data_part = getDataPartBuilder(new_part_name, data_part_volume, EMPTY_PART_TMP_PREFIX + new_part_name) + auto new_data_part = getDataPartBuilder(new_part_name, data_part_volume, EMPTY_PART_TMP_PREFIX + new_part_name, getReadSettings()) .withBytesAndRowsOnDisk(0, 0) .withPartInfo(new_part_info) .build(); diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 7a9730e8627..8438ac412c9 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -241,7 +241,7 @@ public: MergeTreeDataPartFormat choosePartFormat(size_t bytes_uncompressed, size_t rows_count) const; MergeTreeDataPartFormat choosePartFormatOnDisk(size_t bytes_uncompressed, size_t rows_count) const; - MergeTreeDataPartBuilder getDataPartBuilder(const String & name, const VolumePtr & volume, const String & part_dir) const; + MergeTreeDataPartBuilder getDataPartBuilder(const String & name, const VolumePtr & volume, const String & part_dir, const ReadSettings & read_settings_) const; /// Auxiliary object to add a set of parts into the working set in two steps: /// * First, as PreActive parts (the parts are ready, but not yet in the active set). 
diff --git a/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp b/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp index 37f578b0c25..6ec4bc31d90 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartBuilder.cpp @@ -14,20 +14,22 @@ namespace ErrorCodes } MergeTreeDataPartBuilder::MergeTreeDataPartBuilder( - const MergeTreeData & data_, String name_, VolumePtr volume_, String root_path_, String part_dir_) + const MergeTreeData & data_, String name_, VolumePtr volume_, String root_path_, String part_dir_, const ReadSettings & read_settings_) : data(data_) , name(std::move(name_)) , volume(std::move(volume_)) , root_path(std::move(root_path_)) , part_dir(std::move(part_dir_)) + , read_settings(read_settings_) { } MergeTreeDataPartBuilder::MergeTreeDataPartBuilder( - const MergeTreeData & data_, String name_, MutableDataPartStoragePtr part_storage_) + const MergeTreeData & data_, String name_, MutableDataPartStoragePtr part_storage_, const ReadSettings & read_settings_) : data(data_) , name(std::move(name_)) , part_storage(std::move(part_storage_)) + , read_settings(read_settings_) { } @@ -73,7 +75,8 @@ MutableDataPartStoragePtr MergeTreeDataPartBuilder::getPartStorageByType( MergeTreeDataPartStorageType storage_type_, const VolumePtr & volume_, const String & root_path_, - const String & part_dir_) + const String & part_dir_, + const ReadSettings &) /// Unused here, but used in private repo. { if (!volume_) throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot create part storage, because volume is not specified"); @@ -112,7 +115,7 @@ MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartType(MergeTreeDataP MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartStorageType(MergeTreeDataPartStorageType storage_type_) { - part_storage = getPartStorageByType(storage_type_, volume, root_path, part_dir); + part_storage = getPartStorageByType(storage_type_, volume, root_path, part_dir, read_settings); return *this; } @@ -126,7 +129,8 @@ MergeTreeDataPartBuilder::PartStorageAndMarkType MergeTreeDataPartBuilder::getPartStorageAndMarkType( const VolumePtr & volume_, const String & root_path_, - const String & part_dir_) + const String & part_dir_, + const ReadSettings & read_settings_) { auto disk = volume_->getDisk(); auto part_relative_path = fs::path(root_path_) / part_dir_; @@ -138,7 +142,7 @@ MergeTreeDataPartBuilder::getPartStorageAndMarkType( if (MarkType::isMarkFileExtension(ext)) { - auto storage = getPartStorageByType(MergeTreeDataPartStorageType::Full, volume_, root_path_, part_dir_); + auto storage = getPartStorageByType(MergeTreeDataPartStorageType::Full, volume_, root_path_, part_dir_, read_settings_); return {std::move(storage), MarkType(ext)}; } } @@ -156,7 +160,7 @@ MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartFormatFromDisk() MergeTreeDataPartBuilder & MergeTreeDataPartBuilder::withPartFormatFromVolume() { assert(volume); - auto [storage, mark_type] = getPartStorageAndMarkType(volume, root_path, part_dir); + auto [storage, mark_type] = getPartStorageAndMarkType(volume, root_path, part_dir, read_settings); if (!storage || !mark_type) { diff --git a/src/Storages/MergeTree/MergeTreeDataPartBuilder.h b/src/Storages/MergeTree/MergeTreeDataPartBuilder.h index 0f54ff0a631..bce881a1970 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartBuilder.h +++ b/src/Storages/MergeTree/MergeTreeDataPartBuilder.h @@ -21,8 +21,8 @@ using VolumePtr = std::shared_ptr; class MergeTreeDataPartBuilder { public: - 
MergeTreeDataPartBuilder(const MergeTreeData & data_, String name_, VolumePtr volume_, String root_path_, String part_dir_); - MergeTreeDataPartBuilder(const MergeTreeData & data_, String name_, MutableDataPartStoragePtr part_storage_); + MergeTreeDataPartBuilder(const MergeTreeData & data_, String name_, VolumePtr volume_, String root_path_, String part_dir_, const ReadSettings & read_settings_); + MergeTreeDataPartBuilder(const MergeTreeData & data_, String name_, MutableDataPartStoragePtr part_storage_, const ReadSettings & read_settings_); std::shared_ptr build(); @@ -42,7 +42,8 @@ public: static PartStorageAndMarkType getPartStorageAndMarkType( const VolumePtr & volume_, const String & root_path_, - const String & part_dir_); + const String & part_dir_, + const ReadSettings & read_settings); private: Self & withPartFormatFromVolume(); @@ -52,7 +53,8 @@ private: MergeTreeDataPartStorageType storage_type_, const VolumePtr & volume_, const String & root_path_, - const String & part_dir_); + const String & part_dir_, + const ReadSettings & read_settings); const MergeTreeData & data; const String name; @@ -64,6 +66,8 @@ private: std::optional part_type; MutableDataPartStoragePtr part_storage; const IMergeTreeDataPart * parent_part = nullptr; + + const ReadSettings read_settings; }; } diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index 67fef759ed4..12dbd529f70 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -609,7 +609,7 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( } } - auto new_data_part = data.getDataPartBuilder(part_name, data_part_volume, part_dir) + auto new_data_part = data.getDataPartBuilder(part_name, data_part_volume, part_dir, getReadSettings()) .withPartFormat(data.choosePartFormat(expected_size, block.rows())) .withPartInfo(new_part_info) .build(); diff --git a/src/Storages/MergeTree/MergeTreePartsMover.cpp b/src/Storages/MergeTree/MergeTreePartsMover.cpp index 48a4a37f444..e9c9f2b4b06 100644 --- a/src/Storages/MergeTree/MergeTreePartsMover.cpp +++ b/src/Storages/MergeTree/MergeTreePartsMover.cpp @@ -280,7 +280,7 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me cloned_part_storage = part->makeCloneOnDisk(disk, MergeTreeData::MOVING_DIR_NAME, read_settings, write_settings, cancellation_hook); } - MergeTreeDataPartBuilder builder(*data, part->name, cloned_part_storage); + MergeTreeDataPartBuilder builder(*data, part->name, cloned_part_storage, getReadSettings()); cloned_part.part = std::move(builder).withPartFormatFromDisk().build(); LOG_TRACE(log, "Part {} was cloned to {}", part->name, cloned_part.part->getDataPartStorage().getFullPath()); diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 2e7847fc99f..92e0193fff9 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -2286,7 +2286,7 @@ bool MutateTask::prepare() String tmp_part_dir_name = prefix + ctx->future_part->name; ctx->temporary_directory_lock = ctx->data->getTemporaryPartDirectoryHolder(tmp_part_dir_name); - auto builder = ctx->data->getDataPartBuilder(ctx->future_part->name, single_disk_volume, tmp_part_dir_name); + auto builder = ctx->data->getDataPartBuilder(ctx->future_part->name, single_disk_volume, tmp_part_dir_name, getReadSettings()); builder.withPartFormat(ctx->future_part->part_format); 
builder.withPartInfo(ctx->future_part->part_info); diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index b5b07a129bd..e5b40c07f69 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -2092,7 +2092,7 @@ MergeTreeData::MutableDataPartPtr StorageReplicatedMergeTree::attachPartHelperFo const auto part_old_name = part_info->getPartNameV1(); const auto volume = std::make_shared("volume_" + part_old_name, disk); - auto part = getDataPartBuilder(entry.new_part_name, volume, fs::path(DETACHED_DIR_NAME) / part_old_name) + auto part = getDataPartBuilder(entry.new_part_name, volume, fs::path(DETACHED_DIR_NAME) / part_old_name, getReadSettings()) .withPartFormatFromDisk() .build(); From 1fd66d0472d90bc6da1d0f04dce8140b83fd6bb7 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Thu, 31 Oct 2024 14:58:27 +0100 Subject: [PATCH 228/353] Update SerializationObject.cpp --- src/DataTypes/Serializations/SerializationObject.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index cf63797b0c2..19e12d777e4 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -365,7 +365,7 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationObject::deserializeOb auto structure_state = std::make_shared(serialization_version); if (structure_state->serialization_version.value == ObjectSerializationVersion::Value::V1 || structure_state->serialization_version.value == ObjectSerializationVersion::Value::V2) { - if (structure_state->structure_version.value == ObjectSerializationVersion::Value::V1) + if (structure_state->serialization_version.value == ObjectSerializationVersion::Value::V1) { /// Skip max_dynamic_paths parameter in V1 serialization version. 
size_t max_dynamic_paths; From 936d6b22518e7711adc4991663f6474b42805eb8 Mon Sep 17 00:00:00 2001 From: MikhailBurdukov Date: Thu, 31 Oct 2024 14:05:33 +0000 Subject: [PATCH 229/353] Fix unescaping in named collections --- .../NamedCollectionsMetadataStorage.cpp | 2 +- tests/integration/test_named_collections/test.py | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp b/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp index b8413bfadd7..8bb411f1437 100644 --- a/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp +++ b/src/Common/NamedCollections/NamedCollectionsMetadataStorage.cpp @@ -568,7 +568,7 @@ std::vector NamedCollectionsMetadataStorage::listCollections() cons std::vector collections; collections.reserve(paths.size()); for (const auto & path : paths) - collections.push_back(std::filesystem::path(path).stem()); + collections.push_back(unescapeForFileName(std::filesystem::path(path).stem())); return collections; } diff --git a/tests/integration/test_named_collections/test.py b/tests/integration/test_named_collections/test.py index ed80898ebc7..bd04bb9e3c8 100644 --- a/tests/integration/test_named_collections/test.py +++ b/tests/integration/test_named_collections/test.py @@ -794,3 +794,17 @@ def test_keeper_storage_remove_on_cluster(cluster, ignore, expected_raise): node.query( f"DROP NAMED COLLECTION test_nc ON CLUSTER `replicated_nc_nodes_cluster`" ) + + +@pytest.mark.parametrize( + "instance_name", + [("node"), ("node_with_keeper")], +) +def test_name_escaping(cluster, instance_name): + node = cluster.instances[instance_name] + + node.query("DROP NAMED COLLECTION IF EXISTS test;") + node.query("CREATE NAMED COLLECTION `test_!strange/symbols!` AS key1=1, key2=2") + node.restart_clickhouse() + + node.query("DROP NAMED COLLECTION `test_!strange/symbols!`") From 542dac1815858e55147a5db80e58690bb8b72df2 Mon Sep 17 00:00:00 2001 From: avogar Date: Mon, 28 Oct 2024 10:31:50 +0000 Subject: [PATCH 230/353] Implement simple CAST from Map/Tuple/Object to new JSON through serialization/deserialization from JSON string --- src/DataTypes/DataTypeObject.cpp | 10 +++++ src/DataTypes/DataTypeObject.h | 3 ++ .../Serializations/SerializationObject.cpp | 11 +---- .../Serializations/SerializationObject.h | 3 -- .../SerializationObjectDynamicPath.cpp | 8 ++-- .../Serializations/SerializationSubObject.cpp | 8 ++-- src/Functions/FunctionsConversion.cpp | 42 ++++++++++++++----- ...61_tuple_map_object_to_json_cast.reference | 23 ++++++++++ .../03261_tuple_map_object_to_json_cast.sql | 14 +++++++ 9 files changed, 91 insertions(+), 31 deletions(-) create mode 100644 tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.reference create mode 100644 tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 18bfed9c5c3..d744e851ea9 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -1,6 +1,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -522,6 +525,13 @@ static DataTypePtr createObject(const ASTPtr & arguments, const DataTypeObject:: return std::make_shared(schema_format, std::move(typed_paths), std::move(paths_to_skip), std::move(path_regexps_to_skip), max_dynamic_paths, max_dynamic_types); } +const DataTypePtr & DataTypeObject::getTypeOfSharedData() +{ + /// Array(Tuple(String, String)) + static const DataTypePtr 
type = std::make_shared(std::make_shared(DataTypes{std::make_shared(), std::make_shared()}, Names{"paths", "values"})); + return type; +} + static DataTypePtr createJSON(const ASTPtr & arguments) { auto context = CurrentThread::getQueryContext(); diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 7eb2e7729de..32ed6a7ee86 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -63,6 +63,9 @@ public: size_t getMaxDynamicTypes() const { return max_dynamic_types; } size_t getMaxDynamicPaths() const { return max_dynamic_paths; } + /// Shared data has type Array(Tuple(String, String)). + static const DataTypePtr & getTypeOfSharedData(); + private: SchemaFormat schema_format; /// Set of paths with types that were specified in type declaration. diff --git a/src/DataTypes/Serializations/SerializationObject.cpp b/src/DataTypes/Serializations/SerializationObject.cpp index 0fbf8c54a22..3e1badb25ca 100644 --- a/src/DataTypes/Serializations/SerializationObject.cpp +++ b/src/DataTypes/Serializations/SerializationObject.cpp @@ -25,7 +25,7 @@ SerializationObject::SerializationObject( : typed_path_serializations(std::move(typed_path_serializations_)) , paths_to_skip(paths_to_skip_) , dynamic_serialization(std::make_shared()) - , shared_data_serialization(getTypeOfSharedData()->getDefaultSerialization()) + , shared_data_serialization(DataTypeObject::getTypeOfSharedData()->getDefaultSerialization()) { /// We will need sorted order of typed paths to serialize them in order for consistency. sorted_typed_paths.reserve(typed_path_serializations.size()); @@ -38,13 +38,6 @@ SerializationObject::SerializationObject( path_regexps_to_skip.emplace_back(regexp_str); } -const DataTypePtr & SerializationObject::getTypeOfSharedData() -{ - /// Array(Tuple(String, String)) - static const DataTypePtr type = std::make_shared(std::make_shared(DataTypes{std::make_shared(), std::make_shared()}, Names{"paths", "values"})); - return type; -} - bool SerializationObject::shouldSkipPath(const String & path) const { if (paths_to_skip.contains(path)) @@ -168,7 +161,7 @@ void SerializationObject::enumerateStreams(EnumerateStreamsSettings & settings, settings.path.push_back(Substream::ObjectSharedData); auto shared_data_substream_data = SubstreamData(shared_data_serialization) - .withType(getTypeOfSharedData()) + .withType(DataTypeObject::getTypeOfSharedData()) .withColumn(column_object ? column_object->getSharedDataPtr() : nullptr) .withSerializationInfo(data.serialization_info) .withDeserializeState(deserialize_state ? deserialize_state->shared_data_state : nullptr); diff --git a/src/DataTypes/Serializations/SerializationObject.h b/src/DataTypes/Serializations/SerializationObject.h index 420293ba428..8bc72312da1 100644 --- a/src/DataTypes/Serializations/SerializationObject.h +++ b/src/DataTypes/Serializations/SerializationObject.h @@ -111,9 +111,6 @@ private: DeserializeBinaryBulkSettings & settings, SubstreamsDeserializeStatesCache * cache); - /// Shared data has type Array(Tuple(String, String)). 
- static const DataTypePtr & getTypeOfSharedData(); - struct TypedPathSubcolumnCreator : public ISubcolumnCreator { String path; diff --git a/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp b/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp index 5323079c54b..c1f26eca792 100644 --- a/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp +++ b/src/DataTypes/Serializations/SerializationObjectDynamicPath.cpp @@ -18,7 +18,7 @@ SerializationObjectDynamicPath::SerializationObjectDynamicPath( , path(path_) , path_subcolumn(path_subcolumn_) , dynamic_serialization(std::make_shared()) - , shared_data_serialization(SerializationObject::getTypeOfSharedData()->getDefaultSerialization()) + , shared_data_serialization(DataTypeObject::getTypeOfSharedData()->getDefaultSerialization()) , max_dynamic_types(max_dynamic_types_) { } @@ -67,8 +67,8 @@ void SerializationObjectDynamicPath::enumerateStreams( { settings.path.push_back(Substream::ObjectSharedData); auto shared_data_substream_data = SubstreamData(shared_data_serialization) - .withType(data.type ? SerializationObject::getTypeOfSharedData() : nullptr) - .withColumn(data.column ? SerializationObject::getTypeOfSharedData()->createColumn() : nullptr) + .withType(data.type ? DataTypeObject::getTypeOfSharedData() : nullptr) + .withColumn(data.column ? DataTypeObject::getTypeOfSharedData()->createColumn() : nullptr) .withSerializationInfo(data.serialization_info) .withDeserializeState(deserialize_state->nested_state); settings.path.back().data = shared_data_substream_data; @@ -164,7 +164,7 @@ void SerializationObjectDynamicPath::deserializeBinaryBulkWithMultipleStreams( settings.path.push_back(Substream::ObjectSharedData); /// Initialize shared_data column if needed. if (result_column->empty()) - dynamic_path_state->shared_data = SerializationObject::getTypeOfSharedData()->createColumn(); + dynamic_path_state->shared_data = DataTypeObject::getTypeOfSharedData()->createColumn(); size_t prev_size = result_column->size(); shared_data_serialization->deserializeBinaryBulkWithMultipleStreams(dynamic_path_state->shared_data, limit, settings, dynamic_path_state->nested_state, cache); /// If we need to read a subcolumn from Dynamic column, create an empty Dynamic column, fill it and extract subcolumn. diff --git a/src/DataTypes/Serializations/SerializationSubObject.cpp b/src/DataTypes/Serializations/SerializationSubObject.cpp index 9084d46f9b2..ff61cb55572 100644 --- a/src/DataTypes/Serializations/SerializationSubObject.cpp +++ b/src/DataTypes/Serializations/SerializationSubObject.cpp @@ -17,7 +17,7 @@ SerializationSubObject::SerializationSubObject( : path_prefix(path_prefix_) , typed_paths_serializations(typed_paths_serializations_) , dynamic_serialization(std::make_shared()) - , shared_data_serialization(SerializationObject::getTypeOfSharedData()->getDefaultSerialization()) + , shared_data_serialization(DataTypeObject::getTypeOfSharedData()->getDefaultSerialization()) { } @@ -64,8 +64,8 @@ void SerializationSubObject::enumerateStreams( /// We will need to read shared data to find all paths with requested prefix. settings.path.push_back(Substream::ObjectSharedData); auto shared_data_substream_data = SubstreamData(shared_data_serialization) - .withType(data.type ? SerializationObject::getTypeOfSharedData() : nullptr) - .withColumn(data.column ? SerializationObject::getTypeOfSharedData()->createColumn() : nullptr) + .withType(data.type ? DataTypeObject::getTypeOfSharedData() : nullptr) + .withColumn(data.column ? 
DataTypeObject::getTypeOfSharedData()->createColumn() : nullptr) .withSerializationInfo(data.serialization_info) .withDeserializeState(deserialize_state ? deserialize_state->shared_data_state : nullptr); settings.path.back().data = shared_data_substream_data; @@ -208,7 +208,7 @@ void SerializationSubObject::deserializeBinaryBulkWithMultipleStreams( settings.path.push_back(Substream::ObjectSharedData); /// If it's a new object column, reinitialize column for shared data. if (result_column->empty()) - sub_object_state->shared_data = SerializationObject::getTypeOfSharedData()->createColumn(); + sub_object_state->shared_data = DataTypeObject::getTypeOfSharedData()->createColumn(); size_t prev_size = column_object.size(); shared_data_serialization->deserializeBinaryBulkWithMultipleStreams(sub_object_state->shared_data, limit, settings, sub_object_state->shared_data_state, cache); settings.path.pop_back(); diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 0f6311c9716..ee04916e7b4 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -3921,7 +3921,7 @@ private: } } - WrapperType createTupleToObjectWrapper(const DataTypeTuple & from_tuple, bool has_nullable_subcolumns) const + WrapperType createTupleToObjectDeprecatedWrapper(const DataTypeTuple & from_tuple, bool has_nullable_subcolumns) const { if (!from_tuple.haveExplicitNames()) throw Exception(ErrorCodes::TYPE_MISMATCH, @@ -3968,7 +3968,7 @@ private: }; } - WrapperType createMapToObjectWrapper(const DataTypeMap & from_map, bool has_nullable_subcolumns) const + WrapperType createMapToObjectDeprecatedWrapper(const DataTypeMap & from_map, bool has_nullable_subcolumns) const { auto key_value_types = from_map.getKeyValueTypes(); @@ -4048,11 +4048,11 @@ private: { if (const auto * from_tuple = checkAndGetDataType(from_type.get())) { - return createTupleToObjectWrapper(*from_tuple, to_type->hasNullableSubcolumns()); + return createTupleToObjectDeprecatedWrapper(*from_tuple, to_type->hasNullableSubcolumns()); } else if (const auto * from_map = checkAndGetDataType(from_type.get())) { - return createMapToObjectWrapper(*from_map, to_type->hasNullableSubcolumns()); + return createMapToObjectDeprecatedWrapper(*from_map, to_type->hasNullableSubcolumns()); } else if (checkAndGetDataType(from_type.get())) { @@ -4081,23 +4081,43 @@ private: "Cast to Object can be performed only from flatten named Tuple, Map or String. Got: {}", from_type->getName()); } + WrapperType createObjectWrapper(const DataTypePtr & from_type, const DataTypeObject * to_object) const { if (checkAndGetDataType(from_type.get())) { return [this](ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count) { - auto res = ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count, context)->assumeMutable(); - res->finalize(); - return res; + return ConvertImplGenericFromString::execute(arguments, result_type, nullable_source, input_rows_count, context); + }; + } + + /// Cast Tuple/Object/Map to JSON type through serializing into JSON string and parsing back into JSON column. + /// Potentially we can do smarter conversion Tuple -> JSON with type preservation, but it's questionable how exactly Tuple should be + /// converted to JSON (for example, should we recursively convert nested Array(Tuple) to Array(JSON) or not, should we infer types from String fields, etc). 
+ if (checkAndGetDataType(from_type.get()) || checkAndGetDataType(from_type.get()) || checkAndGetDataType(from_type.get())) + { + return [this](ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * nullable_source, size_t input_rows_count) + { + auto json_string = ColumnString::create(); + ColumnStringHelpers::WriteHelper write_helper(assert_cast(*json_string), input_rows_count); + auto & write_buffer = write_helper.getWriteBuffer(); + FormatSettings format_settings = context ? getFormatSettings(context) : FormatSettings{}; + auto serialization = arguments[0].type->getDefaultSerialization(); + for (size_t i = 0; i < input_rows_count; ++i) + { + serialization->serializeTextJSON(*arguments[0].column, i, write_buffer, format_settings); + write_helper.rowWritten(); + } + write_helper.finalize(); + + ColumnsWithTypeAndName args_with_json_string = {ColumnWithTypeAndName(json_string->getPtr(), std::make_shared(), "")}; + return ConvertImplGenericFromString::execute(args_with_json_string, result_type, nullable_source, input_rows_count, context); }; } /// TODO: support CAST between JSON types with different parameters - /// support CAST from Map to JSON - /// support CAST from Tuple to JSON - /// support CAST from Object('json') to JSON - throw Exception(ErrorCodes::TYPE_MISMATCH, "Cast to {} can be performed only from String. Got: {}", magic_enum::enum_name(to_object->getSchemaFormat()), from_type->getName()); + throw Exception(ErrorCodes::TYPE_MISMATCH, "Cast to {} can be performed only from String/Map/Object/Tuple. Got: {}", magic_enum::enum_name(to_object->getSchemaFormat()), from_type->getName()); } WrapperType createVariantToVariantWrapper(const DataTypeVariant & from_variant, const DataTypeVariant & to_variant) const diff --git a/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.reference b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.reference new file mode 100644 index 00000000000..0ae94e68663 --- /dev/null +++ b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.reference @@ -0,0 +1,23 @@ +Map to JSON +{"a":"0","b":"1970-01-01","c":[],"d":[{"e":"0"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"1","b":"1970-01-02","c":["0"],"d":[{"e":"1"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"2","b":"1970-01-03","c":["0","1"],"d":[{"e":"2"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"3","b":"1970-01-04","c":["0","1","2"],"d":[{"e":"3"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"4","b":"1970-01-05","c":["0","1","2","3"],"d":[{"e":"4"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a0":"0","b0":"1970-01-01","c0":[],"d0":[{"e0":"0"}]} {'a0':'Int64','b0':'Date','c0':'Array(Nullable(String))','d0':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a1":"1","b1":"1970-01-02","c1":["0"],"d1":[{"e1":"1"}]} {'a1':'Int64','b1':'Date','c1':'Array(Nullable(String))','d1':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a2":"2","b2":"1970-01-03","c2":["0","1"],"d2":[{"e2":"2"}]} {'a2':'Int64','b2':'Date','c2':'Array(Nullable(String))','d2':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} 
+{"a0":"3","b0":"1970-01-04","c0":["0","1","2"],"d0":[{"e0":"3"}]} {'a0':'Int64','b0':'Date','c0':'Array(Nullable(String))','d0':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a1":"4","b1":"1970-01-05","c1":["0","1","2","3"],"d1":[{"e1":"4"}]} {'a1':'Int64','b1':'Date','c1':'Array(Nullable(String))','d1':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +Tuple to JSON +{"a":"0","b":"1970-01-01","c":[],"d":[{"e":"0"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"1","b":"1970-01-02","c":["0"],"d":[{"e":"1"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"2","b":"1970-01-03","c":["0","1"],"d":[{"e":"2"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"3","b":"1970-01-04","c":["0","1","2"],"d":[{"e":"3"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +{"a":"4","b":"1970-01-05","c":["0","1","2","3"],"d":[{"e":"4"}]} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d':'Array(JSON(max_dynamic_types=16, max_dynamic_paths=256))'} +Object to JSON +{"a":"0","b":"1970-01-01","c":[],"d":{"e":["0"]}} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d.e':'Array(Nullable(Int64))'} +{"a":"1","b":"1970-01-02","c":["0"],"d":{"e":["1"]}} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d.e':'Array(Nullable(Int64))'} +{"a":"2","b":"1970-01-03","c":["0","1"],"d":{"e":["2"]}} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d.e':'Array(Nullable(Int64))'} +{"a":"3","b":"1970-01-04","c":["0","1","2"],"d":{"e":["3"]}} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d.e':'Array(Nullable(Int64))'} +{"a":"4","b":"1970-01-05","c":["0","1","2","3"],"d":{"e":["4"]}} {'a':'Int64','b':'Date','c':'Array(Nullable(String))','d.e':'Array(Nullable(Int64))'} diff --git a/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql new file mode 100644 index 00000000000..fcec7eb3af4 --- /dev/null +++ b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql @@ -0,0 +1,14 @@ +set allow_experimental_json_type = 1; +set allow_experimental_object_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +select 'Map to JSON'; +select map('a', number::UInt32, 'b', toDate(number), 'c', range(number), 'd', [map('e', number::UInt32)])::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); +select map('a' || number % 3, number::UInt32, 'b' || number % 3, toDate(number), 'c' || number % 3, range(number), 'd' || number % 3, [map('e' || number % 3, number::UInt32)])::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); + +select 'Tuple to JSON'; +select tuple(number::UInt32 as a, toDate(number) as b, range(number) as c, [tuple(number::UInt32 as e)] as d)::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); + +select 'Object to JSON'; +select toJSONString(map('a', number::UInt32, 'b', toDate(number), 'c', range(number), 'd', [map('e', number::UInt32)]))::Object('json')::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); From 83f434dffb6bad82abdc791179196b32e1a7f347 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Thu, 31 Oct 2024 16:25:17 +0000 Subject: [PATCH 231/353] fix simple path --- src/Processors/Transforms/FillingTransform.cpp | 
5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 4a8965dcfaa..dd116a9972a 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -608,9 +608,6 @@ void FillingTransform::transformRange( const auto current_value = (*input_fill_columns[i])[range_begin]; const auto & fill_from = filling_row.getFillDescription(i).fill_from; - logDebug("current value", current_value.dump()); - logDebug("fill from", fill_from.dump()); - if (!fill_from.isNull() && !equals(current_value, fill_from)) { filling_row.initUsingFrom(i); @@ -663,6 +660,7 @@ void FillingTransform::transformRange( interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); + filling_row_changed = false; } /// Initialize staleness border for current row to generate it's prefix @@ -679,6 +677,7 @@ void FillingTransform::transformRange( interpolate(result_columns, interpolate_block); insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); + filling_row_changed = false; } while (filling_row.next(next_row, filling_row_changed)); } From 1000ef0e022516536cbd680fa6a206bf5401295c Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Thu, 31 Oct 2024 16:39:31 +0000 Subject: [PATCH 232/353] some improves --- src/Interpreters/FillingRow.cpp | 20 ++++++++----- .../Transforms/FillingTransform.cpp | 30 +++++++++++-------- src/Processors/Transforms/FillingTransform.h | 1 + 3 files changed, 31 insertions(+), 20 deletions(-) diff --git a/src/Interpreters/FillingRow.cpp b/src/Interpreters/FillingRow.cpp index 98c18e9b2ae..384ad669206 100644 --- a/src/Interpreters/FillingRow.cpp +++ b/src/Interpreters/FillingRow.cpp @@ -13,7 +13,7 @@ namespace DB constexpr static bool debug_logging_enabled = false; template -inline static void logDebug(String fmt_str, Args&&... args) +inline static void logDebug(const char * fmt_str, Args&&... 
args) { if constexpr (debug_logging_enabled) LOG_DEBUG(getLogger("FillingRow"), "{}", fmt::format(fmt::runtime(fmt_str), std::forward(args)...)); @@ -117,7 +117,7 @@ bool FillingRow::isConstraintsSatisfied(size_t pos) const chassert(hasSomeConstraints(pos)); int direction = getDirection(pos); - logDebug("constraint: {}, row: {}, direction: {}", constraints[pos].dump(), row[pos].dump(), direction); + logDebug("constraint: {}, row: {}, direction: {}", constraints[pos], row[pos], direction); return less(row[pos], constraints[pos], direction); } @@ -230,7 +230,7 @@ bool FillingRow::next(const FillingRow & next_original_row, bool& value_changed) bool FillingRow::shift(const FillingRow & next_original_row, bool& value_changed) { - logDebug("next_original_row: {}, current: {}", next_original_row.dump(), dump()); + logDebug("next_original_row: {}, current: {}", next_original_row, *this); for (size_t pos = 0; pos < size(); ++pos) { @@ -318,15 +318,12 @@ void FillingRow::updateConstraintsWithStalenessRow(const Columns& base_row, size for (size_t i = 0; i < size(); ++i) { const auto& descr = getFillDescription(i); - constraints[i] = descr.fill_to; if (!descr.fill_staleness.isNull()) { Field staleness_border = (*base_row[i])[row_ind]; descr.staleness_step_func(staleness_border, 1); - - if (constraints[i].isNull() || less(staleness_border, constraints[i], getDirection(i))) - constraints[i] = std::move(staleness_border); + constraints[i] = findBorder(descr.fill_to, staleness_border, getDirection(i)); } } } @@ -350,3 +347,12 @@ WriteBuffer & operator<<(WriteBuffer & out, const FillingRow & row) } } + +template <> +struct fmt::formatter : fmt::formatter +{ + constexpr auto format(const DB::FillingRow & row, format_context & ctx) const + { + return fmt::format_to(ctx.out(), "{}", row.dump()); + } +}; diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index dd116a9972a..ab782f3e521 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -20,7 +20,7 @@ namespace DB constexpr static bool debug_logging_enabled = false; template -inline static void logDebug(String key, const T & value, const char * separator = " : ") +inline static void logDebug(const char * key, const T & value, const char * separator = " : ") { if constexpr (debug_logging_enabled) { @@ -235,6 +235,7 @@ FillingTransform::FillingTransform( fill_column_positions.push_back(block_position); auto & descr = filling_row.getFillDescription(i); + running_with_staleness |= !descr.fill_staleness.isNull(); const Block & output_header = getOutputPort().getHeader(); const DataTypePtr & type = removeNullable(output_header.getByPosition(block_position).type); @@ -663,23 +664,26 @@ void FillingTransform::transformRange( filling_row_changed = false; } - /// Initialize staleness border for current row to generate it's prefix - filling_row.updateConstraintsWithStalenessRow(input_fill_columns, row_ind); - - while (filling_row.shift(next_row, filling_row_changed)) + if (running_with_staleness) { - logDebug("filling_row after shift", filling_row); + /// Initialize staleness border for current row to generate it's prefix + filling_row.updateConstraintsWithStalenessRow(input_fill_columns, row_ind); - do + while (filling_row.shift(next_row, filling_row_changed)) { - logDebug("inserting prefix filling_row", filling_row); + logDebug("filling_row after shift", filling_row); - interpolate(result_columns, interpolate_block); - 
insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); - copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); - filling_row_changed = false; + do + { + logDebug("inserting prefix filling_row", filling_row); - } while (filling_row.next(next_row, filling_row_changed)); + interpolate(result_columns, interpolate_block); + insertFromFillingRow(res_fill_columns, res_interpolate_columns, res_other_columns, interpolate_block); + copyRowFromColumns(res_sort_prefix_columns, input_sort_prefix_columns, row_ind); + filling_row_changed = false; + + } while (filling_row.next(next_row, filling_row_changed)); + } } /// new valid filling row was generated but not inserted, will use it during suffix generation diff --git a/src/Processors/Transforms/FillingTransform.h b/src/Processors/Transforms/FillingTransform.h index a8866a97103..92ca4fe6c9e 100644 --- a/src/Processors/Transforms/FillingTransform.h +++ b/src/Processors/Transforms/FillingTransform.h @@ -84,6 +84,7 @@ private: SortDescription sort_prefix; const InterpolateDescriptionPtr interpolate_description; /// Contains INTERPOLATE columns + bool running_with_staleness = false; /// True if STALENESS clause was used. FillingRow filling_row; /// Current row, which is used to fill gaps. FillingRow next_row; /// Row to which we need to generate filling rows. bool filling_row_inserted = false; From 9021aeaaff66f7a0c0daeb37d1cd42157c5a15aa Mon Sep 17 00:00:00 2001 From: avogar Date: Thu, 31 Oct 2024 16:57:51 +0000 Subject: [PATCH 233/353] Add docs --- docs/en/sql-reference/data-types/newjson.md | 46 +++++++++++++++++++-- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/docs/en/sql-reference/data-types/newjson.md b/docs/en/sql-reference/data-types/newjson.md index 68952590eb9..2f54d45cd64 100644 --- a/docs/en/sql-reference/data-types/newjson.md +++ b/docs/en/sql-reference/data-types/newjson.md @@ -58,10 +58,10 @@ SELECT json FROM test; └───────────────────────────────────┘ ``` -Using CAST from 'String': +Using CAST from `String`: ```sql -SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json; +SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON AS json; ``` ```text @@ -70,7 +70,47 @@ SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json └────────────────────────────────────────────────┘ ``` -CAST from `JSON`, named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later. +Using CAST from `Tuple`: + +```sql +SELECT (tuple(42 AS b) AS a, [1, 2, 3] AS c, 'Hello, World!' 
AS d)::JSON AS json; +``` + +```text +┌─json───────────────────────────────────────────┐ +│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │ +└────────────────────────────────────────────────┘ +``` + +Using CAST from `Map`: + +```sql +SELECT map('a', map('b', 42), 'c', [1,2,3], 'd', 'Hello, World!')::JSON AS json; +``` + +```text +┌─json───────────────────────────────────────────┐ +│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │ +└────────────────────────────────────────────────┘ +``` + +Using CAST from deprecated `Object('json')`: + +```sql + SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::Object('json')::JSON AS json; + ``` + +```text +┌─json───────────────────────────────────────────┐ +│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │ +└────────────────────────────────────────────────┘ +``` + +:::note +CAST from `Tuple`/`Map`/`Object('json')` to `JSON` is implemented via serializing the column into `String` column containing JSON objects and deserializing it back to `JSON` type column. +::: + +CAST between `JSON` types with different arguments will be supported later. ## Reading JSON paths as subcolumns From ca389d0d71c96998f0c9feeca6ffae913a02fa77 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 31 Oct 2024 18:43:56 +0100 Subject: [PATCH 234/353] Move settings to cloud level --- src/Core/Settings.cpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 404f5a6b090..ee814e72447 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4846,12 +4846,6 @@ Limit on size of a single batch of file segments that a read buffer can request )", 0) \ DECLARE(UInt64, filesystem_cache_reserve_space_wait_lock_timeout_milliseconds, 1000, R"( Wait time to lock cache for space reservation in filesystem cache -)", 0) \ - DECLARE(Bool, filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage, true, R"( -Wait time to lock cache for space reservation in filesystem cache -)", 0) \ - DECLARE(Bool, filesystem_cache_enable_background_download_during_fetch, true, R"( -Wait time to lock cache for space reservation in filesystem cache )", 0) \ DECLARE(UInt64, temporary_data_in_cache_reserve_space_wait_lock_timeout_milliseconds, (10 * 60 * 1000), R"( Wait time to lock cache for space reservation for temporary data in filesystem cache @@ -5112,6 +5106,12 @@ Only in ClickHouse Cloud. A maximum number of unacknowledged in-flight packets i )", 0) \ DECLARE(UInt64, distributed_cache_data_packet_ack_window, DistributedCache::ACK_DATA_PACKET_WINDOW, R"( Only in ClickHouse Cloud. A window for sending ACK for DataPacket sequence in a single distributed cache read request +)", 0) \ + DECLARE(Bool, filesystem_cache_enable_background_download_for_metadata_files_in_packed_storage, true, R"( +Only in ClickHouse Cloud. Wait time to lock cache for space reservation in filesystem cache +)", 0) \ + DECLARE(Bool, filesystem_cache_enable_background_download_during_fetch, true, R"( +Only in ClickHouse Cloud. Wait time to lock cache for space reservation in filesystem cache )", 0) \ \ DECLARE(Bool, parallelize_output_from_storages, true, R"( @@ -5122,6 +5122,7 @@ The setting allows a user to provide own deduplication semantic in MergeTree/Rep For example, by providing a unique value for the setting in each INSERT statement, user can avoid the same inserted data being deduplicated. 
+ Possible values: - Any string From b9232c20063054525f0c192f528d77d85e1af9ff Mon Sep 17 00:00:00 2001 From: taiyang-li <654010905@qq.com> Date: Fri, 1 Nov 2024 10:09:54 +0800 Subject: [PATCH 235/353] add uts --- .../0_stateless/03258_quantile_exact_weighted_issue.reference | 2 ++ .../queries/0_stateless/03258_quantile_exact_weighted_issue.sql | 2 ++ 2 files changed, 4 insertions(+) create mode 100644 tests/queries/0_stateless/03258_quantile_exact_weighted_issue.reference create mode 100644 tests/queries/0_stateless/03258_quantile_exact_weighted_issue.sql diff --git a/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.reference b/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.reference new file mode 100644 index 00000000000..69afec5d545 --- /dev/null +++ b/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.reference @@ -0,0 +1,2 @@ +AggregateFunction(quantilesExactWeighted(0.2, 0.4, 0.6, 0.8), UInt64, UInt8) +AggregateFunction(quantilesExactWeightedInterpolated(0.2, 0.4, 0.6, 0.8), UInt64, UInt8) diff --git a/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.sql b/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.sql new file mode 100644 index 00000000000..3069389f4e2 --- /dev/null +++ b/tests/queries/0_stateless/03258_quantile_exact_weighted_issue.sql @@ -0,0 +1,2 @@ +SELECT toTypeName(quantilesExactWeightedState(0.2, 0.4, 0.6, 0.8)(number + 1, 1) AS x) FROM numbers(49999); +SELECT toTypeName(quantilesExactWeightedInterpolatedState(0.2, 0.4, 0.6, 0.8)(number + 1, 1) AS x) FROM numbers(49999); From e851e8f3e48df739ac270d7b8672b1cd38dbad2e Mon Sep 17 00:00:00 2001 From: MikhailBurdukov Date: Fri, 1 Nov 2024 08:29:12 +0000 Subject: [PATCH 236/353] Restart CI From a50bc3bac15867ce0ee2d90afa480efdc9c98670 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Fri, 1 Nov 2024 08:50:54 +0000 Subject: [PATCH 237/353] Update version_date.tsv and changelogs after v24.10.1.2812-stable --- SECURITY.md | 3 +- docker/keeper/Dockerfile | 2 +- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- docs/changelogs/v24.10.1.2812-stable.md | 412 ++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 6 files changed, 418 insertions(+), 4 deletions(-) create mode 100644 docs/changelogs/v24.10.1.2812-stable.md diff --git a/SECURITY.md b/SECURITY.md index db302da8ecd..1b0648dc489 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -14,9 +14,10 @@ The following versions of ClickHouse server are currently supported with securit | Version | Supported | |:-|:-| +| 24.10 | ✔️ | | 24.9 | ✔️ | | 24.8 | ✔️ | -| 24.7 | ✔️ | +| 24.7 | ❌ | | 24.6 | ❌ | | 24.5 | ❌ | | 24.4 | ❌ | diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index dfe6a420260..bc76bdbb619 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.9.2.42" +ARG VERSION="24.10.1.2812" ARG PACKAGES="clickhouse-keeper" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 991c25ad142..93acf1a5773 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -35,7 +35,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.9.2.42" +ARG 
VERSION="24.10.1.2812" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 5dc88b49e31..506a627b11c 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="24.9.2.42" +ARG VERSION="24.10.1.2812" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" #docker-official-library:off diff --git a/docs/changelogs/v24.10.1.2812-stable.md b/docs/changelogs/v24.10.1.2812-stable.md new file mode 100644 index 00000000000..c26bbf706ff --- /dev/null +++ b/docs/changelogs/v24.10.1.2812-stable.md @@ -0,0 +1,412 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.10.1.2812-stable (9cd0a3738d5) FIXME as compared to v24.10.1.1-new (b12a3677418) + +#### Backward Incompatible Change +* Allow to write `SETTINGS` before `FORMAT` in a chain of queries with `UNION` when subqueries are inside parentheses. This closes [#39712](https://github.com/ClickHouse/ClickHouse/issues/39712). Change the behavior when a query has the SETTINGS clause specified twice in a sequence. The closest SETTINGS clause will have a preference for the corresponding subquery. In the previous versions, the outermost SETTINGS clause could take a preference over the inner one. [#68614](https://github.com/ClickHouse/ClickHouse/pull/68614) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Reordering of filter conditions from `[PRE]WHERE` clause is now allowed by default. It could be disabled by setting `allow_reorder_prewhere_conditions` to `false`. [#70657](https://github.com/ClickHouse/ClickHouse/pull/70657) ([Nikita Taranov](https://github.com/nickitat)). +* Fix `optimize_functions_to_subcolumns` optimization (previously could lead to `Invalid column type for ColumnUnique::insertRangeFrom. Expected String, got LowCardinality(String)` error), by preserving `LowCardinality` type in `mapKeys`/`mapValues`. [#70716](https://github.com/ClickHouse/ClickHouse/pull/70716) ([Azat Khuzhin](https://github.com/azat)). +* Remove the `idxd-config` library, which has an incompatible license. This also removes the experimental Intel DeflateQPL codec. [#70987](https://github.com/ClickHouse/ClickHouse/pull/70987) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### New Feature +* MongoDB integration refactored: migration to new driver mongocxx from deprecated Poco::MongoDB, remove support for deprecated old protocol, support for connection by URI, support for all MongoDB types, support for WHERE and ORDER BY statements on MongoDB side, restriction for expression unsupported by MongoDB. [#63279](https://github.com/ClickHouse/ClickHouse/pull/63279) ([Kirill Nikiforov](https://github.com/allmazz)). +* A new `--progress-table` option in clickhouse-client prints a table with metrics changing during query execution; a new `--enable-progress-table-toggle` is associated with the `--progress-table` option, and toggles the rendering of the progress table by pressing the control key (Space). [#63689](https://github.com/ClickHouse/ClickHouse/pull/63689) ([Maria Khristenko](https://github.com/mariaKhr)). 
+* This allows granting access to wildcard prefixes. `GRANT SELECT ON db.table_prefix_* TO user`. [#65311](https://github.com/ClickHouse/ClickHouse/pull/65311) ([pufit](https://github.com/pufit)).
+* Add system.query_metric_log which contains history of memory and metric values from table system.events for individual queries, periodically flushed to disk. [#66532](https://github.com/ClickHouse/ClickHouse/pull/66532) ([Pablo Marcos](https://github.com/pamarcos)).
+* A simple SELECT query can be written with implicit SELECT to enable calculator-style expressions, e.g., `ch "1 + 2"`. This is controlled by a new setting, `implicit_select`. [#68502](https://github.com/ClickHouse/ClickHouse/pull/68502) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Support --copy mode for clickhouse local as a shortcut for format conversion [#68503](https://github.com/ClickHouse/ClickHouse/issues/68503). [#68583](https://github.com/ClickHouse/ClickHouse/pull/68583) ([Denis Hananein](https://github.com/denis-hananein)).
+* Add support for `arrayUnion` function. [#68989](https://github.com/ClickHouse/ClickHouse/pull/68989) ([Peter Nguyen](https://github.com/petern48)).
+* Support the aggregate function `quantileExactWeightedInterpolated`, which is an interpolated version based on quantileExactWeighted. Some people may wonder why we need a new `quantileExactWeightedInterpolated` since we already have `quantileExactInterpolatedWeighted`. The reason is that the new one is more accurate than the old one. It is also needed for Spark compatibility in Apache Gluten. [#69619](https://github.com/ClickHouse/ClickHouse/pull/69619) ([李扬](https://github.com/taiyang-li)).
+* Support function arrayElementOrNull. It returns null if the array index is out of range or the map key is not found. [#69646](https://github.com/ClickHouse/ClickHouse/pull/69646) ([李扬](https://github.com/taiyang-li)).
+* Allows users to specify regular expressions through new `message_regexp` and `message_regexp_negative` fields in the `config.xml` file to filter out logging. The logging is applied to the formatted un-colored text for the most intuitive developer experience. [#69657](https://github.com/ClickHouse/ClickHouse/pull/69657) ([Peter Nguyen](https://github.com/petern48)).
+* Support Dynamic type in most functions by executing them on internal types inside Dynamic. [#69691](https://github.com/ClickHouse/ClickHouse/pull/69691) ([Pavel Kruglov](https://github.com/Avogar)).
+* Re-added `RIPEMD160` function, which computes the RIPEMD-160 cryptographic hash of a string. Example: `SELECT HEX(RIPEMD160('The quick brown fox jumps over the lazy dog'))` returns `37F332F68DB77BD9D7EDD4969571AD671CF9DD3B`. [#70087](https://github.com/ClickHouse/ClickHouse/pull/70087) ([Dergousov Maxim](https://github.com/m7kss1)).
+* Allow to cache read files for object storage table engines and data lakes using hash from ETag + file path as cache key. [#70135](https://github.com/ClickHouse/ClickHouse/pull/70135) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Support reading Iceberg tables on HDFS. [#70268](https://github.com/ClickHouse/ClickHouse/pull/70268) ([flynn](https://github.com/ucasfl)).
+* Allow to read/write JSON type as binary string in RowBinary format under settings `input_format_binary_read_json_as_string/output_format_binary_write_json_as_string`. [#70288](https://github.com/ClickHouse/ClickHouse/pull/70288) ([Pavel Kruglov](https://github.com/Avogar)).
+* Allow to serialize/deserialize JSON column as single String column in Native format.
For output use setting `output_format_native_write_json_as_string`. For input, use serialization version `1` before the column data. [#70312](https://github.com/ClickHouse/ClickHouse/pull/70312) ([Pavel Kruglov](https://github.com/Avogar)).
+* Support the standard CTE form `with insert`, as previously only `insert ... with ...` was supported. [#70593](https://github.com/ClickHouse/ClickHouse/pull/70593) ([Shichao Jin](https://github.com/jsc0218)).
+
+#### Performance Improvement
+* Support minmax index for `pointInPolygon`. [#62085](https://github.com/ClickHouse/ClickHouse/pull/62085) ([JackyWoo](https://github.com/JackyWoo)).
+* Add support for parquet bloom filters. [#62966](https://github.com/ClickHouse/ClickHouse/pull/62966) ([Arthur Passos](https://github.com/arthurpassos)).
+* Lock-free parts rename to avoid INSERT affecting SELECT (due to the parts lock) (under normal circumstances with `fsync_part_directory`, the QPS of SELECT with INSERT in parallel increased 2x; under heavy load the effect is even bigger). Note that this only includes `ReplicatedMergeTree` for now. [#64955](https://github.com/ClickHouse/ClickHouse/pull/64955) ([Azat Khuzhin](https://github.com/azat)).
+* Respect `ttl_only_drop_parts` on `materialize ttl`; only read necessary columns to recalculate TTL and drop parts by replacing them with an empty one. [#65488](https://github.com/ClickHouse/ClickHouse/pull/65488) ([Andrey Zvonov](https://github.com/zvonand)).
+* Refactor `IDisk` and `IObjectStorage` for better performance. Tables from `plain` and `plain_rewritable` object storages will initialize faster. [#68146](https://github.com/ClickHouse/ClickHouse/pull/68146) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Optimized thread creation in the ThreadPool to minimize lock contention. Thread creation is now performed outside of the critical section to avoid delays in job scheduling and thread management under high load conditions. This leads to a much more responsive ClickHouse under heavy concurrent load. [#68694](https://github.com/ClickHouse/ClickHouse/pull/68694) ([filimonov](https://github.com/filimonov)).
+* Enable reading LowCardinality string columns from ORC. [#69481](https://github.com/ClickHouse/ClickHouse/pull/69481) ([李扬](https://github.com/taiyang-li)).
+* Added the ability to parse data directly into sparse columns. [#69828](https://github.com/ClickHouse/ClickHouse/pull/69828) ([Anton Popov](https://github.com/CurtizJ)).
+* Supports parallel reading of parquet row groups and prefetching of row groups in single-threaded mode. [#69862](https://github.com/ClickHouse/ClickHouse/pull/69862) ([LiuNeng](https://github.com/liuneng1994)).
+* Improved performance of parsing formats with a high number of missed values (e.g. `JSONEachRow`). [#69875](https://github.com/ClickHouse/ClickHouse/pull/69875) ([Anton Popov](https://github.com/CurtizJ)).
+* Use `LowCardinality` for `ProfileEvents` in system logs such as `part_log`, `query_views_log`, `filesystem_cache_log`. [#70152](https://github.com/ClickHouse/ClickHouse/pull/70152) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Improve performance of FromUnixTimestamp/ToUnixTimestamp functions. [#71042](https://github.com/ClickHouse/ClickHouse/pull/71042) ([kevinyhzou](https://github.com/KevinyhZou)).
+
+#### Improvement
+* Allow parametrised SQL aliases. [#50665](https://github.com/ClickHouse/ClickHouse/pull/50665) ([Anton Kozlov](https://github.com/tonickkozlov)).
+* Fixed [#57616](https://github.com/ClickHouse/ClickHouse/issues/57616). The problem occurred because all positive number arguments were automatically identified as `UInt64` type, leading to an inability to match `Int`-typed data in `sumMapFiltered`. The non-matching is indeed confusing, as the `UInt64` parameters are not specified by the user. Additionally, if the arguments are `[1,2,3,toInt8(-3)]`, then due to `getLeastSupertype()` these parameters will be uniformly treated as `Int` type, causing `'1,2,3'` to also fail to match the `UInt`-typed data in `sumMapFiltered`. [#58408](https://github.com/ClickHouse/ClickHouse/pull/58408) ([Chen768959](https://github.com/Chen768959)). +* `ALTER TABLE .. REPLACE PARTITION` doesn't wait anymore for mutations/merges that happen in other partitions. [#59138](https://github.com/ClickHouse/ClickHouse/pull/59138) ([Vasily Nemkov](https://github.com/Enmk)). +* Refreshable materialized views are now supported in Replicated databases. [#60669](https://github.com/ClickHouse/ClickHouse/pull/60669) ([Michael Kolupaev](https://github.com/al13n321)). +* Symbolic links for tables in the `data/database_name/` directory are created for the actual paths to the table's data, depending on the storage policy, instead of the `store/...` directory on the default disk. [#61777](https://github.com/ClickHouse/ClickHouse/pull/61777) ([Kirill](https://github.com/kirillgarbar)). +* Apply configuration updates in the global context object. It fixes issues like [#62308](https://github.com/ClickHouse/ClickHouse/issues/62308). [#62944](https://github.com/ClickHouse/ClickHouse/pull/62944) ([Amos Bird](https://github.com/amosbird)). +* Reworked settings that control the behavior of parallel replicas algorithms. A quick recap: ClickHouse has four different algorithms for parallel reading involving multiple replicas, which is reflected in the setting `parallel_replicas_mode`; the default value for it is `read_tasks`. Additionally, the toggle-switch setting `enable_parallel_replicas` has been added. [#63151](https://github.com/ClickHouse/ClickHouse/pull/63151) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix `ReadSettings` not using user-set values; previously only the defaults were used. [#65625](https://github.com/ClickHouse/ClickHouse/pull/65625) ([Kseniia Sumarokova](https://github.com/kssenii)). +* While parsing an Enum field from JSON, a string containing an integer will be interpreted as the corresponding Enum element. This closes [#65119](https://github.com/ClickHouse/ClickHouse/issues/65119). [#66801](https://github.com/ClickHouse/ClickHouse/pull/66801) ([scanhex12](https://github.com/scanhex12)). +* Allow `TRIM`-ing `LEADING` or `TRAILING` empty string as a no-op. Closes [#67792](https://github.com/ClickHouse/ClickHouse/issues/67792). [#68455](https://github.com/ClickHouse/ClickHouse/pull/68455) ([Peter Nguyen](https://github.com/petern48)). +* Support creating a table with a query: `CREATE TABLE ... CLONE AS ...`. It clones the source table's schema and then attaches all partitions to the newly created table. This feature is only supported with tables of the `MergeTree` family. Closes [#65015](https://github.com/ClickHouse/ClickHouse/issues/65015). [#69091](https://github.com/ClickHouse/ClickHouse/pull/69091) ([tuanpach](https://github.com/tuanpach)). +* In Gluten ClickHouse, Spark's timestamp type is mapped to ClickHouse's datetime64(6) type.
When casting timestamp '2012-01-01 00:11:22' as a string, Spark returns '2012-01-01 00:11:22', while Gluten ClickHouse returns '2012-01-01 00:11:22.000000'. [#69179](https://github.com/ClickHouse/ClickHouse/pull/69179) ([Wenzheng Liu](https://github.com/lwz9103)). +* Always use the new analyzer to calculate constant expressions when `enable_analyzer` is set to `true`. Support calculation of `executable()` table function arguments without using a `SELECT` query for the constant expression. [#69292](https://github.com/ClickHouse/ClickHouse/pull/69292) ([Dmitry Novik](https://github.com/novikd)). +* Add `enable_secure_identifiers` to disallow insecure identifiers. [#69411](https://github.com/ClickHouse/ClickHouse/pull/69411) ([tuanpach](https://github.com/tuanpach)). +* Add `show_create_query_identifier_quoting_rule` to define the identifier quoting behavior of the SHOW CREATE query result. Possible values: - `user_display`: when the identifier is a keyword. - `when_necessary`: when the identifier is one of `{"distinct", "all", "table"}`, or it can cause ambiguity: column names, dictionary attribute names. - `always`: always quote identifiers. [#69448](https://github.com/ClickHouse/ClickHouse/pull/69448) ([tuanpach](https://github.com/tuanpach)). +* Follow-up to https://github.com/ClickHouse/ClickHouse/pull/69346. Point 4 described there will now work as well. [#69563](https://github.com/ClickHouse/ClickHouse/pull/69563) ([Vitaly Baranov](https://github.com/vitlibar)). +* Implement generic SerDe between Avro Union and ClickHouse Variant type. Resolves [#69713](https://github.com/ClickHouse/ClickHouse/issues/69713). [#69712](https://github.com/ClickHouse/ClickHouse/pull/69712) ([Jiří Kozlovský](https://github.com/jirislav)). +* 1. CREATE TABLE AS will copy PRIMARY KEY, ORDER BY, and similar clauses. Currently it is supported only for the MergeTree family of table engines. 2. For example, SQL statements where the destination table does not provide an `ORDER BY` or `PRIMARY KEY` expression in the table definition used to trigger an exception; this PR fixes it by copying them from the source table. [#69739](https://github.com/ClickHouse/ClickHouse/pull/69739) ([sakulali](https://github.com/sakulali)). +* Added user-level settings `min_free_disk_bytes_to_throw_insert` and `min_free_disk_ratio_to_throw_insert` to prevent insertions on disks that are almost full. [#69755](https://github.com/ClickHouse/ClickHouse/pull/69755) ([Marco Vilas Boas](https://github.com/marco-vb)). +* If you run `clickhouse-client` or another CLI application and it starts up slowly due to an overloaded server, and you start typing your query, such as `SELECT`, the previous versions would display the remainder of the terminal echo contents before printing the greeting message, such as `SELECTClickHouse local version 24.10.1.1.` instead of `ClickHouse local version 24.10.1.1.`. Now it is fixed. This closes [#31696](https://github.com/ClickHouse/ClickHouse/issues/31696). [#69856](https://github.com/ClickHouse/ClickHouse/pull/69856) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a new column readonly_duration to the system.replicas table. It is needed to be able to distinguish actual read-only replicas from sentinel ones in alerts. [#69871](https://github.com/ClickHouse/ClickHouse/pull/69871) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). +* Change the join-to-sort settings type to unsigned int. [#69886](https://github.com/ClickHouse/ClickHouse/pull/69886) ([kevinyhzou](https://github.com/KevinyhZou)).
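+
+A sketch of the `CREATE TABLE ... AS` change described above, which copies `ORDER BY`/`PRIMARY KEY` from the source table (table names are hypothetical):
+
+```sql
+CREATE TABLE t_src (id UInt64, v String) ENGINE = MergeTree ORDER BY id;
+-- Previously this failed because no ORDER BY was given for MergeTree;
+-- now the ORDER BY / PRIMARY KEY of t_src is copied to t_dst.
+CREATE TABLE t_dst AS t_src ENGINE = MergeTree;
+```
+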
+* Support 64-bit XID in Keeper. It can be enabled with the `use_xid_64` config. [#69908](https://github.com/ClickHouse/ClickHouse/pull/69908) ([Antonio Andelic](https://github.com/antonio2368)). +* New function `getSettingOrDefault()` added to return the default value and avoid an exception if a custom setting is not found in the current profile. [#69917](https://github.com/ClickHouse/ClickHouse/pull/69917) ([Shankar](https://github.com/shiyer7474)). +* Allow an empty needle in the function replace, the same behavior as in PostgreSQL. [#69918](https://github.com/ClickHouse/ClickHouse/pull/69918) ([zhanglistar](https://github.com/zhanglistar)). +* Enhance OpenTelemetry span logging to include query settings. [#70011](https://github.com/ClickHouse/ClickHouse/pull/70011) ([sharathks118](https://github.com/sharathks118)). +* Allow an empty needle in the functions replaceRegexp*, like https://github.com/ClickHouse/ClickHouse/pull/69918. [#70053](https://github.com/ClickHouse/ClickHouse/pull/70053) ([zhanglistar](https://github.com/zhanglistar)). +* Add info to higher-order array functions if the lambda result type is unexpected. [#70093](https://github.com/ClickHouse/ClickHouse/pull/70093) ([ttanay](https://github.com/ttanay)). +* Keeper improvement: less blocking during cluster changes. [#70275](https://github.com/ClickHouse/ClickHouse/pull/70275) ([Antonio Andelic](https://github.com/antonio2368)). +* Embedded documentation for settings will be strictly more detailed and complete than the documentation on the website. This is the first step before making the website documentation always auto-generated from the source code. This has long-standing implications: - it will be guaranteed to have every setting; - there is no chance of having obsolete default values; - we can generate this documentation for each ClickHouse version; - the documentation can be displayed by the server itself even without Internet access. Generate the docs on the website from the source code. [#70289](https://github.com/ClickHouse/ClickHouse/pull/70289) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add `WITH IMPLICIT` and `FINAL` keywords to the `SHOW GRANTS` command. Fix a minor bug with implicit grants: [#70094](https://github.com/ClickHouse/ClickHouse/issues/70094). [#70293](https://github.com/ClickHouse/ClickHouse/pull/70293) ([pufit](https://github.com/pufit)). +* Don't disable nonblocking read from page cache for the entire server when reading from a blocking I/O. [#70299](https://github.com/ClickHouse/ClickHouse/pull/70299) ([Antonio Andelic](https://github.com/antonio2368)). +* Respect `compatibility` for MergeTree settings. The `compatibility` value is taken from the `default` profile on server startup, and default MergeTree settings are changed accordingly. Further changes of the `compatibility` setting do not affect MergeTree settings. [#70322](https://github.com/ClickHouse/ClickHouse/pull/70322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Clickhouse-client realtime metrics follow-up: restore the cursor when Ctrl-C cancels a query; immediately stop intercepting keystrokes when the query is canceled; display the metrics table if `--progress-table` is on, and toggling is disabled. [#70423](https://github.com/ClickHouse/ClickHouse/pull/70423) ([Julia Kartseva](https://github.com/jkartseva)). +* Command-line arguments for Bool settings are set to true when no value is provided for the argument (e.g. `clickhouse-client --optimize_aggregation_in_order --query "SELECT 1"`).
[#70459](https://github.com/ClickHouse/ClickHouse/pull/70459) ([davidtsuk](https://github.com/davidtsuk)). +* Avoid spamming the logs with large HTTP response bodies in case of errors during inter-server communication. [#70487](https://github.com/ClickHouse/ClickHouse/pull/70487) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Added a new setting `max_parts_to_move` to control the maximum number of parts that can be moved at once. [#70520](https://github.com/ClickHouse/ClickHouse/pull/70520) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Limit the frequency of certain log messages. [#70601](https://github.com/ClickHouse/ClickHouse/pull/70601) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Don't do validation when synchronizing `user_directories` from Keeper. [#70644](https://github.com/ClickHouse/ClickHouse/pull/70644) ([Raúl Marín](https://github.com/Algunenano)). +* Introduced a special (experimental) mode of a merge selector for MergeTree tables which makes it more aggressive for the partitions that are close to the limit by the number of parts. It is controlled by the `merge_selector_use_blurry_base` MergeTree-level setting. [#70645](https://github.com/ClickHouse/ClickHouse/pull/70645) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* `CHECK TABLE` with `PART` qualifier was incorrectly formatted in the client. [#70660](https://github.com/ClickHouse/ClickHouse/pull/70660) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Support writing column index and offset index using the Parquet native writer. [#70669](https://github.com/ClickHouse/ClickHouse/pull/70669) ([LiuNeng](https://github.com/liuneng1994)). +* Support parsing `DateTime64` with microseconds and timezone in Joda syntax. [#70737](https://github.com/ClickHouse/ClickHouse/pull/70737) ([kevinyhzou](https://github.com/KevinyhZou)). +* Changed the approach to figure out whether a cloud storage supports [batch delete](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html) or not. [#70786](https://github.com/ClickHouse/ClickHouse/pull/70786) ([Vitaly Baranov](https://github.com/vitlibar)). +* Support for Parquet page V2 in the native reader. [#70807](https://github.com/ClickHouse/ClickHouse/pull/70807) ([Arthur Passos](https://github.com/arthurpassos)). +* Add an HTML page for visualizing merges. [#70821](https://github.com/ClickHouse/ClickHouse/pull/70821) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#71234](https://github.com/ClickHouse/ClickHouse/issues/71234): Do not call the object storage API when listing directories, as this may be cost-inefficient. Instead, store the list of filenames in memory. The trade-offs are increased initial load time and memory required to store filenames. [#70823](https://github.com/ClickHouse/ClickHouse/pull/70823) ([Julia Kartseva](https://github.com/jkartseva)). +* Added a check whether a table has both `storage_policy` and `disk` set after an ALTER query, and a check whether a new storage policy is compatible with the old one when the `disk` setting is used. [#70839](https://github.com/ClickHouse/ClickHouse/pull/70839) ([Kirill](https://github.com/kirillgarbar)). +* Add system.s3_queue_settings and system.azure_queue_settings. [#70841](https://github.com/ClickHouse/ClickHouse/pull/70841) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Functions `base58Encode` and `base58Decode` now accept arguments of type `FixedString`. Example: `SELECT base58Encode(toFixedString('plaintext', 9));`.
[#70846](https://github.com/ClickHouse/ClickHouse/pull/70846) ([Faizan Patel](https://github.com/faizan2786)). +* Add the `partition` column to every entry type of the part log. Previously, it was set only for some entries. This closes [#70819](https://github.com/ClickHouse/ClickHouse/issues/70819). [#70848](https://github.com/ClickHouse/ClickHouse/pull/70848) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add merge start and mutate start events into `system.part_log`, which helps with merge analysis and visualization. [#70850](https://github.com/ClickHouse/ClickHouse/pull/70850) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Do not call the LIST object storage API when determining if a file or directory exists on the plain rewritable disk, as it can be cost-inefficient. [#70852](https://github.com/ClickHouse/ClickHouse/pull/70852) ([Julia Kartseva](https://github.com/jkartseva)). +* Add a profile event about the number of merged source parts. It allows the monitoring of the fanout of the merge tree in production. [#70908](https://github.com/ClickHouse/ClickHouse/pull/70908) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Reduce the number of object storage HEAD API requests in the plain_rewritable disk. [#70915](https://github.com/ClickHouse/ClickHouse/pull/70915) ([Julia Kartseva](https://github.com/jkartseva)). +* Background downloads to the filesystem cache were enabled again. [#70929](https://github.com/ClickHouse/ClickHouse/pull/70929) ([Nikita Taranov](https://github.com/nickitat)). +* Add a new merge selector algorithm, named `Trivial`, for professional usage only. It is worse than the `Simple` merge selector. [#70969](https://github.com/ClickHouse/ClickHouse/pull/70969) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Fix toHour-like conversion functions' monotonicity when an optional time zone argument is passed. [#60264](https://github.com/ClickHouse/ClickHouse/pull/60264) ([Amos Bird](https://github.com/amosbird)). +* Relax `supportsPrewhere` check for StorageMerge. This fixes [#61064](https://github.com/ClickHouse/ClickHouse/issues/61064). It was hardened unnecessarily in [#60082](https://github.com/ClickHouse/ClickHouse/issues/60082). [#61091](https://github.com/ClickHouse/ClickHouse/pull/61091) ([Amos Bird](https://github.com/amosbird)). +* Fix `use_concurrency_control` setting handling for proper `concurrent_threads_soft_limit_num` limit enforcement. This enables concurrency control by default because previously it was broken. [#61473](https://github.com/ClickHouse/ClickHouse/pull/61473) ([Sergei Trifonov](https://github.com/serxa)). +* Fix incorrect JOIN ON section optimization in case of an `IS NULL` check under any other function (like `NOT`) that may lead to wrong results. Closes [#67915](https://github.com/ClickHouse/ClickHouse/issues/67915). [#68049](https://github.com/ClickHouse/ClickHouse/pull/68049) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Prevent `ALTER` queries that would make the `CREATE` query of tables invalid. [#68574](https://github.com/ClickHouse/ClickHouse/pull/68574) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Fix inconsistent AST formatting for `negate` (`-`) and `NOT` functions with tuples and arrays. [#68600](https://github.com/ClickHouse/ClickHouse/pull/68600) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Fix insertion of an incomplete type into Dynamic during deserialization.
It could lead to `Parameter out of bound` errors. [#69291](https://github.com/ClickHouse/ClickHouse/pull/69291) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix infinite loop after `restore replica` in the replicated merge tree with zero copy. [#69293](https://github.com/ClickHouse/ClickHouse/pull/69293) ([MikhailBurdukov](https://github.com/MikhailBurdukov)). +* Return the default value of `processing_threads_num` back to the number of CPU cores in storage `S3Queue`. [#69384](https://github.com/ClickHouse/ClickHouse/pull/69384) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Bypass the try/catch flow when de/serializing nested repeated protobuf to nested columns (fixes [#41971](https://github.com/ClickHouse/ClickHouse/issues/41971)). [#69556](https://github.com/ClickHouse/ClickHouse/pull/69556) ([Eliot Hautefeuille](https://github.com/hileef)). +* Fix crash during insertion into a FixedString column in the PostgreSQL engine. [#69584](https://github.com/ClickHouse/ClickHouse/pull/69584) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix crash when executing `create view t as (with recursive 42 as ttt select ttt);`. [#69676](https://github.com/ClickHouse/ClickHouse/pull/69676) ([Han Fei](https://github.com/hanfei1991)). +* Added `strict_once` mode to aggregate function `windowFunnel` to avoid counting one event several times in case it matches multiple conditions, closes [#21835](https://github.com/ClickHouse/ClickHouse/issues/21835). [#69738](https://github.com/ClickHouse/ClickHouse/pull/69738) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Fixed `maxMapState` throwing 'Bad get' if the value type is DateTime64. [#69787](https://github.com/ClickHouse/ClickHouse/pull/69787) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix `getSubcolumn` with `LowCardinality` columns by overriding `useDefaultImplementationForLowCardinalityColumns` to return `true`. [#69831](https://github.com/ClickHouse/ClickHouse/pull/69831) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). +* Fix permanently blocked distributed sends if a DROP of the distributed table fails. [#69843](https://github.com/ClickHouse/ClickHouse/pull/69843) ([Azat Khuzhin](https://github.com/azat)). +* Fix non-cancellable queries containing WITH FILL with NaN keys. This closes [#69261](https://github.com/ClickHouse/ClickHouse/issues/69261). [#69845](https://github.com/ClickHouse/ClickHouse/pull/69845) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix analyzer default with the old compatibility value. [#69895](https://github.com/ClickHouse/ClickHouse/pull/69895) ([Raúl Marín](https://github.com/Algunenano)). +* Don't check dependencies during CREATE OR REPLACE VIEW during DROP of the old table. Previously, the CREATE OR REPLACE query failed when there were dependent tables of the recreated view. [#69907](https://github.com/ClickHouse/ClickHouse/pull/69907) ([Pavel Kruglov](https://github.com/Avogar)). +* Implement missing decimal cases for `zeroField`. Fixes [#69730](https://github.com/ClickHouse/ClickHouse/issues/69730). [#69978](https://github.com/ClickHouse/ClickHouse/pull/69978) ([Arthur Passos](https://github.com/arthurpassos)). +* Now SQL security will work with parameterized views correctly. [#69984](https://github.com/ClickHouse/ClickHouse/pull/69984) ([pufit](https://github.com/pufit)). +* Closes [#69752](https://github.com/ClickHouse/ClickHouse/issues/69752). [#69985](https://github.com/ClickHouse/ClickHouse/pull/69985) ([pufit](https://github.com/pufit)).
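+
+A usage sketch for the `windowFunnel` `'strict_once'` mode added above (the `events` table and its columns are hypothetical):
+
+```sql
+-- Funnel depth within a 1-hour window; 'strict_once' keeps a single event
+-- from being counted for more than one condition.
+SELECT windowFunnel(3600, 'strict_once')(ts, event = 'view', event = 'cart', event = 'purchase')
+FROM events;
+```
+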
+* Fixed a bug where the timezone could change the result of a query with `Date` or `Date32` arguments. [#70036](https://github.com/ClickHouse/ClickHouse/pull/70036) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fixes `Block structure mismatch` for queries with nested views and a `WHERE` condition. Fixes [#66209](https://github.com/ClickHouse/ClickHouse/issues/66209). [#70054](https://github.com/ClickHouse/ClickHouse/pull/70054) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Avoid reusing columns among different named tuples when evaluating `tuple` functions. This fixes [#70022](https://github.com/ClickHouse/ClickHouse/issues/70022). [#70103](https://github.com/ClickHouse/ClickHouse/pull/70103) ([Amos Bird](https://github.com/amosbird)). +* Fix wrong LOGICAL_ERROR when replacing literals in ranges. [#70122](https://github.com/ClickHouse/ClickHouse/pull/70122) ([Pablo Marcos](https://github.com/pamarcos)). +* Check for the Nullable(Nothing) type during ALTER TABLE MODIFY COLUMN/QUERY to prevent tables with such a data type. [#70123](https://github.com/ClickHouse/ClickHouse/pull/70123) ([Pavel Kruglov](https://github.com/Avogar)). +* Proper error message for the illegal query `JOIN ... ON *`, closes [#68650](https://github.com/ClickHouse/ClickHouse/issues/68650). [#70124](https://github.com/ClickHouse/ClickHouse/pull/70124) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Fix wrong result with a skipping index. [#70127](https://github.com/ClickHouse/ClickHouse/pull/70127) ([Raúl Marín](https://github.com/Algunenano)). +* Fix data race in the ColumnObject/ColumnTuple decompress method that could lead to heap use-after-free. [#70137](https://github.com/ClickHouse/ClickHouse/pull/70137) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix possible hang in ALTER COLUMN with Dynamic type. [#70144](https://github.com/ClickHouse/ClickHouse/pull/70144) ([Pavel Kruglov](https://github.com/Avogar)). +* Now ClickHouse will consider more errors as retriable and will not mark data parts as broken in case of such errors. [#70145](https://github.com/ClickHouse/ClickHouse/pull/70145) ([alesapin](https://github.com/alesapin)). +* Use the correct `max_types` parameter during Dynamic type creation for a JSON subcolumn. [#70147](https://github.com/ClickHouse/ClickHouse/pull/70147) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix the password being displayed in `system.query_log` for users with the bcrypt password authentication method. [#70148](https://github.com/ClickHouse/ClickHouse/pull/70148) ([Nikolay Degterinsky](https://github.com/evillique)). +* Fix event counter for the native interface (InterfaceNativeSendBytes). [#70153](https://github.com/ClickHouse/ClickHouse/pull/70153) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)). +* Fix possible crash in JSON column. [#70172](https://github.com/ClickHouse/ClickHouse/pull/70172) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix multiple issues with arrayMin and arrayMax. [#70207](https://github.com/ClickHouse/ClickHouse/pull/70207) ([Raúl Marín](https://github.com/Algunenano)). +* Respect the setting allow_simdjson in the JSON type parser. [#70218](https://github.com/ClickHouse/ClickHouse/pull/70218) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix server segfault on creating a materialized view with two selects and an `INTERSECT`, e.g. `CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1);`. [#70264](https://github.com/ClickHouse/ClickHouse/pull/70264) ([Konstantin Bogdanov](https://github.com/thevar1able)).
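+
+An illustration of the Nullable(Nothing) check mentioned above (hypothetical table; the statement is now rejected instead of producing an unusable column):
+
+```sql
+-- Now throws an error instead of silently creating a column of type Nullable(Nothing)
+ALTER TABLE t MODIFY COLUMN c Nullable(Nothing);
+```
+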
+* Don't modify global settings with startup scripts. Previously, changing a setting in a startup script would change it globally. [#70310](https://github.com/ClickHouse/ClickHouse/pull/70310) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix ALTER of Dynamic type with a reduced max_types parameter that could lead to a server crash. [#70328](https://github.com/ClickHouse/ClickHouse/pull/70328) ([Pavel Kruglov](https://github.com/Avogar)). +* Fix crash when using WITH FILL incorrectly. [#70338](https://github.com/ClickHouse/ClickHouse/pull/70338) ([Raúl Marín](https://github.com/Algunenano)). +* Fix possible use-after-free in `SYSTEM DROP FORMAT SCHEMA CACHE FOR Protobuf`. [#70358](https://github.com/ClickHouse/ClickHouse/pull/70358) ([Azat Khuzhin](https://github.com/azat)). +* Fix crash during GROUP BY on a JSON sub-object subcolumn. [#70374](https://github.com/ClickHouse/ClickHouse/pull/70374) ([Pavel Kruglov](https://github.com/Avogar)). +* Don't prefetch parts for vertical merges if the part has no rows. [#70452](https://github.com/ClickHouse/ClickHouse/pull/70452) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix crash in WHERE with lambda functions. [#70464](https://github.com/ClickHouse/ClickHouse/pull/70464) ([Raúl Marín](https://github.com/Algunenano)). +* Fix table creation with `CREATE ... AS table_function()` with database `Replicated` and an unavailable table function source on a secondary replica. [#70511](https://github.com/ClickHouse/ClickHouse/pull/70511) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Ignore frozen_metadata.txt while traversing the shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Fix creation of stateful window functions on misaligned memory. [#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)). +* Fixed rare crashes in `SELECT`s and merges after adding a column of `Array` type with a non-empty default expression. [#70695](https://github.com/ClickHouse/ClickHouse/pull/70695) ([Anton Popov](https://github.com/CurtizJ)). +* Make INSERT into the s3 table function respect query settings. [#70696](https://github.com/ClickHouse/ClickHouse/pull/70696) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Fix infinite recursion when inferring a proto schema with skip unsupported fields enabled. [#70697](https://github.com/ClickHouse/ClickHouse/pull/70697) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71122](https://github.com/ClickHouse/ClickHouse/issues/71122): `GroupArraySortedData` uses a PODArray with non-POD elements, manually calling constructors and destructors for the elements as needed. But it wasn't careful enough: in two places it forgot to call the destructor, and in one place it left elements uninitialized if an exception is thrown when deserializing previous elements.
Then `GroupArraySortedData`'s destructor called destructors on uninitialized elements and crashed with a segmentation fault. The crash occurred in `DB::Field`'s destructor, called from `IAggregateFunctionDataHelper::destroy` during `SerializationAggregateFunction::deserializeBinaryBulk` while reading a part for a merge (the original report contains the full stack trace and the changed settings of the affected server). [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)). +* Disable enable_named_columns_in_function_tuple by default. [#70833](https://github.com/ClickHouse/ClickHouse/pull/70833) ([Raúl Marín](https://github.com/Algunenano)). +* Fix the S3Queue table engine setting processing_threads_num not being effective in case it was deduced from the number of CPU cores on the server. [#70837](https://github.com/ClickHouse/ClickHouse/pull/70837) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Normalize named tuple arguments in aggregation states. This fixes [#69732](https://github.com/ClickHouse/ClickHouse/issues/69732). [#70853](https://github.com/ClickHouse/ClickHouse/pull/70853) ([Amos Bird](https://github.com/amosbird)). +* Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#71214](https://github.com/ClickHouse/ClickHouse/issues/71214): Fix logical error in `StorageS3Queue` "Cannot create a persistent node in /processed since it already exists". [#70984](https://github.com/ClickHouse/ClickHouse/pull/70984) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#71243](https://github.com/ClickHouse/ClickHouse/issues/71243): Fixed named sessions not being closed and hanging on forever under certain circumstances. [#70998](https://github.com/ClickHouse/ClickHouse/pull/70998) ([Márcio Martins](https://github.com/marcio-absmartly)).
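+
+A sketch of what the `enable_named_columns_in_function_tuple` setting (disabled by default above) controls; the query is illustrative:
+
+```sql
+-- With the new default (0), tuple() produces a plain Tuple(UInt8, UInt8);
+-- with the setting enabled it would produce the named Tuple(a UInt8, b UInt8).
+SET enable_named_columns_in_function_tuple = 0;
+SELECT tuple(1 AS a, 2 AS b) AS t, toTypeName(t);
+```
+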
+* Backported in [#71157](https://github.com/ClickHouse/ClickHouse/issues/71157): Fix a bug where the `_row_exists` column was not considered in the rebuild option of projection lightweight delete. [#71089](https://github.com/ClickHouse/ClickHouse/pull/71089) ([Shichao Jin](https://github.com/jsc0218)). +* Backported in [#71265](https://github.com/ClickHouse/ClickHouse/issues/71265): Fix wrong value in system.query_metric_log due to an unexpected race condition. [#71124](https://github.com/ClickHouse/ClickHouse/pull/71124) ([Pablo Marcos](https://github.com/pamarcos)). +* Backported in [#71331](https://github.com/ClickHouse/ClickHouse/issues/71331): Fix async inserts with empty blocks via the native protocol. [#71312](https://github.com/ClickHouse/ClickHouse/pull/71312) ([Anton Popov](https://github.com/CurtizJ)). + +#### Build/Testing/Packaging Improvement +* Docker in the integration tests runner is updated to the latest version. It was previously pinned until patch release 24.0.3 was out. https://github.com/moby/moby/issues/45770#issuecomment-1618255130. - The HDFS image was deprecated and not running with the current docker version; switched to a newer version of a derivative image based on ubuntu. - HDFS tests were hardened to allow them to run with python-repeat. [#66867](https://github.com/ClickHouse/ClickHouse/pull/66867) ([Ilya Yatsishin](https://github.com/qoega)). +* Alpine docker images now use ubuntu 22.04 as the glibc donor, which results in an upgrade of the glibc version delivered with alpine images from 2.31 to 2.35. [#69033](https://github.com/ClickHouse/ClickHouse/pull/69033) ([filimonov](https://github.com/filimonov)). +* Make dbms independent from clickhouse_functions. [#69914](https://github.com/ClickHouse/ClickHouse/pull/69914) ([Raúl Marín](https://github.com/Algunenano)). +* Fix FreeBSD compilation of the MariaDB connector. [#70007](https://github.com/ClickHouse/ClickHouse/pull/70007) ([Raúl Marín](https://github.com/Algunenano)). +* Building on Apple Mac OS X Darwin does not produce strange warnings anymore. [#70411](https://github.com/ClickHouse/ClickHouse/pull/70411) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix building with the ARCH_NATIVE CMake flag. [#70585](https://github.com/ClickHouse/ClickHouse/pull/70585) ([Daniil Gentili](https://github.com/danog)). +* The universal installer will download the Musl build on Alpine Linux. Some Docker containers are using Alpine Linux, but it was not possible to install ClickHouse there with `curl https://clickhouse.com/ | sh`. [#70767](https://github.com/ClickHouse/ClickHouse/pull/70767) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NO CL CATEGORY + +* Backported in [#71259](https://github.com/ClickHouse/ClickHouse/issues/71259):. [#71220](https://github.com/ClickHouse/ClickHouse/pull/71220) ([Raúl Marín](https://github.com/Algunenano)). + +#### NO CL ENTRY + +* NO CL ENTRY: 'Revert "JSONCompactWithProgress query output format"'. [#69989](https://github.com/ClickHouse/ClickHouse/pull/69989) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* NO CL ENTRY: 'Revert "Support CREATE OR REPLACE VIEW atomically"'. [#70535](https://github.com/ClickHouse/ClickHouse/pull/70535) ([Raúl Marín](https://github.com/Algunenano)). +* NO CL ENTRY: 'Revert "Revert "Support CREATE OR REPLACE VIEW atomically""'. [#70536](https://github.com/ClickHouse/ClickHouse/pull/70536) ([Raúl Marín](https://github.com/Algunenano)). +* NO CL ENTRY: 'Revert "Add projections size to system.projections"'.
[#70858](https://github.com/ClickHouse/ClickHouse/pull/70858) ([Alexey Milovidov](https://github.com/alexey-milovidov)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Allow writing the argument of `has`, `hasAny`, or `hasAll` as string values if the array element type is `Enum`. [#56555](https://github.com/ClickHouse/ClickHouse/pull/56555) ([Duc Canh Le](https://github.com/canhld94)). +* Rename FileSegmentKind::Ephemeral and other changes. [#66600](https://github.com/ClickHouse/ClickHouse/pull/66600) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Closes [#67345](https://github.com/ClickHouse/ClickHouse/issues/67345). [#67346](https://github.com/ClickHouse/ClickHouse/pull/67346) ([KrJin](https://github.com/jincong8973)). +* Because it is too complicated to support. [#68410](https://github.com/ClickHouse/ClickHouse/pull/68410) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Fix 01600_parts_states_metrics_long flakiness. [#68521](https://github.com/ClickHouse/ClickHouse/pull/68521) ([Azat Khuzhin](https://github.com/azat)). +* Reduce client start time in debug/sanitizer mode. [#68980](https://github.com/ClickHouse/ClickHouse/pull/68980) ([Raúl Marín](https://github.com/Algunenano)). +* Closes [#69038](https://github.com/ClickHouse/ClickHouse/issues/69038). [#69040](https://github.com/ClickHouse/ClickHouse/pull/69040) ([Nikolay Degterinsky](https://github.com/evillique)). +* Better exception for unsupported full_text index with non-full parts. [#69067](https://github.com/ClickHouse/ClickHouse/pull/69067) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Catch additional ZooKeeper connection errors while creating a table and make sure to clean up dirs if necessary for retries. [#69093](https://github.com/ClickHouse/ClickHouse/pull/69093) ([Sumit](https://github.com/sum12)). +* Update version_date.tsv and changelog after v24.7.5.37-stable. [#69185](https://github.com/ClickHouse/ClickHouse/pull/69185) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* DOCS: Replace live view with refreshable since the former is deprecated. [#69392](https://github.com/ClickHouse/ClickHouse/pull/69392) ([Damian Kula](https://github.com/heavelock)). +* Update ORC to the current HEAD. [#69473](https://github.com/ClickHouse/ClickHouse/pull/69473) ([Nikita Taranov](https://github.com/nickitat)). +* Make a test ready for flaky check. [#69586](https://github.com/ClickHouse/ClickHouse/pull/69586) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Support the ANTLR parser parsing SQL with some keywords as an alias, making the behaviour the same as clickhouse-server - remove redundant `for` in the `keyword` field. [#69614](https://github.com/ClickHouse/ClickHouse/pull/69614) ([Z.H.](https://github.com/onlyacat)). +* Allow default implementations for NULL in function mapFromArrays for Spark compatibility in Apache Gluten. The current change doesn't have any side effects on ClickHouse in theory. [#69715](https://github.com/ClickHouse/ClickHouse/pull/69715) ([李扬](https://github.com/taiyang-li)). +* Fix exception message in AzureBlobStorage. [#69728](https://github.com/ClickHouse/ClickHouse/pull/69728) ([Pavel Kruglov](https://github.com/Avogar)). +* Add a test parsing an s3 URL with a bucket name including a dot. [#69743](https://github.com/ClickHouse/ClickHouse/pull/69743) ([Kaushik Iska](https://github.com/iskakaushik)). +* Make `clang-tidy` happy. [#69765](https://github.com/ClickHouse/ClickHouse/pull/69765) ([Konstantin Bogdanov](https://github.com/thevar1able)).
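+
+A usage sketch for the `has`/`hasAny`/`hasAll` change with `Enum` array elements listed at the top of this section (the table definition is hypothetical):
+
+```sql
+-- String literals can now be used directly against an Array(Enum...) column
+CREATE TABLE t_enum (arr Array(Enum8('red' = 1, 'green' = 2))) ENGINE = Memory;
+INSERT INTO t_enum VALUES (['red', 'green']);
+SELECT has(arr, 'red'), hasAny(arr, ['green']) FROM t_enum;
+```
+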
+* Prepare to enable `clang-tidy` `readability-else-after-return`. [#69768](https://github.com/ClickHouse/ClickHouse/pull/69768) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* S3Queue: support having deprecated settings to not fail server startup. [#69769](https://github.com/ClickHouse/ClickHouse/pull/69769) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Use only the adaptive heuristic to choose task sizes for remote reading. [#69778](https://github.com/ClickHouse/ClickHouse/pull/69778) ([Nikita Taranov](https://github.com/nickitat)). +* Remove unused buggy code. [#69780](https://github.com/ClickHouse/ClickHouse/pull/69780) ([Raúl Marín](https://github.com/Algunenano)). +* Fix bugfix check. [#69789](https://github.com/ClickHouse/ClickHouse/pull/69789) ([Antonio Andelic](https://github.com/antonio2368)). +* Followup for [#63279](https://github.com/ClickHouse/ClickHouse/issues/63279). [#69790](https://github.com/ClickHouse/ClickHouse/pull/69790) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Update version after release. [#69816](https://github.com/ClickHouse/ClickHouse/pull/69816) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Update ext-dict-functions.md. [#69819](https://github.com/ClickHouse/ClickHouse/pull/69819) ([kurikuQwQ](https://github.com/kurikuQwQ)). +* Allow Cyrillic characters in generated contributor names. [#69820](https://github.com/ClickHouse/ClickHouse/pull/69820) ([Raúl Marín](https://github.com/Algunenano)). +* CI: praktika integration 1. [#69822](https://github.com/ClickHouse/ClickHouse/pull/69822) ([Max Kainov](https://github.com/maxknv)). +* Fix `test_delayed_replica_failover`. [#69826](https://github.com/ClickHouse/ClickHouse/pull/69826) ([Antonio Andelic](https://github.com/antonio2368)). +* Minor change, fewer conflicts. [#69830](https://github.com/ClickHouse/ClickHouse/pull/69830) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Improve error message in DDLWorker.cpp. [#69835](https://github.com/ClickHouse/ClickHouse/pull/69835) ([Denny Crane](https://github.com/den-crane)). +* Fix typo in description: mutation_sync -> mutations_sync. [#69838](https://github.com/ClickHouse/ClickHouse/pull/69838) ([Alexander Gololobov](https://github.com/davenger)). +* Fix changelog. [#69841](https://github.com/ClickHouse/ClickHouse/pull/69841) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* This closes [#49940](https://github.com/ClickHouse/ClickHouse/issues/49940). [#69842](https://github.com/ClickHouse/ClickHouse/pull/69842) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* This closes [#51036](https://github.com/ClickHouse/ClickHouse/issues/51036). [#69844](https://github.com/ClickHouse/ClickHouse/pull/69844) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update README.md - Update meetups. [#69849](https://github.com/ClickHouse/ClickHouse/pull/69849) ([Tanya Bragin](https://github.com/tbragin)). +* Revert [#69790](https://github.com/ClickHouse/ClickHouse/issues/69790) and [#63279](https://github.com/ClickHouse/ClickHouse/issues/63279). [#69850](https://github.com/ClickHouse/ClickHouse/pull/69850) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* See [#63279](https://github.com/ClickHouse/ClickHouse/issues/63279). [#69851](https://github.com/ClickHouse/ClickHouse/pull/69851) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a test for [#50928](https://github.com/ClickHouse/ClickHouse/issues/50928).
[#69852](https://github.com/ClickHouse/ClickHouse/pull/69852) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a test for [#55981](https://github.com/ClickHouse/ClickHouse/issues/55981). [#69853](https://github.com/ClickHouse/ClickHouse/pull/69853) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a test for [#56823](https://github.com/ClickHouse/ClickHouse/issues/56823). [#69854](https://github.com/ClickHouse/ClickHouse/pull/69854) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* This closes [#62350](https://github.com/ClickHouse/ClickHouse/issues/62350). [#69855](https://github.com/ClickHouse/ClickHouse/pull/69855) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Refactor functions and variables in statistics code. [#69860](https://github.com/ClickHouse/ClickHouse/pull/69860) ([Robert Schulze](https://github.com/rschu1ze)). +* Resubmit [#63279](https://github.com/ClickHouse/ClickHouse/issues/63279). [#69861](https://github.com/ClickHouse/ClickHouse/pull/69861) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Improve stateless test runner. [#69864](https://github.com/ClickHouse/ClickHouse/pull/69864) ([Alexey Katsman](https://github.com/alexkats)). +* Adjust fast test time limit a bit. [#69874](https://github.com/ClickHouse/ClickHouse/pull/69874) ([Raúl Marín](https://github.com/Algunenano)). +* Add initial 24.9 CHANGELOG. [#69876](https://github.com/ClickHouse/ClickHouse/pull/69876) ([Raúl Marín](https://github.com/Algunenano)). +* Fix test `01278_random_string_utf8`. [#69878](https://github.com/ClickHouse/ClickHouse/pull/69878) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Fix minor fuzzer issue with experimental statistics. [#69881](https://github.com/ClickHouse/ClickHouse/pull/69881) ([Robert Schulze](https://github.com/rschu1ze)). +* Fix linking after settings refactoring. [#69882](https://github.com/ClickHouse/ClickHouse/pull/69882) ([Robert Schulze](https://github.com/rschu1ze)). +* Add Proj Obsolete Setting. [#69883](https://github.com/ClickHouse/ClickHouse/pull/69883) ([Shichao Jin](https://github.com/jsc0218)). +* Improve remote queries startup time. [#69884](https://github.com/ClickHouse/ClickHouse/pull/69884) ([Igor Nikonov](https://github.com/devcrafter)). +* Revert "Merge pull request [#69032](https://github.com/ClickHouse/ClickHouse/issues/69032) from alexon1234/include_real_time_execution_in_http_header". [#69885](https://github.com/ClickHouse/ClickHouse/pull/69885) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Dedicated commits from https://github.com/ClickHouse/ClickHouse/pull/61473. [#69896](https://github.com/ClickHouse/ClickHouse/pull/69896) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Added aliases `time_bucket` (from TimescaleDB) and `date_bin` (from PostgreSQL) for `toStartOfInterval`. [#69900](https://github.com/ClickHouse/ClickHouse/pull/69900) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* RIPE is an acronym and thus should be capitalized. RIPE stands for **R**ACE **I**ntegrity **P**rimitives **E**valuation and RACE stands for **R**esearch and development in **A**dvanced **C**ommunications technologies in **E**urope. [#69901](https://github.com/ClickHouse/ClickHouse/pull/69901) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Replace error codes with error names in stateless tests. [#69906](https://github.com/ClickHouse/ClickHouse/pull/69906) ([Dmitry Novik](https://github.com/novikd)). +* Move setting to 24.10.
[#69913](https://github.com/ClickHouse/ClickHouse/pull/69913) ([Raúl Marín](https://github.com/Algunenano)). +* Minor: Reduce diff between public and private repo. [#69928](https://github.com/ClickHouse/ClickHouse/pull/69928) ([Robert Schulze](https://github.com/rschu1ze)). +* Followup for [#69861](https://github.com/ClickHouse/ClickHouse/issues/69861). [#69930](https://github.com/ClickHouse/ClickHouse/pull/69930) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Fix test_dictionaries_all_layouts_separate_sources. [#69962](https://github.com/ClickHouse/ClickHouse/pull/69962) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Fix test_keeper_mntr_data_size. [#69965](https://github.com/ClickHouse/ClickHouse/pull/69965) ([Antonio Andelic](https://github.com/antonio2368)). +* This closes [#49823](https://github.com/ClickHouse/ClickHouse/issues/49823). [#69981](https://github.com/ClickHouse/ClickHouse/pull/69981) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add changelog for 24.9. [#69982](https://github.com/ClickHouse/ClickHouse/pull/69982) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add a test for [#45303](https://github.com/ClickHouse/ClickHouse/issues/45303). [#69987](https://github.com/ClickHouse/ClickHouse/pull/69987) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update CHANGELOG.md. [#69988](https://github.com/ClickHouse/ClickHouse/pull/69988) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update README.md. [#69991](https://github.com/ClickHouse/ClickHouse/pull/69991) ([Tyler Hannan](https://github.com/tylerhannan)). +* Disable `03215_parallel_replicas_crash_after_refactoring.sql` for Azure. [#69992](https://github.com/ClickHouse/ClickHouse/pull/69992) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Update CHANGELOG.md. [#69993](https://github.com/ClickHouse/ClickHouse/pull/69993) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update CHANGELOG.md. [#70004](https://github.com/ClickHouse/ClickHouse/pull/70004) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Revert "Add RIPEMD160 function". [#70005](https://github.com/ClickHouse/ClickHouse/pull/70005) ([Robert Schulze](https://github.com/rschu1ze)). +* Update CHANGELOG.md. [#70009](https://github.com/ClickHouse/ClickHouse/pull/70009) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update CHANGELOG.md. [#70010](https://github.com/ClickHouse/ClickHouse/pull/70010) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Make pylint stricter. [#70013](https://github.com/ClickHouse/ClickHouse/pull/70013) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Added a setting `restore_replace_external_dictionary_source_to_null` which enables replacing the dictionary source with Null on restore for external dictionaries (useful for testing). [#70032](https://github.com/ClickHouse/ClickHouse/pull/70032) ([Alexander Tokmakov](https://github.com/tavplubix)). +* `isort` is a simple import sorter for Python to comply with [pep-8](https://peps.python.org/pep-0008/#imports) requirements. It will help decrease conflicts during sync and beautify the code. The import block is divided into sub-blocks: `standard library` -> `third-party libraries` -> `local imports` -> `.local imports`. Each sub-block is ordered alphabetically with sub-sub-blocks `import X` -> `from X import Y`. [#70038](https://github.com/ClickHouse/ClickHouse/pull/70038) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Update version_date.tsv and changelog after v24.9.1.3278-stable. [#70049](https://github.com/ClickHouse/ClickHouse/pull/70049) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Despite the fact that we set the org-level workflow parameter `PYTHONUNBUFFERED`, it's not inherited in workflows. [#70050](https://github.com/ClickHouse/ClickHouse/pull/70050) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix ubsan issue in function sqid. [#70061](https://github.com/ClickHouse/ClickHouse/pull/70061) ([Robert Schulze](https://github.com/rschu1ze)). +* Delete a setting change. [#70071](https://github.com/ClickHouse/ClickHouse/pull/70071) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Fix `test_distributed_ddl`. [#70075](https://github.com/ClickHouse/ClickHouse/pull/70075) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Remove unused placeholder from exception message string. [#70086](https://github.com/ClickHouse/ClickHouse/pull/70086) ([Alsu Giliazova](https://github.com/alsugiliazova)). +* Better exception message when some of the permissions are missing. [#70088](https://github.com/ClickHouse/ClickHouse/pull/70088) ([pufit](https://github.com/pufit)). +* Make vector similarity indexes work with adaptive granularity. [#70101](https://github.com/ClickHouse/ClickHouse/pull/70101) ([Robert Schulze](https://github.com/rschu1ze)). +* Add missing columns `total_rows`, `data_compressed_bytes`, and `data_uncompressed_bytes` to `system.projections`. Part of https://github.com/ClickHouse/ClickHouse/pull/68901. [#70106](https://github.com/ClickHouse/ClickHouse/pull/70106) ([Jordi Villar](https://github.com/jrdi)). +* Make `00938_fix_rwlock_segfault_long` non-flaky. [#70109](https://github.com/ClickHouse/ClickHouse/pull/70109) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Remove TODO. [#70110](https://github.com/ClickHouse/ClickHouse/pull/70110) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Change the default threshold to enable hyper-threading. [#70111](https://github.com/ClickHouse/ClickHouse/pull/70111) ([Jiebin Sun](https://github.com/jiebinn)). +* Fixed [#69092](https://github.com/ClickHouse/ClickHouse/issues/69092): if `materialized_postgresql_tables_list=table1(id, code),table(id,name)` (`table1` has a name that is a substring of `table`), the `getTableAllowedColumns` method returned `[id, code]` for `table` before this fix. [#70114](https://github.com/ClickHouse/ClickHouse/pull/70114) ([Kruglov Kirill](https://github.com/1on)). +* Reduce log level. [#70117](https://github.com/ClickHouse/ClickHouse/pull/70117) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Rename `getNumberOfPhysicalCPUCores` and fix its description. [#70130](https://github.com/ClickHouse/ClickHouse/pull/70130) ([Nikita Taranov](https://github.com/nickitat)). +* Adding 24.10. [#70132](https://github.com/ClickHouse/ClickHouse/pull/70132) ([Tyler Hannan](https://github.com/tylerhannan)). +* (Re?)-enable libcxx asserts for debug builds. [#70134](https://github.com/ClickHouse/ClickHouse/pull/70134) ([Robert Schulze](https://github.com/rschu1ze)). +* Refactor reading from object storage. [#70141](https://github.com/ClickHouse/ClickHouse/pull/70141) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Silence UBSAN for integer overflows in some datetime functions. [#70142](https://github.com/ClickHouse/ClickHouse/pull/70142) ([Michael Kolupaev](https://github.com/al13n321)). +* Improve pipdeptree generator for docker images.
- Update requirements.txt for the integration tests runner container - Remove some small dependencies, improve `helpers/retry_decorator.py` - Upgrade docker-compose from EOL version 1 to version 2. [#70146](https://github.com/ClickHouse/ClickHouse/pull/70146) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix 'QueryPlan was not initialized' in 'loop' with empty MergeTree. [#70149](https://github.com/ClickHouse/ClickHouse/pull/70149) ([Michael Kolupaev](https://github.com/al13n321)). +* Remove QueryPlan DataStream. [#70158](https://github.com/ClickHouse/ClickHouse/pull/70158) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Update test_storage_s3_queue/test.py. [#70159](https://github.com/ClickHouse/ClickHouse/pull/70159) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Small docs fix. [#70160](https://github.com/ClickHouse/ClickHouse/pull/70160) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Test: PR local plan, non-constant in source stream. [#70173](https://github.com/ClickHouse/ClickHouse/pull/70173) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix performance checks. [#70175](https://github.com/ClickHouse/ClickHouse/pull/70175) ([Antonio Andelic](https://github.com/antonio2368)). +* Simplify test 03246_range_literal_replacement_works. [#70176](https://github.com/ClickHouse/ClickHouse/pull/70176) ([Pablo Marcos](https://github.com/pamarcos)). +* Update 01079_parallel_alter_add_drop_column_zookeeper.sh. [#70196](https://github.com/ClickHouse/ClickHouse/pull/70196) ([Alexander Tokmakov](https://github.com/tavplubix)). +* Require bugfix job for a set of labels. [#70197](https://github.com/ClickHouse/ClickHouse/pull/70197) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* CI: Praktika integration, fast test. [#70239](https://github.com/ClickHouse/ClickHouse/pull/70239) ([Max Kainov](https://github.com/maxknv)). +* Avoid `Cannot schedule a task` error when loading parts. [#70257](https://github.com/ClickHouse/ClickHouse/pull/70257) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Bump usearch to v2.15.2 and SimSIMD to v5.0.0. [#70270](https://github.com/ClickHouse/ClickHouse/pull/70270) ([Robert Schulze](https://github.com/rschu1ze)). +* Instead of balancing tests by `crc32(file_name)` we'll use `add tests to a group with a minimal number of tests`. [#70272](https://github.com/ClickHouse/ClickHouse/pull/70272) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Closes [#70263](https://github.com/ClickHouse/ClickHouse/issues/70263). [#70273](https://github.com/ClickHouse/ClickHouse/pull/70273) ([flynn](https://github.com/ucasfl)). +* Hide MergeTreeSettings implementation. [#70285](https://github.com/ClickHouse/ClickHouse/pull/70285) ([Raúl Marín](https://github.com/Algunenano)). +* CI: Remove await feature from release branches. [#70294](https://github.com/ClickHouse/ClickHouse/pull/70294) ([Max Kainov](https://github.com/maxknv)). +* Fix `test_keeper_four_word_command`. [#70298](https://github.com/ClickHouse/ClickHouse/pull/70298) ([Antonio Andelic](https://github.com/antonio2368)). +* Update version_date.tsv and changelog after v24.9.2.42-stable. [#70301](https://github.com/ClickHouse/ClickHouse/pull/70301) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Synchronize settings with private. [#70320](https://github.com/ClickHouse/ClickHouse/pull/70320) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Add Ignore Option In DeduplicateMergeProjectionMode. 
[#70327](https://github.com/ClickHouse/ClickHouse/pull/70327) ([Shichao Jin](https://github.com/jsc0218)). +* CI: Enable Integration Tests for backport PRs. [#70329](https://github.com/ClickHouse/ClickHouse/pull/70329) ([Max Kainov](https://github.com/maxknv)). +* There is [a failed CI job](https://s3.amazonaws.com/clickhouse-test-reports/69778/2d81c38874958bd9d54a25524173bdb1ddf2b75c/stateless_tests__release_.html) which is triggered by [03237_create_or_replace_view_atomically_with_atomic_engine](https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/03237_create_or_replace_view_atomically_with_atomic_engine.sh). [#70330](https://github.com/ClickHouse/ClickHouse/pull/70330) ([tuanpach](https://github.com/tuanpach)). +* Fix flaky test `03237_insert_sparse_columns_mem`. [#70333](https://github.com/ClickHouse/ClickHouse/pull/70333) ([Anton Popov](https://github.com/CurtizJ)). +* Rename enable_secure_identifiers -> enforce_strict_identifier_format. [#70335](https://github.com/ClickHouse/ClickHouse/pull/70335) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Attempt to fix flaky RabbitMQ tests. Maybe closes [#45160](https://github.com/ClickHouse/ClickHouse/issues/45160). [#70336](https://github.com/ClickHouse/ClickHouse/pull/70336) ([filimonov](https://github.com/filimonov)). +* Don't fail the stateless check script if we can't collect minio logs. [#70350](https://github.com/ClickHouse/ClickHouse/pull/70350) ([Raúl Marín](https://github.com/Algunenano)). +* Fix tiny mistake, responsible for some of kafka test flaps. Example [report](https://s3.amazonaws.com/clickhouse-test-reports/0/3198aafac59c368993e7b5f49d95674cc1b1be18/integration_tests__release__[2_4].html). [#70352](https://github.com/ClickHouse/ClickHouse/pull/70352) ([filimonov](https://github.com/filimonov)). +* Closes [#69634](https://github.com/ClickHouse/ClickHouse/issues/69634). [#70354](https://github.com/ClickHouse/ClickHouse/pull/70354) ([pufit](https://github.com/pufit)). +* Fix 02346_fulltext_index_bug52019. [#70357](https://github.com/ClickHouse/ClickHouse/pull/70357) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Use new JSON for collecting minio logs. [#70359](https://github.com/ClickHouse/ClickHouse/pull/70359) ([Antonio Andelic](https://github.com/antonio2368)). +* Update comments in VectorSimilarityCondition (WHERE is not supported). [#70360](https://github.com/ClickHouse/ClickHouse/pull/70360) ([Azat Khuzhin](https://github.com/azat)). +* Remove 02492_clickhouse_local_context_uaf test. [#70363](https://github.com/ClickHouse/ClickHouse/pull/70363) ([Azat Khuzhin](https://github.com/azat)). +* Fix `clang-19` build issues. [#70412](https://github.com/ClickHouse/ClickHouse/pull/70412) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Ignore "Invalid multibyte data detected" error during completion. [#70422](https://github.com/ClickHouse/ClickHouse/pull/70422) ([Azat Khuzhin](https://github.com/azat)). +* Make QueryPlan explain methods const. [#70444](https://github.com/ClickHouse/ClickHouse/pull/70444) ([Alexander Gololobov](https://github.com/davenger)). +* Fix 0.1 second delay for interactive queries (due to keystroke interceptor). [#70445](https://github.com/ClickHouse/ClickHouse/pull/70445) ([Azat Khuzhin](https://github.com/azat)). +* Increase lock timeout in attempt to fix 02125_many_mutations. [#70448](https://github.com/ClickHouse/ClickHouse/pull/70448) ([Azat Khuzhin](https://github.com/azat)). +* Fix order in 03249_dynamic_alter_consistency. 
[#70453](https://github.com/ClickHouse/ClickHouse/pull/70453) ([Alexander Gololobov](https://github.com/davenger)). +* Fix refreshable MV in system database breaking server startup. [#70460](https://github.com/ClickHouse/ClickHouse/pull/70460) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix flaky test_refreshable_mv_in_replicated_db. [#70462](https://github.com/ClickHouse/ClickHouse/pull/70462) ([Michael Kolupaev](https://github.com/al13n321)). +* Update version_date.tsv and changelog after v24.8.5.115-lts. [#70463](https://github.com/ClickHouse/ClickHouse/pull/70463) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Decrease probability of "Server died" due to 00913_many_threads. [#70473](https://github.com/ClickHouse/ClickHouse/pull/70473) ([Azat Khuzhin](https://github.com/azat)). +* Fixes for killing leftovers in clickhouse-test. [#70474](https://github.com/ClickHouse/ClickHouse/pull/70474) ([Azat Khuzhin](https://github.com/azat)). +* Update version_date.tsv and changelog after v24.3.12.75-lts. [#70485](https://github.com/ClickHouse/ClickHouse/pull/70485) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Use logging instead of print. [#70505](https://github.com/ClickHouse/ClickHouse/pull/70505) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Remove slow poll() logs in keeper. [#70508](https://github.com/ClickHouse/ClickHouse/pull/70508) ([Raúl Marín](https://github.com/Algunenano)). +* Add timeouts for retry loops in test_storage_rabbitmq. It should prevent cascading failures of the whole test suite caused by a dead loop in one of the test scenarios. Also added small sleeps in 'tight' loops to make retries a bit less aggressive. [#70510](https://github.com/ClickHouse/ClickHouse/pull/70510) ([filimonov](https://github.com/filimonov)). +* CI: Fix for canceled Sync workflow. [#70521](https://github.com/ClickHouse/ClickHouse/pull/70521) ([Max Kainov](https://github.com/maxknv)). +* Debug build failed with clang-18 after https://github.com/ClickHouse/ClickHouse/pull/70412, don't know why it's ok in release build, simply changing `_` to `_1` is ok for both release and debug build. [#70532](https://github.com/ClickHouse/ClickHouse/pull/70532) ([Chang chen](https://github.com/baibaichen)). +* Refreshable materialized views are not experimental anymore. [#70550](https://github.com/ClickHouse/ClickHouse/pull/70550) ([Michael Kolupaev](https://github.com/al13n321)). +* Fix 24.9 setting compatibility `database_replicated_allow_explicit_uuid`. [#70565](https://github.com/ClickHouse/ClickHouse/pull/70565) ([Nikita Fomichev](https://github.com/fm4v)). +* Fix typos. [#70588](https://github.com/ClickHouse/ClickHouse/pull/70588) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Vector search: allow to specify HNSW parameter `ef_search` at query time. [#70616](https://github.com/ClickHouse/ClickHouse/pull/70616) ([Robert Schulze](https://github.com/rschu1ze)). +* Increase max_rows_to_read limit in some tests. [#70617](https://github.com/ClickHouse/ClickHouse/pull/70617) ([Raúl Marín](https://github.com/Algunenano)). +* Reduce sync efforts with private. [#70634](https://github.com/ClickHouse/ClickHouse/pull/70634) ([Raúl Marín](https://github.com/Algunenano)). +* Fix parsing of some formats into sparse columns. [#70635](https://github.com/ClickHouse/ClickHouse/pull/70635) ([Anton Popov](https://github.com/CurtizJ)). +* Fix typos. 
[#70637](https://github.com/ClickHouse/ClickHouse/pull/70637) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Try fix 00180_no_seek_avoiding_when_reading_from_cache. [#70640](https://github.com/ClickHouse/ClickHouse/pull/70640) ([Kseniia Sumarokova](https://github.com/kssenii)). +* When the `PR Check` status is set, it's a valid RunConfig job failure. [#70643](https://github.com/ClickHouse/ClickHouse/pull/70643) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Fix timeout in materialized pg tests. [#70646](https://github.com/ClickHouse/ClickHouse/pull/70646) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Introduced MergeTree setting which allow to change merge selecting algorithm. However we still have only one algorithm and it's mostly for future experiments. [#70647](https://github.com/ClickHouse/ClickHouse/pull/70647) ([alesapin](https://github.com/alesapin)). +* Docs: Follow-up for [#70585](https://github.com/ClickHouse/ClickHouse/issues/70585). [#70654](https://github.com/ClickHouse/ClickHouse/pull/70654) ([Robert Schulze](https://github.com/rschu1ze)). +* Remove strange file. [#70662](https://github.com/ClickHouse/ClickHouse/pull/70662) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Locally I had lots of errors like `'AllocList' does not refer to a value` around places which used `offsetof`. Changing it to `__builtin_offsetof ` helped and I didn't debug any further. [#70671](https://github.com/ClickHouse/ClickHouse/pull/70671) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)). +* Adding the report link to a test result and files' list. [#70677](https://github.com/ClickHouse/ClickHouse/pull/70677) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* materialized postgres: minor fixes. [#70710](https://github.com/ClickHouse/ClickHouse/pull/70710) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Probably fix flaky test_refreshable_mv_in_replicated_db. [#70714](https://github.com/ClickHouse/ClickHouse/pull/70714) ([Michael Kolupaev](https://github.com/al13n321)). +* Move more setting structs to pImpl. [#70739](https://github.com/ClickHouse/ClickHouse/pull/70739) ([Raúl Marín](https://github.com/Algunenano)). +* Reduce sync effort. [#70747](https://github.com/ClickHouse/ClickHouse/pull/70747) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71198](https://github.com/ClickHouse/ClickHouse/issues/71198): Check number of arguments for function with Dynamic argument. [#70749](https://github.com/ClickHouse/ClickHouse/pull/70749) ([Nikita Taranov](https://github.com/nickitat)). +* Add s3queue settings check for cloud. [#70750](https://github.com/ClickHouse/ClickHouse/pull/70750) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix readiness/health check for OpenLDAP container. [#70755](https://github.com/ClickHouse/ClickHouse/pull/70755) ([Julian Maicher](https://github.com/jmaicher)). +* Allow update plan headers for all the steps. [#70761](https://github.com/ClickHouse/ClickHouse/pull/70761) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Autogenerate documentation for settings. [#70768](https://github.com/ClickHouse/ClickHouse/pull/70768) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Not a logical error. [#70770](https://github.com/ClickHouse/ClickHouse/pull/70770) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* CI: Aarch64 build with Asan. [#70778](https://github.com/ClickHouse/ClickHouse/pull/70778) ([Max Kainov](https://github.com/maxknv)). +* Minor fix. 
[#70783](https://github.com/ClickHouse/ClickHouse/pull/70783) ([Anton Popov](https://github.com/CurtizJ)). +* The docs for settings should be located in the source code. Now, the CI supports that. [#70784](https://github.com/ClickHouse/ClickHouse/pull/70784) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Update style-test image. [#70785](https://github.com/ClickHouse/ClickHouse/pull/70785) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Avoid double finalization of `WriteBuffer` in library bridge. [#70799](https://github.com/ClickHouse/ClickHouse/pull/70799) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Make Array Field serialization consistent. [#70803](https://github.com/ClickHouse/ClickHouse/pull/70803) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* A follow-up for [#70785](https://github.com/ClickHouse/ClickHouse/issues/70785), [jwt](https://pypi.org/project/jwt/#history) looks very outdated, and we have an issue with conflicting paths. [#70815](https://github.com/ClickHouse/ClickHouse/pull/70815) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Remove inefficient code. [#70816](https://github.com/ClickHouse/ClickHouse/pull/70816) ([Raúl Marín](https://github.com/Algunenano)). +* Allow large object files if OMIT_HEAVY_DEBUG_SYMBOLS = 0. [#70818](https://github.com/ClickHouse/ClickHouse/pull/70818) ([Michael Kolupaev](https://github.com/al13n321)). +* Add test with distributed queries for 15768. [#70834](https://github.com/ClickHouse/ClickHouse/pull/70834) ([Nikita Taranov](https://github.com/nickitat)). +* More setting structs to pImpl and reuse code. [#70840](https://github.com/ClickHouse/ClickHouse/pull/70840) ([Raúl Marín](https://github.com/Algunenano)). +* Update default HNSW parameter settings. [#70873](https://github.com/ClickHouse/ClickHouse/pull/70873) ([Robert Schulze](https://github.com/rschu1ze)). +* Limiting logging some lines about configs. [#70879](https://github.com/ClickHouse/ClickHouse/pull/70879) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* Fix `limit by`, `limit with ties` for distributed and parallel replicas. [#70880](https://github.com/ClickHouse/ClickHouse/pull/70880) ([Nikita Taranov](https://github.com/nickitat)). +* Fix darwin build. [#70894](https://github.com/ClickHouse/ClickHouse/pull/70894) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Add dots for consistency. [#70909](https://github.com/ClickHouse/ClickHouse/pull/70909) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Logical error fix for substrings, found by fuzzer. [#70914](https://github.com/ClickHouse/ClickHouse/pull/70914) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). +* More setting structs to pImpl. [#70942](https://github.com/ClickHouse/ClickHouse/pull/70942) ([Raúl Marín](https://github.com/Algunenano)). +* Add logging for mock HTTP servers used in minio integration tests. [#70943](https://github.com/ClickHouse/ClickHouse/pull/70943) ([Vitaly Baranov](https://github.com/vitlibar)). +* Minor fixups of [#70011](https://github.com/ClickHouse/ClickHouse/issues/70011) and [#69918](https://github.com/ClickHouse/ClickHouse/issues/69918). [#70959](https://github.com/ClickHouse/ClickHouse/pull/70959) ([Robert Schulze](https://github.com/rschu1ze)). +* CI: Do not skip Build report and status fix. [#70965](https://github.com/ClickHouse/ClickHouse/pull/70965) ([Max Kainov](https://github.com/maxknv)). +* Fix Keeper entry serialization compatibility. 
[#70972](https://github.com/ClickHouse/ClickHouse/pull/70972) ([Antonio Andelic](https://github.com/antonio2368)). +* Update exception message. [#70975](https://github.com/ClickHouse/ClickHouse/pull/70975) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Fix `utils/c++expr` option `-b`. [#70978](https://github.com/ClickHouse/ClickHouse/pull/70978) ([Sergei Trifonov](https://github.com/serxa)). +* Fix `test_keeper_broken_logs`. [#70982](https://github.com/ClickHouse/ClickHouse/pull/70982) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix `01039_test_setting_parse`. [#70986](https://github.com/ClickHouse/ClickHouse/pull/70986) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Tests for languages support for Embedded Dictionaries. [#71004](https://github.com/ClickHouse/ClickHouse/pull/71004) ([Max Vostrikov](https://github.com/max-vostrikov)). +* Required for internal test runs with the same image build in public CI. [#71008](https://github.com/ClickHouse/ClickHouse/pull/71008) ([Ilya Yatsishin](https://github.com/qoega)). +* Move remaining settings objects to pImpl and start simplification. [#71019](https://github.com/ClickHouse/ClickHouse/pull/71019) ([Raúl Marín](https://github.com/Algunenano)). +* CI: Rearrange directories for praktika ci. [#71029](https://github.com/ClickHouse/ClickHouse/pull/71029) ([Max Kainov](https://github.com/maxknv)). +* Fix assert in RemoteSource::onAsyncJobReady(). [#71034](https://github.com/ClickHouse/ClickHouse/pull/71034) ([Igor Nikonov](https://github.com/devcrafter)). +* Fix showing error message in ReadBufferFromS3 when retrying. Without this PR information about a retryable failure in `ReadBufferFromS3` could look like this:. [#71038](https://github.com/ClickHouse/ClickHouse/pull/71038) ([Vitaly Baranov](https://github.com/vitlibar)). +* Fix `test_truncate_database`. [#71057](https://github.com/ClickHouse/ClickHouse/pull/71057) ([Antonio Andelic](https://github.com/antonio2368)). +* Fix clickhouse-test useless 5 second delay in case of multiple threads are used. [#71069](https://github.com/ClickHouse/ClickHouse/pull/71069) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#71142](https://github.com/ClickHouse/ClickHouse/issues/71142): Followup [#70520](https://github.com/ClickHouse/ClickHouse/issues/70520). [#71129](https://github.com/ClickHouse/ClickHouse/pull/71129) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Backported in [#71189](https://github.com/ClickHouse/ClickHouse/issues/71189): Update compatibility setting for `hnsw_candidate_list_size_for_search`. [#71133](https://github.com/ClickHouse/ClickHouse/pull/71133) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#71222](https://github.com/ClickHouse/ClickHouse/issues/71222): Fixes for interactive metrics. [#71173](https://github.com/ClickHouse/ClickHouse/pull/71173) ([Julia Kartseva](https://github.com/jkartseva)). +* Backported in [#71205](https://github.com/ClickHouse/ClickHouse/issues/71205): Maybe not GWPAsan by default. [#71174](https://github.com/ClickHouse/ClickHouse/pull/71174) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#71277](https://github.com/ClickHouse/ClickHouse/issues/71277): Fix LOGICAL_ERROR on wrong scalar subquery argument to table functions. [#71216](https://github.com/ClickHouse/ClickHouse/pull/71216) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71253](https://github.com/ClickHouse/ClickHouse/issues/71253): Disable enable_named_columns_in_function_tuple for 24.10. 
[#71219](https://github.com/ClickHouse/ClickHouse/pull/71219) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71303](https://github.com/ClickHouse/ClickHouse/issues/71303): Improve system.query_metric_log to remove flakiness. [#71295](https://github.com/ClickHouse/ClickHouse/pull/71295) ([Pablo Marcos](https://github.com/pamarcos)). +* Backported in [#71317](https://github.com/ClickHouse/ClickHouse/issues/71317): Fix debug log timestamp. [#71311](https://github.com/ClickHouse/ClickHouse/pull/71311) ([Pablo Marcos](https://github.com/pamarcos)). + +#### Not for changelog + +* Reverted. [#69812](https://github.com/ClickHouse/ClickHouse/pull/69812) ([tuanpach](https://github.com/tuanpach)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 10c55aa4bf5..da7ad3ebd88 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v24.10.1.2812-stable 2024-11-01 v24.9.2.42-stable 2024-10-03 v24.9.1.3278-stable 2024-09-26 v24.8.5.115-lts 2024-10-08 From d0394719c6da6c3a7d647332b7ae977f703636b6 Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 1 Nov 2024 12:11:07 +0100 Subject: [PATCH 238/353] More assertions --- .../IO/CachedOnDiskReadBufferFromFile.cpp | 1 + src/Interpreters/Cache/FileCache.cpp | 2 + src/Interpreters/Cache/FileSegment.cpp | 91 ++++++++++++++----- src/Interpreters/Cache/FileSegment.h | 2 +- src/Interpreters/Cache/Metadata.cpp | 21 +++-- 5 files changed, 89 insertions(+), 28 deletions(-) diff --git a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp index 51c6045cb68..0f0cc4c4139 100644 --- a/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp +++ b/src/Disks/IO/CachedOnDiskReadBufferFromFile.cpp @@ -784,6 +784,7 @@ bool CachedOnDiskReadBufferFromFile::writeCache(char * data, size_t size, size_t LOG_INFO(log, "Insert into cache is skipped due to insufficient disk space. ({})", e.displayText()); return false; } + chassert(file_segment.state() == FileSegment::State::PARTIALLY_DOWNLOADED_NO_CONTINUATION); throw; } diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index f7b7ffc5aea..ae3c9c58fc5 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -1438,6 +1438,8 @@ void FileCache::loadMetadataForKeys(const fs::path & keys_dir) "cached file `{}` does not fit in cache anymore (size: {})", size_limit, offset_it->path().string(), size); + chassert(false); /// TODO: remove before merge. 
+ fs::remove(offset_it->path()); } } diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index c356800fa57..f5a7011833a 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -139,7 +139,7 @@ FileSegmentGuard::Lock FileSegment::lock() const void FileSegment::setDownloadState(State state, const FileSegmentGuard::Lock & lock) { - if (isCompleted(false) && state != State::DETACHED) + if (isCompleted(false)) { throw Exception( ErrorCodes::LOGICAL_ERROR, @@ -700,6 +700,8 @@ void FileSegment::complete() case State::PARTIALLY_DOWNLOADED: { chassert(current_downloaded_size > 0); + chassert(fs::exists(getPath())); + chassert(fs::file_size(getPath()) > 0); if (is_last_holder) { @@ -841,29 +843,60 @@ bool FileSegment::assertCorrectnessUnlocked(const FileSegmentGuard::Lock & lock) } } - if (download_state == State::DOWNLOADED) + switch (download_state.load()) { - chassert(downloader_id.empty()); - chassert(downloaded_size == reserved_size); - chassert(downloaded_size == range().size()); - chassert(downloaded_size > 0); - chassert(std::filesystem::file_size(getPath()) > 0); - check_iterator(queue_iterator); - } - else - { - if (download_state == State::DOWNLOADING) - { - chassert(!downloader_id.empty()); - } - else if (download_state == State::PARTIALLY_DOWNLOADED - || download_state == State::EMPTY) + case State::EMPTY: { chassert(downloader_id.empty()); + chassert(!fs::exists(getPath())); + chassert(!queue_iterator); + break; } + case State::DOWNLOADED: + { + chassert(downloader_id.empty()); - chassert(reserved_size >= downloaded_size); - check_iterator(queue_iterator); + chassert(downloaded_size == reserved_size); + chassert(downloaded_size == range().size()); + chassert(downloaded_size > 0); + chassert(fs::file_size(getPath()) > 0); + + chassert(queue_iterator); + check_iterator(queue_iterator); + break; + } + case State::DOWNLOADING: + { + chassert(!downloader_id.empty()); + if (downloaded_size) + { + chassert(queue_iterator); + chassert(fs::file_size(getPath()) > 0); + } + break; + } + case State::PARTIALLY_DOWNLOADED: + { + chassert(downloader_id.empty()); + + chassert(reserved_size >= downloaded_size); + chassert(downloaded_size > 0); + chassert(fs::file_size(getPath()) > 0); + + chassert(queue_iterator); + check_iterator(queue_iterator); + break; + } + case State::PARTIALLY_DOWNLOADED_NO_CONTINUATION: + { + chassert(reserved_size >= downloaded_size); + check_iterator(queue_iterator); + break; + } + case State::DETACHED: + { + break; + } } return true; @@ -991,7 +1024,12 @@ FileSegmentsHolder::FileSegmentsHolder(FileSegments && file_segments_) FileSegmentPtr FileSegmentsHolder::getSingleFileSegment() const { if (file_segments.size() != 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected single file segment, got: {} in holder {}", file_segments.size(), toString()); + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Expected single file segment, got: {} in holder {}", + file_segments.size(), toString()); + } return file_segments.front(); } @@ -1001,7 +1039,18 @@ void FileSegmentsHolder::reset() ProfileEvents::increment(ProfileEvents::FilesystemCacheUnusedHoldFileSegments, file_segments.size()); for (auto file_segment_it = file_segments.begin(); file_segment_it != file_segments.end();) - file_segment_it = completeAndPopFrontImpl(); + { + try + { + file_segment_it = completeAndPopFrontImpl(); + } + catch (...) 
+ { + chassert(false); + tryLogCurrentException(__PRETTY_FUNCTION__); + continue; + } + } file_segments.clear(); } diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index ee9aee1e354..79adc342329 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -254,7 +254,7 @@ private: const FileSegmentKind segment_kind; /// Size of the segment is not known until it is downloaded and /// can be bigger than max_file_segment_size. - const bool is_unbound = false; + const bool is_unbound; const bool background_download_enabled; std::atomic download_state; diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 99ea01aa4f1..49dbbc71fa2 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -940,7 +940,16 @@ KeyMetadata::iterator LockedKey::removeFileSegmentImpl( if (file_segment->queue_iterator && invalidate_queue_entry) file_segment->queue_iterator->invalidate(); - file_segment->detach(segment_lock, *this); + try + { + file_segment->detach(segment_lock, *this); + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + chassert(false); + /// Do not rethrow, we much delete the file below. + } try { @@ -990,8 +999,8 @@ void LockedKey::shrinkFileSegmentToDownloadedSize( * because of no space left in cache, we need to be able to cut file segment's size to downloaded_size. */ - auto metadata = getByOffset(offset); - const auto & file_segment = metadata->file_segment; + auto file_segment_metadata = getByOffset(offset); + const auto & file_segment = file_segment_metadata->file_segment; chassert(file_segment->assertCorrectnessUnlocked(segment_lock)); const size_t downloaded_size = file_segment->getDownloadedSize(); @@ -1006,15 +1015,15 @@ void LockedKey::shrinkFileSegmentToDownloadedSize( chassert(file_segment->reserved_size >= downloaded_size); int64_t diff = file_segment->reserved_size - downloaded_size; - metadata->file_segment = std::make_shared( + file_segment_metadata->file_segment = std::make_shared( getKey(), offset, downloaded_size, FileSegment::State::DOWNLOADED, CreateFileSegmentSettings(file_segment->getKind()), false, file_segment->cache, key_metadata, file_segment->queue_iterator); if (diff) - metadata->getQueueIterator()->decrementSize(diff); + file_segment_metadata->getQueueIterator()->decrementSize(diff); - chassert(file_segment->assertCorrectnessUnlocked(segment_lock)); + chassert(file_segment_metadata->file_segment->assertCorrectnessUnlocked(segment_lock)); } bool LockedKey::addToDownloadQueue(size_t offset, const FileSegmentGuard::Lock &) From ce12f652c728df9513f5e8a940462558413bd58a Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 1 Nov 2024 11:25:21 +0000 Subject: [PATCH 239/353] Fix test flakiness --- .../queries/0_stateless/03246_alter_from_string_to_json.sql.j2 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 index e8760b659dc..2ccf2153699 100644 --- a/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 +++ b/tests/queries/0_stateless/03246_alter_from_string_to_json.sql.j2 @@ -1,3 +1,6 @@ +-- Random settings limits: index_granularity=(None, 60000) +-- Tags: long + set allow_experimental_json_type = 1; set max_block_size = 20000; From e83cff7360e1a7ec0459a09bf95c954263b4c27c Mon Sep 17 00:00:00 2001 From: kssenii Date: Fri, 1 Nov 2024 12:47:03 +0100 Subject: [PATCH 240/353] 
Fix typo --- src/Interpreters/Cache/FileSegment.cpp | 2 +- src/Interpreters/Cache/Metadata.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index f5a7011833a..080b54feb06 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -1046,8 +1046,8 @@ void FileSegmentsHolder::reset() } catch (...) { - chassert(false); tryLogCurrentException(__PRETTY_FUNCTION__); + chassert(false); continue; } } diff --git a/src/Interpreters/Cache/Metadata.cpp b/src/Interpreters/Cache/Metadata.cpp index 49dbbc71fa2..231545212cd 100644 --- a/src/Interpreters/Cache/Metadata.cpp +++ b/src/Interpreters/Cache/Metadata.cpp @@ -948,7 +948,7 @@ KeyMetadata::iterator LockedKey::removeFileSegmentImpl( { tryLogCurrentException(__PRETTY_FUNCTION__); chassert(false); - /// Do not rethrow, we much delete the file below. + /// Do not rethrow, we must delete the file below. } try From 2bafaa2fc675132d70d7683e16db4571dcddbd0e Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Fri, 1 Nov 2024 14:08:45 +0100 Subject: [PATCH 241/353] Update 03261_tuple_map_object_to_json_cast.sql --- .../queries/0_stateless/03261_tuple_map_object_to_json_cast.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql index fcec7eb3af4..c0199452843 100644 --- a/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql +++ b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql @@ -1,3 +1,5 @@ +-- Tags: no-fasttest + set allow_experimental_json_type = 1; set allow_experimental_object_type = 1; set allow_experimental_variant_type = 1; From 47ddd7fb6b230e0d9b0d2341e118bd88ba871d07 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 1 Nov 2024 14:33:03 +0000 Subject: [PATCH 242/353] Check suspicious and experimental types in JSON type hints --- src/DataTypes/DataTypeObject.cpp | 9 +++++++++ src/DataTypes/DataTypeObject.h | 2 ++ .../0_stateless/03261_json_hints_types_check.reference | 0 .../queries/0_stateless/03261_json_hints_types_check.sql | 9 +++++++++ 4 files changed, 20 insertions(+) create mode 100644 tests/queries/0_stateless/03261_json_hints_types_check.reference create mode 100644 tests/queries/0_stateless/03261_json_hints_types_check.sql diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 18bfed9c5c3..69ae9b8e906 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -230,6 +230,15 @@ MutableColumnPtr DataTypeObject::createColumn() const return ColumnObject::create(std::move(typed_path_columns), max_dynamic_paths, max_dynamic_types); } +void DataTypeObject::forEachChild(const ChildCallback & callback) const +{ + for (const auto & [path, type] : typed_paths) + { + callback(*type); + type->forEachChild(callback); + } +} + namespace { diff --git a/src/DataTypes/DataTypeObject.h b/src/DataTypes/DataTypeObject.h index 7eb2e7729de..9321570fb75 100644 --- a/src/DataTypes/DataTypeObject.h +++ b/src/DataTypes/DataTypeObject.h @@ -50,6 +50,8 @@ public: bool equals(const IDataType & rhs) const override; + void forEachChild(const ChildCallback &) const override; + bool hasDynamicSubcolumnsData() const override { return true; } std::unique_ptr getDynamicSubcolumnData(std::string_view subcolumn_name, const SubstreamData & data, bool throw_if_null) const 
override; diff --git a/tests/queries/0_stateless/03261_json_hints_types_check.reference b/tests/queries/0_stateless/03261_json_hints_types_check.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03261_json_hints_types_check.sql b/tests/queries/0_stateless/03261_json_hints_types_check.sql new file mode 100644 index 00000000000..a407aa9474b --- /dev/null +++ b/tests/queries/0_stateless/03261_json_hints_types_check.sql @@ -0,0 +1,9 @@ +set allow_experimental_json_type=1; +set allow_experimental_variant_type=0; +set allow_experimental_object_type=0; + +select '{}'::JSON(a LowCardinality(Int128)); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +select '{}'::JSON(a FixedString(100000)); -- {serverError ILLEGAL_COLUMN} +select '{}'::JSON(a Variant(Int32)); -- {serverError ILLEGAL_COLUMN} +select '{}'::JSON(a Object('json')); -- {serverError ILLEGAL_COLUMN} + From 3fb4836f635a92ca59eda9dda519c8a466428bf9 Mon Sep 17 00:00:00 2001 From: Alexandre Snarskii Date: Fri, 1 Nov 2024 19:21:54 +0300 Subject: [PATCH 243/353] memory_worker shall be started on non-Linux OS too --- programs/server/Server.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 1f481381b2b..5159f95419e 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -1353,9 +1353,11 @@ try } FailPointInjection::enableFromGlobalConfig(config()); +#endif memory_worker.start(); +#if defined(OS_LINUX) int default_oom_score = 0; #if !defined(NDEBUG) From 7e476b62d286326445d1a720f483e64fd8eae9d7 Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 1 Nov 2024 17:09:00 +0000 Subject: [PATCH 244/353] Fix tests --- tests/queries/0_stateless/03214_json_typed_dynamic_path.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql b/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql index 1f6a025825a..eee3d70b8da 100644 --- a/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql +++ b/tests/queries/0_stateless/03214_json_typed_dynamic_path.sql @@ -1,6 +1,7 @@ -- Tags: no-fasttest set allow_experimental_json_type = 1; +set allow_experimental_dynamic_type = 1; drop table if exists test; create table test (json JSON(a Dynamic)) engine=MergeTree order by tuple() settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; insert into test select '{"a" : 42}'; From 22e48f6852adcb3b3092b9b5a9e78674d52c0997 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Fri, 1 Nov 2024 18:16:16 +0100 Subject: [PATCH 245/353] Update 03261_tuple_map_object_to_json_cast.sql --- .../queries/0_stateless/03261_tuple_map_object_to_json_cast.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql index c0199452843..91d3f504f92 100644 --- a/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql +++ b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql @@ -4,6 +4,7 @@ set allow_experimental_json_type = 1; set allow_experimental_object_type = 1; set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; +set enable_named_columns_in_function_tuple = 1; select 'Map to JSON'; select map('a', number::UInt32, 'b', toDate(number), 'c', range(number), 'd', [map('e', number::UInt32)])::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); From 
bbde6ba51224c43cf88978adb92cd1a72b767313 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 17:53:32 +0000 Subject: [PATCH 246/353] update test --- ...rallel_replicas_join_algo_and_analyzer_1.sh | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh index 1d43f540138..8d54c2eed13 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh @@ -27,6 +27,8 @@ inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 SETTINGS allow_experimental_analyzer=1" +PARALLEL_REPLICAS_SETTINGS="allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join = 0" + ############## echo echo "simple (global) join with analyzer and parallel replicas" @@ -35,17 +37,13 @@ $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, -max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0, parallel_replicas_local_plan=0" +SETTINGS enable_analyzer=1, $PARALLEL_REPLICAS_SETTING, parallel_replicas_local_plan=0" $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, send_logs_level='trace', -max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0, parallel_replicas_local_plan=0" 2>&1 | +SETTINGS enable_analyzer=1, send_logs_level='trace', $PARALLEL_REPLICAS_SETTING, parallel_replicas_local_plan=0" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' @@ -57,17 +55,13 @@ $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, -max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0, parallel_replicas_local_plan=0" +SETTINGS enable_analyzer=1, $PARALLEL_REPLICAS_SETTING, parallel_replicas_local_plan=1" $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS allow_experimental_analyzer=1, allow_experimental_parallel_reading_from_replicas = 2, send_logs_level='trace', 
-max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, -cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join=0, parallel_replicas_local_plan=1" 2>&1 | +SETTINGS enable_analyzer=1, send_logs_level='trace', $PARALLEL_REPLICAS_SETTING, parallel_replicas_local_plan=1" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' From 7315ad482052f50a98ea2eda433df34353e8d8d0 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Fri, 1 Nov 2024 17:55:49 +0000 Subject: [PATCH 247/353] Polishing --- .../02967_parallel_replicas_join_algo_and_analyzer_1.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh index 8d54c2eed13..d315257dbac 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh @@ -27,7 +27,7 @@ inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 SETTINGS allow_experimental_analyzer=1" -PARALLEL_REPLICAS_SETTINGS="allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join = 0" +PARALLEL_REPLICAS_SETTINGS="enable_parallel_replicas = 2, max_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_prefer_local_join = 0" ############## echo From 2cc2f31d9aebcf170b771be4d21cda63efcaf34e Mon Sep 17 00:00:00 2001 From: avogar Date: Fri, 1 Nov 2024 18:18:12 +0000 Subject: [PATCH 248/353] Fix error Invalid number of rows in Chunk with Variant column --- src/Columns/ColumnVariant.cpp | 2 +- .../0_stateless/03261_variant_permutation_bug.reference | 0 tests/queries/0_stateless/03261_variant_permutation_bug.sql | 6 ++++++ 3 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 tests/queries/0_stateless/03261_variant_permutation_bug.reference create mode 100644 tests/queries/0_stateless/03261_variant_permutation_bug.sql diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index 564b60e1c1d..d5c8386d35f 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -952,7 +952,7 @@ ColumnPtr ColumnVariant::permute(const Permutation & perm, size_t limit) const if (hasOnlyNulls()) { if (limit) - return cloneResized(limit); + return cloneResized(limit ? std::min(size(), limit) : size()); /// If no limit, we can just return current immutable column. 
return this->getPtr(); diff --git a/tests/queries/0_stateless/03261_variant_permutation_bug.reference b/tests/queries/0_stateless/03261_variant_permutation_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03261_variant_permutation_bug.sql b/tests/queries/0_stateless/03261_variant_permutation_bug.sql new file mode 100644 index 00000000000..373dd9e19fa --- /dev/null +++ b/tests/queries/0_stateless/03261_variant_permutation_bug.sql @@ -0,0 +1,6 @@ +set allow_experimental_variant_type=1; +create table test (x UInt64, d Variant(UInt64)) engine=Memory; +insert into test select number, null from numbers(200000); +select d from test order by d::String limit 32213 format Null; +drop table test; + From 9d0f256dfe87d0b914655570513e64f167cadeb0 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 1 Nov 2024 12:17:40 +0000 Subject: [PATCH 249/353] Enable SimSIMD backend in Usearch --- contrib/SimSIMD | 2 +- contrib/SimSIMD-cmake/CMakeLists.txt | 10 +++-- contrib/usearch-cmake/CMakeLists.txt | 64 +++++++++++++++++++++++++--- 3 files changed, 64 insertions(+), 12 deletions(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index 935fef2964b..d7798ac6cb7 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit 935fef2964bc38e995c5f465b42259a35b8cf0d3 +Subproject commit d7798ac6cb78ac1cb1cdc590f391643f983a2fd7 diff --git a/contrib/SimSIMD-cmake/CMakeLists.txt b/contrib/SimSIMD-cmake/CMakeLists.txt index f5dc4d63604..1d434490c7c 100644 --- a/contrib/SimSIMD-cmake/CMakeLists.txt +++ b/contrib/SimSIMD-cmake/CMakeLists.txt @@ -1,4 +1,6 @@ -set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD") - -add_library(_simsimd INTERFACE) -target_include_directories(_simsimd SYSTEM INTERFACE "${SIMSIMD_PROJECT_DIR}/include") +# See contrib/usearch-cmake/CMakeLists.txt, why only enabled on x86 +if (ARCH_AMD64) + set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD") + add_library(_simsimd INTERFACE) + target_include_directories(_simsimd SYSTEM INTERFACE "${SIMSIMD_PROJECT_DIR}/include") +endif() diff --git a/contrib/usearch-cmake/CMakeLists.txt b/contrib/usearch-cmake/CMakeLists.txt index 25f6ca82a74..69a986de192 100644 --- a/contrib/usearch-cmake/CMakeLists.txt +++ b/contrib/usearch-cmake/CMakeLists.txt @@ -6,12 +6,62 @@ target_include_directories(_usearch SYSTEM INTERFACE ${USEARCH_PROJECT_DIR}/incl target_link_libraries(_usearch INTERFACE _fp16) target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB) -# target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD) -# ^^ simsimd is not enabled at the moment. Reasons: -# - Vectorization is important for raw scans but not so much for HNSW. We use usearch only for HNSW. -# - Simsimd does compile-time dispatch (choice of SIMD kernels determined by capabilities of the build machine) or dynamic dispatch (SIMD -# kernels chosen at runtime based on cpuid instruction). Since current builds are limited to SSE 4.2 (x86) and NEON (ARM), the speedup of -# the former would be moderate compared to AVX-512 / SVE. The latter is at the moment too fragile with respect to portability across x86 -# and ARM machines ... certain conbinations of quantizations / distance functions / SIMD instructions are not implemented at the moment. +# Only x86 for now. On ARM, the linker goes down in flames. To make SimSIMD compile, I had to remove a macro checks in SimSIMD +# for AVX512 (x86, worked nicely) and __ARM_BF16_FORMAT_ALTERNATIVE. It is probably because of that. 
+if (ARCH_AMD64) + target_link_libraries(_usearch INTERFACE _simsimd) + target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD) + + target_compile_definitions(_usearch INTERFACE USEARCH_CAN_COMPILE_FLOAT16) + target_compile_definitions(_usearch INTERFACE USEARCH_CAN_COMPILE_BF16) +endif () add_library(ch_contrib::usearch ALIAS _usearch) + + + +# LLVM ERROR: Cannot select: 0x7996e7a73150: f32,ch = load<(load (s16) from %ir.22, !tbaa !54231), anyext from bf16> 0x79961cb737c0, 0x7996e7a1a500, undef:i64, ./contrib/SimSIMD/include/simsimd/dot.h:215:1 +# 0x7996e7a1a500: i64 = add 0x79961e770d00, Constant:i64<-16>, ./contrib/SimSIMD/include/simsimd/dot.h:215:1 +# 0x79961e770d00: i64,ch = CopyFromReg 0x79961cb737c0, Register:i64 %4, ./contrib/SimSIMD/include/simsimd/dot.h:215:1 +# 0x7996e7a1ae10: i64 = Register %4 +# 0x7996e7a1b5f0: i64 = Constant<-16> +# 0x7996e7a1a730: i64 = undef +# In function: _ZL23simsimd_dot_bf16_serialPKu6__bf16S0_yPd +# PLEASE submit a bug report to https://github.com/llvm/llvm-project/issues/ and include the crash backtrace. +# Stack dump: +# 0. Running pass 'Function Pass Manager' on module 'src/libdbms.a(MergeTreeIndexVectorSimilarity.cpp.o at 2312737440)'. +# 1. Running pass 'AArch64 Instruction Selection' on function '@_ZL23simsimd_dot_bf16_serialPKu6__bf16S0_yPd' +# #0 0x00007999e83a63bf llvm::sys::PrintStackTrace(llvm::raw_ostream&, int) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda63bf) +# #1 0x00007999e83a44f9 llvm::sys::RunSignalHandlers() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda44f9) +# #2 0x00007999e83a6b00 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xda6b00) +# #3 0x00007999e6e45320 (/lib/x86_64-linux-gnu/libc.so.6+0x45320) +# #4 0x00007999e6e9eb1c pthread_kill (/lib/x86_64-linux-gnu/libc.so.6+0x9eb1c) +# #5 0x00007999e6e4526e raise (/lib/x86_64-linux-gnu/libc.so.6+0x4526e) +# #6 0x00007999e6e288ff abort (/lib/x86_64-linux-gnu/libc.so.6+0x288ff) +# #7 0x00007999e82fe0c2 llvm::report_fatal_error(llvm::Twine const&, bool) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xcfe0c2) +# #8 0x00007999e8c2f8e3 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162f8e3) +# #9 0x00007999e8c2ed76 llvm::SelectionDAGISel::SelectCodeCommon(llvm::SDNode*, unsigned char const*, unsigned int) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162ed76) +# #10 0x00007999ea1adbcb (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x2badbcb) +# #11 0x00007999e8c2611f llvm::SelectionDAGISel::DoInstructionSelection() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x162611f) +# #12 0x00007999e8c25790 llvm::SelectionDAGISel::CodeGenAndEmitDAG() (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x1625790) +# #13 0x00007999e8c248de llvm::SelectionDAGISel::SelectAllBasicBlocks(llvm::Function const&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x16248de) +# #14 0x00007999e8c22934 llvm::SelectionDAGISel::runOnMachineFunction(llvm::MachineFunction&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x1622934) +# #15 0x00007999e87826b9 llvm::MachineFunctionPass::runOnFunction(llvm::Function&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x11826b9) +# #16 0x00007999e84f7772 llvm::FPPassManager::runOnFunction(llvm::Function&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xef7772) +# #17 0x00007999e84fd2f4 llvm::FPPassManager::runOnModule(llvm::Module&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xefd2f4) +# #18 0x00007999e84f7e9f llvm::legacy::PassManagerImpl::run(llvm::Module&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xef7e9f) +# #19 0x00007999e99f7d61 
(/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f7d61) +# #20 0x00007999e99f8c91 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f8c91) +# #21 0x00007999e99f8b10 llvm::lto::thinBackend(llvm::lto::Config const&, unsigned int, std::function>> (unsigned int, llvm::Twine const&)>, llvm::Module&, llvm::ModuleSummaryIndex const&, llvm::DenseMap, std::equal_to, std::allocator>, llvm::DenseMapInfo, llvm::detail::DenseMapPair, std::equal_to, std::allocator>>> const&, llvm::DenseMap, llvm::detail::DenseMapPair> const&, llvm::MapVector, llvm::detail::DenseMapPair>, llvm::SmallVector, 0u>>*, std::vector> const&) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f8b10) +# #22 0x00007999e99f248d (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f248d) +# #23 0x00007999e99f1cd6 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0x23f1cd6) +# #24 0x00007999e82c9beb (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xcc9beb) +# #25 0x00007999e834ebe3 llvm::ThreadPool::processTasks(llvm::ThreadPoolTaskGroup*) (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xd4ebe3) +# #26 0x00007999e834f704 (/usr/lib/llvm-18/bin/../lib/libLLVM.so.18.1+0xd4f704) +# #27 0x00007999e6e9ca94 (/lib/x86_64-linux-gnu/libc.so.6+0x9ca94) +# #28 0x00007999e6f29c3c (/lib/x86_64-linux-gnu/libc.so.6+0x129c3c) +# clang++-18: error: unable to execute command: Aborted (core dumped) +# clang++-18: error: linker command failed due to signal (use -v to see invocation) +# ^[[A^Cninja: build stopped: interrupted by user. From 3a042c080473957ffe40c5e299b06714868ab841 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Fri, 1 Nov 2024 12:55:02 +0000 Subject: [PATCH 250/353] Enable dynamic dispatch in SimSIMD --- contrib/SimSIMD-cmake/CMakeLists.txt | 6 ++++-- src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/contrib/SimSIMD-cmake/CMakeLists.txt b/contrib/SimSIMD-cmake/CMakeLists.txt index 1d434490c7c..8350417479a 100644 --- a/contrib/SimSIMD-cmake/CMakeLists.txt +++ b/contrib/SimSIMD-cmake/CMakeLists.txt @@ -1,6 +1,8 @@ # See contrib/usearch-cmake/CMakeLists.txt, why only enabled on x86 if (ARCH_AMD64) set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD") - add_library(_simsimd INTERFACE) - target_include_directories(_simsimd SYSTEM INTERFACE "${SIMSIMD_PROJECT_DIR}/include") + set(SIMSIMD_SRCS ${SIMSIMD_PROJECT_DIR}/c/lib.c) + add_library(_simsimd ${SIMSIMD_SRCS}) + target_include_directories(_simsimd SYSTEM PUBLIC "${SIMSIMD_PROJECT_DIR}/include") + target_compile_definitions(_simsimd PUBLIC SIMSIMD_DYNAMIC_DISPATCH) endif() diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 5a725922e14..0b5ffa659dc 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -118,6 +118,8 @@ USearchIndexWithSerialization::USearchIndexWithSerialization( if (!result) throw Exception(ErrorCodes::INCORRECT_DATA, "Could not create vector similarity index. 
Error: {}", String(result.error.release())); swap(result.index); + + /// LOG_TRACE(getLogger("XXX"), "{}", simsimd_uses_dynamic_dispatch()); } void USearchIndexWithSerialization::serialize(WriteBuffer & ostr) const From a4e576924b16ed199e3726313f96c241b604d4b6 Mon Sep 17 00:00:00 2001 From: 0xMihalich Date: Sat, 2 Nov 2024 18:48:57 +1000 Subject: [PATCH 251/353] Fix: ERROR: column "attgenerated" does not exist for old PostgreSQL databases Restore support for GreenPlum and older versions of PostgreSQL without affecting existing functionality. --- .../PostgreSQL/fetchPostgreSQLTableStructure.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 45fd52f27ab..5268dbcb59f 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -307,6 +307,13 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( if (!columns.empty()) columns_part = fmt::format(" AND attname IN ('{}')", boost::algorithm::join(columns, "','")); + /// Bypassing the error of the missing column `attgenerated` in the system table `pg_attribute` for PostgreSQL versions below 12. + /// This trick involves executing a special query to the DBMS in advance to obtain the correct line with comment /// if column has GENERATED. + /// The result of the query will be the name of the column `attgenerated` or an empty string declaration for PostgreSQL version 11 and below. + /// This change does not degrade the function's performance but restores support for older versions and fix ERROR: column "attgenerated" does not exist. + pqxx::result gen_result{tx.exec("select case when current_setting('server_version_num')::int < 120000 then '''''' else 'attgenerated' end as generated")}; + std::string generated = gen_result[0][0].as(); + std::string query = fmt::format( "SELECT attname AS name, " /// column name "format_type(atttypid, atttypmod) AS type, " /// data type @@ -315,11 +322,11 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( "atttypid as type_id, " "atttypmod as type_modifier, " "attnum as att_num, " - "attgenerated as generated " /// if column has GENERATED + "{} as generated " /// if column has GENERATED "FROM pg_attribute " "WHERE attrelid = (SELECT oid FROM pg_class WHERE {}) {}" "AND NOT attisdropped AND attnum > 0 " - "ORDER BY attnum ASC", where, columns_part); + "ORDER BY attnum ASC", generated, where, columns_part); /// Now we use variable `generated` to form query string. End of trick. auto postgres_table_with_schema = postgres_schema.empty() ? postgres_table : doubleQuoteString(postgres_schema) + '.' 
+ doubleQuoteString(postgres_table); table.physical_columns = readNamesAndTypesList(tx, postgres_table_with_schema, query, use_nulls, false); From 64b405254c0c7dbe2217bd6251f3767556d01d75 Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Sat, 2 Nov 2024 19:50:45 +0000 Subject: [PATCH 252/353] Fix --- .../02967_parallel_replicas_join_algo_and_analyzer_1.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh index d315257dbac..a6e755ebc35 100755 --- a/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh +++ b/tests/queries/0_stateless/02967_parallel_replicas_join_algo_and_analyzer_1.sh @@ -37,13 +37,13 @@ $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS enable_analyzer=1, $PARALLEL_REPLICAS_SETTING, parallel_replicas_local_plan=0" +SETTINGS enable_analyzer=1, $PARALLEL_REPLICAS_SETTINGS, parallel_replicas_local_plan=0" $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS enable_analyzer=1, send_logs_level='trace', $PARALLEL_REPLICAS_SETTING, parallel_replicas_local_plan=0" 2>&1 | +SETTINGS enable_analyzer=1, send_logs_level='trace', $PARALLEL_REPLICAS_SETTINGS, parallel_replicas_local_plan=0" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' @@ -55,13 +55,13 @@ $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS enable_analyzer=1, $PARALLEL_REPLICAS_SETTING, parallel_replicas_local_plan=1" +SETTINGS enable_analyzer=1, $PARALLEL_REPLICAS_SETTINGS, parallel_replicas_local_plan=1" $CLICKHOUSE_CLIENT -q " select * from (select key, value from num_1) l inner join (select key, value from num_2) r on l.key = r.key order by l.key limit 10 offset 700000 -SETTINGS enable_analyzer=1, send_logs_level='trace', $PARALLEL_REPLICAS_SETTING, parallel_replicas_local_plan=1" 2>&1 | +SETTINGS enable_analyzer=1, send_logs_level='trace', $PARALLEL_REPLICAS_SETTINGS, parallel_replicas_local_plan=1" 2>&1 | grep "executeQuery\|.*Coordinator: Coordination done" | grep -o "SELECT.*WithMergeableState)\|.*Coordinator: Coordination done" | sed -re 's/_data_[[:digit:]]+_[[:digit:]]+/_data_/g' From 1d83bb2ddaeab407af0fa7d93307bb2465568b2b Mon Sep 17 00:00:00 2001 From: Igor Nikonov Date: Sun, 3 Nov 2024 07:39:38 +0000 Subject: [PATCH 253/353] Update settings changes history --- src/Core/SettingsChangesHistory.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index 317037070fc..9f314788505 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -71,6 +71,7 @@ static std::initializer_list Date: Sun, 3 Nov 2024 15:10:26 +0000 Subject: [PATCH 254/353] Fix test --- .../0_stateless/02354_vector_search_expansion_search.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02354_vector_search_expansion_search.sql 
b/tests/queries/0_stateless/02354_vector_search_expansion_search.sql index fcbe9ee42b9..f0cd5374be7 100644 --- a/tests/queries/0_stateless/02354_vector_search_expansion_search.sql +++ b/tests/queries/0_stateless/02354_vector_search_expansion_search.sql @@ -14,7 +14,7 @@ CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similar -- Generate random values but with a fixed seed (conceptually), so that the data is deterministic. -- Unfortunately, no random functions in ClickHouse accepts a seed. Instead, abuse the numbers table + hash functions to provide -- deterministic randomness. -INSERT INTO tab SELECT number, [sipHash64(number)/18446744073709551615, wyHash64(number)/18446744073709551615] FROM numbers(370000); -- 18446744073709551615 is the biggest UInt64 +INSERT INTO tab SELECT number, [sipHash64(number)/18446744073709551615, wyHash64(number)/18446744073709551615] FROM numbers(660000); -- 18446744073709551615 is the biggest UInt64 -- hnsw_candidate_list_size_for_search = 0 is illegal WITH [0.5, 0.5] AS reference_vec From 27241b484f8c26197ec4329212a8a5ef11d02007 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 3 Nov 2024 16:00:33 +0000 Subject: [PATCH 255/353] Fix linker warning --- contrib/usearch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/usearch b/contrib/usearch index 53799b84ca9..7efe8b710c9 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit 53799b84ca9ad708b060d0b1cfa5f039371721cd +Subproject commit 7efe8b710c9831bfe06573b1df0fad001b04a2b5 From 27049f2cb599b4f93ae327783ab0cc588bef7dd1 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 3 Nov 2024 19:16:35 +0000 Subject: [PATCH 256/353] Demote log level for failed authentication --- src/Access/AccessControl.cpp | 7 ++++--- src/Common/Exception.cpp | 31 +++++++++++++++++++++---------- src/Common/Exception.h | 9 +++++---- src/Server/TCPHandler.cpp | 3 ++- 4 files changed, 32 insertions(+), 18 deletions(-) diff --git a/src/Access/AccessControl.cpp b/src/Access/AccessControl.cpp index e8ee363be1a..9b3b8d2a977 100644 --- a/src/Access/AccessControl.cpp +++ b/src/Access/AccessControl.cpp @@ -608,7 +608,7 @@ AuthResult AccessControl::authenticate(const Credentials & credentials, const Po } catch (...) { - tryLogCurrentException(getLogger(), "from: " + address.toString() + ", user: " + credentials.getUserName() + ": Authentication failed"); + tryLogCurrentException(getLogger(), "from: " + address.toString() + ", user: " + credentials.getUserName() + ": Authentication failed", LogsLevel::information); WriteBufferFromOwnString message; message << credentials.getUserName() << ": Authentication failed: password is incorrect, or there is no user with such name."; @@ -622,8 +622,9 @@ AuthResult AccessControl::authenticate(const Credentials & credentials, const Po << "and deleting this file will reset the password.\n" << "See also /etc/clickhouse-server/users.xml on the server where ClickHouse is installed.\n\n"; - /// We use the same message for all authentication failures because we don't want to give away any unnecessary information for security reasons, - /// only the log will show the exact reason. + /// We use the same message for all authentication failures because we don't want to give away any unnecessary information for security reasons. + /// Only the log ((*), above) will show the exact reason. Note that (*) logs at information level instead of the default error level as + /// authentication failures are not an unusual event. 
throw Exception(PreformattedMessage{message.str(), "{}: Authentication failed: password is incorrect, or there is no user with such name", std::vector{credentials.getUserName()}}, diff --git a/src/Common/Exception.cpp b/src/Common/Exception.cpp index 320fc06cb2f..644c9a19738 100644 --- a/src/Common/Exception.cpp +++ b/src/Common/Exception.cpp @@ -251,7 +251,7 @@ void Exception::setThreadFramePointers(ThreadFramePointersBase frame_pointers) thread_frame_pointers.frame_pointers = std::move(frame_pointers); } -static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message) +static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string & start_of_message, LogsLevel level) { if (!isLoggingEnabled()) return; @@ -262,14 +262,25 @@ static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string if (!start_of_message.empty()) message.text = fmt::format("{}: {}", start_of_message, message.text); - LOG_ERROR(logger, message); + switch (level) + { + case LogsLevel::none: break; + case LogsLevel::test: LOG_TEST(logger, message); break; + case LogsLevel::trace: LOG_TRACE(logger, message); break; + case LogsLevel::debug: LOG_DEBUG(logger, message); break; + case LogsLevel::information: LOG_INFO(logger, message); break; + case LogsLevel::warning: LOG_WARNING(logger, message); break; + case LogsLevel::error: LOG_ERROR(logger, message); break; + case LogsLevel::fatal: LOG_FATAL(logger, message); break; + } + } catch (...) // NOLINT(bugprone-empty-catch) { } } -void tryLogCurrentException(const char * log_name, const std::string & start_of_message) +void tryLogCurrentException(const char * log_name, const std::string & start_of_message, LogsLevel level) { if (!isLoggingEnabled()) return; @@ -283,10 +294,10 @@ void tryLogCurrentException(const char * log_name, const std::string & start_of_ /// getLogger can allocate memory too auto logger = getLogger(log_name); - tryLogCurrentExceptionImpl(logger.get(), start_of_message); + tryLogCurrentExceptionImpl(logger.get(), start_of_message, level); } -void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message) +void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message, LogsLevel level) { /// Under high memory pressure, new allocations throw a /// MEMORY_LIMIT_EXCEEDED exception. @@ -295,17 +306,17 @@ void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_ /// MemoryTracker until the exception will be logged. 
LockMemoryExceptionInThread lock_memory_tracker(VariableContext::Global); - tryLogCurrentExceptionImpl(logger, start_of_message); + tryLogCurrentExceptionImpl(logger, start_of_message, level); } -void tryLogCurrentException(LoggerPtr logger, const std::string & start_of_message) +void tryLogCurrentException(LoggerPtr logger, const std::string & start_of_message, LogsLevel level) { - tryLogCurrentException(logger.get(), start_of_message); + tryLogCurrentException(logger.get(), start_of_message, level); } -void tryLogCurrentException(const AtomicLogger & logger, const std::string & start_of_message) +void tryLogCurrentException(const AtomicLogger & logger, const std::string & start_of_message, LogsLevel level) { - tryLogCurrentException(logger.load(), start_of_message); + tryLogCurrentException(logger.load(), start_of_message, level); } static void getNoSpaceLeftInfoMessage(std::filesystem::path path, String & msg) diff --git a/src/Common/Exception.h b/src/Common/Exception.h index 8ec640ff642..edc1b95bca4 100644 --- a/src/Common/Exception.h +++ b/src/Common/Exception.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -276,10 +277,10 @@ using Exceptions = std::vector; * Can be used in destructors in the catch-all block. */ /// TODO: Logger leak constexpr overload -void tryLogCurrentException(const char * log_name, const std::string & start_of_message = ""); -void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message = ""); -void tryLogCurrentException(LoggerPtr logger, const std::string & start_of_message = ""); -void tryLogCurrentException(const AtomicLogger & logger, const std::string & start_of_message = ""); +void tryLogCurrentException(const char * log_name, const std::string & start_of_message = "", LogsLevel level = LogsLevel::error); +void tryLogCurrentException(Poco::Logger * logger, const std::string & start_of_message = "", LogsLevel level = LogsLevel::error); +void tryLogCurrentException(LoggerPtr logger, const std::string & start_of_message = "", LogsLevel level = LogsLevel::error); +void tryLogCurrentException(const AtomicLogger & logger, const std::string & start_of_message = "", LogsLevel level = LogsLevel::error); /** Prints current exception in canonical format. diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index e7e4ae25a68..ea5507c3155 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1614,7 +1614,8 @@ void TCPHandler::receiveHello() if (e.code() != DB::ErrorCodes::AUTHENTICATION_FAILED) throw; - tryLogCurrentException(log, "SSL authentication failed, falling back to password authentication"); + tryLogCurrentException(log, "SSL authentication failed, falling back to password authentication", LogsLevel::debug); + /// ^^ Log at debug level instead of default error level as authentication failures are not an unusual event. 
} } } From 7f1ccc30c9e192a00ca624bcfcd05c9b2837d27d Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Sun, 3 Nov 2024 21:19:27 +0000 Subject: [PATCH 257/353] Try to suppress msan warnings --- contrib/SimSIMD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index d7798ac6cb7..c03d065a766 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit d7798ac6cb78ac1cb1cdc590f391643f983a2fd7 +Subproject commit c03d065a7661004a9a18fe52753efafa170c67f9 From 5aba66e50a98f040daaa3c2235310e68cfa45e55 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 4 Nov 2024 03:13:42 +0000 Subject: [PATCH 258/353] adjust CI timeout, use TIMEOUT variable for setting fuzzers timeout --- docker/test/libfuzzer/Dockerfile | 2 -- tests/ci/ci_config.py | 2 +- tests/ci/libfuzzer_test_check.py | 3 +++ tests/fuzz/runner.py | 8 ++------ 4 files changed, 6 insertions(+), 9 deletions(-) diff --git a/docker/test/libfuzzer/Dockerfile b/docker/test/libfuzzer/Dockerfile index 3ffae0cd921..46e305c90ab 100644 --- a/docker/test/libfuzzer/Dockerfile +++ b/docker/test/libfuzzer/Dockerfile @@ -33,8 +33,6 @@ RUN apt-get update \ COPY requirements.txt / RUN pip3 install --no-cache-dir -r /requirements.txt -ENV FUZZER_ARGS="-max_total_time=60" - SHELL ["/bin/bash", "-c"] # docker run --network=host --volume :/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/libfuzzer diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index b4b7dbee59c..80da822652f 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -530,7 +530,7 @@ class CI: JobNames.LIBFUZZER_TEST: JobConfig( required_builds=[BuildNames.FUZZERS], run_by_labels=[Tags.libFuzzer], - timeout=10800, + timeout=5400, run_command='libfuzzer_test_check.py "$CHECK_NAME"', runner_type=Runners.FUNC_TESTER, ), diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index 379d681cb3e..d0936eb2323 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -22,6 +22,7 @@ from stopwatch import Stopwatch from tee_popen import TeePopen NO_CHANGES_MSG = "Nothing to run" +TIMEOUT = 60 s3 = S3Helper() @@ -264,6 +265,8 @@ def main(): check_name, run_by_hash_num, run_by_hash_total ) + additional_envs.append(f"TIMEOUT={TIMEOUT}") + ci_logs_credentials = CiLogsCredentials(Path(temp_path) / "export-logs-config.sh") ci_logs_args = ci_logs_credentials.get_docker_arguments( pr_info, stopwatch.start_time_str, check_name diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index af73a989ec3..0880940aabd 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -9,7 +9,7 @@ import subprocess from pathlib import Path DEBUGGER = os.getenv("DEBUGGER", "") -FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") +TIMEOUT = int(os.getenv("TIMEOUT", "0")) OUTPUT = "/test_output" @@ -150,11 +150,7 @@ def main(): subprocess.check_call("ls -al", shell=True) - timeout = 60 - - match = re.search(r"(^|\s+)-max_total_time=(\d+)($|\s)", FUZZER_ARGS) - if match: - timeout = int(match.group(2)) + timeout = 30 if TIMEOUT == 0 else TIMEOUT with Path() as current: for fuzzer in current.iterdir(): From e2d64ea30254ce7e126c4442fe393429cfbd1c21 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 4 Nov 2024 03:37:46 +0000 Subject: [PATCH 259/353] fix style --- tests/fuzz/runner.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/fuzz/runner.py b/tests/fuzz/runner.py index 0880940aabd..f4c66e00117 100644 --- a/tests/fuzz/runner.py +++ b/tests/fuzz/runner.py @@ -4,7 +4,6 
@@ import configparser import datetime import logging import os -import re import subprocess from pathlib import Path From a6c98a4a7f6c650c84dc750972176427a6e8c479 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 4 Nov 2024 05:17:46 +0000 Subject: [PATCH 260/353] take some changes from private --- tests/ci/s3_helper.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tests/ci/s3_helper.py b/tests/ci/s3_helper.py index 46c206f0540..ced6d29e5c7 100644 --- a/tests/ci/s3_helper.py +++ b/tests/ci/s3_helper.py @@ -322,17 +322,23 @@ class S3Helper: return result def list_prefix_non_recursive( - self, s3_prefix_path: str, bucket: str = S3_BUILDS_BUCKET + self, + s3_prefix_path: str, + bucket: str = S3_BUILDS_BUCKET, + only_dirs: bool = False, ) -> List[str]: paginator = self.client.get_paginator("list_objects_v2") - pages = paginator.paginate(Bucket=bucket, Prefix=s3_prefix_path) + pages = paginator.paginate( + Bucket=bucket, Prefix=s3_prefix_path, Delimiter="/" + ) result = [] for page in pages: - if "Contents" in page: + if not only_dirs and "Contents" in page: for obj in page["Contents"]: - if "/" not in obj["Key"][len(s3_prefix_path) + 1 :]: - result.append(obj["Key"]) - + result.append(obj["Key"]) + if "CommonPrefixes" in page: + for obj in page["CommonPrefixes"]: + result.append(obj["Prefix"]) return result def url_if_exists(self, key: str, bucket: str = S3_BUILDS_BUCKET) -> str: From 94c8e6e6c201194fc6eea0784e9200fdf5d639a4 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 4 Nov 2024 05:31:15 +0000 Subject: [PATCH 261/353] Automatic style fix --- tests/ci/s3_helper.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/ci/s3_helper.py b/tests/ci/s3_helper.py index ced6d29e5c7..d0aa034258a 100644 --- a/tests/ci/s3_helper.py +++ b/tests/ci/s3_helper.py @@ -328,9 +328,7 @@ class S3Helper: only_dirs: bool = False, ) -> List[str]: paginator = self.client.get_paginator("list_objects_v2") - pages = paginator.paginate( - Bucket=bucket, Prefix=s3_prefix_path, Delimiter="/" - ) + pages = paginator.paginate(Bucket=bucket, Prefix=s3_prefix_path, Delimiter="/") result = [] for page in pages: if not only_dirs and "Contents" in page: From 12c21dc7df4ea2a538a1c59bfa7eb05dd76df08d Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 4 Nov 2024 09:00:01 +0000 Subject: [PATCH 262/353] Minor fixups --- contrib/SimSIMD | 2 +- .../0_stateless/02354_vector_search_expansion_search.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/SimSIMD b/contrib/SimSIMD index c03d065a766..ee3c9c9c00b 160000 --- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit c03d065a7661004a9a18fe52753efafa170c67f9 +Subproject commit ee3c9c9c00b51645f62a1a9e99611b78c0052a21 diff --git a/tests/queries/0_stateless/02354_vector_search_expansion_search.sql b/tests/queries/0_stateless/02354_vector_search_expansion_search.sql index f0cd5374be7..427148b829f 100644 --- a/tests/queries/0_stateless/02354_vector_search_expansion_search.sql +++ b/tests/queries/0_stateless/02354_vector_search_expansion_search.sql @@ -1,4 +1,4 @@ --- Tags: no-fasttest, long, no-asan, no-asan, no-ubsan, no-debug +-- Tags: no-fasttest, long, no-asan, no-ubsan, no-debug -- ^^ Disable test for slow builds: generating data takes time but a sufficiently large data set -- is necessary for different hnsw_candidate_list_size_for_search settings to make a difference From a37c1134b99e75df1df7320c1cad6420d2014a04 Mon Sep 17 00:00:00 2001 From: divanik 
Date: Mon, 4 Nov 2024 12:32:14 +0000 Subject: [PATCH 263/353] Resolve issues --- src/Storages/ObjectStorage/StorageObjectStorage.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index a72fd16abc2..fd2fe0400bb 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -102,7 +102,7 @@ StorageObjectStorage::StorageObjectStorage( } else { - tryLogCurrentException(__PRETTY_FUNCTION__); + tryLogCurrentException(log); } } From c3471ef20d5a3c375d632bd600438d555cd51595 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 4 Nov 2024 13:33:34 +0100 Subject: [PATCH 264/353] Update AccessControl.cpp --- src/Access/AccessControl.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Access/AccessControl.cpp b/src/Access/AccessControl.cpp index 9b3b8d2a977..647fb238d48 100644 --- a/src/Access/AccessControl.cpp +++ b/src/Access/AccessControl.cpp @@ -608,7 +608,7 @@ AuthResult AccessControl::authenticate(const Credentials & credentials, const Po } catch (...) { - tryLogCurrentException(getLogger(), "from: " + address.toString() + ", user: " + credentials.getUserName() + ": Authentication failed", LogsLevel::information); + tryLogCurrentException(getLogger(), "from: " + address.toString() + ", user: " + credentials.getUserName() + ": Authentication failed", LogsLevel::debug); WriteBufferFromOwnString message; message << credentials.getUserName() << ": Authentication failed: password is incorrect, or there is no user with such name."; From 24a7e0f4ee52e47cadd00a41bff80eb3ac614960 Mon Sep 17 00:00:00 2001 From: Azat Khuzhin Date: Mon, 4 Nov 2024 13:44:36 +0100 Subject: [PATCH 265/353] Fix missing cluster startup for test_quorum_inserts::test_insert_quorum_with_keeper_fail def test_insert_quorum_with_keeper_loss_connection(): > zero.query( "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_fail ON CLUSTER cluster" ) def query( > return self.client.query( E AttributeError: 'NoneType' object has no attribute 'query' CI: https://s3.amazonaws.com/clickhouse-test-reports/71406/8b3ce129456a1f85839a48538780639e2e3c3020/integration_tests__asan__old_analyzer__[6_6]//home/ubuntu/actions-runner/_work/_temp/test/output_dir/integration_run_parallel3_0.log Signed-off-by: Azat Khuzhin --- tests/integration/test_quorum_inserts/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index eefc4882e8e..66f96d61b3e 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -366,7 +366,7 @@ def test_insert_quorum_with_ttl(started_cluster): zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") -def test_insert_quorum_with_keeper_loss_connection(): +def test_insert_quorum_with_keeper_loss_connection(started_cluster): zero.query( "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_fail ON CLUSTER cluster" ) From 097b45bf5af2d32c4a816c9208c65dab60f2da18 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Mon, 4 Nov 2024 13:56:40 +0000 Subject: [PATCH 266/353] small refactoring --- tests/ci/libfuzzer_test_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index d0936eb2323..2616fbe3f5d 100644 --- a/tests/ci/libfuzzer_test_check.py +++ 
b/tests/ci/libfuzzer_test_check.py @@ -21,8 +21,8 @@ from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen -NO_CHANGES_MSG = "Nothing to run" TIMEOUT = 60 +NO_CHANGES_MSG = "Nothing to run" s3 = S3Helper() From 1d888bc1ebc762faf1136d6910fef8641216fb6e Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Mon, 4 Nov 2024 16:40:26 +0100 Subject: [PATCH 267/353] Fix wrong change --- src/Interpreters/Cache/FileSegment.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index 080b54feb06..9c8f041fabf 100644 --- a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -139,7 +139,7 @@ FileSegmentGuard::Lock FileSegment::lock() const void FileSegment::setDownloadState(State state, const FileSegmentGuard::Lock & lock) { - if (isCompleted(false)) + if (isCompleted(false) && state != State::DETACHED) { throw Exception( ErrorCodes::LOGICAL_ERROR, From 929da1411e5357d7a99210a4b6f617a2f66f933e Mon Sep 17 00:00:00 2001 From: vdimir Date: Mon, 4 Nov 2024 16:06:20 +0000 Subject: [PATCH 268/353] Fix crash in mongodb table function --- src/TableFunctions/TableFunctionMongoDB.cpp | 10 +++++++--- .../TableFunctionMongoDBPocoLegacy.cpp | 8 +++++--- .../03261_mongodb_argumetns_crash.reference | 0 .../0_stateless/03261_mongodb_argumetns_crash.sql | 13 +++++++++++++ 4 files changed, 25 insertions(+), 6 deletions(-) create mode 100644 tests/queries/0_stateless/03261_mongodb_argumetns_crash.reference create mode 100644 tests/queries/0_stateless/03261_mongodb_argumetns_crash.sql diff --git a/src/TableFunctions/TableFunctionMongoDB.cpp b/src/TableFunctions/TableFunctionMongoDB.cpp index e13427c1557..966ce858875 100644 --- a/src/TableFunctions/TableFunctionMongoDB.cpp +++ b/src/TableFunctions/TableFunctionMongoDB.cpp @@ -118,14 +118,18 @@ void TableFunctionMongoDB::parseArguments(const ASTPtr & ast_function, ContextPt if (const auto * ast_func = typeid_cast(args[i].get())) { const auto * args_expr = assert_cast(ast_func->arguments.get()); - auto function_args = args_expr->children; - if (function_args.size() != 2) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument"); + const auto & function_args = args_expr->children; + if (function_args.size() != 2 || ast_func->name != "equals" || function_args[0]->as()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument, got {}", ast_func->formatForErrorMessage()); auto arg_name = function_args[0]->as()->name(); if (arg_name == "structure") structure = checkAndGetLiteralArgument(function_args[1], "structure"); + else if (arg_name == "options") + main_arguments.push_back(function_args[1]); + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument, got {}", ast_func->formatForErrorMessage()); } else if (i == 2) { diff --git a/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp b/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp index dc1df7fcad8..70b28ddfaf0 100644 --- a/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp +++ b/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp @@ -98,9 +98,9 @@ void TableFunctionMongoDBPocoLegacy::parseArguments(const ASTPtr & ast_function, if (const auto * ast_func = typeid_cast(args[i].get())) { const auto * args_expr = assert_cast(ast_func->arguments.get()); - auto function_args = args_expr->children; - if (function_args.size() != 2) 
- throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument"); + const auto & function_args = args_expr->children; + if (function_args.size() != 2 || ast_func->name != "equals" || function_args[0]->as()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument, got {}", ast_func->formatForErrorMessage()); auto arg_name = function_args[0]->as()->name(); @@ -108,6 +108,8 @@ void TableFunctionMongoDBPocoLegacy::parseArguments(const ASTPtr & ast_function, structure = checkAndGetLiteralArgument(function_args[1], "structure"); else if (arg_name == "options") main_arguments.push_back(function_args[1]); + else + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument, got {}", ast_func->formatForErrorMessage()); } else if (i == 5) { diff --git a/tests/queries/0_stateless/03261_mongodb_argumetns_crash.reference b/tests/queries/0_stateless/03261_mongodb_argumetns_crash.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03261_mongodb_argumetns_crash.sql b/tests/queries/0_stateless/03261_mongodb_argumetns_crash.sql new file mode 100644 index 00000000000..830d3995bd5 --- /dev/null +++ b/tests/queries/0_stateless/03261_mongodb_argumetns_crash.sql @@ -0,0 +1,13 @@ +-- Tags: no-fasttest + +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', NULL, 'my_collection', 'test_user', 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', NULL, 'test_user', 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', NULL, 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', NULL, 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', NULL); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', materialize(1) + 1); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', 'x Int32', NULL); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', NULL, 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', NULL, 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb(NULL, 'test', 'my_collection', 'test_user', 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } + From 24017bb7add084f38022c2cf1a1fa9a96788ebc9 Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Mon, 4 Nov 2024 17:31:39 +0100 Subject: [PATCH 269/353] add parallel_replicas_prefer_local_join --- ...eplicas_join_algo_and_analyzer_4.reference | 58 +++++++++++++++++++ ...allel_replicas_join_algo_and_analyzer_4.sh | 34 ++++++----- 2 files changed, 77 insertions(+), 15 deletions(-) diff --git a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference index 8464317f7e6..52c4e872f84 
100644 --- a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference +++ b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference @@ -56,3 +56,61 @@ SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` GROUP 500030000 500040000 SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` ALL LEFT JOIN (SELECT `__table4`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table4`) AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` GROUP BY `__table1`.`item_id` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` GROUP BY `__table1`.`item_id` +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` GLOBAL ALL LEFT JOIN `_data_4551627371769371400_3093038500622465792` AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` GROUP BY `__table1`.`item_id` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` +4999950000 +4999950000 +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` +SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` GROUP BY `__table1`.`item_id` +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +499950000 +499960000 +499970000 +499980000 +499990000 +500000000 +500010000 +500020000 +500030000 +500040000 +SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` GLOBAL ALL LEFT JOIN `_data_4551627371769371400_3093038500622465792` AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC diff --git a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh index 0e1f07b6ac5..18a2fbd317b 100755 --- a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh +++ b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh @@ -75,23 +75,27 @@ query3=" ORDER BY price_sold " -for prefer_local_plan in {0..1}; do - for query in "${query1}" "${query2}" "${query3}"; do - for enable_parallel_replicas in {0..1}; do - ${CLICKHOUSE_CLIENT} --query=" - set enable_analyzer=1; - set parallel_replicas_local_plan=${prefer_local_plan}; - set allow_experimental_parallel_reading_from_replicas=${enable_parallel_replicas}, 
cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1; +for parallel_replicas_prefer_local_join in 1 0; do + for prefer_local_plan in {0..1}; do + for query in "${query1}" "${query2}" "${query3}"; do + for enable_parallel_replicas in {0..1}; do + ${CLICKHOUSE_CLIENT} --query=" + set enable_analyzer=1; + set parallel_replicas_prefer_local_join=${parallel_replicas_prefer_local_join}; + set parallel_replicas_local_plan=${prefer_local_plan}; + set allow_experimental_parallel_reading_from_replicas=${enable_parallel_replicas}, cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1; - ${query}; + --SELECT '----- enable_parallel_replicas=$enable_parallel_replicas prefer_local_plan=$prefer_local_plan parallel_replicas_prefer_local_join=$parallel_replicas_prefer_local_join -----'; + ${query}; - SELECT replaceRegexpAll(explain, '.*Query: (.*) Replicas:.*', '\\1') - FROM - ( - EXPLAIN actions=1 ${query} - ) - WHERE explain LIKE '%ParallelReplicas%'; - " + SELECT replaceRegexpAll(explain, '.*Query: (.*) Replicas:.*', '\\1') + FROM + ( + EXPLAIN actions=1 ${query} + ) + WHERE explain LIKE '%ParallelReplicas%'; + " + done done done done From 6b4d44be2894bf99897fca011817c9d77bfbabdf Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Mon, 4 Nov 2024 16:42:06 +0000 Subject: [PATCH 270/353] Update version_date.tsv and changelogs after v24.8.6.70-lts --- SECURITY.md | 3 +- docker/keeper/Dockerfile | 2 +- docker/server/Dockerfile.alpine | 2 +- docker/server/Dockerfile.ubuntu | 2 +- docs/changelogs/v24.8.6.70-lts.md | 50 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 2 ++ 6 files changed, 57 insertions(+), 4 deletions(-) create mode 100644 docs/changelogs/v24.8.6.70-lts.md diff --git a/SECURITY.md b/SECURITY.md index db302da8ecd..1b0648dc489 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -14,9 +14,10 @@ The following versions of ClickHouse server are currently supported with securit | Version | Supported | |:-|:-| +| 24.10 | ✔️ | | 24.9 | ✔️ | | 24.8 | ✔️ | -| 24.7 | ✔️ | +| 24.7 | ❌ | | 24.6 | ❌ | | 24.5 | ❌ | | 24.4 | ❌ | diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index dfe6a420260..bc76bdbb619 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.9.2.42" +ARG VERSION="24.10.1.2812" ARG PACKAGES="clickhouse-keeper" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index 991c25ad142..93acf1a5773 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -35,7 +35,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="24.9.2.42" +ARG VERSION="24.10.1.2812" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" ARG DIRECT_DOWNLOAD_URLS="" diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 5dc88b49e31..506a627b11c 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb 
[signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="24.9.2.42" +ARG VERSION="24.10.1.2812" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" #docker-official-library:off diff --git a/docs/changelogs/v24.8.6.70-lts.md b/docs/changelogs/v24.8.6.70-lts.md new file mode 100644 index 00000000000..81fa4db1458 --- /dev/null +++ b/docs/changelogs/v24.8.6.70-lts.md @@ -0,0 +1,50 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.8.6.70-lts (ddb8c219771) FIXME as compared to v24.8.5.115-lts (8c4cb00a384) + +#### Backward Incompatible Change +* Backported in [#71359](https://github.com/ClickHouse/ClickHouse/issues/71359): Fix possible error `No such file or directory` due to unescaped special symbols in files for JSON subcolumns. [#71182](https://github.com/ClickHouse/ClickHouse/pull/71182) ([Pavel Kruglov](https://github.com/Avogar)). + +#### Improvement +* Backported in [#70680](https://github.com/ClickHouse/ClickHouse/issues/70680): Don't do validation when synchronizing user_directories from keeper. [#70644](https://github.com/ClickHouse/ClickHouse/pull/70644) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71395](https://github.com/ClickHouse/ClickHouse/issues/71395): Do not call the object storage API when listing directories, as this may be cost-inefficient. Instead, store the list of filenames in the memory. The trade-offs are increased initial load time and memory required to store filenames. [#70823](https://github.com/ClickHouse/ClickHouse/pull/70823) ([Julia Kartseva](https://github.com/jkartseva)). +* Backported in [#71287](https://github.com/ClickHouse/ClickHouse/issues/71287): Reduce the number of object storage HEAD API requests in the plain_rewritable disk. [#70915](https://github.com/ClickHouse/ClickHouse/pull/70915) ([Julia Kartseva](https://github.com/jkartseva)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#70934](https://github.com/ClickHouse/ClickHouse/issues/70934): Fix incorrect JOIN ON section optimization in case of `IS NULL` check under any other function (like `NOT`) that may lead to wrong results. Closes [#67915](https://github.com/ClickHouse/ClickHouse/issues/67915). [#68049](https://github.com/ClickHouse/ClickHouse/pull/68049) ([Vladimir Cherkasov](https://github.com/vdimir)). +* Backported in [#70735](https://github.com/ClickHouse/ClickHouse/issues/70735): Fix unexpected exception when passing empty tuple in array. This fixes [#68618](https://github.com/ClickHouse/ClickHouse/issues/68618). [#68848](https://github.com/ClickHouse/ClickHouse/pull/68848) ([Amos Bird](https://github.com/amosbird)). +* Backported in [#71138](https://github.com/ClickHouse/ClickHouse/issues/71138): Fix propogating structure argument in s3Cluster. Previously the `DEFAULT` expression of the column could be lost when sending the query to the replicas in s3Cluster. [#69147](https://github.com/ClickHouse/ClickHouse/pull/69147) ([Pavel Kruglov](https://github.com/Avogar)). +* Backported in [#70561](https://github.com/ClickHouse/ClickHouse/issues/70561): Fix `getSubcolumn` with `LowCardinality` columns by overriding `useDefaultImplementationForLowCardinalityColumns` to return `true`. [#69831](https://github.com/ClickHouse/ClickHouse/pull/69831) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)). 
+* Backported in [#70903](https://github.com/ClickHouse/ClickHouse/issues/70903): Avoid reusing columns among different named tuples when evaluating `tuple` functions. This fixes [#70022](https://github.com/ClickHouse/ClickHouse/issues/70022). [#70103](https://github.com/ClickHouse/ClickHouse/pull/70103) ([Amos Bird](https://github.com/amosbird)). +* Backported in [#70623](https://github.com/ClickHouse/ClickHouse/issues/70623): Fix server segfault on creating a materialized view with two selects and an `INTERSECT`, e.g. `CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1);`. [#70264](https://github.com/ClickHouse/ClickHouse/pull/70264) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#70688](https://github.com/ClickHouse/ClickHouse/issues/70688): Fix possible use-after-free in `SYSTEM DROP FORMAT SCHEMA CACHE FOR Protobuf`. [#70358](https://github.com/ClickHouse/ClickHouse/pull/70358) ([Azat Khuzhin](https://github.com/azat)). +* Backported in [#70494](https://github.com/ClickHouse/ClickHouse/issues/70494): Fix crash during GROUP BY JSON sub-object subcolumn. [#70374](https://github.com/ClickHouse/ClickHouse/pull/70374) ([Pavel Kruglov](https://github.com/Avogar)). +* Backported in [#70482](https://github.com/ClickHouse/ClickHouse/issues/70482): Don't prefetch parts for vertical merges if part has no rows. [#70452](https://github.com/ClickHouse/ClickHouse/pull/70452) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#70556](https://github.com/ClickHouse/ClickHouse/issues/70556): Fix crash in WHERE with lambda functions. [#70464](https://github.com/ClickHouse/ClickHouse/pull/70464) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#70878](https://github.com/ClickHouse/ClickHouse/issues/70878): Fix table creation with `CREATE ... AS table_function()` with database `Replicated` and unavailable table function source on secondary replica. [#70511](https://github.com/ClickHouse/ClickHouse/pull/70511) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#70575](https://github.com/ClickHouse/ClickHouse/issues/70575): Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#71052](https://github.com/ClickHouse/ClickHouse/issues/71052): Ignore frozen_metadata.txt while traversing shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Backported in [#70651](https://github.com/ClickHouse/ClickHouse/issues/70651): Fix creation of stateful window functions on misaligned memory. [#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#70757](https://github.com/ClickHouse/ClickHouse/issues/70757): Fixed rare crashes in `SELECT`-s and merges after adding a column of `Array` type with non-empty default expression. [#70695](https://github.com/ClickHouse/ClickHouse/pull/70695) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#70763](https://github.com/ClickHouse/ClickHouse/issues/70763): Fix infinite recursion when infering a proto schema with skip unsupported fields enabled. [#70697](https://github.com/ClickHouse/ClickHouse/pull/70697) ([Raúl Marín](https://github.com/Algunenano)). 
+* Backported in [#71118](https://github.com/ClickHouse/ClickHouse/issues/71118): `GroupArraySortedData` uses a PODArray with non-POD elements, manually calling constructors and destructors for the elements as needed. But it wasn't careful enough: in two places it forgot to call destructor, in one place it left elements uninitialized if an exception is thrown when deserializing previous elements. Then `GroupArraySortedData`'s destructor called destructors on uninitialized elements and crashed: ``` 2024.10.17 22:58:23.523790 [ 5233 ] {} BaseDaemon: ########## Short fault info ############ 2024.10.17 22:58:23.523834 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) Received signal 11 2024.10.17 22:58:23.523862 [ 5233 ] {} BaseDaemon: Signal description: Segmentation fault 2024.10.17 22:58:23.523883 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 2024.10.17 22:58:23.523908 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.523936 [ 5233 ] {} BaseDaemon: ######################################## 2024.10.17 22:58:23.523959 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) (query_id: 6c8a33a2-f45a-4a3b-bd71-ded6a1c9ccd3::202410_534066_534078_2) (query: ) Received signal Segmentation fault (11) 2024.10.17 22:58:23.523977 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 2024.10.17 22:58:23.523993 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.524817 [ 5233 ] {} BaseDaemon: 0. signalHandler(int, siginfo_t*, void*) @ 0x000000000c6f8308 2024.10.17 22:58:23.524917 [ 5233 ] {} BaseDaemon: 1. ? @ 0x0000ffffb7701850 2024.10.17 22:58:23.524962 [ 5233 ] {} BaseDaemon: 2. DB::Field::~Field() @ 0x0000000007c84855 2024.10.17 22:58:23.525012 [ 5233 ] {} BaseDaemon: 3. DB::Field::~Field() @ 0x0000000007c848a0 2024.10.17 22:58:23.526626 [ 5233 ] {} BaseDaemon: 4. DB::IAggregateFunctionDataHelper, DB::(anonymous namespace)::GroupArraySorted, DB::Field>>::destroy(char*) const (.5a6a451027f732f9fd91c13f4a13200c) @ 0x000000000cb9e84c 2024.10.17 22:58:23.527322 [ 5233 ] {} BaseDaemon: 5. 
DB::SerializationAggregateFunction::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const @ 0x000000000f7d10d0 2024.10.17 22:58:23.528470 [ 5233 ] {} BaseDaemon: 6. DB::ISerialization::deserializeBinaryBulkWithMultipleStreams(COW::immutable_ptr&, unsigned long, DB::ISerialization::DeserializeBinaryBulkSettings&, std::shared_ptr&, std::unordered_map::immutable_ptr, std::hash, std::equal_to, std::allocator::immutable_ptr>>>*) const @ 0x000000000f7cba20 2024.10.17 22:58:23.529213 [ 5233 ] {} BaseDaemon: 7. DB::MergeTreeReaderCompact::readData(DB::NameAndTypePair const&, COW::immutable_ptr&, unsigned long, std::function const&) @ 0x000000001120bbfc 2024.10.17 22:58:23.529277 [ 5233 ] {} BaseDaemon: 8. DB::MergeTreeReaderCompactSingleBuffer::readRows(unsigned long, unsigned long, bool, unsigned long, std::vector::immutable_ptr, std::allocator::immutable_ptr>>&) @ 0x000000001120fab0 2024.10.17 22:58:23.529319 [ 5233 ] {} BaseDaemon: 9. DB::MergeTreeSequentialSource::generate() @ 0x000000001121bf50 2024.10.17 22:58:23.529346 [ 5233 ] {} BaseDaemon: 10. DB::ISource::tryGenerate() @ 0x00000000116f520c 2024.10.17 22:58:23.529653 [ 5233 ] {} BaseDaemon: 11. DB::ISource::work() @ 0x00000000116f4c74 2024.10.17 22:58:23.529679 [ 5233 ] {} BaseDaemon: 12. DB::ExecutionThreadContext::executeTask() @ 0x000000001170a150 2024.10.17 22:58:23.529733 [ 5233 ] {} BaseDaemon: 13. DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x00000000117009f0 2024.10.17 22:58:23.529763 [ 5233 ] {} BaseDaemon: 14. DB::PipelineExecutor::executeStep(std::atomic*) @ 0x0000000011700574 2024.10.17 22:58:23.530089 [ 5233 ] {} BaseDaemon: 15. DB::PullingPipelineExecutor::pull(DB::Chunk&) @ 0x000000001170e364 2024.10.17 22:58:23.530277 [ 5233 ] {} BaseDaemon: 16. DB::PullingPipelineExecutor::pull(DB::Block&) @ 0x000000001170e4fc 2024.10.17 22:58:23.530295 [ 5233 ] {} BaseDaemon: 17. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() @ 0x0000000011074328 2024.10.17 22:58:23.530318 [ 5233 ] {} BaseDaemon: 18. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::execute() @ 0x000000001107428c 2024.10.17 22:58:23.530339 [ 5233 ] {} BaseDaemon: 19. DB::MergeTask::execute() @ 0x0000000011077df0 2024.10.17 22:58:23.530362 [ 5233 ] {} BaseDaemon: 20. DB::SharedMergeMutateTaskBase::executeStep() @ 0x0000000011435a3c 2024.10.17 22:58:23.530384 [ 5233 ] {} BaseDaemon: 21. DB::MergeTreeBackgroundExecutor::threadFunction() @ 0x000000001108b234 2024.10.17 22:58:23.530410 [ 5233 ] {} BaseDaemon: 22. ThreadPoolImpl>::worker(std::__list_iterator, void*>) @ 0x000000000c52e264 2024.10.17 22:58:23.530448 [ 5233 ] {} BaseDaemon: 23. void std::__function::__policy_invoker::__call_impl::ThreadFromGlobalPoolImpl>::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>(void&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x000000000c531dd0 2024.10.17 22:58:23.530476 [ 5233 ] {} BaseDaemon: 24. void* std::__thread_proxy[abi:v15000]>, void ThreadPoolImpl::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>>(void*) @ 0x000000000c530a80 2024.10.17 22:58:23.530514 [ 5233 ] {} BaseDaemon: 25. ? @ 0x000000000007d5c8 2024.10.17 22:58:23.530534 [ 5233 ] {} BaseDaemon: 26. ? @ 0x00000000000e5edc 2024.10.17 22:58:23.530551 [ 5233 ] {} BaseDaemon: Integrity check of the executable skipped because the reference checksum could not be read. 
2024.10.17 22:58:23.531083 [ 5233 ] {} BaseDaemon: Report this error to https://github.com/ClickHouse/ClickHouse/issues 2024.10.17 22:58:23.531294 [ 5233 ] {} BaseDaemon: Changed settings: max_insert_threads = 4, max_threads = 42, use_hedged_requests = false, distributed_foreground_insert = true, alter_sync = 0, enable_memory_bound_merging_of_aggregation_results = true, cluster_for_parallel_replicas = 'default', do_not_merge_across_partitions_select_final = false, log_queries = true, log_queries_probability = 1., max_http_get_redirects = 10, enable_deflate_qpl_codec = false, enable_zstd_qat_codec = false, query_profiler_real_time_period_ns = 0, query_profiler_cpu_time_period_ns = 0, max_bytes_before_external_group_by = 90194313216, max_bytes_before_external_sort = 90194313216, max_memory_usage = 180388626432, backup_restore_keeper_retry_max_backoff_ms = 60000, cancel_http_readonly_queries_on_client_close = true, max_table_size_to_drop = 1000000000000, max_partition_size_to_drop = 1000000000000, default_table_engine = 'ReplicatedMergeTree', mutations_sync = 0, optimize_trivial_insert_select = false, database_replicated_allow_only_replicated_engine = true, cloud_mode = true, cloud_mode_engine = 2, distributed_ddl_output_mode = 'none_only_active', distributed_ddl_entry_format_version = 6, async_insert_max_data_size = 10485760, async_insert_busy_timeout_max_ms = 1000, enable_filesystem_cache_on_write_operations = true, load_marks_asynchronously = true, allow_prefetched_read_pool_for_remote_filesystem = true, filesystem_prefetch_max_memory_usage = 18038862643, filesystem_prefetches_limit = 200, compatibility = '24.6', insert_keeper_max_retries = 20, allow_experimental_materialized_postgresql_table = false, date_time_input_format = 'best_effort' ```. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)). +* Backported in [#70896](https://github.com/ClickHouse/ClickHouse/issues/70896): Disable enable_named_columns_in_function_tuple by default. [#70833](https://github.com/ClickHouse/ClickHouse/pull/70833) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#70994](https://github.com/ClickHouse/ClickHouse/issues/70994): Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#71210](https://github.com/ClickHouse/ClickHouse/issues/71210): Fix logical error in `StorageS3Queue` "Cannot create a persistent node in /processed since it already exists". [#70984](https://github.com/ClickHouse/ClickHouse/pull/70984) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#71248](https://github.com/ClickHouse/ClickHouse/issues/71248): Fixed named sessions not being closed and hanging on forever under certain circumstances. [#70998](https://github.com/ClickHouse/ClickHouse/pull/70998) ([Márcio Martins](https://github.com/marcio-absmartly)). +* Backported in [#71375](https://github.com/ClickHouse/ClickHouse/issues/71375): Add try/catch to data parts destructors to avoid terminate. [#71364](https://github.com/ClickHouse/ClickHouse/pull/71364) ([alesapin](https://github.com/alesapin)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#71026](https://github.com/ClickHouse/ClickHouse/issues/71026): Fix dropping of file cache in CHECK query in case of enabled transactions. 
[#69256](https://github.com/ClickHouse/ClickHouse/pull/69256) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#70388](https://github.com/ClickHouse/ClickHouse/issues/70388): CI: Enable Integration Tests for backport PRs. [#70329](https://github.com/ClickHouse/ClickHouse/pull/70329) ([Max Kainov](https://github.com/maxknv)). +* Backported in [#70701](https://github.com/ClickHouse/ClickHouse/issues/70701): Fix order in 03249_dynamic_alter_consistency. [#70453](https://github.com/ClickHouse/ClickHouse/pull/70453) ([Alexander Gololobov](https://github.com/davenger)). +* Backported in [#70542](https://github.com/ClickHouse/ClickHouse/issues/70542): Remove slow poll() logs in keeper. [#70508](https://github.com/ClickHouse/ClickHouse/pull/70508) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#70804](https://github.com/ClickHouse/ClickHouse/issues/70804): When the `PR Check` status is set, it's a valid RunConfig job failure. [#70643](https://github.com/ClickHouse/ClickHouse/pull/70643) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Backported in [#71229](https://github.com/ClickHouse/ClickHouse/issues/71229): Maybe not GWPAsan by default. [#71174](https://github.com/ClickHouse/ClickHouse/pull/71174) ([Antonio Andelic](https://github.com/antonio2368)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 10c55aa4bf5..cf28db5d49a 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,5 +1,7 @@ +v24.10.1.2812-stable 2024-11-01 v24.9.2.42-stable 2024-10-03 v24.9.1.3278-stable 2024-09-26 +v24.8.6.70-lts 2024-11-04 v24.8.5.115-lts 2024-10-08 v24.8.4.13-lts 2024-09-06 v24.8.3.59-lts 2024-09-03 From de751c7e4d3e6445348cd6e5d92a09dc7c41e0ab Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 4 Nov 2024 18:25:27 +0100 Subject: [PATCH 271/353] Update AccessControl.cpp --- src/Access/AccessControl.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Access/AccessControl.cpp b/src/Access/AccessControl.cpp index 647fb238d48..9b3b8d2a977 100644 --- a/src/Access/AccessControl.cpp +++ b/src/Access/AccessControl.cpp @@ -608,7 +608,7 @@ AuthResult AccessControl::authenticate(const Credentials & credentials, const Po } catch (...) 
{ - tryLogCurrentException(getLogger(), "from: " + address.toString() + ", user: " + credentials.getUserName() + ": Authentication failed", LogsLevel::debug); + tryLogCurrentException(getLogger(), "from: " + address.toString() + ", user: " + credentials.getUserName() + ": Authentication failed", LogsLevel::information); WriteBufferFromOwnString message; message << credentials.getUserName() << ": Authentication failed: password is incorrect, or there is no user with such name."; From a612e9248c44bd41db761eb88e152a7d2ce6218c Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Mon, 4 Nov 2024 18:26:02 +0100 Subject: [PATCH 272/353] Update TCPHandler.cpp --- src/Server/TCPHandler.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index ea5507c3155..4f54918445f 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -1614,7 +1614,7 @@ void TCPHandler::receiveHello() if (e.code() != DB::ErrorCodes::AUTHENTICATION_FAILED) throw; - tryLogCurrentException(log, "SSL authentication failed, falling back to password authentication", LogsLevel::debug); + tryLogCurrentException(log, "SSL authentication failed, falling back to password authentication", LogsLevel::information); /// ^^ Log at debug level instead of default error level as authentication failures are not an unusual event. } } From c1ce74f52f9b5b53db7bcf43aa0a1a47c9dd9859 Mon Sep 17 00:00:00 2001 From: MikhailBurdukov <102754618+MikhailBurdukov@users.noreply.github.com> Date: Mon, 4 Nov 2024 21:40:59 +0300 Subject: [PATCH 273/353] Update tests/integration/test_named_collections/test.py Co-authored-by: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> --- tests/integration/test_named_collections/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_named_collections/test.py b/tests/integration/test_named_collections/test.py index bd04bb9e3c8..e2fa776a8f0 100644 --- a/tests/integration/test_named_collections/test.py +++ b/tests/integration/test_named_collections/test.py @@ -803,7 +803,7 @@ def test_keeper_storage_remove_on_cluster(cluster, ignore, expected_raise): def test_name_escaping(cluster, instance_name): node = cluster.instances[instance_name] - node.query("DROP NAMED COLLECTION IF EXISTS test;") + node.query("DROP NAMED COLLECTION IF EXISTS `test_!strange/symbols!`;") node.query("CREATE NAMED COLLECTION `test_!strange/symbols!` AS key1=1, key2=2") node.restart_clickhouse() From f08379fa18eada67fd75005c256f70db6ec88677 Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 5 Nov 2024 01:58:23 +0000 Subject: [PATCH 274/353] attempt to fix irrelevant test --- tests/integration/test_quorum_inserts/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index eefc4882e8e..66f96d61b3e 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -366,7 +366,7 @@ def test_insert_quorum_with_ttl(started_cluster): zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") -def test_insert_quorum_with_keeper_loss_connection(): +def test_insert_quorum_with_keeper_loss_connection(started_cluster): zero.query( "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_fail ON CLUSTER cluster" ) From a35cc85a68c9356f5697fa22e057bf74a28ee5bb Mon Sep 17 00:00:00 2001 From: Yakov Olkhovskiy Date: Tue, 5 Nov 2024 04:07:09 +0000 
Subject: [PATCH 275/353] remove irrelevant changes --- tests/integration/test_quorum_inserts/test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index 66f96d61b3e..eefc4882e8e 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -366,7 +366,7 @@ def test_insert_quorum_with_ttl(started_cluster): zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") -def test_insert_quorum_with_keeper_loss_connection(started_cluster): +def test_insert_quorum_with_keeper_loss_connection(): zero.query( "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_fail ON CLUSTER cluster" ) From 3491c0c0e83c5f76c5d5de5097ce513436b4d010 Mon Sep 17 00:00:00 2001 From: Max Kainov Date: Fri, 1 Nov 2024 09:42:02 +0100 Subject: [PATCH 276/353] CI: Remove deprecated release script --- tests/ci/mark_release_ready.py | 3 +- tests/ci/release.py | 693 --------------------------------- 2 files changed, 2 insertions(+), 694 deletions(-) delete mode 100755 tests/ci/release.py diff --git a/tests/ci/mark_release_ready.py b/tests/ci/mark_release_ready.py index 7ffb3c9a89b..838961bd89f 100755 --- a/tests/ci/mark_release_ready.py +++ b/tests/ci/mark_release_ready.py @@ -9,9 +9,10 @@ from get_robot_token import get_best_robot_token from git_helper import commit as commit_arg from github_helper import GitHub from pr_info import PRInfo -from release import RELEASE_READY_STATUS from report import SUCCESS +RELEASE_READY_STATUS = "Ready for release" + def main(): parser = argparse.ArgumentParser( diff --git a/tests/ci/release.py b/tests/ci/release.py deleted file mode 100755 index ed9d60a5cad..00000000000 --- a/tests/ci/release.py +++ /dev/null @@ -1,693 +0,0 @@ -#!/usr/bin/env python3 - -""" -script to create releases for ClickHouse - -The `gh` CLI preferred over the PyGithub to have an easy way to rollback bad -release in command line by simple execution giving rollback commands - -On another hand, PyGithub is used for convenient getting commit's status from API - -To run this script on a freshly installed Ubuntu 22.04 system, it is enough to do the following commands: - -sudo apt install pip -pip install requests boto3 github PyGithub -sudo snap install gh -gh auth login -""" - - -import argparse -import json -import logging -import subprocess -from contextlib import contextmanager -from typing import Any, Final, Iterator, List, Optional, Tuple - -from ci_config import Labels -from git_helper import Git, commit, release_branch -from report import SUCCESS -from version_helper import ( - FILE_WITH_VERSION_PATH, - GENERATED_CONTRIBUTORS, - ClickHouseVersion, - VersionType, - get_abs_path, - get_version_from_repo, - update_cmake_version, - update_contributors, -) - -RELEASE_READY_STATUS = "Ready for release" - - -class Repo: - VALID = ("ssh", "https", "origin") - - def __init__(self, repo: str, protocol: str): - self._repo = repo - self._url = "" - self.url = protocol - - @property - def url(self) -> str: - return self._url - - @url.setter - def url(self, protocol: str) -> None: - if protocol == "ssh": - self._url = f"git@github.com:{self}.git" - elif protocol == "https": - self._url = f"https://github.com/{self}.git" - elif protocol == "origin": - self._url = protocol - else: - raise ValueError(f"protocol must be in {self.VALID}") - - def __str__(self): - return self._repo - - -class Release: - NEW = "new" # type: Final - PATCH = "patch" # 
type: Final - VALID_TYPE = (NEW, PATCH) # type: Final[Tuple[str, str]] - CMAKE_PATH = get_abs_path(FILE_WITH_VERSION_PATH) - CONTRIBUTORS_PATH = get_abs_path(GENERATED_CONTRIBUTORS) - - def __init__( - self, - repo: Repo, - release_commit: str, - release_type: str, - dry_run: bool, - with_stderr: bool, - ): - self.repo = repo - self._release_commit = "" - self.release_commit = release_commit - self.dry_run = dry_run - self.with_stderr = with_stderr - assert release_type in self.VALID_TYPE - self.release_type = release_type - self._git = Git() - self._version = get_version_from_repo(git=self._git) - self.release_version = self.version - self._release_branch = "" - self._version_new_tag = None # type: Optional[ClickHouseVersion] - self._rollback_stack = [] # type: List[str] - - def run( - self, cmd: str, cwd: Optional[str] = None, dry_run: bool = False, **kwargs: Any - ) -> str: - cwd_text = "" - if cwd: - cwd_text = f" (CWD='{cwd}')" - if dry_run: - logging.info("Would run command%s:\n %s", cwd_text, cmd) - return "" - if not self.with_stderr: - kwargs["stderr"] = subprocess.DEVNULL - - logging.info("Running command%s:\n %s", cwd_text, cmd) - return self._git.run(cmd, cwd, **kwargs) - - def set_release_info(self): - # Fetch release commit and tags in case they don't exist locally - self.run( - f"git fetch {self.repo.url} {self.release_commit} --no-recurse-submodules" - ) - self.run(f"git fetch {self.repo.url} --tags --no-recurse-submodules") - - # Get the actual version for the commit before check - with self._checkout(self.release_commit, True): - self.release_branch = f"{self.version.major}.{self.version.minor}" - self.release_version = get_version_from_repo(git=self._git) - self.release_version.with_description(self.get_stable_release_type()) - - self.read_version() - - def read_version(self): - self._git.update() - self.version = get_version_from_repo(git=self._git) - - def get_stable_release_type(self) -> str: - if self.version.is_lts: - return VersionType.LTS - return VersionType.STABLE - - def check_commit_release_ready(self): - per_page = 100 - page = 1 - while True: - statuses = json.loads( - self.run( - f"gh api 'repos/{self.repo}/commits/{self.release_commit}" - f"/statuses?per_page={per_page}&page={page}'" - ) - ) - - if not statuses: - break - - for status in statuses: - if status["context"] == RELEASE_READY_STATUS: - if not status["state"] == SUCCESS: - raise ValueError( - f"the status {RELEASE_READY_STATUS} is {status['state']}" - ", not success" - ) - - return - - page += 1 - - raise KeyError( - f"the status {RELEASE_READY_STATUS} " - f"is not found for commit {self.release_commit}" - ) - - def check_prerequisites(self): - """ - Check tooling installed in the system, `git` is checked by Git() init - """ - try: - self.run("gh auth status") - except subprocess.SubprocessError: - logging.error( - "The github-cli either not installed or not setup, please follow " - "the instructions on https://github.com/cli/cli#installation and " - "https://cli.github.com/manual/" - ) - raise - - if self.release_type == self.PATCH: - self.check_commit_release_ready() - - def do( - self, check_dirty: bool, check_run_from_master: bool, check_branch: bool - ) -> None: - self.check_prerequisites() - - if check_dirty: - logging.info("Checking if repo is clean") - try: - self.run("git diff HEAD --exit-code") - except subprocess.CalledProcessError: - logging.fatal("Repo contains uncommitted changes") - raise - - if check_run_from_master and self._git.branch != "master": - raise RuntimeError("the 
script must be launched only from master") - - self.set_release_info() - - if check_branch: - self.check_branch() - - if self.release_type == self.NEW: - with self._checkout(self.release_commit, True): - # Checkout to the commit, it will provide the correct current version - with self.new_release(): - with self.create_release_branch(): - logging.info( - "Publishing release %s from commit %s is done", - self.release_version.describe, - self.release_commit, - ) - - elif self.release_type == self.PATCH: - with self._checkout(self.release_commit, True): - with self.patch_release(): - logging.info( - "Publishing release %s from commit %s is done", - self.release_version.describe, - self.release_commit, - ) - - if self.dry_run: - logging.info("Dry running, clean out possible changes") - rollback = self._rollback_stack.copy() - rollback.reverse() - for cmd in rollback: - self.run(cmd) - return - - self.log_post_workflows() - self.log_rollback() - - def check_no_tags_after(self): - tags_after_commit = self.run(f"git tag --contains={self.release_commit}") - if tags_after_commit: - raise RuntimeError( - f"Commit {self.release_commit} belongs to following tags:\n" - f"{tags_after_commit}\nChoose another commit" - ) - - def check_branch(self): - branch = self.release_branch - if self.release_type == self.NEW: - # Commit to spin up the release must belong to a main branch - branch = "master" - elif self.release_type != self.PATCH: - raise ( - ValueError(f"release_type {self.release_type} not in {self.VALID_TYPE}") - ) - - # Prefetch the branch to have it updated - if self._git.branch == branch: - self.run("git pull --no-recurse-submodules") - else: - self.run( - f"git fetch {self.repo.url} {branch}:{branch} --no-recurse-submodules" - ) - output = self.run(f"git branch --contains={self.release_commit} {branch}") - if branch not in output: - raise RuntimeError( - f"commit {self.release_commit} must belong to {branch} " - f"for {self.release_type} release" - ) - - def _update_cmake_contributors( - self, version: ClickHouseVersion, reset_tweak: bool = True - ) -> None: - if reset_tweak: - desc = version.description - version = version.reset_tweak() - version.with_description(desc) - update_cmake_version(version) - update_contributors(raise_error=True) - if self.dry_run: - logging.info( - "Dry running, resetting the following changes in the repo:\n%s", - self.run(f"git diff '{self.CMAKE_PATH}' '{self.CONTRIBUTORS_PATH}'"), - ) - self.run(f"git checkout '{self.CMAKE_PATH}' '{self.CONTRIBUTORS_PATH}'") - - def _commit_cmake_contributors( - self, version: ClickHouseVersion, reset_tweak: bool = True - ) -> None: - if reset_tweak: - version = version.reset_tweak() - self.run( - f"git commit '{self.CMAKE_PATH}' '{self.CONTRIBUTORS_PATH}' " - f"-m 'Update autogenerated version to {version.string} and contributors'", - dry_run=self.dry_run, - ) - - @property - def bump_part(self) -> ClickHouseVersion.PART_TYPE: - if self.release_type == Release.NEW: - if self._version.minor >= 12: - return "major" - return "minor" - return "patch" - - @property - def has_rollback(self) -> bool: - return bool(self._rollback_stack) - - def log_rollback(self): - if self.has_rollback: - rollback = self._rollback_stack.copy() - rollback.reverse() - logging.info( - "To rollback the action run the following commands:\n %s", - "\n ".join(rollback), - ) - - def log_post_workflows(self): - logging.info( - "To verify all actions are running good visit the following links:\n %s", - "\n ".join( - 
f"https://github.com/{self.repo}/actions/workflows/{action}.yml" - for action in ("release", "tags_stable") - ), - ) - - @contextmanager - def create_release_branch(self): - self.check_no_tags_after() - # Create release branch - self.read_version() - assert self._version_new_tag is not None - with self._create_tag( - self._version_new_tag.describe, - self.release_commit, - f"Initial commit for release {self._version_new_tag.major}.{self._version_new_tag.minor}", - ): - with self._create_branch(self.release_branch, self.release_commit): - with self._checkout(self.release_branch, True): - with self._bump_release_branch(): - yield - - @contextmanager - def patch_release(self): - self.check_no_tags_after() - self.read_version() - version_type = self.get_stable_release_type() - self.version.with_description(version_type) - with self._create_gh_release(False): - self.version = self.version.update(self.bump_part) - self.version.with_description(version_type) - self._update_cmake_contributors(self.version) - # Checking out the commit of the branch and not the branch itself, - # then we are able to skip rollback - with self._checkout(f"{self.release_branch}^0", False): - current_commit = self.run("git rev-parse HEAD") - self._commit_cmake_contributors(self.version) - with self._push( - "HEAD", with_rollback_on_fail=False, remote_ref=self.release_branch - ): - # DO NOT PUT ANYTHING ELSE HERE - # The push must be the last action and mean the successful release - self._rollback_stack.append( - f"{self.dry_run_prefix}git push {self.repo.url} " - f"+{current_commit}:{self.release_branch}" - ) - yield - - @contextmanager - def new_release(self): - # Create branch for a version bump - self.read_version() - self.version = self.version.update(self.bump_part) - helper_branch = f"{self.version.major}.{self.version.minor}-prepare" - with self._create_branch(helper_branch, self.release_commit): - with self._checkout(helper_branch, True): - with self._bump_version_in_master(helper_branch): - yield - - @property - def version(self) -> ClickHouseVersion: - return self._version - - @version.setter - def version(self, version: ClickHouseVersion) -> None: - if not isinstance(version, ClickHouseVersion): - raise ValueError(f"version must be ClickHouseVersion, not {type(version)}") - self._version = version - - @property - def release_branch(self) -> str: - return self._release_branch - - @release_branch.setter - def release_branch(self, branch: str) -> None: - self._release_branch = release_branch(branch) - - @property - def release_commit(self) -> str: - return self._release_commit - - @release_commit.setter - def release_commit(self, release_commit: str) -> None: - self._release_commit = commit(release_commit) - - @property - def dry_run_prefix(self) -> str: - if self.dry_run: - return "# " - return "" - - @contextmanager - def _bump_release_branch(self): - # Update only git, original version stays the same - self._git.update() - new_version = self.version.copy() - version_type = self.get_stable_release_type() - pr_labels = f"--label {Labels.RELEASE}" - if version_type == VersionType.LTS: - pr_labels += f" --label {Labels.RELEASE_LTS}" - new_version.with_description(version_type) - self._update_cmake_contributors(new_version) - self._commit_cmake_contributors(new_version) - with self._push(self.release_branch): - with self._create_gh_label( - f"v{self.release_branch}-must-backport", "10dbed" - ): - with self._create_gh_label( - f"v{self.release_branch}-affected", "c2bfff" - ): - # The following command is rolled 
back by deleting branch - # in self._push - self.run( - f"gh pr create --repo {self.repo} --title " - f"'Release pull request for branch {self.release_branch}' " - f"--head {self.release_branch} {pr_labels} " - "--body 'This PullRequest is a part of ClickHouse release " - "cycle. It is used by CI system only. Do not perform any " - "changes with it.'", - dry_run=self.dry_run, - ) - # Here the release branch part is done. - # We don't create a release itself automatically to have a - # safe window to backport possible bug fixes. - yield - - @contextmanager - def _bump_version_in_master(self, helper_branch: str) -> Iterator[None]: - self.read_version() - self.version = self.version.update(self.bump_part) - self.version.with_description(VersionType.TESTING) - self._update_cmake_contributors(self.version) - self._commit_cmake_contributors(self.version) - # Create a version-new tag - self._version_new_tag = self.version.copy() - self._version_new_tag.tweak = 1 - self._version_new_tag.with_description(VersionType.NEW) - - with self._push(helper_branch): - body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md") - # The following command is rolled back by deleting branch in self._push - self.run( - f"gh pr create --repo {self.repo} --title 'Update version after " - f"release' --head {helper_branch} --body-file '{body_file}' " - "--label 'do not test' --assignee @me", - dry_run=self.dry_run, - ) - # Here the new release part is done - yield - - @contextmanager - def _checkout(self, ref: str, with_checkout_back: bool = False) -> Iterator[None]: - self._git.update() - orig_ref = self._git.branch or self._git.sha - rollback_cmd = "" - if ref not in (self._git.branch, self._git.sha): - self.run(f"git checkout {ref}") - # checkout is not put into rollback_stack intentionally - rollback_cmd = f"git checkout {orig_ref}" - # always update version and git after checked out ref - self.read_version() - try: - yield - except (Exception, KeyboardInterrupt): - logging.warning("Rolling back checked out %s for %s", ref, orig_ref) - self.run(f"git reset --hard; git checkout -f {orig_ref}") - raise - # Normal flow when we need to checkout back - if with_checkout_back and rollback_cmd: - self.run(rollback_cmd) - - @contextmanager - def _create_branch(self, name: str, start_point: str = "") -> Iterator[None]: - self.run(f"git branch {name} {start_point}") - - rollback_cmd = f"git branch -D {name}" - self._rollback_stack.append(rollback_cmd) - try: - yield - except (Exception, KeyboardInterrupt): - logging.warning("Rolling back created branch %s", name) - self.run(rollback_cmd) - raise - - @contextmanager - def _create_gh_label(self, label: str, color_hex: str) -> Iterator[None]: - # API call, https://docs.github.com/en/rest/reference/issues#create-a-label - self.run( - f"gh api repos/{self.repo}/labels -f name={label} -f color={color_hex}", - dry_run=self.dry_run, - ) - rollback_cmd = ( - f"{self.dry_run_prefix}gh api repos/{self.repo}/labels/{label} -X DELETE" - ) - self._rollback_stack.append(rollback_cmd) - try: - yield - except (Exception, KeyboardInterrupt): - logging.warning("Rolling back label %s", label) - self.run(rollback_cmd) - raise - - @contextmanager - def _create_gh_release(self, as_prerelease: bool) -> Iterator[None]: - tag = self.release_version.describe - with self._create_tag(tag, self.release_commit): - # Preserve tag if version is changed - prerelease = "" - if as_prerelease: - prerelease = "--prerelease" - self.run( - f"gh release create {prerelease} --repo {self.repo} " - f"--title 'Release 
{tag}' '{tag}'", - dry_run=self.dry_run, - ) - rollback_cmd = ( - f"{self.dry_run_prefix}gh release delete --yes " - f"--repo {self.repo} '{tag}'" - ) - self._rollback_stack.append(rollback_cmd) - try: - yield - except (Exception, KeyboardInterrupt): - logging.warning("Rolling back release publishing") - self.run(rollback_cmd) - raise - - @contextmanager - def _create_tag( - self, tag: str, commit: str, tag_message: str = "" - ) -> Iterator[None]: - tag_message = tag_message or f"Release {tag}" - # Create tag even in dry-run - self.run(f"git tag -a -m '{tag_message}' '{tag}' {commit}") - rollback_cmd = f"git tag -d '{tag}'" - self._rollback_stack.append(rollback_cmd) - try: - with self._push(tag): - yield - except (Exception, KeyboardInterrupt): - logging.warning("Rolling back tag %s", tag) - self.run(rollback_cmd) - raise - - @contextmanager - def _push( - self, ref: str, with_rollback_on_fail: bool = True, remote_ref: str = "" - ) -> Iterator[None]: - if remote_ref == "": - remote_ref = ref - - self.run(f"git push {self.repo.url} {ref}:{remote_ref}", dry_run=self.dry_run) - if with_rollback_on_fail: - rollback_cmd = ( - f"{self.dry_run_prefix}git push -d {self.repo.url} {remote_ref}" - ) - self._rollback_stack.append(rollback_cmd) - - try: - yield - except (Exception, KeyboardInterrupt): - if with_rollback_on_fail: - logging.warning("Rolling back pushed ref %s", ref) - self.run(rollback_cmd) - - raise - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - description="Script to release a new ClickHouse version, requires `git` and " - "`gh` (github-cli) commands " - "!!! LAUNCH IT ONLY FROM THE MASTER BRANCH !!!", - ) - - parser.add_argument( - "--commit", - required=True, - type=commit, - help="commit create a release", - ) - parser.add_argument( - "--repo", - default="ClickHouse/ClickHouse", - help="repository to create the release", - ) - parser.add_argument( - "--remote-protocol", - "-p", - default="ssh", - choices=Repo.VALID, - help="repo protocol for git commands remote, 'origin' is a special case and " - "uses 'origin' as a remote", - ) - parser.add_argument( - "--type", - required=True, - choices=Release.VALID_TYPE, - dest="release_type", - help="a release type to bump the major.minor.patch version part, " - "new branch is created only for the value 'new'", - ) - parser.add_argument("--with-release-branch", default=True, help=argparse.SUPPRESS) - parser.add_argument("--check-dirty", default=True, help=argparse.SUPPRESS) - parser.add_argument( - "--no-check-dirty", - dest="check_dirty", - action="store_false", - default=argparse.SUPPRESS, - help="(dangerous) if set, skip check repository for uncommitted changes", - ) - parser.add_argument("--check-run-from-master", default=True, help=argparse.SUPPRESS) - parser.add_argument( - "--no-run-from-master", - dest="check_run_from_master", - action="store_false", - default=argparse.SUPPRESS, - help="(for development) if set, the script could run from non-master branch", - ) - parser.add_argument("--check-branch", default=True, help=argparse.SUPPRESS) - parser.add_argument( - "--no-check-branch", - dest="check_branch", - action="store_false", - default=argparse.SUPPRESS, - help="(debug or development only, dangerous) if set, skip the branch check for " - "a run. By default, 'new' type work only for master, and 'patch' " - "works only for a release branches, that name " - "should be the same as '$MAJOR.$MINOR' version, e.g. 
22.2", - ) - parser.add_argument( - "--dry-run", - action="store_true", - help="do not make any actual changes in the repo, just show what will be done", - ) - parser.add_argument( - "--with-stderr", - action="store_true", - help="if set, the stderr of all subprocess commands will be printed as well", - ) - - return parser.parse_args() - - -def main(): - logging.basicConfig(level=logging.INFO) - args = parse_args() - repo = Repo(args.repo, args.remote_protocol) - release = Release( - repo, args.commit, args.release_type, args.dry_run, args.with_stderr - ) - - try: - release.do(args.check_dirty, args.check_run_from_master, args.check_branch) - except: - if release.has_rollback: - logging.error( - "!!The release process finished with error, read the output carefully!!" - ) - logging.error( - "Probably, rollback finished with error. " - "If you don't see any of the following commands in the output, " - "execute them manually:" - ) - release.log_rollback() - raise - - -if __name__ == "__main__": - assert False, "Script Deprecated, ask ci team for help" - main() From 1abfa41b890d4cdcb09d06579b8e9b7f14d4f4f5 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 5 Nov 2024 11:18:11 +0100 Subject: [PATCH 277/353] Update CMakeLists.txt --- contrib/usearch-cmake/CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/usearch-cmake/CMakeLists.txt b/contrib/usearch-cmake/CMakeLists.txt index 69a986de192..fda061bf467 100644 --- a/contrib/usearch-cmake/CMakeLists.txt +++ b/contrib/usearch-cmake/CMakeLists.txt @@ -19,7 +19,8 @@ endif () add_library(ch_contrib::usearch ALIAS _usearch) - +# Cf. https://github.com/llvm/llvm-project/issues/107810 (though it is not 100% the same stack) +# # LLVM ERROR: Cannot select: 0x7996e7a73150: f32,ch = load<(load (s16) from %ir.22, !tbaa !54231), anyext from bf16> 0x79961cb737c0, 0x7996e7a1a500, undef:i64, ./contrib/SimSIMD/include/simsimd/dot.h:215:1 # 0x7996e7a1a500: i64 = add 0x79961e770d00, Constant:i64<-16>, ./contrib/SimSIMD/include/simsimd/dot.h:215:1 # 0x79961e770d00: i64,ch = CopyFromReg 0x79961cb737c0, Register:i64 %4, ./contrib/SimSIMD/include/simsimd/dot.h:215:1 From 087a886bc9f312c4cc4fc6cba1d1ea5d1681c137 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 5 Nov 2024 11:18:21 +0100 Subject: [PATCH 278/353] Update src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp Co-authored-by: Nikita Taranov --- src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 0b5ffa659dc..5a725922e14 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -118,8 +118,6 @@ USearchIndexWithSerialization::USearchIndexWithSerialization( if (!result) throw Exception(ErrorCodes::INCORRECT_DATA, "Could not create vector similarity index. 
Error: {}", String(result.error.release())); swap(result.index); - - /// LOG_TRACE(getLogger("XXX"), "{}", simsimd_uses_dynamic_dispatch()); } void USearchIndexWithSerialization::serialize(WriteBuffer & ostr) const From 0cc8626279fefc6ceae0a806b4e326ea0a354476 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Tue, 5 Nov 2024 11:31:27 +0000 Subject: [PATCH 279/353] Fix assert during insert into vector similarity index in presence of other skipping indexes --- .../MergeTreeIndexVectorSimilarity.cpp | 79 ++++++++++--------- .../02354_vector_search_bugs.reference | 1 + .../0_stateless/02354_vector_search_bugs.sql | 15 ++++ 3 files changed, 58 insertions(+), 37 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 5a725922e14..498d0131d5a 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -347,53 +347,58 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ if (index_sample_block.columns() > 1) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected block with single column"); - const String & index_column_name = index_sample_block.getByPosition(0).name; - const ColumnPtr & index_column = block.getByName(index_column_name).column; - ColumnPtr column_cut = index_column->cut(*pos, rows_read); + for (size_t i = 0; i < index_sample_block.columns(); ++i) + { + const auto & index_column_with_type_and_name = index_sample_block.getByPosition(i); - const auto * column_array = typeid_cast(column_cut.get()); - if (!column_array) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array(Float*) column"); + const auto & index_column_name = index_column_with_type_and_name.name; + const auto & index_column = block.getByName(index_column_name).column; + ColumnPtr column_cut = index_column->cut(*pos, rows_read); - if (column_array->empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); + const auto * column_array = typeid_cast(column_cut.get()); + if (!column_array) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array(Float*) column"); - /// The vector similarity algorithm naturally assumes that the indexed vectors have dimension >= 1. This condition is violated if empty arrays - /// are INSERTed into an vector-similarity-indexed column or if no value was specified at all in which case the arrays take on their default - /// values which is also empty. - if (column_array->isDefaultAt(0)) - throw Exception(ErrorCodes::INCORRECT_DATA, "The arrays in column '{}' must not be empty. Did you try to INSERT default values?", index_column_name); + if (column_array->empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); - const size_t rows = column_array->size(); + /// The vector similarity algorithm naturally assumes that the indexed vectors have dimension >= 1. This condition is violated if empty arrays + /// are INSERTed into an vector-similarity-indexed column or if no value was specified at all in which case the arrays take on their default + /// values which is also empty. + if (column_array->isDefaultAt(0)) + throw Exception(ErrorCodes::INCORRECT_DATA, "The arrays in column '{}' must not be empty. 
Did you try to INSERT default values?", index_column_name); - const auto & column_array_offsets = column_array->getOffsets(); - const size_t dimensions = column_array_offsets[0]; + const size_t rows = column_array->size(); - if (!index) - index = std::make_shared(dimensions, metric_kind, scalar_kind, usearch_hnsw_params); + const auto & column_array_offsets = column_array->getOffsets(); + const size_t dimensions = column_array_offsets[0]; - /// Also check that previously inserted blocks have the same size as this block. - /// Note that this guarantees consistency of dimension only within parts. We are unable to detect inconsistent dimensions across - /// parts - for this, a little help from the user is needed, e.g. CONSTRAINT cnstr CHECK length(array) = 42. - if (index->dimensions() != dimensions) - throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column with vector similarity index must have equal length"); + if (!index) + index = std::make_shared(dimensions, metric_kind, scalar_kind, usearch_hnsw_params); - /// We use Usearch's index_dense_t as index type which supports only 4 bio entries according to https://github.com/unum-cloud/usearch/tree/main/cpp - if (index->size() + rows > std::numeric_limits::max()) - throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index would exceed 4 billion entries"); + /// Also check that previously inserted blocks have the same size as this block. + /// Note that this guarantees consistency of dimension only within parts. We are unable to detect inconsistent dimensions across + /// parts - for this, a little help from the user is needed, e.g. CONSTRAINT cnstr CHECK length(array) = 42. + if (index->dimensions() != dimensions) + throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column with vector similarity index must have equal length"); - DataTypePtr data_type = block.getDataTypes()[0]; - const auto * data_type_array = typeid_cast(data_type.get()); - if (!data_type_array) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); - const TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); + /// We use Usearch's index_dense_t as index type which supports only 4 bio entries according to https://github.com/unum-cloud/usearch/tree/main/cpp + if (index->size() + rows > std::numeric_limits::max()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index would exceed 4 billion entries"); - if (WhichDataType(nested_type_index).isFloat32()) - updateImpl(column_array, column_array_offsets, index, dimensions, rows); - else if (WhichDataType(nested_type_index).isFloat64()) - updateImpl(column_array, column_array_offsets, index, dimensions, rows); - else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); + DataTypePtr data_type = index_column_with_type_and_name.type; + const auto * data_type_array = typeid_cast(data_type.get()); + if (!data_type_array) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); + const TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); + + if (WhichDataType(nested_type_index).isFloat32()) + updateImpl(column_array, column_array_offsets, index, dimensions, rows); + else if (WhichDataType(nested_type_index).isFloat64()) + updateImpl(column_array, column_array_offsets, index, dimensions, rows); + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); + } *pos += rows_read; diff --git 
a/tests/queries/0_stateless/02354_vector_search_bugs.reference b/tests/queries/0_stateless/02354_vector_search_bugs.reference index 9b610cf543a..dec921cf586 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.reference +++ b/tests/queries/0_stateless/02354_vector_search_bugs.reference @@ -41,3 +41,4 @@ Expression (Projection) Parts: 1/1 Granules: 4/4 index_granularity_bytes = 0 is disallowed +Issue #71381: Vector similarity index and other skipping indexes used on the same table diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.sql b/tests/queries/0_stateless/02354_vector_search_bugs.sql index d55bdb88a76..6bcb0f78e75 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.sql +++ b/tests/queries/0_stateless/02354_vector_search_bugs.sql @@ -117,3 +117,18 @@ CREATE TABLE tab(id Int32, vec Array(Float32)) ENGINE = MergeTree ORDER BY id SE ALTER TABLE tab ADD INDEX vec_idx1(vec) TYPE vector_similarity('hnsw', 'cosineDistance'); -- { serverError INVALID_SETTING_VALUE } DROP TABLE tab; + +SELECT 'Issue #71381: Vector similarity index and other skipping indexes used on the same table'; + +CREATE TABLE tab( + val String, + vec Array(Float32), + INDEX ann_idx vec TYPE vector_similarity('hnsw', 'cosineDistance'), + INDEX set_idx val TYPE set(100) GRANULARITY 100 +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO tab VALUES ('hello world', [0.0]); + +DROP TABLE tab; From bbe28d45bff0bd721685c812706f113e1412ed6b Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 5 Nov 2024 12:33:25 +0000 Subject: [PATCH 280/353] fix --- src/Parsers/ASTFunction.cpp | 5 ++- src/TableFunctions/TableFunctionMongoDB.cpp | 42 +++++++++---------- src/TableFunctions/TableFunctionMongoDB.h | 15 +++++++ .../TableFunctionMongoDBPocoLegacy.cpp | 15 ++----- .../03261_mongodb_argumetns_crash.sql | 1 + 5 files changed, 45 insertions(+), 33 deletions(-) create mode 100644 src/TableFunctions/TableFunctionMongoDB.h diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp index 53d44e2f325..11cfe2e584e 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -724,7 +724,10 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format { if (secret_arguments.are_named) { - assert_cast(argument.get())->arguments->children[0]->formatImpl(settings, state, nested_dont_need_parens); + if (const auto * func_ast = typeid_cast(argument.get())) + func_ast->arguments->children[0]->formatImpl(settings, state, nested_dont_need_parens); + else + argument->formatImpl(settings, state, nested_dont_need_parens); settings.ostr << (settings.hilite ? hilite_operator : "") << " = " << (settings.hilite ? 
hilite_none : ""); } if (!secret_arguments.replacement.empty()) diff --git a/src/TableFunctions/TableFunctionMongoDB.cpp b/src/TableFunctions/TableFunctionMongoDB.cpp index 966ce858875..9f91839fb33 100644 --- a/src/TableFunctions/TableFunctionMongoDB.cpp +++ b/src/TableFunctions/TableFunctionMongoDB.cpp @@ -15,7 +15,7 @@ #include #include #include - +#include namespace DB { @@ -85,17 +85,11 @@ void TableFunctionMongoDB::parseArguments(const ASTPtr & ast_function, ContextPt { if (const auto * ast_func = typeid_cast(args[i].get())) { - const auto * args_expr = assert_cast(ast_func->arguments.get()); - auto function_args = args_expr->children; - if (function_args.size() != 2) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument"); - - auto arg_name = function_args[0]->as()->name(); - + const auto & [arg_name, arg_value] = getKeyValueMongoDBArgument(ast_func); if (arg_name == "structure") - structure = checkAndGetLiteralArgument(function_args[1], "structure"); + structure = checkAndGetLiteralArgument(arg_value, arg_name); else if (arg_name == "options") - main_arguments.push_back(function_args[1]); + main_arguments.push_back(arg_value); } else if (i == 5) { @@ -117,19 +111,11 @@ void TableFunctionMongoDB::parseArguments(const ASTPtr & ast_function, ContextPt { if (const auto * ast_func = typeid_cast(args[i].get())) { - const auto * args_expr = assert_cast(ast_func->arguments.get()); - const auto & function_args = args_expr->children; - if (function_args.size() != 2 || ast_func->name != "equals" || function_args[0]->as()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument, got {}", ast_func->formatForErrorMessage()); - - auto arg_name = function_args[0]->as()->name(); - + const auto & [arg_name, arg_value] = getKeyValueMongoDBArgument(ast_func); if (arg_name == "structure") - structure = checkAndGetLiteralArgument(function_args[1], "structure"); + structure = checkAndGetLiteralArgument(arg_value, arg_name); else if (arg_name == "options") - main_arguments.push_back(function_args[1]); - else - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument, got {}", ast_func->formatForErrorMessage()); + main_arguments.push_back(arg_value); } else if (i == 2) { @@ -149,6 +135,20 @@ void TableFunctionMongoDB::parseArguments(const ASTPtr & ast_function, ContextPt } +std::pair getKeyValueMongoDBArgument(const ASTFunction * ast_func) +{ + const auto * args_expr = assert_cast(ast_func->arguments.get()); + const auto & function_args = args_expr->children; + if (function_args.size() != 2 || ast_func->name != "equals" || !function_args[0]->as()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument, got {}", ast_func->formatForErrorMessage()); + + const auto & arg_name = function_args[0]->as()->name(); + if (arg_name == "structure" || arg_name == "options") + return std::make_pair(arg_name, function_args[1]); + + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument, got {}", ast_func->formatForErrorMessage()); +} + void registerTableFunctionMongoDB(TableFunctionFactory & factory) { factory.registerFunction( diff --git a/src/TableFunctions/TableFunctionMongoDB.h b/src/TableFunctions/TableFunctionMongoDB.h new file mode 100644 index 00000000000..2b75fda1675 --- /dev/null +++ b/src/TableFunctions/TableFunctionMongoDB.h @@ -0,0 +1,15 @@ + +#include + +#include +#include +#include + + +namespace DB +{ + +std::pair getKeyValueMongoDBArgument(const ASTFunction * ast_func); + +} + 
diff --git a/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp b/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp index 70b28ddfaf0..4e27fd35e12 100644 --- a/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp +++ b/src/TableFunctions/TableFunctionMongoDBPocoLegacy.cpp @@ -15,6 +15,7 @@ #include #include #include +#include namespace DB @@ -97,19 +98,11 @@ void TableFunctionMongoDBPocoLegacy::parseArguments(const ASTPtr & ast_function, { if (const auto * ast_func = typeid_cast(args[i].get())) { - const auto * args_expr = assert_cast(ast_func->arguments.get()); - const auto & function_args = args_expr->children; - if (function_args.size() != 2 || ast_func->name != "equals" || function_args[0]->as()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument, got {}", ast_func->formatForErrorMessage()); - - auto arg_name = function_args[0]->as()->name(); - + const auto & [arg_name, arg_value] = getKeyValueMongoDBArgument(ast_func); if (arg_name == "structure") - structure = checkAndGetLiteralArgument(function_args[1], "structure"); + structure = checkAndGetLiteralArgument(arg_value, "structure"); else if (arg_name == "options") - main_arguments.push_back(function_args[1]); - else - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected key-value defined argument, got {}", ast_func->formatForErrorMessage()); + main_arguments.push_back(arg_value); } else if (i == 5) { diff --git a/tests/queries/0_stateless/03261_mongodb_argumetns_crash.sql b/tests/queries/0_stateless/03261_mongodb_argumetns_crash.sql index 830d3995bd5..ca558ac6bc6 100644 --- a/tests/queries/0_stateless/03261_mongodb_argumetns_crash.sql +++ b/tests/queries/0_stateless/03261_mongodb_argumetns_crash.sql @@ -11,3 +11,4 @@ SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', NULL, 'x Int32'); -- { serverError BAD_ARGUMENTS } SELECT * FROM mongodb(NULL, 'test', 'my_collection', 'test_user', 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } +CREATE TABLE IF NOT EXISTS store_version ( `_id` String ) ENGINE = MongoDB(`localhost:27017`, mongodb, storeinfo, adminUser, adminUser); -- { serverError NAMED_COLLECTION_DOESNT_EXIST } From d7977f0b916ccdcc240de8d413015532d492f668 Mon Sep 17 00:00:00 2001 From: kssenii Date: Tue, 5 Nov 2024 13:36:27 +0100 Subject: [PATCH 281/353] More correct assertion --- src/Interpreters/Cache/EvictionCandidates.cpp | 3 ++- src/Interpreters/Cache/FileSegment.cpp | 7 ++++--- src/Interpreters/Cache/FileSegment.h | 7 +++++-- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/Interpreters/Cache/EvictionCandidates.cpp b/src/Interpreters/Cache/EvictionCandidates.cpp index 08776ad5aee..f5d5fdec6ba 100644 --- a/src/Interpreters/Cache/EvictionCandidates.cpp +++ b/src/Interpreters/Cache/EvictionCandidates.cpp @@ -83,7 +83,8 @@ void EvictionCandidates::removeQueueEntries(const CachePriorityGuard::Lock & loc queue_iterator->invalidate(); chassert(candidate->releasable()); - candidate->file_segment->resetQueueIterator(); + candidate->file_segment->markDelayedRemovalAndResetQueueIterator(); + /// We need to set removed flag in file segment metadata, /// because in dynamic cache resize we first remove queue entries, /// then evict which also removes file segment metadata, diff --git a/src/Interpreters/Cache/FileSegment.cpp b/src/Interpreters/Cache/FileSegment.cpp index 080b54feb06..307d9c8afe1 100644 --- 
a/src/Interpreters/Cache/FileSegment.cpp +++ b/src/Interpreters/Cache/FileSegment.cpp @@ -171,10 +171,11 @@ void FileSegment::setQueueIterator(Priority::IteratorPtr iterator) queue_iterator = iterator; } -void FileSegment::resetQueueIterator() +void FileSegment::markDelayedRemovalAndResetQueueIterator() { auto lk = lock(); - queue_iterator.reset(); + on_delayed_removal = true; + queue_iterator = {}; } size_t FileSegment::getCurrentWriteOffset() const @@ -861,7 +862,7 @@ bool FileSegment::assertCorrectnessUnlocked(const FileSegmentGuard::Lock & lock) chassert(downloaded_size > 0); chassert(fs::file_size(getPath()) > 0); - chassert(queue_iterator); + chassert(queue_iterator || on_delayed_removal); check_iterator(queue_iterator); break; } diff --git a/src/Interpreters/Cache/FileSegment.h b/src/Interpreters/Cache/FileSegment.h index 79adc342329..6946d70b764 100644 --- a/src/Interpreters/Cache/FileSegment.h +++ b/src/Interpreters/Cache/FileSegment.h @@ -177,7 +177,7 @@ public: void setQueueIterator(Priority::IteratorPtr iterator); - void resetQueueIterator(); + void markDelayedRemovalAndResetQueueIterator(); KeyMetadataPtr tryGetKeyMetadata() const; @@ -249,11 +249,12 @@ private: String tryGetPath() const; - Key file_key; + const Key file_key; Range segment_range; const FileSegmentKind segment_kind; /// Size of the segment is not known until it is downloaded and /// can be bigger than max_file_segment_size. + /// is_unbound == true for temporary data in cache. const bool is_unbound; const bool background_download_enabled; @@ -279,6 +280,8 @@ private: std::atomic hits_count = 0; /// cache hits. std::atomic ref_count = 0; /// Used for getting snapshot state + bool on_delayed_removal = false; + CurrentMetrics::Increment metric_increment{CurrentMetrics::CacheFileSegments}; }; From 5152984bb170e5c63144db3dd238099534353378 Mon Sep 17 00:00:00 2001 From: vdimir Date: Tue, 5 Nov 2024 13:52:14 +0000 Subject: [PATCH 282/353] upd src/TableFunctions/TableFunctionMongoDB.h --- src/TableFunctions/TableFunctionMongoDB.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/TableFunctions/TableFunctionMongoDB.h b/src/TableFunctions/TableFunctionMongoDB.h index 2b75fda1675..2ab8ee9479f 100644 --- a/src/TableFunctions/TableFunctionMongoDB.h +++ b/src/TableFunctions/TableFunctionMongoDB.h @@ -1,3 +1,4 @@ +#pragma once #include From c16e1f021b7c24250ebf3bef1c764ba7c218de0d Mon Sep 17 00:00:00 2001 From: Anton Popov Date: Tue, 5 Nov 2024 14:57:35 +0000 Subject: [PATCH 283/353] fix memory usage in inserts with delayed streams --- .../MergeTree/IMergeTreeDataPartWriter.h | 2 ++ .../MergeTree/IMergedBlockOutputStream.h | 5 +++++ .../MergeTreeDataPartWriterCompact.h | 2 ++ .../MergeTree/MergeTreeDataPartWriterWide.h | 2 ++ src/Storages/MergeTree/MergeTreeSink.cpp | 12 +++++++---- .../MergeTree/ReplicatedMergeTreeSink.cpp | 13 ++++++++---- .../03261_delayed_streams_memory.reference | 1 + .../03261_delayed_streams_memory.sql | 20 +++++++++++++++++++ 8 files changed, 49 insertions(+), 8 deletions(-) create mode 100644 tests/queries/0_stateless/03261_delayed_streams_memory.reference create mode 100644 tests/queries/0_stateless/03261_delayed_streams_memory.sql diff --git a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h index b8ac14b1750..d1c76505d7c 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPartWriter.h +++ b/src/Storages/MergeTree/IMergeTreeDataPartWriter.h @@ -46,6 +46,8 @@ public: virtual void finish(bool sync) = 0; + virtual size_t getNumberOfOpenStreams() 
const = 0; + Columns releaseIndexColumns(); PlainMarksByName releaseCachedMarks(); diff --git a/src/Storages/MergeTree/IMergedBlockOutputStream.h b/src/Storages/MergeTree/IMergedBlockOutputStream.h index a901b03c115..7dd6d720170 100644 --- a/src/Storages/MergeTree/IMergedBlockOutputStream.h +++ b/src/Storages/MergeTree/IMergedBlockOutputStream.h @@ -39,6 +39,11 @@ public: return writer->releaseCachedMarks(); } + size_t getNumberOfOpenStreams() const + { + return writer->getNumberOfOpenStreams(); + } + protected: /// Remove all columns marked expired in data_part. Also, clears checksums diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h index b440a37222d..20c47fb8314 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterCompact.h @@ -32,6 +32,8 @@ public: void fillChecksums(MergeTreeDataPartChecksums & checksums, NameSet & checksums_to_remove) override; void finish(bool sync) override; + size_t getNumberOfOpenStreams() const override { return 1; } + private: /// Finish serialization of the data. Flush rows in buffer to disk, compute checksums. void fillDataChecksums(MergeTreeDataPartChecksums & checksums); diff --git a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h index 68f016a7421..b594b2d79bb 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h +++ b/src/Storages/MergeTree/MergeTreeDataPartWriterWide.h @@ -43,6 +43,8 @@ public: void finish(bool sync) final; + size_t getNumberOfOpenStreams() const override { return column_streams.size(); } + private: /// Finish serialization of data: write final mark if required and compute checksums /// Also validate written data in debug mode diff --git a/src/Storages/MergeTree/MergeTreeSink.cpp b/src/Storages/MergeTree/MergeTreeSink.cpp index 604112c26ea..99852309c77 100644 --- a/src/Storages/MergeTree/MergeTreeSink.cpp +++ b/src/Storages/MergeTree/MergeTreeSink.cpp @@ -94,7 +94,7 @@ void MergeTreeSink::consume(Chunk & chunk) DelayedPartitions partitions; const Settings & settings = context->getSettingsRef(); - size_t streams = 0; + size_t total_streams = 0; bool support_parallel_write = false; auto token_info = chunk.getChunkInfos().get(); @@ -153,16 +153,18 @@ void MergeTreeSink::consume(Chunk & chunk) max_insert_delayed_streams_for_parallel_write = 0; /// In case of too much columns/parts in block, flush explicitly. 
- streams += temp_part.streams.size(); + size_t current_streams = 0; + for (const auto & stream : temp_part.streams) + current_streams += stream.stream->getNumberOfOpenStreams(); - if (streams > max_insert_delayed_streams_for_parallel_write) + if (total_streams + current_streams > max_insert_delayed_streams_for_parallel_write) { finishDelayedChunk(); delayed_chunk = std::make_unique(); delayed_chunk->partitions = std::move(partitions); finishDelayedChunk(); - streams = 0; + total_streams = 0; support_parallel_write = false; partitions = DelayedPartitions{}; } @@ -174,6 +176,8 @@ void MergeTreeSink::consume(Chunk & chunk) .block_dedup_token = block_dedup_token, .part_counters = std::move(part_counters), }); + + total_streams += current_streams; } if (need_to_define_dedup_token) diff --git a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp index f1b0e5ec385..f3ae6e77ac3 100644 --- a/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp +++ b/src/Storages/MergeTree/ReplicatedMergeTreeSink.cpp @@ -341,7 +341,7 @@ void ReplicatedMergeTreeSinkImpl::consume(Chunk & chunk) using DelayedPartitions = std::vector; DelayedPartitions partitions; - size_t streams = 0; + size_t total_streams = 0; bool support_parallel_write = false; for (auto & current_block : part_blocks) @@ -418,15 +418,18 @@ void ReplicatedMergeTreeSinkImpl::consume(Chunk & chunk) max_insert_delayed_streams_for_parallel_write = 0; /// In case of too much columns/parts in block, flush explicitly. - streams += temp_part.streams.size(); - if (streams > max_insert_delayed_streams_for_parallel_write) + size_t current_streams = 0; + for (const auto & stream : temp_part.streams) + current_streams += stream.stream->getNumberOfOpenStreams(); + + if (total_streams + current_streams > max_insert_delayed_streams_for_parallel_write) { finishDelayedChunk(zookeeper); delayed_chunk = std::make_unique::DelayedChunk>(replicas_num); delayed_chunk->partitions = std::move(partitions); finishDelayedChunk(zookeeper); - streams = 0; + total_streams = 0; support_parallel_write = false; partitions = DelayedPartitions{}; } @@ -447,6 +450,8 @@ void ReplicatedMergeTreeSinkImpl::consume(Chunk & chunk) std::move(unmerged_block), std::move(part_counters) /// profile_events_scope must be reset here. 
)); + + total_streams += current_streams; } if (need_to_define_dedup_token) diff --git a/tests/queries/0_stateless/03261_delayed_streams_memory.reference b/tests/queries/0_stateless/03261_delayed_streams_memory.reference new file mode 100644 index 00000000000..7326d960397 --- /dev/null +++ b/tests/queries/0_stateless/03261_delayed_streams_memory.reference @@ -0,0 +1 @@ +Ok diff --git a/tests/queries/0_stateless/03261_delayed_streams_memory.sql b/tests/queries/0_stateless/03261_delayed_streams_memory.sql new file mode 100644 index 00000000000..863644a0dff --- /dev/null +++ b/tests/queries/0_stateless/03261_delayed_streams_memory.sql @@ -0,0 +1,20 @@ +-- Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_100_columns; + +CREATE TABLE t_100_columns (id UInt64, c0 String, c1 String, c2 String, c3 String, c4 String, c5 String, c6 String, c7 String, c8 String, c9 String, c10 String, c11 String, c12 String, c13 String, c14 String, c15 String, c16 String, c17 String, c18 String, c19 String, c20 String, c21 String, c22 String, c23 String, c24 String, c25 String, c26 String, c27 String, c28 String, c29 String, c30 String, c31 String, c32 String, c33 String, c34 String, c35 String, c36 String, c37 String, c38 String, c39 String, c40 String, c41 String, c42 String, c43 String, c44 String, c45 String, c46 String, c47 String, c48 String, c49 String, c50 String) +ENGINE = MergeTree +ORDER BY id PARTITION BY id % 50 +SETTINGS min_bytes_for_wide_part = 0, ratio_of_defaults_for_sparse_serialization = 1.0, max_compress_block_size = '1M', storage_policy = 's3_cache'; + +SET max_insert_delayed_streams_for_parallel_write = 55; + +INSERT INTO t_100_columns (id) SELECT number FROM numbers(100); + +SYSTEM FLUSH LOGS; + +SELECT if (memory_usage < 300000000, 'Ok', format('Fail: memory usage {}', formatReadableSize(memory_usage))) +FROM system.query_log +WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO t_100_columns%' AND type = 'QueryFinish'; + +DROP TABLE t_100_columns; From 6c63587f7747cc05e5df4aad259cee40c34ac7c6 Mon Sep 17 00:00:00 2001 From: Vladimir Cherkasov Date: Fri, 1 Nov 2024 13:27:09 +0100 Subject: [PATCH 284/353] More info in TOO_SLOW exception --- src/QueryPipeline/ExecutionSpeedLimits.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/QueryPipeline/ExecutionSpeedLimits.cpp b/src/QueryPipeline/ExecutionSpeedLimits.cpp index 05fd394db77..fc0e86781f0 100644 --- a/src/QueryPipeline/ExecutionSpeedLimits.cpp +++ b/src/QueryPipeline/ExecutionSpeedLimits.cpp @@ -86,10 +86,12 @@ void ExecutionSpeedLimits::throttle( if (timeout_overflow_mode == OverflowMode::THROW && estimated_execution_time_seconds > max_estimated_execution_time.totalSeconds()) throw Exception( ErrorCodes::TOO_SLOW, - "Estimated query execution time ({} seconds) is too long. Maximum: {}. Estimated rows to process: {}", + "Estimated query execution time ({:.5f} seconds) is too long. Maximum: {}. 
Estimated rows to process: {} ({} read in {:.5f} seconds).", estimated_execution_time_seconds, max_estimated_execution_time.totalSeconds(), - total_rows_to_read); + total_rows_to_read, + read_rows, + elapsed_seconds); } if (max_execution_rps && rows_per_second >= max_execution_rps) From 6ecc673f7d4a9890004a24c16d8c6b9b5a857d93 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 5 Nov 2024 16:02:40 +0000 Subject: [PATCH 285/353] Fix quorum inserts tests --- tests/integration/test_quorum_inserts/test.py | 114 +++++++++--------- 1 file changed, 54 insertions(+), 60 deletions(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index eefc4882e8e..de437fc3206 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -2,6 +2,7 @@ import concurrent import time import pytest +import uuid from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager @@ -46,10 +47,11 @@ def started_cluster(): def test_simple_add_replica(started_cluster): - zero.query("DROP TABLE IF EXISTS test_simple ON CLUSTER cluster") + table_name = "test_simple_" + uuid.uuid4().hex + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( - "CREATE TABLE test_simple " + f"CREATE TABLE {table_name} " "(a Int8, d Date) " "Engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/{table}', '{replica}') " "PARTITION BY d ORDER BY a" @@ -58,91 +60,81 @@ def test_simple_add_replica(started_cluster): zero.query(create_query) first.query(create_query) - first.query("SYSTEM STOP FETCHES test_simple") + first.query(f"SYSTEM STOP FETCHES {table_name}") zero.query( - "INSERT INTO test_simple VALUES (1, '2011-01-01')", + f"INSERT INTO {table_name} VALUES (1, '2011-01-01')", settings={"insert_quorum": 1}, ) - assert "1\t2011-01-01\n" == zero.query("SELECT * from test_simple") - assert "" == first.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == zero.query(f"SELECT * from {table_name}") + assert "" == first.query(f"SELECT * from {table_name}") - first.query("SYSTEM START FETCHES test_simple") + first.query(f"SYSTEM START FETCHES {table_name}") - first.query("SYSTEM SYNC REPLICA test_simple", timeout=20) + first.query(f"SYSTEM SYNC REPLICA {table_name}", timeout=20) - assert "1\t2011-01-01\n" == zero.query("SELECT * from test_simple") - assert "1\t2011-01-01\n" == first.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == zero.query(f"SELECT * from {table_name}") + assert "1\t2011-01-01\n" == first.query(f"SELECT * from {table_name}") second.query(create_query) - second.query("SYSTEM SYNC REPLICA test_simple", timeout=20) + second.query(f"SYSTEM SYNC REPLICA {table_name}", timeout=20) - assert "1\t2011-01-01\n" == zero.query("SELECT * from test_simple") - assert "1\t2011-01-01\n" == first.query("SELECT * from test_simple") - assert "1\t2011-01-01\n" == second.query("SELECT * from test_simple") + assert "1\t2011-01-01\n" == zero.query(f"SELECT * from {table_name}") + assert "1\t2011-01-01\n" == first.query(f"SELECT * from {table_name}") + assert "1\t2011-01-01\n" == second.query(f"SELECT * from {table_name}") - zero.query("DROP TABLE IF EXISTS test_simple ON CLUSTER cluster") + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") def test_drop_replica_and_achieve_quorum(started_cluster): + table_name = "test_drop_replica_and_achieve_quorum_" + uuid.uuid4().hex zero.query( - "DROP TABLE IF EXISTS test_drop_replica_and_achieve_quorum ON 
CLUSTER cluster" + f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster" ) - create_query = ( - "CREATE TABLE test_drop_replica_and_achieve_quorum " + f"CREATE TABLE {table_name} " "(a Int8, d Date) " "Engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/{table}', '{replica}') " "PARTITION BY d ORDER BY a" ) - print("Create Replicated table with two replicas") zero.query(create_query) first.query(create_query) - print("Stop fetches on one replica. Since that, it will be isolated.") - first.query("SYSTEM STOP FETCHES test_drop_replica_and_achieve_quorum") - + first.query(f"SYSTEM STOP FETCHES {table_name}") print("Insert to other replica. This query will fail.") quorum_timeout = zero.query_and_get_error( - "INSERT INTO test_drop_replica_and_achieve_quorum(a,d) VALUES (1, '2011-01-01')", + f"INSERT INTO {table_name}(a,d) VALUES (1, '2011-01-01')", settings={"insert_quorum_timeout": 5000}, ) assert "Timeout while waiting for quorum" in quorum_timeout, "Query must fail." - assert TSV("1\t2011-01-01\n") == TSV( zero.query( - "SELECT * FROM test_drop_replica_and_achieve_quorum", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 0}, ) ) - assert TSV("") == TSV( zero.query( - "SELECT * FROM test_drop_replica_and_achieve_quorum", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 1}, ) ) - # TODO:(Mikhaylov) begin; maybe delete this lines. I want clickhouse to fetch parts and update quorum. print("START FETCHES first replica") - first.query("SYSTEM START FETCHES test_drop_replica_and_achieve_quorum") - + first.query(f"SYSTEM START FETCHES {table_name}") print("SYNC first replica") - first.query("SYSTEM SYNC REPLICA test_drop_replica_and_achieve_quorum", timeout=20) + first.query(f"SYSTEM SYNC REPLICA {table_name}", timeout=20) # TODO:(Mikhaylov) end - print("Add second replica") second.query(create_query) - print("SYNC second replica") - second.query("SYSTEM SYNC REPLICA test_drop_replica_and_achieve_quorum", timeout=20) - + second.query(f"SYSTEM SYNC REPLICA {table_name}", timeout=20) print("Quorum for previous insert achieved.") assert TSV("1\t2011-01-01\n") == TSV( second.query( - "SELECT * FROM test_drop_replica_and_achieve_quorum", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 1}, ) ) @@ -296,10 +288,11 @@ def test_insert_quorum_with_move_partition(started_cluster, add_new_data): def test_insert_quorum_with_ttl(started_cluster): - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") + table_name = "test_insert_quorum_with_ttl_" + uuid.uuid4().hex + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( - "CREATE TABLE test_insert_quorum_with_ttl " + f"CREATE TABLE {table_name} " "(a Int8, d Date) " "Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}') " "PARTITION BY d ORDER BY a " @@ -311,12 +304,12 @@ def test_insert_quorum_with_ttl(started_cluster): zero.query(create_query) first.query(create_query) - print("Stop fetches for test_insert_quorum_with_ttl at first replica.") - first.query("SYSTEM STOP FETCHES test_insert_quorum_with_ttl") + print(f"Stop fetches for {table_name} at first replica.") + first.query(f"SYSTEM STOP FETCHES {table_name}") print("Insert should fail since it can not reach the quorum.") quorum_timeout = zero.query_and_get_error( - "INSERT INTO test_insert_quorum_with_ttl(a,d) VALUES(1, '2011-01-01')", + f"INSERT INTO {table_name}(a,d) VALUES(1, '2011-01-01')", settings={"insert_quorum_timeout": 5000}, ) 
assert "Timeout while waiting for quorum" in quorum_timeout, "Query must fail." @@ -327,51 +320,52 @@ def test_insert_quorum_with_ttl(started_cluster): time.sleep(10) assert TSV("1\t2011-01-01\n") == TSV( zero.query( - "SELECT * FROM test_insert_quorum_with_ttl", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 0}, ) ) - print("Resume fetches for test_insert_quorum_with_ttl at first replica.") - first.query("SYSTEM START FETCHES test_insert_quorum_with_ttl") + print(f"Resume fetches for {table_name} at first replica.") + first.query(f"SYSTEM START FETCHES {table_name}") print("Sync first replica.") - first.query("SYSTEM SYNC REPLICA test_insert_quorum_with_ttl") + first.query(f"SYSTEM SYNC REPLICA {table_name}") zero.query( - "INSERT INTO test_insert_quorum_with_ttl(a,d) VALUES(1, '2011-01-01')", + f"INSERT INTO {table_name}(a,d) VALUES(1, '2011-01-01')", settings={"insert_quorum_timeout": 5000}, ) print("Inserts should resume.") - zero.query("INSERT INTO test_insert_quorum_with_ttl(a, d) VALUES(2, '2012-02-02')") + zero.query(f"INSERT INTO {table_name}(a, d) VALUES(2, '2012-02-02')") - first.query("OPTIMIZE TABLE test_insert_quorum_with_ttl") - first.query("SYSTEM SYNC REPLICA test_insert_quorum_with_ttl") - zero.query("SYSTEM SYNC REPLICA test_insert_quorum_with_ttl") + first.query(f"OPTIMIZE TABLE {table_name}") + first.query(f"SYSTEM SYNC REPLICA {table_name}") + zero.query(f"SYSTEM SYNC REPLICA {table_name}") assert TSV("2\t2012-02-02\n") == TSV( first.query( - "SELECT * FROM test_insert_quorum_with_ttl", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 0}, ) ) assert TSV("2\t2012-02-02\n") == TSV( first.query( - "SELECT * FROM test_insert_quorum_with_ttl", + f"SELECT * FROM {table_name}", settings={"select_sequential_consistency": 1}, ) ) - zero.query("DROP TABLE IF EXISTS test_insert_quorum_with_ttl ON CLUSTER cluster") + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") -def test_insert_quorum_with_keeper_loss_connection(): +def test_insert_quorum_with_keeper_loss_connection(started_cluster): + table_name = "test_insert_quorum_with_keeper_loss_" + uuid.uuid4().hex zero.query( - "DROP TABLE IF EXISTS test_insert_quorum_with_keeper_fail ON CLUSTER cluster" + f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster" ) create_query = ( - "CREATE TABLE test_insert_quorum_with_keeper_loss" + f"CREATE TABLE {table_name} " "(a Int8, d Date) " "Engine = ReplicatedMergeTree('/clickhouse/tables/{table}', '{replica}') " "ORDER BY a " @@ -380,7 +374,7 @@ def test_insert_quorum_with_keeper_loss_connection(): zero.query(create_query) first.query(create_query) - first.query("SYSTEM STOP FETCHES test_insert_quorum_with_keeper_loss") + first.query(f"SYSTEM STOP FETCHES {table_name}") zero.query("SYSTEM ENABLE FAILPOINT replicated_merge_tree_commit_zk_fail_after_op") zero.query("SYSTEM ENABLE FAILPOINT replicated_merge_tree_insert_retry_pause") @@ -388,7 +382,7 @@ def test_insert_quorum_with_keeper_loss_connection(): with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: insert_future = executor.submit( lambda: zero.query( - "INSERT INTO test_insert_quorum_with_keeper_loss(a,d) VALUES(1, '2011-01-01')", + f"INSERT INTO {table_name}(a,d) VALUES(1, '2011-01-01')", settings={"insert_quorum_timeout": 150000}, ) ) @@ -401,7 +395,7 @@ def test_insert_quorum_with_keeper_loss_connection(): while True: if ( zk.exists( - "/clickhouse/tables/test_insert_quorum_with_keeper_loss/replicas/zero/is_active" + 
f"/clickhouse/tables/{table_name}/replicas/zero/is_active" ) is None ): @@ -418,7 +412,7 @@ def test_insert_quorum_with_keeper_loss_connection(): "SYSTEM WAIT FAILPOINT finish_set_quorum_failed_parts", timeout=300 ) ) - first.query("SYSTEM START FETCHES test_insert_quorum_with_keeper_loss") + first.query(f"SYSTEM START FETCHES {table_name}") concurrent.futures.wait([quorum_fail_future]) From 3eedc74c5943f23ed4e360533e6e3bb5a6238109 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 5 Nov 2024 16:25:58 +0000 Subject: [PATCH 286/353] Reformatted because of style check --- tests/integration/test_quorum_inserts/test.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index de437fc3206..824cb371595 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -88,10 +88,8 @@ def test_simple_add_replica(started_cluster): def test_drop_replica_and_achieve_quorum(started_cluster): - table_name = "test_drop_replica_and_achieve_quorum_" + uuid.uuid4().hex - zero.query( - f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster" - ) + table_name = "test_drop_replica_and_achieve_quorum_" + uuid.uuid4().hex + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( f"CREATE TABLE {table_name} " "(a Int8, d Date) " @@ -361,9 +359,7 @@ def test_insert_quorum_with_ttl(started_cluster): def test_insert_quorum_with_keeper_loss_connection(started_cluster): table_name = "test_insert_quorum_with_keeper_loss_" + uuid.uuid4().hex - zero.query( - f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster" - ) + zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( f"CREATE TABLE {table_name} " "(a Int8, d Date) " @@ -394,9 +390,7 @@ def test_insert_quorum_with_keeper_loss_connection(started_cluster): zk = cluster.get_kazoo_client("zoo1") while True: if ( - zk.exists( - f"/clickhouse/tables/{table_name}/replicas/zero/is_active" - ) + zk.exists(f"/clickhouse/tables/{table_name}/replicas/zero/is_active") is None ): break From 27153bfc27d45a9fddddf070bb82c7f1e164b455 Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 5 Nov 2024 16:58:21 +0000 Subject: [PATCH 287/353] Resolve issues --- tests/integration/test_quorum_inserts/test.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index 824cb371595..7adc51121b4 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -1,8 +1,8 @@ import concurrent import time +import uuid import pytest -import uuid from helpers.cluster import ClickHouseCluster from helpers.network import PartitionManager @@ -48,7 +48,6 @@ def started_cluster(): def test_simple_add_replica(started_cluster): table_name = "test_simple_" + uuid.uuid4().hex - zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( f"CREATE TABLE {table_name} " @@ -89,7 +88,6 @@ def test_simple_add_replica(started_cluster): def test_drop_replica_and_achieve_quorum(started_cluster): table_name = "test_drop_replica_and_achieve_quorum_" + uuid.uuid4().hex - zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( f"CREATE TABLE {table_name} " "(a Int8, d Date) " @@ -287,7 +285,6 @@ def test_insert_quorum_with_move_partition(started_cluster, add_new_data): def 
test_insert_quorum_with_ttl(started_cluster): table_name = "test_insert_quorum_with_ttl_" + uuid.uuid4().hex - zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( f"CREATE TABLE {table_name} " @@ -359,7 +356,6 @@ def test_insert_quorum_with_ttl(started_cluster): def test_insert_quorum_with_keeper_loss_connection(started_cluster): table_name = "test_insert_quorum_with_keeper_loss_" + uuid.uuid4().hex - zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( f"CREATE TABLE {table_name} " "(a Int8, d Date) " From 0687f7a83f1a64abd586c5046dbc5ddda427e00a Mon Sep 17 00:00:00 2001 From: divanik Date: Tue, 5 Nov 2024 17:09:03 +0000 Subject: [PATCH 288/353] Resolve issue --- tests/integration/test_quorum_inserts/test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index 7adc51121b4..a646319c5f9 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -143,7 +143,7 @@ def test_insert_quorum_with_drop_partition(started_cluster, add_new_data): "test_quorum_insert_with_drop_partition_new_data" if add_new_data else "test_quorum_insert_with_drop_partition" - ) + ) + uuid.uuid4().hex zero.query(f"DROP TABLE IF EXISTS {table_name} ON CLUSTER cluster") create_query = ( @@ -206,12 +206,12 @@ def test_insert_quorum_with_move_partition(started_cluster, add_new_data): "test_insert_quorum_with_move_partition_source_new_data" if add_new_data else "test_insert_quorum_with_move_partition_source" - ) + ) + uuid.uuid4().hex destination_table_name = ( "test_insert_quorum_with_move_partition_destination_new_data" if add_new_data else "test_insert_quorum_with_move_partition_destination" - ) + ) + uuid.uuid4().hex zero.query(f"DROP TABLE IF EXISTS {source_table_name} ON CLUSTER cluster") zero.query(f"DROP TABLE IF EXISTS {destination_table_name} ON CLUSTER cluster") From 76683d021d96309bd3a19d2afde36f9ba802814f Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Tue, 5 Nov 2024 17:22:08 +0000 Subject: [PATCH 289/353] Fix constants in WHERE expression which could apparently contain Join. --- src/Interpreters/ExpressionAnalyzer.cpp | 8 +++++-- ...3258_old_analyzer_const_expr_bug.reference | 0 .../03258_old_analyzer_const_expr_bug.sql | 23 +++++++++++++++++++ 3 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 tests/queries/0_stateless/03258_old_analyzer_const_expr_bug.reference create mode 100644 tests/queries/0_stateless/03258_old_analyzer_const_expr_bug.sql diff --git a/src/Interpreters/ExpressionAnalyzer.cpp b/src/Interpreters/ExpressionAnalyzer.cpp index 4e5cf7d2549..a89e8ca9b3c 100644 --- a/src/Interpreters/ExpressionAnalyzer.cpp +++ b/src/Interpreters/ExpressionAnalyzer.cpp @@ -1981,7 +1981,9 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( Block before_prewhere_sample = source_header; if (sanitizeBlock(before_prewhere_sample)) { - before_prewhere_sample = prewhere_dag_and_flags->dag.updateHeader(before_prewhere_sample); + ExpressionActions( + prewhere_dag_and_flags->dag.clone(), + ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_prewhere_sample); auto & column_elem = before_prewhere_sample.getByName(query.prewhere()->getColumnName()); /// If the filter column is a constant, record it. 
if (column_elem.column) @@ -2013,7 +2015,9 @@ ExpressionAnalysisResult::ExpressionAnalysisResult( before_where_sample = source_header; if (sanitizeBlock(before_where_sample)) { - before_where_sample = before_where->dag.updateHeader(before_where_sample); + ExpressionActions( + before_where->dag.clone(), + ExpressionActionsSettings::fromSettings(context->getSettingsRef())).execute(before_where_sample); auto & column_elem = before_where_sample.getByName(query.where()->getColumnName()); diff --git a/tests/queries/0_stateless/03258_old_analyzer_const_expr_bug.reference b/tests/queries/0_stateless/03258_old_analyzer_const_expr_bug.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03258_old_analyzer_const_expr_bug.sql b/tests/queries/0_stateless/03258_old_analyzer_const_expr_bug.sql new file mode 100644 index 00000000000..913de3b849c --- /dev/null +++ b/tests/queries/0_stateless/03258_old_analyzer_const_expr_bug.sql @@ -0,0 +1,23 @@ +WITH + multiIf('-1' = '-1', 10080, '-1' = '7', 60, '-1' = '1', 5, 1440) AS interval_start, -- noqa + multiIf('-1' = '-1', CEIL((today() - toDate('2017-06-22')) / 7)::UInt16, '-1' = '7', 168, '-1' = '1', 288, 90) AS days_run, -- noqa:L045 + block_time as (SELECT arrayJoin( + arrayMap( + i -> toDateTime(toStartOfInterval(now(), INTERVAL interval_start MINUTE) - interval_start * 60 * i, 'UTC'), + range(days_run) + ) + )), + +sales AS ( + SELECT + toDateTime(toStartOfInterval(now(), INTERVAL interval_start MINUTE), 'UTC') AS block_time + FROM + numbers(1) + GROUP BY + block_time + ORDER BY + block_time) + +SELECT + block_time +FROM sales where block_time >= (SELECT MIN(block_time) FROM sales) format Null; From 349010012e7f29ad38b159e99dce7f297f076f63 Mon Sep 17 00:00:00 2001 From: justindeguzman Date: Tue, 5 Nov 2024 09:41:01 -0800 Subject: [PATCH 290/353] [Docs] Add cloud not supported badge for EmbeddedRocksDB engine --- .../engines/table-engines/integrations/embedded-rocksdb.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md index 1958250ed73..41c4e8fc4a9 100644 --- a/docs/en/engines/table-engines/integrations/embedded-rocksdb.md +++ b/docs/en/engines/table-engines/integrations/embedded-rocksdb.md @@ -4,9 +4,13 @@ sidebar_position: 50 sidebar_label: EmbeddedRocksDB --- +import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge'; + # EmbeddedRocksDB Engine -This engine allows integrating ClickHouse with [rocksdb](http://rocksdb.org/). + + +This engine allows integrating ClickHouse with [RocksDB](http://rocksdb.org/). ## Creating a Table {#creating-a-table} From 27efa296849e1aaa649adb51ef280410169d8018 Mon Sep 17 00:00:00 2001 From: Mikhail Artemenko Date: Tue, 5 Nov 2024 18:04:59 +0000 Subject: [PATCH 291/353] update docs --- .../statements/select/order-by.md | 61 ++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/docs/en/sql-reference/statements/select/order-by.md b/docs/en/sql-reference/statements/select/order-by.md index 512a58d7cd9..25d2e7123fd 100644 --- a/docs/en/sql-reference/statements/select/order-by.md +++ b/docs/en/sql-reference/statements/select/order-by.md @@ -291,7 +291,7 @@ All missed values of `expr` column will be filled sequentially and other columns To fill multiple columns, add `WITH FILL` modifier with optional parameters after each field name in `ORDER BY` section. 
``` sql -ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_expr], ... exprN [WITH FILL] [FROM expr] [TO expr] [STEP numeric_expr] +ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_expr] [STALENESS const_numeric_expr], ... exprN [WITH FILL] [FROM expr] [TO expr] [STEP numeric_expr] [STALENESS numeric_expr] [INTERPOLATE [(col [AS expr], ... colN [AS exprN])]] ``` @@ -300,6 +300,7 @@ When `FROM const_expr` not defined sequence of filling use minimal `expr` field When `TO const_expr` not defined sequence of filling use maximum `expr` field value from `ORDER BY`. When `STEP const_numeric_expr` defined then `const_numeric_expr` interprets `as is` for numeric types, as `days` for Date type, as `seconds` for DateTime type. It also supports [INTERVAL](https://clickhouse.com/docs/en/sql-reference/data-types/special-data-types/interval/) data type representing time and date intervals. When `STEP const_numeric_expr` omitted then sequence of filling use `1.0` for numeric type, `1 day` for Date type and `1 second` for DateTime type. +When `STALENESS const_numeric_expr` is defined, the query will generate rows until the difference from the previous row in the original data exceeds `const_numeric_expr`. `INTERPOLATE` can be applied to columns not participating in `ORDER BY WITH FILL`. Such columns are filled based on previous fields values by applying `expr`. If `expr` is not present will repeat previous value. Omitted list will result in including all allowed columns. Example of a query without `WITH FILL`: @@ -497,6 +498,64 @@ Result: └────────────┴────────────┴──────────┘ ``` +Example of a query without `STALENESS`: + +``` sql +SELECT number as key, 5 * number value, 'original' AS source +FROM numbers(16) WHERE key % 5 == 0 +ORDER BY key WITH FILL; +``` + +Result: + +``` text + ┌─key─┬─value─┬─source───┐ + 1. │ 0 │ 0 │ original │ + 2. │ 1 │ 0 │ │ + 3. │ 2 │ 0 │ │ + 4. │ 3 │ 0 │ │ + 5. │ 4 │ 0 │ │ + 6. │ 5 │ 25 │ original │ + 7. │ 6 │ 0 │ │ + 8. │ 7 │ 0 │ │ + 9. │ 8 │ 0 │ │ +10. │ 9 │ 0 │ │ +11. │ 10 │ 50 │ original │ +12. │ 11 │ 0 │ │ +13. │ 12 │ 0 │ │ +14. │ 13 │ 0 │ │ +15. │ 14 │ 0 │ │ +16. │ 15 │ 75 │ original │ + └─────┴───────┴──────────┘ +``` + +Same query after applying `STALENESS 3`: + +``` sql +SELECT number as key, 5 * number value, 'original' AS source +FROM numbers(16) WHERE key % 5 == 0 +ORDER BY key WITH FILL STALENESS 3; +``` + +Result: + +``` text + ┌─key─┬─value─┬─source───┐ + 1. │ 0 │ 0 │ original │ + 2. │ 1 │ 0 │ │ + 3. │ 2 │ 0 │ │ + 4. │ 5 │ 25 │ original │ + 5. │ 6 │ 0 │ │ + 6. │ 7 │ 0 │ │ + 7. │ 10 │ 50 │ original │ + 8. │ 11 │ 0 │ │ + 9. │ 12 │ 0 │ │ +10. │ 15 │ 75 │ original │ +11. │ 16 │ 0 │ │ +12. 
│ 17 │ 0 │ │ + └─────┴───────┴──────────┘ +``` + Example of a query without `INTERPOLATE`: ``` sql From 9ec0dda6eeb52c482b4e1e5929b2e03f61672659 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Tue, 5 Nov 2024 20:40:32 +0100 Subject: [PATCH 292/353] Prevent crash in SortCursor with 0 columns --- src/Core/SortCursor.h | 19 +++++++++++---- .../IMergingAlgorithmWithDelayedChunk.cpp | 9 +++++-- .../IMergingAlgorithmWithSharedChunks.cpp | 5 ++-- .../Algorithms/MergingSortedAlgorithm.cpp | 4 ++-- .../Transforms/MergeJoinTransform.cpp | 2 +- .../Transforms/SortingTransform.cpp | 2 +- .../03261_sort_cursor_crash.reference | 4 ++++ .../0_stateless/03261_sort_cursor_crash.sql | 24 +++++++++++++++++++ 8 files changed, 57 insertions(+), 12 deletions(-) create mode 100644 tests/queries/0_stateless/03261_sort_cursor_crash.reference create mode 100644 tests/queries/0_stateless/03261_sort_cursor_crash.sql diff --git a/src/Core/SortCursor.h b/src/Core/SortCursor.h index 3d568be199c..6eb009fa259 100644 --- a/src/Core/SortCursor.h +++ b/src/Core/SortCursor.h @@ -35,6 +35,11 @@ namespace DB { +namespace ErrorCodes +{ +extern const int LOGICAL_ERROR; +} + /** Cursor allows to compare rows in different blocks (and parts). * Cursor moves inside single block. * It is used in priority queue. @@ -83,21 +88,27 @@ struct SortCursorImpl SortCursorImpl( const Block & header, const Columns & columns, + size_t num_rows, const SortDescription & desc_, size_t order_ = 0, IColumn::Permutation * perm = nullptr) : desc(desc_), sort_columns_size(desc.size()), order(order_), need_collation(desc.size()) { - reset(columns, header, perm); + reset(columns, header, num_rows, perm); } bool empty() const { return rows == 0; } /// Set the cursor to the beginning of the new block. - void reset(const Block & block, IColumn::Permutation * perm = nullptr) { reset(block.getColumns(), block, perm); } + void reset(const Block & block, IColumn::Permutation * perm = nullptr) + { + if (block.getColumns().empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Empty column list in block"); + reset(block.getColumns(), block, block.getColumns()[0]->size(), perm); + } /// Set the cursor to the beginning of the new block. 
- void reset(const Columns & columns, const Block & block, IColumn::Permutation * perm = nullptr) + void reset(const Columns & columns, const Block & block, UInt64 num_rows, IColumn::Permutation * perm = nullptr) { all_columns.clear(); sort_columns.clear(); @@ -125,7 +136,7 @@ struct SortCursorImpl } pos = 0; - rows = all_columns[0]->size(); + rows = num_rows; permutation = perm; } diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp index cbad6813fbc..5e271e12943 100644 --- a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithDelayedChunk.cpp @@ -24,7 +24,12 @@ void IMergingAlgorithmWithDelayedChunk::initializeQueue(Inputs inputs) continue; cursors[source_num] = SortCursorImpl( - header, current_inputs[source_num].chunk.getColumns(), description, source_num, current_inputs[source_num].permutation); + header, + current_inputs[source_num].chunk.getColumns(), + current_inputs[source_num].chunk.getNumRows(), + description, + source_num, + current_inputs[source_num].permutation); inputs_origin_merge_tree_part_level[source_num] = getPartLevelFromChunk(current_inputs[source_num].chunk); } @@ -41,7 +46,7 @@ void IMergingAlgorithmWithDelayedChunk::updateCursor(Input & input, size_t sourc last_chunk_sort_columns = std::move(cursors[source_num].sort_columns); current_input.swap(input); - cursors[source_num].reset(current_input.chunk.getColumns(), header, current_input.permutation); + cursors[source_num].reset(current_input.chunk.getColumns(), header, current_input.chunk.getNumRows(), current_input.permutation); inputs_origin_merge_tree_part_level[source_num] = getPartLevelFromChunk(current_input.chunk); diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp index 47b7ddf38dc..f99f021286e 100644 --- a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.cpp @@ -31,7 +31,8 @@ void IMergingAlgorithmWithSharedChunks::initialize(Inputs inputs) source.skip_last_row = inputs[source_num].skip_last_row; source.chunk = chunk_allocator.alloc(inputs[source_num].chunk); - cursors[source_num] = SortCursorImpl(header, source.chunk->getColumns(), description, source_num, inputs[source_num].permutation); + cursors[source_num] = SortCursorImpl( + header, source.chunk->getColumns(), source.chunk->getNumRows(), description, source_num, inputs[source_num].permutation); source.chunk->all_columns = cursors[source_num].all_columns; source.chunk->sort_columns = cursors[source_num].sort_columns; @@ -49,7 +50,7 @@ void IMergingAlgorithmWithSharedChunks::consume(Input & input, size_t source_num auto & source = sources[source_num]; source.skip_last_row = input.skip_last_row; source.chunk = chunk_allocator.alloc(input.chunk); - cursors[source_num].reset(source.chunk->getColumns(), header, input.permutation); + cursors[source_num].reset(source.chunk->getColumns(), header, source.chunk->getNumRows(), input.permutation); source.chunk->all_columns = cursors[source_num].all_columns; source.chunk->sort_columns = cursors[source_num].sort_columns; diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp index 3a9cf7ee141..28c6cb473e5 100644 --- 
a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.cpp @@ -59,7 +59,7 @@ void MergingSortedAlgorithm::initialize(Inputs inputs) if (!chunk) continue; - cursors[source_num] = SortCursorImpl(header, chunk.getColumns(), description, source_num); + cursors[source_num] = SortCursorImpl(header, chunk.getColumns(), chunk.getNumRows(), description, source_num); } if (sorting_queue_strategy == SortingQueueStrategy::Default) @@ -84,7 +84,7 @@ void MergingSortedAlgorithm::consume(Input & input, size_t source_num) { removeConstAndSparse(input); current_inputs[source_num].swap(input); - cursors[source_num].reset(current_inputs[source_num].chunk.getColumns(), header); + cursors[source_num].reset(current_inputs[source_num].chunk.getColumns(), header, current_inputs[source_num].chunk.getNumRows()); if (sorting_queue_strategy == SortingQueueStrategy::Default) { diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index 1675e5d0386..77a437d4b97 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -394,7 +394,7 @@ void FullMergeJoinCursor::setChunk(Chunk && chunk) convertToFullIfSparse(chunk); current_chunk = std::move(chunk); - cursor = SortCursorImpl(sample_block, current_chunk.getColumns(), desc); + cursor = SortCursorImpl(sample_block, current_chunk.getColumns(), current_chunk.getNumRows(), desc); } bool FullMergeJoinCursor::fullyCompleted() const diff --git a/src/Processors/Transforms/SortingTransform.cpp b/src/Processors/Transforms/SortingTransform.cpp index 6e65093e9e2..6a11354e2bf 100644 --- a/src/Processors/Transforms/SortingTransform.cpp +++ b/src/Processors/Transforms/SortingTransform.cpp @@ -42,7 +42,7 @@ MergeSorter::MergeSorter(const Block & header, Chunks chunks_, SortDescription & /// Convert to full column, because some cursors expect non-contant columns convertToFullIfConst(chunk); - cursors.emplace_back(header, chunk.getColumns(), description, chunk_index); + cursors.emplace_back(header, chunk.getColumns(), chunk.getNumRows(), description, chunk_index); has_collation |= cursors.back().has_collation; nonempty_chunks.emplace_back(std::move(chunk)); diff --git a/tests/queries/0_stateless/03261_sort_cursor_crash.reference b/tests/queries/0_stateless/03261_sort_cursor_crash.reference new file mode 100644 index 00000000000..7299f2f5a5f --- /dev/null +++ b/tests/queries/0_stateless/03261_sort_cursor_crash.reference @@ -0,0 +1,4 @@ +42 +43 +44 +45 diff --git a/tests/queries/0_stateless/03261_sort_cursor_crash.sql b/tests/queries/0_stateless/03261_sort_cursor_crash.sql new file mode 100644 index 00000000000..b659f3d4a92 --- /dev/null +++ b/tests/queries/0_stateless/03261_sort_cursor_crash.sql @@ -0,0 +1,24 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/70779 +-- Crash in SortCursorImpl with the old analyzer, which produces a block with 0 columns and 1 row +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +CREATE TABLE t0 (c0 Int) ENGINE = AggregatingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT 42 FROM t0 FINAL PREWHERE t0.c0 = 1; +DROP TABLE t0; + +CREATE TABLE t0 (c0 Int) ENGINE = SummingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT 43 FROM t0 FINAL PREWHERE t0.c0 = 1; +DROP TABLE t0; + +CREATE TABLE t0 (c0 Int) ENGINE = ReplacingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT 44 FROM t0 FINAL 
PREWHERE t0.c0 = 1; +DROP TABLE t0; + +CREATE TABLE t1 (a0 UInt8, c0 Int32, c1 UInt8) ENGINE = AggregatingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t1 (a0, c0, c1) VALUES (1, 1, 1); +SELECT 45 FROM t1 FINAL PREWHERE t1.c0 = t1.c1; +DROP TABLE t1; From 9931b61d6fc0989facbc430d353e611d70d44b5c Mon Sep 17 00:00:00 2001 From: Nikita Taranov Date: Tue, 5 Nov 2024 20:56:04 +0100 Subject: [PATCH 293/353] fix test --- ...03255_parallel_replicas_join_algo_and_analyzer_4.reference | 4 ++-- .../03255_parallel_replicas_join_algo_and_analyzer_4.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference index 52c4e872f84..d846b26b72b 100644 --- a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference +++ b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.reference @@ -84,7 +84,7 @@ SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` GROUP 500020000 500030000 500040000 -SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` GLOBAL ALL LEFT JOIN `_data_4551627371769371400_3093038500622465792` AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC +SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` GLOBAL ALL LEFT JOIN `_data_x_y_` AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC 4999950000 4999950000 SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t` AS `__table1` GROUP BY `__table1`.`item_id` @@ -113,4 +113,4 @@ SELECT `__table1`.`item_id` AS `item_id` FROM `default`.`t1` AS `__table1` GROUP 500020000 500030000 500040000 -SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` GLOBAL ALL LEFT JOIN `_data_4551627371769371400_3093038500622465792` AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC +SELECT sum(`__table1`.`item_id`) AS `sum(item_id)` FROM (SELECT `__table2`.`item_id` AS `item_id`, `__table2`.`price_sold` AS `price_sold` FROM `default`.`t` AS `__table2`) AS `__table1` GLOBAL ALL LEFT JOIN `_data_x_y_` AS `__table3` ON `__table1`.`item_id` = `__table3`.`item_id` GROUP BY `__table1`.`price_sold` ORDER BY `__table1`.`price_sold` ASC diff --git a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh index 18a2fbd317b..19866f26949 100755 --- a/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh +++ b/tests/queries/0_stateless/03255_parallel_replicas_join_algo_and_analyzer_4.sh @@ -88,7 +88,7 @@ for parallel_replicas_prefer_local_join in 1 0; do --SELECT '----- enable_parallel_replicas=$enable_parallel_replicas prefer_local_plan=$prefer_local_plan parallel_replicas_prefer_local_join=$parallel_replicas_prefer_local_join -----'; ${query}; - SELECT replaceRegexpAll(explain, 
'.*Query: (.*) Replicas:.*', '\\1') + SELECT replaceRegexpAll(replaceRegexpAll(explain, '.*Query: (.*) Replicas:.*', '\\1'), '(.*)_data_[\d]+_[\d]+(.*)', '\1_data_x_y_\2') FROM ( EXPLAIN actions=1 ${query} From 24c5ef9a052b464671cfb78e887b11237281f53b Mon Sep 17 00:00:00 2001 From: alesapin Date: Tue, 5 Nov 2024 23:08:15 +0100 Subject: [PATCH 294/353] Expose base setting for merge selector --- src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 2 ++ src/Storages/MergeTree/MergeTreeSettings.cpp | 1 + 2 files changed, 3 insertions(+) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 62ad9d4a52a..6b9638b11d2 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -71,6 +71,7 @@ namespace MergeTreeSetting extern const MergeTreeSettingsUInt64 parts_to_throw_insert; extern const MergeTreeSettingsMergeSelectorAlgorithm merge_selector_algorithm; extern const MergeTreeSettingsBool merge_selector_enable_heuristic_to_remove_small_parts_at_right; + extern const MergeTreeSettingsFloat merge_selector_base; } namespace ErrorCodes @@ -542,6 +543,7 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( simple_merge_settings.window_size = (*data_settings)[MergeTreeSetting::merge_selector_window_size]; simple_merge_settings.max_parts_to_merge_at_once = (*data_settings)[MergeTreeSetting::max_parts_to_merge_at_once]; simple_merge_settings.enable_heuristic_to_remove_small_parts_at_right = (*data_settings)[MergeTreeSetting::merge_selector_enable_heuristic_to_remove_small_parts_at_right]; + simple_merge_settings.base = (*data_settings)[MergeTreeSetting::merge_selector_base]; if (!(*data_settings)[MergeTreeSetting::min_age_to_force_merge_on_partition_only]) simple_merge_settings.min_age_to_force_merge = (*data_settings)[MergeTreeSetting::min_age_to_force_merge_seconds]; diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index 883191d59ab..33910d1048d 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -101,6 +101,7 @@ namespace ErrorCodes DECLARE(Milliseconds, background_task_preferred_step_execution_time_ms, 50, "Target time to execution of one step of merge or mutation. Can be exceeded if one step takes longer time", 0) \ DECLARE(MergeSelectorAlgorithm, merge_selector_algorithm, MergeSelectorAlgorithm::SIMPLE, "The algorithm to select parts for merges assignment", EXPERIMENTAL) \ DECLARE(Bool, merge_selector_enable_heuristic_to_remove_small_parts_at_right, true, "Enable heuristic for selecting parts for merge which removes parts from right side of range, if their size is less than specified ratio (0.01) of sum_size. Works for Simple and StochasticSimple merge selectors", 0) \ + DECLARE(Float, merge_selector_base, 5.0, "Affects write amplification of assigned merges (expert level setting, don't change if you don't understand what it is doing). Works for Simple and StochasticSimple merge selectors", 0) \ \ /** Inserts settings. */ \ DECLARE(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. 
Disabled if set to 0", 0) \ From 0c1aa03cb172ca666b7054863626d563e1de21e7 Mon Sep 17 00:00:00 2001 From: justindeguzman Date: Wed, 6 Nov 2024 00:05:55 -0800 Subject: [PATCH 295/353] [Docs] Update note about Prometheus integration and ClickHouse Cloud --- docs/en/interfaces/prometheus.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/interfaces/prometheus.md b/docs/en/interfaces/prometheus.md index 8e7023cc51f..11f503b54d7 100644 --- a/docs/en/interfaces/prometheus.md +++ b/docs/en/interfaces/prometheus.md @@ -9,7 +9,7 @@ sidebar_label: Prometheus protocols ## Exposing metrics {#expose} :::note -ClickHouse Cloud does not currently support connecting to Prometheus. To be notified when this feature is supported, please contact support@clickhouse.com. +If you are using ClickHouse Cloud, you can expose metrics to Prometheus using the [Prometheus Integration](/en/integrations/prometheus). ::: ClickHouse can expose its own metrics for scraping from Prometheus: From 4f8099d7aa6d1dff2ad79fc020810fe36a3cfd3b Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 6 Nov 2024 08:51:44 +0000 Subject: [PATCH 296/353] Simplify the code --- .../MergeTreeIndexVectorSimilarity.cpp | 81 +++++++++---------- .../0_stateless/02354_vector_search_bugs.sql | 2 +- 2 files changed, 40 insertions(+), 43 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index 498d0131d5a..e55010ac9ec 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -345,60 +345,57 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ throw Exception(ErrorCodes::INCORRECT_DATA, "Index granularity is too big: more than {} rows per index granule.", std::numeric_limits::max()); if (index_sample_block.columns() > 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected block with single column"); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected that index is build over a single column"); - for (size_t i = 0; i < index_sample_block.columns(); ++i) - { - const auto & index_column_with_type_and_name = index_sample_block.getByPosition(i); + const auto & index_column_with_type_and_name = index_sample_block.getByPosition(0); - const auto & index_column_name = index_column_with_type_and_name.name; - const auto & index_column = block.getByName(index_column_name).column; - ColumnPtr column_cut = index_column->cut(*pos, rows_read); + const auto & index_column_name = index_column_with_type_and_name.name; + const auto & index_column = block.getByName(index_column_name).column; + ColumnPtr column_cut = index_column->cut(*pos, rows_read); - const auto * column_array = typeid_cast(column_cut.get()); - if (!column_array) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array(Float*) column"); + const auto * column_array = typeid_cast(column_cut.get()); + if (!column_array) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array(Float*) column"); - if (column_array->empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); + if (column_array->empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); - /// The vector similarity algorithm naturally assumes that the indexed vectors have dimension >= 1. 
This condition is violated if empty arrays - /// are INSERTed into an vector-similarity-indexed column or if no value was specified at all in which case the arrays take on their default - /// values which is also empty. - if (column_array->isDefaultAt(0)) - throw Exception(ErrorCodes::INCORRECT_DATA, "The arrays in column '{}' must not be empty. Did you try to INSERT default values?", index_column_name); + /// The vector similarity algorithm naturally assumes that the indexed vectors have dimension >= 1. This condition is violated if empty arrays + /// are INSERTed into an vector-similarity-indexed column or if no value was specified at all in which case the arrays take on their default + /// values which is also empty. + if (column_array->isDefaultAt(0)) + throw Exception(ErrorCodes::INCORRECT_DATA, "The arrays in column '{}' must not be empty. Did you try to INSERT default values?", index_column_name); - const size_t rows = column_array->size(); + const size_t rows = column_array->size(); - const auto & column_array_offsets = column_array->getOffsets(); - const size_t dimensions = column_array_offsets[0]; + const auto & column_array_offsets = column_array->getOffsets(); + const size_t dimensions = column_array_offsets[0]; - if (!index) - index = std::make_shared(dimensions, metric_kind, scalar_kind, usearch_hnsw_params); + if (!index) + index = std::make_shared(dimensions, metric_kind, scalar_kind, usearch_hnsw_params); - /// Also check that previously inserted blocks have the same size as this block. - /// Note that this guarantees consistency of dimension only within parts. We are unable to detect inconsistent dimensions across - /// parts - for this, a little help from the user is needed, e.g. CONSTRAINT cnstr CHECK length(array) = 42. - if (index->dimensions() != dimensions) - throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column with vector similarity index must have equal length"); + /// Also check that previously inserted blocks have the same size as this block. + /// Note that this guarantees consistency of dimension only within parts. We are unable to detect inconsistent dimensions across + /// parts - for this, a little help from the user is needed, e.g. CONSTRAINT cnstr CHECK length(array) = 42. 
+ if (index->dimensions() != dimensions) + throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column with vector similarity index must have equal length"); - /// We use Usearch's index_dense_t as index type which supports only 4 bio entries according to https://github.com/unum-cloud/usearch/tree/main/cpp - if (index->size() + rows > std::numeric_limits::max()) - throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index would exceed 4 billion entries"); + /// We use Usearch's index_dense_t as index type which supports only 4 bio entries according to https://github.com/unum-cloud/usearch/tree/main/cpp + if (index->size() + rows > std::numeric_limits::max()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index would exceed 4 billion entries"); - DataTypePtr data_type = index_column_with_type_and_name.type; - const auto * data_type_array = typeid_cast(data_type.get()); - if (!data_type_array) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); - const TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); + DataTypePtr data_type = index_column_with_type_and_name.type; + const auto * data_type_array = typeid_cast(data_type.get()); + if (!data_type_array) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); + const TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); - if (WhichDataType(nested_type_index).isFloat32()) - updateImpl(column_array, column_array_offsets, index, dimensions, rows); - else if (WhichDataType(nested_type_index).isFloat64()) - updateImpl(column_array, column_array_offsets, index, dimensions, rows); - else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); - } + if (WhichDataType(nested_type_index).isFloat32()) + updateImpl(column_array, column_array_offsets, index, dimensions, rows); + else if (WhichDataType(nested_type_index).isFloat64()) + updateImpl(column_array, column_array_offsets, index, dimensions, rows); + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); *pos += rows_read; diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.sql b/tests/queries/0_stateless/02354_vector_search_bugs.sql index 6bcb0f78e75..276d4eb5b59 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.sql +++ b/tests/queries/0_stateless/02354_vector_search_bugs.sql @@ -124,7 +124,7 @@ CREATE TABLE tab( val String, vec Array(Float32), INDEX ann_idx vec TYPE vector_similarity('hnsw', 'cosineDistance'), - INDEX set_idx val TYPE set(100) GRANULARITY 100 + INDEX set_idx val TYPE set(100) ) ENGINE = MergeTree() ORDER BY tuple(); From 918ad5c4d54c27b6c14e1221ae56a40dd937e2cc Mon Sep 17 00:00:00 2001 From: Ilya Golshtein Date: Wed, 6 Nov 2024 09:42:35 +0000 Subject: [PATCH 297/353] fix_test_drop_complex_columns: tests passed --- .../test.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/tests/integration/test_replicated_s3_zero_copy_drop_partition/test.py b/tests/integration/test_replicated_s3_zero_copy_drop_partition/test.py index 6d2bb0a3b70..9937c0ed4ea 100644 --- a/tests/integration/test_replicated_s3_zero_copy_drop_partition/test.py +++ b/tests/integration/test_replicated_s3_zero_copy_drop_partition/test.py @@ -68,9 +68,19 @@ CREATE TABLE test_s3(c1 Int8, c2 Date) ENGINE = ReplicatedMergeTree('/test/table def test_drop_complex_columns(started_cluster): + node1 = cluster.instances["node1"] + node1.query( + """ +CREATE TABLE 
warming_up( +id Int8 +) ENGINE = MergeTree +order by (id) SETTINGS storage_policy = 's3';""" + ) + + # Now we are sure that s3 storage is up and running start_objects = get_objects_in_data_path() print("Objects before", start_objects) - node1 = cluster.instances["node1"] + node1.query( """ CREATE TABLE test_s3_complex_types( @@ -104,3 +114,4 @@ vertical_merge_algorithm_min_columns_to_activate=1;""" end_objects = get_objects_in_data_path() print("Objects after drop", end_objects) assert start_objects == end_objects + node1.query("DROP TABLE warming_up SYNC") From b38dc1d8ca791c6fc686ae9d8efedeb77e354de2 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Wed, 6 Nov 2024 11:05:43 +0100 Subject: [PATCH 298/353] Update FileCache.cpp --- src/Interpreters/Cache/FileCache.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index ae3c9c58fc5..f7b7ffc5aea 100644 --- a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -1438,8 +1438,6 @@ void FileCache::loadMetadataForKeys(const fs::path & keys_dir) "cached file `{}` does not fit in cache anymore (size: {})", size_limit, offset_it->path().string(), size); - chassert(false); /// TODO: remove before merge. - fs::remove(offset_it->path()); } } From f0bb69f12667108659b5ed9803f4b290c7faafee Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 6 Nov 2024 11:46:49 +0000 Subject: [PATCH 299/353] Simplify more --- src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index e55010ac9ec..f95b840e223 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -347,9 +347,8 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ if (index_sample_block.columns() > 1) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected that index is build over a single column"); - const auto & index_column_with_type_and_name = index_sample_block.getByPosition(0); + const auto & index_column_name = index_sample_block.getByPosition(0).name; - const auto & index_column_name = index_column_with_type_and_name.name; const auto & index_column = block.getByName(index_column_name).column; ColumnPtr column_cut = index_column->cut(*pos, rows_read); @@ -384,8 +383,7 @@ void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_ if (index->size() + rows > std::numeric_limits::max()) throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index would exceed 4 billion entries"); - DataTypePtr data_type = index_column_with_type_and_name.type; - const auto * data_type_array = typeid_cast(data_type.get()); + const auto * data_type_array = typeid_cast(block.getByName(index_column_name).type.get()); if (!data_type_array) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected data type Array(Float*)"); const TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); From 7c6472a09034715bbeb8374667203076c3458e82 Mon Sep 17 00:00:00 2001 From: Joe Lynch Date: Wed, 6 Nov 2024 13:34:39 +0100 Subject: [PATCH 300/353] Fix documentation for system.grants.is_partial_revoke --- docs/en/operations/system-tables/grants.md | 4 ++-- src/Storages/System/StorageSystemGrants.cpp | 4 ++-- 2 files changed, 4 
insertions(+), 4 deletions(-) diff --git a/docs/en/operations/system-tables/grants.md b/docs/en/operations/system-tables/grants.md index 262a53a87a5..debc3146008 100644 --- a/docs/en/operations/system-tables/grants.md +++ b/docs/en/operations/system-tables/grants.md @@ -19,7 +19,7 @@ Columns: - `column` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Name of a column to which access is granted. - `is_partial_revoke` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Logical value. It shows whether some privileges have been revoked. Possible values: -- `0` — The row describes a partial revoke. -- `1` — The row describes a grant. +- `0` — The row describes a grant. +- `1` — The row describes a partial revoke. - `grant_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Permission is granted `WITH GRANT OPTION`, see [GRANT](../../sql-reference/statements/grant.md#granting-privilege-syntax). diff --git a/src/Storages/System/StorageSystemGrants.cpp b/src/Storages/System/StorageSystemGrants.cpp index 5de1f8cef55..aa010e44388 100644 --- a/src/Storages/System/StorageSystemGrants.cpp +++ b/src/Storages/System/StorageSystemGrants.cpp @@ -30,8 +30,8 @@ ColumnsDescription StorageSystemGrants::getColumnsDescription() {"column", std::make_shared(std::make_shared()), "Name of a column to which access is granted."}, {"is_partial_revoke", std::make_shared(), "Logical value. It shows whether some privileges have been revoked. Possible values: " - "0 — The row describes a partial revoke, " - "1 — The row describes a grant." + "0 — The row describes a grant, " + "1 — The row describes a partial revoke." }, {"grant_option", std::make_shared(), "Permission is granted WITH GRANT OPTION."}, }; From 9ee22533a067fc235aea65ff7b89c801b112b918 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Wed, 6 Nov 2024 13:46:30 +0100 Subject: [PATCH 301/353] Move bitShift function changelog entries to backward incompatible Move bitShift function changelog entries to backward incompatible --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 90285582b4e..dacee73440f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -488,6 +488,7 @@ * Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Function `tuple` will now try to construct named tuples in query (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)). * Change how deduplication for Materialized Views works. Fixed a lot of cases like: - on destination table: data is split for 2 or more blocks and that blocks is considered as duplicate when that block is inserted in parallel. - on MV destination table: the equal blocks are deduplicated, that happens when MV often produces equal data as a result for different input data due to performing aggregation. - on MV destination table: the equal blocks which comes from different MV are deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)). 
+* Functions `bitShiftLeft` and `bitShitfRight` return an error for out of bounds shift positions [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)). #### New Feature * Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)). @@ -599,7 +600,6 @@ * Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out-of-bounds [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)). * Setting `join_any_take_last_row` is supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)). * Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`), fix incorrect optimization when condition other then `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)). -* Functions `bitShiftLeft` and `bitShitfRight` return an error for out of bounds shift positions [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)). * Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)). * Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)). * Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)). 
From 533009b914761e317025b256b31474f44a9b4734 Mon Sep 17 00:00:00 2001 From: Denny Crane Date: Wed, 6 Nov 2024 08:57:32 -0400 Subject: [PATCH 302/353] Update AlterCommands.cpp --- src/Storages/AlterCommands.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index ab4403b3a94..c14775057a5 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1496,7 +1496,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const if (command.to_remove == AlterCommand::RemoveProperty::CODEC && column_from_table.codec == nullptr) throw Exception( ErrorCodes::BAD_ARGUMENTS, - "Column {} doesn't have TTL, cannot remove it", + "Column {} doesn't have CODEC, cannot remove it", backQuote(column_name)); if (command.to_remove == AlterCommand::RemoveProperty::COMMENT && column_from_table.comment.empty()) throw Exception( From e5b6a3c1fe9773953e01f7de161bc0c36a75b454 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Wed, 6 Nov 2024 14:33:25 +0100 Subject: [PATCH 303/353] Update 03261_tuple_map_object_to_json_cast.sql --- .../queries/0_stateless/03261_tuple_map_object_to_json_cast.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql index 91d3f504f92..2e5cecaf502 100644 --- a/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql +++ b/tests/queries/0_stateless/03261_tuple_map_object_to_json_cast.sql @@ -5,6 +5,7 @@ set allow_experimental_object_type = 1; set allow_experimental_variant_type = 1; set use_variant_as_common_type = 1; set enable_named_columns_in_function_tuple = 1; +set enable_analyzer = 1; select 'Map to JSON'; select map('a', number::UInt32, 'b', toDate(number), 'c', range(number), 'd', [map('e', number::UInt32)])::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); From d270885bfa52548dbf342b5ddacf8803a354d2a8 Mon Sep 17 00:00:00 2001 From: Amos Bird Date: Wed, 6 Nov 2024 21:37:47 +0800 Subject: [PATCH 304/353] Allow specifying cmdline flags in integration test --- tests/integration/helpers/cluster.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 6751f205fb8..e2237363131 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -1653,6 +1653,7 @@ class ClickHouseCluster: copy_common_configs=True, config_root_name="clickhouse", extra_configs=[], + extra_args="", randomize_settings=True, ) -> "ClickHouseInstance": """Add an instance to the cluster. 
@@ -1740,6 +1741,7 @@ class ClickHouseCluster: with_postgres_cluster=with_postgres_cluster, with_postgresql_java_client=with_postgresql_java_client, clickhouse_start_command=clickhouse_start_command, + clickhouse_start_extra_args=extra_args, main_config_name=main_config_name, users_config_name=users_config_name, copy_common_configs=copy_common_configs, @@ -3368,6 +3370,7 @@ class ClickHouseInstance: with_postgres_cluster, with_postgresql_java_client, clickhouse_start_command=CLICKHOUSE_START_COMMAND, + clickhouse_start_extra_args="", main_config_name="config.xml", users_config_name="users.xml", copy_common_configs=True, @@ -3463,11 +3466,18 @@ class ClickHouseInstance: self.users_config_name = users_config_name self.copy_common_configs = copy_common_configs - self.clickhouse_start_command = clickhouse_start_command.replace( + clickhouse_start_command_with_conf = clickhouse_start_command.replace( "{main_config_file}", self.main_config_name ) - self.clickhouse_stay_alive_command = "bash -c \"trap 'pkill tail' INT TERM; {} --daemon; coproc tail -f /dev/null; wait $$!\"".format( - clickhouse_start_command + + self.clickhouse_start_command = "{} -- {}".format( + clickhouse_start_command_with_conf, clickhouse_start_extra_args + ) + self.clickhouse_start_command_in_daemon = "{} --daemon -- {}".format( + clickhouse_start_command_with_conf, clickhouse_start_extra_args + ) + self.clickhouse_stay_alive_command = "bash -c \"trap 'pkill tail' INT TERM; {}; coproc tail -f /dev/null; wait $$!\"".format( + self.clickhouse_start_command_in_daemon ) self.path = p.join(self.cluster.instances_dir, name) @@ -3910,7 +3920,7 @@ class ClickHouseInstance: if pid is None: logging.debug("No clickhouse process running. Start new one.") self.exec_in_container( - ["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], + ["bash", "-c", self.clickhouse_start_command_in_daemon], user=str(os.getuid()), ) if expected_to_fail: @@ -4230,7 +4240,7 @@ class ClickHouseInstance: user="root", ) self.exec_in_container( - ["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], + ["bash", "-c", self.clickhouse_start_command_in_daemon], user=str(os.getuid()), ) @@ -4311,7 +4321,7 @@ class ClickHouseInstance: ] ) self.exec_in_container( - ["bash", "-c", "{} --daemon".format(self.clickhouse_start_command)], + ["bash", "-c", self.clickhouse_start_command_in_daemon], user=str(os.getuid()), ) @@ -4704,9 +4714,7 @@ class ClickHouseInstance: entrypoint_cmd = self.clickhouse_start_command if self.stay_alive: - entrypoint_cmd = self.clickhouse_stay_alive_command.replace( - "{main_config_file}", self.main_config_name - ) + entrypoint_cmd = self.clickhouse_stay_alive_command else: entrypoint_cmd = ( "[" From 71a0e7f07f41c0388b98849717240e845c53dc67 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 6 Nov 2024 13:34:05 +0000 Subject: [PATCH 305/353] Split tests --- ...> 02354_vector_search_bug_52282.reference} | 0 .../02354_vector_search_bug_52282.sql | 13 ++ ...> 02354_vector_search_bug_69085.reference} | 9 -- .../02354_vector_search_bug_69085.sql | 52 +++++++ .../02354_vector_search_bug_71381.reference | 0 .../02354_vector_search_bug_71381.sql | 20 +++ ...h_bug_adaptive_index_granularity.reference | 0 ..._search_bug_adaptive_index_granularity.sql | 20 +++ ...search_bug_different_array_sizes.reference | 0 ...ector_search_bug_different_array_sizes.sql | 24 ++++ ...ctor_search_bug_multiple_indexes.reference | 0 ...354_vector_search_bug_multiple_indexes.sql | 14 ++ ...vector_search_bug_multiple_marks.reference | 2 + 
...02354_vector_search_bug_multiple_marks.sql | 25 ++++ .../0_stateless/02354_vector_search_bugs.sql | 134 ------------------ .../02354_vector_search_multiple_indexes.sql | 1 + 16 files changed, 171 insertions(+), 143 deletions(-) rename tests/queries/0_stateless/{02354_vector_search_multiple_indexes.reference => 02354_vector_search_bug_52282.reference} (100%) create mode 100644 tests/queries/0_stateless/02354_vector_search_bug_52282.sql rename tests/queries/0_stateless/{02354_vector_search_bugs.reference => 02354_vector_search_bug_69085.reference} (68%) create mode 100644 tests/queries/0_stateless/02354_vector_search_bug_69085.sql create mode 100644 tests/queries/0_stateless/02354_vector_search_bug_71381.reference create mode 100644 tests/queries/0_stateless/02354_vector_search_bug_71381.sql create mode 100644 tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.reference create mode 100644 tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.sql create mode 100644 tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.reference create mode 100644 tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.sql create mode 100644 tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.reference create mode 100644 tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.sql create mode 100644 tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.reference create mode 100644 tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.sql delete mode 100644 tests/queries/0_stateless/02354_vector_search_bugs.sql diff --git a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference b/tests/queries/0_stateless/02354_vector_search_bug_52282.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference rename to tests/queries/0_stateless/02354_vector_search_bug_52282.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_52282.sql b/tests/queries/0_stateless/02354_vector_search_bug_52282.sql new file mode 100644 index 00000000000..b8066ce278a --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_52282.sql @@ -0,0 +1,13 @@ +-- Tags: no-fasttest, no-ordinary-database + +SET allow_experimental_vector_similarity_index = 1; + +-- Issue #52258: Vector similarity indexes must reject empty Arrays or Arrays with default values + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree() ORDER BY id; +INSERT INTO tab VALUES (1, []); -- { serverError INCORRECT_DATA } +INSERT INTO tab (id) VALUES (1); -- { serverError INCORRECT_DATA } + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.reference b/tests/queries/0_stateless/02354_vector_search_bug_69085.reference similarity index 68% rename from tests/queries/0_stateless/02354_vector_search_bugs.reference rename to tests/queries/0_stateless/02354_vector_search_bug_69085.reference index dec921cf586..3b4e2d9ef17 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.reference +++ b/tests/queries/0_stateless/02354_vector_search_bug_69085.reference @@ -1,10 +1,3 @@ -Rejects INSERTs of Arrays with different sizes -Issue #52258: Empty Arrays or Arrays with default values are rejected -It is possible to create parts with different Array vector sizes but there will be an error at query time -Correctness of index 
with > 1 mark -1 [1,0] 0 -9000 [9000,0] 0 -Issue #69085: Reference vector computed by a subquery Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) @@ -40,5 +33,3 @@ Expression (Projection) Condition: true Parts: 1/1 Granules: 4/4 -index_granularity_bytes = 0 is disallowed -Issue #71381: Vector similarity index and other skipping indexes used on the same table diff --git a/tests/queries/0_stateless/02354_vector_search_bug_69085.sql b/tests/queries/0_stateless/02354_vector_search_bug_69085.sql new file mode 100644 index 00000000000..4dbcdf66e36 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_69085.sql @@ -0,0 +1,52 @@ +-- Tags: no-fasttest, no-ordinary-database + +SET allow_experimental_vector_similarity_index = 1; +SET enable_analyzer = 0; + +-- Issue #69085: Reference vector for vector search is computed by a subquery + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); + +-- works +EXPLAIN indexes = 1 +WITH [0., 2.] AS reference_vec +SELECT + id, + vec, + cosineDistance(vec, reference_vec) AS distance +FROM tab +ORDER BY distance +LIMIT 1; + +-- does not work +EXPLAIN indexes = 1 +WITH ( + SELECT vec + FROM tab + LIMIT 1 +) AS reference_vec +SELECT + id, + vec, + cosineDistance(vec, reference_vec) AS distance +FROM tab +ORDER BY distance +LIMIT 1; + +-- does not work as well +EXPLAIN indexes = 1 +WITH ( + SELECT [0., 2.] 
+) AS reference_vec +SELECT + id, + vec, + cosineDistance(vec, reference_vec) AS distance +FROM tab +ORDER BY distance +LIMIT 1; + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bug_71381.reference b/tests/queries/0_stateless/02354_vector_search_bug_71381.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_bug_71381.sql b/tests/queries/0_stateless/02354_vector_search_bug_71381.sql new file mode 100644 index 00000000000..9e3246700b8 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_71381.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest, no-ordinary-database + +SET allow_experimental_vector_similarity_index = 1; + +-- Issue #71381: Usage of vector similarity index and further skipping indexes on the same table + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab( + val String, + vec Array(Float32), + INDEX ann_idx vec TYPE vector_similarity('hnsw', 'cosineDistance'), + INDEX set_idx val TYPE set(100) +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO tab VALUES ('hello world', [0.0]); + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.reference b/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.sql b/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.sql new file mode 100644 index 00000000000..208b5b7a874 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests that vector similarity indexes cannot be created with index_granularity_bytes = 0 + +SET allow_experimental_vector_similarity_index = 1; + +DROP TABLE IF EXISTS tab; + +-- If adaptive index granularity is disabled, certain vector search queries with PREWHERE run into LOGICAL_ERRORs. +-- SET allow_experimental_vector_similarity_index = 1; +-- CREATE TABLE tab (`id` Int32, `vec` Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 100000000) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; +-- INSERT INTO tab SELECT number, [toFloat32(number), 0.] FROM numbers(10000); +-- WITH [1., 0.] AS reference_vec SELECT id, L2Distance(vec, reference_vec) FROM tab PREWHERE toLowCardinality(10) ORDER BY L2Distance(vec, reference_vec) ASC LIMIT 100; +-- As a workaround, force enabled adaptive index granularity for now (it is the default anyways). 
+CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; -- { serverError INVALID_SETTING_VALUE } + +CREATE TABLE tab(id Int32, vec Array(Float32)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; +ALTER TABLE tab ADD INDEX vec_idx1(vec) TYPE vector_similarity('hnsw', 'cosineDistance'); -- { serverError INVALID_SETTING_VALUE } + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.reference b/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.sql b/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.sql new file mode 100644 index 00000000000..41b9d7869e4 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.sql @@ -0,0 +1,24 @@ +-- Tags: no-fasttest, no-ordinary-database + +SET allow_experimental_vector_similarity_index = 1; +SET enable_analyzer = 1; -- 0 vs. 1 produce slightly different error codes, make it future-proof + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; + +-- Vector similarity indexes reject INSERTs of Arrays with different sizes +INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA } + +-- It is possible to create parts with different Array vector sizes but there will be an error at query time +SYSTEM STOP MERGES tab; +INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2]); +INSERT INTO tab values (2, [2.2, 2.3, 2.4]) (3, [3.1, 3.2, 3.3]); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.reference b/tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.sql b/tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.sql new file mode 100644 index 00000000000..f1cfc041233 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests that multiple vector similarity indexes can be created on the same column (even if that makes no sense) + +SET allow_experimental_vector_similarity_index = 1; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (id Int32, vec Array(Float32), PRIMARY KEY id, INDEX vec_idx(vec) TYPE vector_similarity('hnsw', 'L2Distance')); + +ALTER TABLE tab ADD INDEX idx(vec) TYPE minmax; +ALTER TABLE tab ADD INDEX vec_idx1(vec) TYPE vector_similarity('hnsw', 'cosineDistance'); +ALTER TABLE tab ADD INDEX vec_idx2(vec) TYPE vector_similarity('hnsw', 'L2Distance'); -- silly but creating the same index also works for non-vector indexes ... 
+ +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.reference b/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.reference new file mode 100644 index 00000000000..117bf2cead8 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.reference @@ -0,0 +1,2 @@ +1 [1,0] 0 +9000 [9000,0] 0 diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.sql b/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.sql new file mode 100644 index 00000000000..fb99dd2361c --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.sql @@ -0,0 +1,25 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests correctness of vector similarity index with > 1 mark + +SET allow_experimental_vector_similarity_index = 1; +SET enable_analyzer = 0; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; +INSERT INTO tab SELECT number, [toFloat32(number), 0.0] from numbers(10000); + +WITH [1.0, 0.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 1; + +WITH [9000.0, 0.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 1; + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.sql b/tests/queries/0_stateless/02354_vector_search_bugs.sql deleted file mode 100644 index 276d4eb5b59..00000000000 --- a/tests/queries/0_stateless/02354_vector_search_bugs.sql +++ /dev/null @@ -1,134 +0,0 @@ --- Tags: no-fasttest, no-ordinary-database - --- Tests various bugs and special cases for vector indexes. - -SET allow_experimental_vector_similarity_index = 1; -SET enable_analyzer = 1; -- 0 vs. 
1 produce slightly different error codes, make it future-proof - -DROP TABLE IF EXISTS tab; - -SELECT 'Rejects INSERTs of Arrays with different sizes'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA } -DROP TABLE tab; - -SELECT 'Issue #52258: Empty Arrays or Arrays with default values are rejected'; - -CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree() ORDER BY id; -INSERT INTO tab VALUES (1, []); -- { serverError INCORRECT_DATA } -INSERT INTO tab (id) VALUES (1); -- { serverError INCORRECT_DATA } -DROP TABLE tab; - -SELECT 'It is possible to create parts with different Array vector sizes but there will be an error at query time'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -SYSTEM STOP MERGES tab; -INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2]); -INSERT INTO tab values (2, [2.2, 2.3, 2.4]) (3, [3.1, 3.2, 3.3]); - -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 3; -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } - -DROP TABLE tab; - -SELECT 'Correctness of index with > 1 mark'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; -INSERT INTO tab SELECT number, [toFloat32(number), 0.0] from numbers(10000); - -WITH [1.0, 0.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 1; - -WITH [9000.0, 0.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 1; - -DROP TABLE tab; - -SELECT 'Issue #69085: Reference vector computed by a subquery'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; -INSERT INTO tab VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); - --- works -EXPLAIN indexes = 1 -WITH [0., 2.] AS reference_vec -SELECT - id, - vec, - cosineDistance(vec, reference_vec) AS distance -FROM tab -ORDER BY distance -LIMIT 1 -SETTINGS enable_analyzer = 0; - --- does not work -EXPLAIN indexes = 1 -WITH ( - SELECT vec - FROM tab - LIMIT 1 -) AS reference_vec -SELECT - id, - vec, - cosineDistance(vec, reference_vec) AS distance -FROM tab -ORDER BY distance -LIMIT 1 -SETTINGS enable_analyzer = 0; - --- does not work as well -EXPLAIN indexes = 1 -WITH ( - SELECT [0., 2.] -) AS reference_vec -SELECT - id, - vec, - cosineDistance(vec, reference_vec) AS distance -FROM tab -ORDER BY distance -LIMIT 1 -SETTINGS enable_analyzer = 0; - -DROP TABLE tab; - -SELECT 'index_granularity_bytes = 0 is disallowed'; - --- If adaptive index granularity is disabled, certain vector search queries with PREWHERE run into LOGICAL_ERRORs. 
--- SET allow_experimental_vector_similarity_index = 1; --- CREATE TABLE tab (`id` Int32, `vec` Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 100000000) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; --- INSERT INTO tab SELECT number, [toFloat32(number), 0.] FROM numbers(10000); --- WITH [1., 0.] AS reference_vec SELECT id, L2Distance(vec, reference_vec) FROM tab PREWHERE toLowCardinality(10) ORDER BY L2Distance(vec, reference_vec) ASC LIMIT 100; --- As a workaround, force enabled adaptive index granularity for now (it is the default anyways). -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; -- { serverError INVALID_SETTING_VALUE } - -CREATE TABLE tab(id Int32, vec Array(Float32)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; -ALTER TABLE tab ADD INDEX vec_idx1(vec) TYPE vector_similarity('hnsw', 'cosineDistance'); -- { serverError INVALID_SETTING_VALUE } - -DROP TABLE tab; - -SELECT 'Issue #71381: Vector similarity index and other skipping indexes used on the same table'; - -CREATE TABLE tab( - val String, - vec Array(Float32), - INDEX ann_idx vec TYPE vector_similarity('hnsw', 'cosineDistance'), - INDEX set_idx val TYPE set(100) -) -ENGINE = MergeTree() -ORDER BY tuple(); - -INSERT INTO tab VALUES ('hello world', [0.0]); - -DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql index f1cfc041233..aedba286a9f 100644 --- a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql +++ b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql @@ -5,6 +5,7 @@ SET allow_experimental_vector_similarity_index = 1; DROP TABLE IF EXISTS tab; + CREATE TABLE tab (id Int32, vec Array(Float32), PRIMARY KEY id, INDEX vec_idx(vec) TYPE vector_similarity('hnsw', 'L2Distance')); ALTER TABLE tab ADD INDEX idx(vec) TYPE minmax; From 4e3bde24605e1401749703bfe2eb28d7298f6630 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 6 Nov 2024 14:52:59 +0100 Subject: [PATCH 306/353] Add ProfileEvents for merge selector timings --- src/Common/ProfileEvents.cpp | 6 ++++ .../MergeTree/MergeTreeDataMergerMutator.cpp | 30 +++++++++++++++++-- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index 0774d36462d..7b9f670d340 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -746,6 +746,12 @@ The server successfully detected this situation and will download merged part fr M(ReadTaskRequestsSentElapsedMicroseconds, "Time spent in callbacks requested from the remote server back to the initiator server to choose the read task (for s3Cluster table function and similar). Measured on the remote server side.", ValueType::Microseconds) \ M(MergeTreeReadTaskRequestsSentElapsedMicroseconds, "Time spent in callbacks requested from the remote server back to the initiator server to choose the read task (for MergeTree tables). Measured on the remote server side.", ValueType::Microseconds) \ M(MergeTreeAllRangesAnnouncementsSentElapsedMicroseconds, "Time spent in sending the announcement from the remote server to the initiator server about the set of data parts (for MergeTree tables). 
Measured on the remote server side.", ValueType::Microseconds) \ + M(MergerMutatorsGetPartsForMergeElapsedMicroseconds, "Time spent to take data parts snapshot to build ranges from them.", ValueType::Microseconds) \ + M(MergerMutatorPrepareRangesForMergeElapsedMicroseconds, "Time spent to prepare parts ranges which can be merged according to merge predicate.", ValueType::Microseconds) \ + M(MergerMutatorSelectPartsForMergeElapsedMicroseconds, "Time spent to select parts from ranges which can be merged.", ValueType::Microseconds) \ + M(MergerMutatorRangesForMergeCount, "Amount of candidate ranges for merge", ValueType::Number) \ + M(MergerMutatorPartsInRangesForMergeCount, "Amount of candidate parts for merge", ValueType::Number) \ + M(MergerMutatorSelectRangePartsCount, "Amount of parts in selected range for merge", ValueType::Number) \ \ M(ConnectionPoolIsFullMicroseconds, "Total time spent waiting for a slot in connection pool.", ValueType::Microseconds) \ M(AsyncLoaderWaitMicroseconds, "Total time a query was waiting for async loader jobs.", ValueType::Microseconds) \ diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 6b9638b11d2..3d935f8b70d 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -48,6 +48,17 @@ namespace CurrentMetrics { extern const Metric BackgroundMergesAndMutationsPoolTask; } +namespace ProfileEvents +{ + + extern const Event MergeTreeAllRangesAnnouncementsSentElapsedMicroseconds; + extern const Event MergerMutatorsGetPartsForMergeElapsedMicroseconds; + extern const Event MergerMutatorPrepareRangesForMergeElapsedMicroseconds; + extern const Event MergerMutatorSelectPartsForMergeElapsedMicroseconds; + extern const Event MergerMutatorRangesForMergeCount; + extern const Event MergerMutatorPartsInRangesForMergeCount; + extern const Event MergerMutatorSelectRangePartsCount; +} namespace DB { @@ -215,6 +226,7 @@ MergeTreeDataMergerMutator::PartitionIdsHint MergeTreeDataMergerMutator::getPart { PartitionIdsHint res; MergeTreeData::DataPartsVector data_parts = getDataPartsToSelectMergeFrom(txn); + if (data_parts.empty()) return res; @@ -272,6 +284,8 @@ MergeTreeDataMergerMutator::PartitionIdsHint MergeTreeDataMergerMutator::getPart MergeTreeData::DataPartsVector MergeTreeDataMergerMutator::getDataPartsToSelectMergeFrom( const MergeTreeTransactionPtr & txn, const PartitionIdsHint * partitions_hint) const { + + Stopwatch get_data_parts_for_merge_timer; auto res = getDataPartsToSelectMergeFrom(txn); if (!partitions_hint) return res; @@ -280,6 +294,8 @@ MergeTreeData::DataPartsVector MergeTreeDataMergerMutator::getDataPartsToSelectM { return !partitions_hint->contains(part->info.partition_id); }); + + ProfileEvents::increment(ProfileEvents::MergerMutatorsGetPartsForMergeElapsedMicroseconds, get_data_parts_for_merge_timer.elapsedMicroseconds()); return res; } @@ -357,6 +373,7 @@ MergeTreeDataMergerMutator::MergeSelectingInfo MergeTreeDataMergerMutator::getPo const MergeTreeTransactionPtr & txn, PreformattedMessage & out_disable_reason) const { + Stopwatch ranges_for_merge_timer; MergeSelectingInfo res; res.current_time = std::time(nullptr); @@ -457,6 +474,10 @@ MergeTreeDataMergerMutator::MergeSelectingInfo MergeTreeDataMergerMutator::getPo prev_part = &part; } + ProfileEvents::increment(ProfileEvents::MergerMutatorPartsInRangesForMergeCount, res.parts_selected_precondition); +
ProfileEvents::increment(ProfileEvents::MergerMutatorRangesForMergeCount, res.parts_ranges.size()); + ProfileEvents::increment(ProfileEvents::MergerMutatorPrepareRangesForMergeElapsedMicroseconds, ranges_for_merge_timer.elapsedMicroseconds()); + return res; } @@ -471,6 +492,7 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( PreformattedMessage & out_disable_reason, bool dry_run) { + Stopwatch select_parts_from_ranges_timer; const auto data_settings = data.getSettings(); IMergeSelector::PartsRange parts_to_merge; @@ -570,7 +592,8 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( if (parts_to_merge.empty()) { - out_disable_reason = PreformattedMessage::create("Did not find any parts to merge (with usual merge selectors)"); + ProfileEvents::increment(ProfileEvents::MergerMutatorSelectPartsForMergeElapsedMicroseconds, select_parts_from_ranges_timer.elapsedMicroseconds()); + out_disable_reason = PreformattedMessage::create("Did not find any parts to merge (with usual merge selectors) in {}", select_parts_from_ranges_timer.elapsedMicroseconds() / 1000); return SelectPartsDecision::CANNOT_SELECT; } } @@ -583,8 +606,11 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( parts.push_back(part); } - LOG_DEBUG(log, "Selected {} parts from {} to {}", parts.size(), parts.front()->name, parts.back()->name); + LOG_DEBUG(log, "Selected {} parts from {} to {} in {}ms", parts.size(), parts.front()->name, parts.back()->name, select_parts_from_ranges_timer.elapsedMicroseconds() / 1000); + ProfileEvents::increment(ProfileEvents::MergerMutatorSelectRangePartsCount, parts.size()); + future_part->assign(std::move(parts)); + ProfileEvents::increment(ProfileEvents::MergerMutatorSelectPartsForMergeElapsedMicroseconds, select_parts_from_ranges_timer.elapsedMicroseconds()); return SelectPartsDecision::SELECTED; } From afb92f04e62b446fb5c8b0417c658f206ce2a55d Mon Sep 17 00:00:00 2001 From: Alexander Gololobov Date: Wed, 6 Nov 2024 14:56:30 +0100 Subject: [PATCH 307/353] Added ms --- src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 3d935f8b70d..4d0fb7f9eeb 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -593,7 +593,7 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( if (parts_to_merge.empty()) { ProfileEvents::increment(ProfileEvents::MergerMutatorSelectPartsForMergeElapsedMicroseconds, select_parts_from_ranges_timer.elapsedMicroseconds()); - out_disable_reason = PreformattedMessage::create("Did not find any parts to merge (with usual merge selectors) in {}", select_parts_from_ranges_timer.elapsedMicroseconds() / 1000); + out_disable_reason = PreformattedMessage::create("Did not find any parts to merge (with usual merge selectors) in {}ms", select_parts_from_ranges_timer.elapsedMicroseconds() / 1000); return SelectPartsDecision::CANNOT_SELECT; } } From 7795d43055a3bcf4c5f0710152d4c71cc183d000 Mon Sep 17 00:00:00 2001 From: Dmitry Novik Date: Mon, 4 Nov 2024 17:03:16 +0100 Subject: [PATCH 308/353] Analyzer: Check what happens after if-condition removal --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp 
b/src/Analyzer/Resolve/QueryAnalyzer.cpp index cb3087af707..55bbf4907bb 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -5448,16 +5448,13 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier */ scope.use_identifier_lookup_to_result_cache = false; - if (query_node_typed.getJoinTree()) - { - TableExpressionsAliasVisitor table_expressions_visitor(scope); - table_expressions_visitor.visit(query_node_typed.getJoinTree()); + TableExpressionsAliasVisitor table_expressions_visitor(scope); + table_expressions_visitor.visit(query_node_typed.getJoinTree()); - initializeQueryJoinTreeNode(query_node_typed.getJoinTree(), scope); - scope.aliases.alias_name_to_table_expression_node.clear(); + initializeQueryJoinTreeNode(query_node_typed.getJoinTree(), scope); + scope.aliases.alias_name_to_table_expression_node.clear(); - resolveQueryJoinTreeNode(query_node_typed.getJoinTree(), scope, visitor); - } + resolveQueryJoinTreeNode(query_node_typed.getJoinTree(), scope, visitor); if (!scope.group_by_use_nulls) scope.use_identifier_lookup_to_result_cache = true; From f4c0254254b7cfe1f603dc57350a226c9d5dd993 Mon Sep 17 00:00:00 2001 From: Ilya Golshtein Date: Wed, 6 Nov 2024 14:52:55 +0000 Subject: [PATCH 309/353] fix_test_drop_complex_columns: flaky check for test_drop_after_fetch --- .../test_replicated_s3_zero_copy_drop_partition/test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/integration/test_replicated_s3_zero_copy_drop_partition/test.py b/tests/integration/test_replicated_s3_zero_copy_drop_partition/test.py index 9937c0ed4ea..7623a24c0ef 100644 --- a/tests/integration/test_replicated_s3_zero_copy_drop_partition/test.py +++ b/tests/integration/test_replicated_s3_zero_copy_drop_partition/test.py @@ -65,6 +65,8 @@ CREATE TABLE test_s3(c1 Int8, c2 Date) ENGINE = ReplicatedMergeTree('/test/table objects_after = get_objects_in_data_path() assert objects_before == objects_after + node1.query("DROP TABLE test_local SYNC") + node1.query("DROP TABLE test_s3 SYNC") def test_drop_complex_columns(started_cluster): From 33bd082149ca207b55915cd78c8c19cdc6aacdc9 Mon Sep 17 00:00:00 2001 From: alesapin Date: Wed, 6 Nov 2024 16:00:25 +0100 Subject: [PATCH 310/353] Followup --- src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 3d935f8b70d..40c4db3a69d 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -51,7 +51,6 @@ namespace CurrentMetrics namespace ProfileEvents { - extern const Event MergeTreeAllRangesAnnouncementsSentElapsedMicroseconds; extern const Event MergerMutatorsGetPartsForMergeElapsedMicroseconds; extern const Event MergerMutatorPrepareRangesForMergeElapsedMicroseconds; extern const Event MergerMutatorSelectPartsForMergeElapsedMicroseconds; From 15337692e68961c247dd809f3b13e89a8acc74b7 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 6 Nov 2024 15:10:10 +0000 Subject: [PATCH 311/353] Minor: Remove "experimental" mention of analyzer --- src/Core/Settings.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 081e07ca2ce..7e8d0aabce0 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -4239,7 +4239,7 @@ Rewrite aggregate functions with if expression as argument when logically equiva For 
example, `avg(if(cond, col, null))` can be rewritten to `avgOrNullIf(cond, col)`. It may improve performance. :::note -Supported only with experimental analyzer (`enable_analyzer = 1`). +Supported only with the analyzer (`enable_analyzer = 1`). ::: )", 0) \ DECLARE(Bool, optimize_rewrite_array_exists_to_has, false, R"( From 12ab488453796a46f1f37d91cf60c6a6007e0134 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 6 Nov 2024 16:20:57 +0100 Subject: [PATCH 312/353] Revert "Selection of hash join inner table" --- src/Core/Joins.h | 11 - src/Core/Settings.cpp | 3 - src/Core/Settings.h | 1 - src/Core/SettingsEnums.cpp | 4 - src/Core/SettingsEnums.h | 2 +- src/Interpreters/ConcurrentHashJoin.h | 11 - src/Interpreters/FullSortingMergeJoin.h | 2 +- src/Interpreters/HashJoin/HashJoin.cpp | 16 +- src/Interpreters/HashJoin/HashJoin.h | 5 +- .../HashJoin/HashJoinMethodsImpl.h | 18 +- src/Interpreters/InterpreterSelectQuery.cpp | 4 +- src/Interpreters/TableJoin.cpp | 56 +---- src/Interpreters/TableJoin.h | 19 +- src/Interpreters/TreeRewriter.cpp | 5 +- src/Parsers/CreateQueryUUIDs.cpp | 2 +- src/Planner/CollectColumnIdentifiers.cpp | 1 - src/Planner/PlannerJoinTree.cpp | 152 +++++-------- src/Processors/QueryPlan/JoinStep.cpp | 103 +-------- src/Processors/QueryPlan/JoinStep.h | 17 +- .../QueryPlan/Optimizations/Optimizations.h | 1 - .../QueryPlan/Optimizations/optimizeJoin.cpp | 102 --------- .../QueryPlan/Optimizations/optimizeTree.cpp | 3 - .../QueryPlan/ReadFromMemoryStorageStep.h | 2 - .../Transforms/ColumnPermuteTransform.cpp | 49 ----- .../Transforms/ColumnPermuteTransform.h | 30 --- .../Transforms/JoiningTransform.cpp | 1 - tests/clickhouse-test | 4 - tests/integration/helpers/cluster.py | 13 +- tests/integration/helpers/random_settings.py | 2 - .../test_peak_memory_usage/test.py | 2 +- .../0_stateless/00826_cross_to_inner_join.sql | 13 +- .../00847_multiple_join_same_column.sql | 14 +- .../01015_empty_in_inner_right_join.sql.j2 | 2 - .../01107_join_right_table_totals.reference | 7 - .../01107_join_right_table_totals.sql | 10 +- .../01763_filter_push_down_bugs.reference | 2 +- .../01881_join_on_conditions_hash.sql.j2 | 10 +- .../0_stateless/02000_join_on_const.reference | 18 +- .../0_stateless/02000_join_on_const.sql | 16 +- .../02001_join_on_const_bs_long.sql.j2 | 4 +- ...oin_with_nullable_lowcardinality_crash.sql | 5 +- .../0_stateless/02282_array_distance.sql | 12 +- .../02381_join_dup_columns_in_plan.reference | 1 + .../0_stateless/02461_join_lc_issue_42380.sql | 3 +- ...emove_redundant_sorting_analyzer.reference | 4 +- ...move_redundant_distinct_analyzer.reference | 18 +- .../02514_analyzer_drop_join_on.reference | 55 +++-- .../02514_analyzer_drop_join_on.sql | 1 - ...oin_with_totals_and_subquery_bug.reference | 2 +- .../02835_join_step_explain.reference | 32 +-- .../0_stateless/02835_join_step_explain.sql | 2 - .../02962_join_using_bug_57894.reference | 1 - .../02962_join_using_bug_57894.sql | 2 - ...filter_push_down_equivalent_sets.reference | 206 ++++++++---------- ..._join_filter_push_down_equivalent_sets.sql | 40 +--- .../03038_recursive_cte_postgres_4.reference | 4 +- .../03038_recursive_cte_postgres_4.sql | 4 +- .../0_stateless/03094_one_thousand_joins.sql | 1 - ...convert_outer_join_to_inner_join.reference | 36 +-- ...03130_convert_outer_join_to_inner_join.sql | 13 +- ...ter_push_down_equivalent_columns.reference | 3 +- .../03236_squashing_high_memory.sql | 1 - 62 files changed, 314 insertions(+), 869 deletions(-) delete mode 100644 
src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp delete mode 100644 src/Processors/Transforms/ColumnPermuteTransform.cpp delete mode 100644 src/Processors/Transforms/ColumnPermuteTransform.h diff --git a/src/Core/Joins.h b/src/Core/Joins.h index dd6d86fc902..0964bf86e6b 100644 --- a/src/Core/Joins.h +++ b/src/Core/Joins.h @@ -119,15 +119,4 @@ enum class JoinTableSide : uint8_t const char * toString(JoinTableSide join_table_side); -/// Setting to choose which table to use as the inner table in hash join -enum class JoinInnerTableSelectionMode : uint8_t -{ - /// Use left table - Left, - /// Use right table - Right, - /// Use the table with the smallest number of rows - Auto, -}; - } diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 081e07ca2ce..ada6b674c87 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -1912,9 +1912,6 @@ See also: For single JOIN in case of identifier ambiguity prefer left table )", IMPORTANT) \ \ - DECLARE(JoinInnerTableSelectionMode, query_plan_join_inner_table_selection, JoinInnerTableSelectionMode::Auto, R"( -Select the side of the join to be the inner table in the query plan. Supported only for `ALL` join strictness with `JOIN ON` clause. Possible values: 'auto', 'left', 'right'. -)", 0) \ DECLARE(UInt64, preferred_block_size_bytes, 1000000, R"( This setting adjusts the data block size for query processing and represents additional fine-tuning to the more rough 'max_block_size' setting. If the columns are large and with 'max_block_size' rows the block size is likely to be larger than the specified amount of bytes, its size will be lowered for better CPU cache locality. )", 0) \ diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 1cc58deb94a..ac3b1fe651e 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -66,7 +66,6 @@ class WriteBuffer; M(CLASS_NAME, IntervalOutputFormat) \ M(CLASS_NAME, JoinAlgorithm) \ M(CLASS_NAME, JoinStrictness) \ - M(CLASS_NAME, JoinInnerTableSelectionMode) \ M(CLASS_NAME, LightweightMutationProjectionMode) \ M(CLASS_NAME, LoadBalancing) \ M(CLASS_NAME, LocalFSReadMethod) \ diff --git a/src/Core/SettingsEnums.cpp b/src/Core/SettingsEnums.cpp index 89e9cb295c3..cef63039277 100644 --- a/src/Core/SettingsEnums.cpp +++ b/src/Core/SettingsEnums.cpp @@ -55,10 +55,6 @@ IMPLEMENT_SETTING_MULTI_ENUM(JoinAlgorithm, ErrorCodes::UNKNOWN_JOIN, {"full_sorting_merge", JoinAlgorithm::FULL_SORTING_MERGE}, {"grace_hash", JoinAlgorithm::GRACE_HASH}}) -IMPLEMENT_SETTING_ENUM(JoinInnerTableSelectionMode, ErrorCodes::BAD_ARGUMENTS, - {{"left", JoinInnerTableSelectionMode::Left}, - {"right", JoinInnerTableSelectionMode::Right}, - {"auto", JoinInnerTableSelectionMode::Auto}}) IMPLEMENT_SETTING_ENUM(TotalsMode, ErrorCodes::UNKNOWN_TOTALS_MODE, {{"before_having", TotalsMode::BEFORE_HAVING}, diff --git a/src/Core/SettingsEnums.h b/src/Core/SettingsEnums.h index 35bdb8a7f65..607011b505b 100644 --- a/src/Core/SettingsEnums.h +++ b/src/Core/SettingsEnums.h @@ -128,8 +128,8 @@ constexpr auto getEnumValues(); DECLARE_SETTING_ENUM(LoadBalancing) DECLARE_SETTING_ENUM(JoinStrictness) + DECLARE_SETTING_MULTI_ENUM(JoinAlgorithm) -DECLARE_SETTING_ENUM(JoinInnerTableSelectionMode) /// Which rows should be included in TOTALS. 
diff --git a/src/Interpreters/ConcurrentHashJoin.h b/src/Interpreters/ConcurrentHashJoin.h index b377727a134..a911edaccc3 100644 --- a/src/Interpreters/ConcurrentHashJoin.h +++ b/src/Interpreters/ConcurrentHashJoin.h @@ -60,17 +60,6 @@ public: IBlocksStreamPtr getNonJoinedBlocks(const Block & left_sample_block, const Block & result_sample_block, UInt64 max_block_size) const override; - - bool isCloneSupported() const override - { - return !getTotals() && getTotalRowCount() == 0; - } - - std::shared_ptr clone(const std::shared_ptr & table_join_, const Block &, const Block & right_sample_block_) const override - { - return std::make_shared(context, table_join_, slots, right_sample_block_, stats_collecting_params); - } - private: struct InternalHashJoin { diff --git a/src/Interpreters/FullSortingMergeJoin.h b/src/Interpreters/FullSortingMergeJoin.h index faa9114c618..3f1e0d59287 100644 --- a/src/Interpreters/FullSortingMergeJoin.h +++ b/src/Interpreters/FullSortingMergeJoin.h @@ -36,7 +36,7 @@ public: bool isCloneSupported() const override { - return !getTotals(); + return true; } std::shared_ptr clone(const std::shared_ptr & table_join_, diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index dad8a487745..3e7f3deea8b 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -383,16 +383,6 @@ size_t HashJoin::getTotalByteCount() const return res; } -bool HashJoin::isUsedByAnotherAlgorithm() const -{ - return table_join->isEnabledAlgorithm(JoinAlgorithm::AUTO) || table_join->isEnabledAlgorithm(JoinAlgorithm::GRACE_HASH); -} - -bool HashJoin::canRemoveColumnsFromLeftBlock() const -{ - return table_join->enableEnalyzer() && !table_join->hasUsing() && !isUsedByAnotherAlgorithm(); -} - void HashJoin::initRightBlockStructure(Block & saved_block_sample) { if (isCrossOrComma(kind)) @@ -404,7 +394,8 @@ void HashJoin::initRightBlockStructure(Block & saved_block_sample) bool multiple_disjuncts = !table_join->oneDisjunct(); /// We could remove key columns for LEFT | INNER HashJoin but we should keep them for JoinSwitcher (if any). 
- bool save_key_columns = isUsedByAnotherAlgorithm() || + bool save_key_columns = table_join->isEnabledAlgorithm(JoinAlgorithm::AUTO) || + table_join->isEnabledAlgorithm(JoinAlgorithm::GRACE_HASH) || isRightOrFull(kind) || multiple_disjuncts || table_join->getMixedJoinExpression(); @@ -1237,10 +1228,7 @@ IBlocksStreamPtr HashJoin::getNonJoinedBlocks(const Block & left_sample_block, { if (!JoinCommon::hasNonJoinedBlocks(*table_join)) return {}; - size_t left_columns_count = left_sample_block.columns(); - if (canRemoveColumnsFromLeftBlock()) - left_columns_count = table_join->getOutputColumns(JoinTableSide::Left).size(); bool flag_per_row = needUsedFlagsForPerRightTableRow(table_join); if (!flag_per_row) diff --git a/src/Interpreters/HashJoin/HashJoin.h b/src/Interpreters/HashJoin/HashJoin.h index 8a27961354a..4c1ebbcdc66 100644 --- a/src/Interpreters/HashJoin/HashJoin.h +++ b/src/Interpreters/HashJoin/HashJoin.h @@ -127,7 +127,7 @@ public: bool isCloneSupported() const override { - return !getTotals() && getTotalRowCount() == 0; + return true; } std::shared_ptr clone(const std::shared_ptr & table_join_, @@ -464,9 +464,6 @@ private: bool empty() const; - bool isUsedByAnotherAlgorithm() const; - bool canRemoveColumnsFromLeftBlock() const; - void validateAdditionalFilterExpression(std::shared_ptr additional_filter_expression); bool needUsedFlagsForPerRightTableRow(std::shared_ptr table_join_) const; diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h index 7e8a2658b9c..45a766e2df6 100644 --- a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -56,6 +56,7 @@ Block HashJoinMethods::joinBlockImpl( const auto & key_names = !is_join_get ? onexprs[i].key_names_left : onexprs[i].key_names_right; join_on_keys.emplace_back(block, key_names, onexprs[i].condColumnNames().first, join.key_sizes[i]); } + size_t existing_columns = block.columns(); /** If you use FULL or RIGHT JOIN, then the columns from the "left" table must be materialized. * Because if they are constants, then in the "not joined" rows, they may have different values @@ -98,22 +99,6 @@ Block HashJoinMethods::joinBlockImpl( added_columns.buildJoinGetOutput(); else added_columns.buildOutput(); - - const auto & table_join = join.table_join; - std::set block_columns_to_erase; - if (join.canRemoveColumnsFromLeftBlock()) - { - std::unordered_set left_output_columns; - for (const auto & out_column : table_join->getOutputColumns(JoinTableSide::Left)) - left_output_columns.insert(out_column.name); - for (size_t i = 0; i < block.columns(); ++i) - { - if (!left_output_columns.contains(block.getByPosition(i).name)) - block_columns_to_erase.insert(i); - } - } - size_t existing_columns = block.columns(); - for (size_t i = 0; i < added_columns.size(); ++i) block.insert(added_columns.moveColumn(i)); @@ -175,7 +160,6 @@ Block HashJoinMethods::joinBlockImpl( block.safeGetByPosition(pos).column = block.safeGetByPosition(pos).column->replicate(*offsets_to_replicate); } } - block.erase(block_columns_to_erase); return remaining_block; } diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index 8ddf51fa25e..3918c1c37ea 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -1888,9 +1888,7 @@ void InterpreterSelectQuery::executeImpl(QueryPlan & query_plan, std::optional

setStepDescription(fmt::format("JOIN {}", expressions.join->pipelineType())); std::vector plans; diff --git a/src/Interpreters/TableJoin.cpp b/src/Interpreters/TableJoin.cpp index 555aaff2e06..2532dddba3c 100644 --- a/src/Interpreters/TableJoin.cpp +++ b/src/Interpreters/TableJoin.cpp @@ -41,7 +41,6 @@ namespace DB namespace Setting { extern const SettingsBool allow_experimental_join_right_table_sorting; - extern const SettingsBool allow_experimental_analyzer; extern const SettingsUInt64 cross_join_min_bytes_to_compress; extern const SettingsUInt64 cross_join_min_rows_to_compress; extern const SettingsUInt64 default_max_bytes_in_join; @@ -144,7 +143,6 @@ TableJoin::TableJoin(const Settings & settings, VolumePtr tmp_volume_, Temporary , max_memory_usage(settings[Setting::max_memory_usage]) , tmp_volume(tmp_volume_) , tmp_data(tmp_data_) - , enable_analyzer(settings[Setting::allow_experimental_analyzer]) { } @@ -163,8 +161,6 @@ void TableJoin::resetCollected() clauses.clear(); columns_from_joined_table.clear(); columns_added_by_join.clear(); - columns_from_left_table.clear(); - result_columns_from_left_table.clear(); original_names.clear(); renames.clear(); left_type_map.clear(); @@ -207,20 +203,6 @@ size_t TableJoin::rightKeyInclusion(const String & name) const return count; } -void TableJoin::setInputColumns(NamesAndTypesList left_output_columns, NamesAndTypesList right_output_columns) -{ - columns_from_left_table = std::move(left_output_columns); - columns_from_joined_table = std::move(right_output_columns); -} - - -const NamesAndTypesList & TableJoin::getOutputColumns(JoinTableSide side) -{ - if (side == JoinTableSide::Left) - return result_columns_from_left_table; - return columns_added_by_join; -} - void TableJoin::deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix) { NameSet joined_columns; @@ -369,18 +351,9 @@ bool TableJoin::rightBecomeNullable(const DataTypePtr & column_type) const return forceNullableRight() && JoinCommon::canBecomeNullable(column_type); } -void TableJoin::setUsedColumn(const NameAndTypePair & joined_column, JoinTableSide side) -{ - if (side == JoinTableSide::Left) - result_columns_from_left_table.push_back(joined_column); - else - columns_added_by_join.push_back(joined_column); - -} - void TableJoin::addJoinedColumn(const NameAndTypePair & joined_column) { - setUsedColumn(joined_column, JoinTableSide::Right); + columns_added_by_join.emplace_back(joined_column); } NamesAndTypesList TableJoin::correctedColumnsAddedByJoin() const @@ -1022,32 +995,5 @@ size_t TableJoin::getMaxMemoryUsage() const return max_memory_usage; } -void TableJoin::swapSides() -{ - assertEnableEnalyzer(); - - std::swap(key_asts_left, key_asts_right); - std::swap(left_type_map, right_type_map); - for (auto & clause : clauses) - { - std::swap(clause.key_names_left, clause.key_names_right); - std::swap(clause.on_filter_condition_left, clause.on_filter_condition_right); - std::swap(clause.analyzer_left_filter_condition_column_name, clause.analyzer_right_filter_condition_column_name); - } - - std::swap(columns_from_left_table, columns_from_joined_table); - std::swap(result_columns_from_left_table, columns_added_by_join); - - if (table_join.kind == JoinKind::Left) - table_join.kind = JoinKind::Right; - else if (table_join.kind == JoinKind::Right) - table_join.kind = JoinKind::Left; -} - -void TableJoin::assertEnableEnalyzer() const -{ - if (!enable_analyzer) - throw DB::Exception(ErrorCodes::NOT_IMPLEMENTED, "TableJoin: analyzer is disabled"); -} } 
diff --git a/src/Interpreters/TableJoin.h b/src/Interpreters/TableJoin.h index e0e1926fb12..e1bae55a4ed 100644 --- a/src/Interpreters/TableJoin.h +++ b/src/Interpreters/TableJoin.h @@ -167,9 +167,6 @@ private: ASOFJoinInequality asof_inequality = ASOFJoinInequality::GreaterOrEquals; - NamesAndTypesList columns_from_left_table; - NamesAndTypesList result_columns_from_left_table; - /// All columns which can be read from joined table. Duplicating names are qualified. NamesAndTypesList columns_from_joined_table; /// Columns will be added to block by JOIN. @@ -205,8 +202,6 @@ private: bool is_join_with_constant = false; - bool enable_analyzer = false; - Names requiredJoinedNames() const; /// Create converting actions and change key column names if required @@ -271,8 +266,6 @@ public: VolumePtr getGlobalTemporaryVolume() { return tmp_volume; } TemporaryDataOnDiskScopePtr getTempDataOnDisk() { return tmp_data; } - bool enableEnalyzer() const { return enable_analyzer; } - void assertEnableEnalyzer() const; ActionsDAG createJoinedBlockActions(ContextPtr context) const; @@ -289,7 +282,6 @@ public: } bool allowParallelHashJoin() const; - void swapSides(); bool joinUseNulls() const { return join_use_nulls; } @@ -380,9 +372,6 @@ public: bool leftBecomeNullable(const DataTypePtr & column_type) const; bool rightBecomeNullable(const DataTypePtr & column_type) const; void addJoinedColumn(const NameAndTypePair & joined_column); - - void setUsedColumn(const NameAndTypePair & joined_column, JoinTableSide side); - void setColumnsAddedByJoin(const NamesAndTypesList & columns_added_by_join_value) { columns_added_by_join = columns_added_by_join_value; @@ -408,17 +397,11 @@ public: ASTPtr leftKeysList() const; ASTPtr rightKeysList() const; /// For ON syntax only - void setColumnsFromJoinedTable(NamesAndTypesList columns_from_joined_table_value, const NameSet & left_table_columns, const String & right_table_prefix, const NamesAndTypesList & columns_from_left_table_) + void setColumnsFromJoinedTable(NamesAndTypesList columns_from_joined_table_value, const NameSet & left_table_columns, const String & right_table_prefix) { columns_from_joined_table = std::move(columns_from_joined_table_value); deduplicateAndQualifyColumnNames(left_table_columns, right_table_prefix); - result_columns_from_left_table = columns_from_left_table_; - columns_from_left_table = columns_from_left_table_; } - - void setInputColumns(NamesAndTypesList left_output_columns, NamesAndTypesList right_output_columns); - const NamesAndTypesList & getOutputColumns(JoinTableSide side); - const NamesAndTypesList & columnsFromJoinedTable() const { return columns_from_joined_table; } const NamesAndTypesList & columnsAddedByJoin() const { return columns_added_by_join; } diff --git a/src/Interpreters/TreeRewriter.cpp b/src/Interpreters/TreeRewriter.cpp index 28e11166762..ea08fd92339 100644 --- a/src/Interpreters/TreeRewriter.cpp +++ b/src/Interpreters/TreeRewriter.cpp @@ -1353,15 +1353,12 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect( if (tables_with_columns.size() > 1) { - auto columns_from_left_table = tables_with_columns[0].columns; const auto & right_table = tables_with_columns[1]; auto columns_from_joined_table = right_table.columns; /// query can use materialized or aliased columns from right joined table, /// we want to request it for right table columns_from_joined_table.insert(columns_from_joined_table.end(), right_table.hidden_columns.begin(), right_table.hidden_columns.end()); - columns_from_left_table.insert(columns_from_left_table.end(), 
tables_with_columns[0].hidden_columns.begin(), tables_with_columns[0].hidden_columns.end()); - result.analyzed_join->setColumnsFromJoinedTable( - std::move(columns_from_joined_table), source_columns_set, right_table.table.getQualifiedNamePrefix(), columns_from_left_table); + result.analyzed_join->setColumnsFromJoinedTable(std::move(columns_from_joined_table), source_columns_set, right_table.table.getQualifiedNamePrefix()); } translateQualifiedNames(query, *select_query, source_columns_set, tables_with_columns); diff --git a/src/Parsers/CreateQueryUUIDs.cpp b/src/Parsers/CreateQueryUUIDs.cpp index 70848440a0e..c788cc7a025 100644 --- a/src/Parsers/CreateQueryUUIDs.cpp +++ b/src/Parsers/CreateQueryUUIDs.cpp @@ -31,7 +31,7 @@ CreateQueryUUIDs::CreateQueryUUIDs(const ASTCreateQuery & query, bool generate_r /// If we generate random UUIDs for already existing tables then those UUIDs will not be correct making those inner target table inaccessible. /// Thus it's not safe for example to replace /// "ATTACH MATERIALIZED VIEW mv AS SELECT a FROM b" with - /// "ATTACH MATERIALIZED VIEW mv TO INNER UUID '123e4567-e89b-12d3-a456-426614174000' AS SELECT a FROM b" + /// "ATTACH MATERIALIZED VIEW mv TO INNER UUID "XXXX" AS SELECT a FROM b" /// This replacement is safe only for CREATE queries when inner target tables don't exist yet. if (!query.attach) { diff --git a/src/Planner/CollectColumnIdentifiers.cpp b/src/Planner/CollectColumnIdentifiers.cpp index dd5bdd4d141..95f1c7d53d8 100644 --- a/src/Planner/CollectColumnIdentifiers.cpp +++ b/src/Planner/CollectColumnIdentifiers.cpp @@ -2,7 +2,6 @@ #include #include -#include #include diff --git a/src/Planner/PlannerJoinTree.cpp b/src/Planner/PlannerJoinTree.cpp index a1ce455f266..5c153f6db39 100644 --- a/src/Planner/PlannerJoinTree.cpp +++ b/src/Planner/PlannerJoinTree.cpp @@ -104,7 +104,6 @@ namespace Setting extern const SettingsBool optimize_move_to_prewhere; extern const SettingsBool optimize_move_to_prewhere_if_final; extern const SettingsBool use_concurrency_control; - extern const SettingsJoinInnerTableSelectionMode query_plan_join_inner_table_selection; } namespace ErrorCodes @@ -1242,55 +1241,6 @@ void joinCastPlanColumnsToNullable(QueryPlan & plan_to_add_cast, PlannerContextP plan_to_add_cast.addStep(std::move(cast_join_columns_step)); } -std::optional createStepToDropColumns( - const Block & header, - const ColumnIdentifierSet & outer_scope_columns, - const PlannerContextPtr & planner_context) -{ - ActionsDAG drop_unused_columns_after_join_actions_dag(header.getColumnsWithTypeAndName()); - ActionsDAG::NodeRawConstPtrs drop_unused_columns_after_join_actions_dag_updated_outputs; - std::unordered_set drop_unused_columns_after_join_actions_dag_updated_outputs_names; - std::optional first_skipped_column_node_index; - - auto & drop_unused_columns_after_join_actions_dag_outputs = drop_unused_columns_after_join_actions_dag.getOutputs(); - size_t drop_unused_columns_after_join_actions_dag_outputs_size = drop_unused_columns_after_join_actions_dag_outputs.size(); - - const auto & global_planner_context = planner_context->getGlobalPlannerContext(); - - for (size_t i = 0; i < drop_unused_columns_after_join_actions_dag_outputs_size; ++i) - { - const auto & output = drop_unused_columns_after_join_actions_dag_outputs[i]; - - if (drop_unused_columns_after_join_actions_dag_updated_outputs_names.contains(output->result_name) - || !global_planner_context->hasColumnIdentifier(output->result_name)) - continue; - - if (!outer_scope_columns.contains(output->result_name)) 
- { - if (!first_skipped_column_node_index) - first_skipped_column_node_index = i; - continue; - } - - drop_unused_columns_after_join_actions_dag_updated_outputs.push_back(output); - drop_unused_columns_after_join_actions_dag_updated_outputs_names.insert(output->result_name); - } - - if (!first_skipped_column_node_index) - return {}; - - /** It is expected that JOIN TREE query plan will contain at least 1 column, even if there are no columns in outer scope. - * - * Example: SELECT count() FROM test_table_1 AS t1, test_table_2 AS t2; - */ - if (drop_unused_columns_after_join_actions_dag_updated_outputs.empty() && first_skipped_column_node_index) - drop_unused_columns_after_join_actions_dag_updated_outputs.push_back(drop_unused_columns_after_join_actions_dag_outputs[*first_skipped_column_node_index]); - - drop_unused_columns_after_join_actions_dag_outputs = std::move(drop_unused_columns_after_join_actions_dag_updated_outputs); - - return drop_unused_columns_after_join_actions_dag; -} - JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_expression, JoinTreeQueryPlan left_join_tree_query_plan, JoinTreeQueryPlan right_join_tree_query_plan, @@ -1563,37 +1513,21 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ } const Block & left_header = left_plan.getCurrentHeader(); + auto left_table_names = left_header.getNames(); + NameSet left_table_names_set(left_table_names.begin(), left_table_names.end()); + + auto columns_from_joined_table = right_plan.getCurrentHeader().getNamesAndTypesList(); + table_join->setColumnsFromJoinedTable(columns_from_joined_table, left_table_names_set, ""); + + for (auto & column_from_joined_table : columns_from_joined_table) + { + /// Add columns from joined table only if they are presented in outer scope, otherwise they can be dropped + if (planner_context->getGlobalPlannerContext()->hasColumnIdentifier(column_from_joined_table.name) && + outer_scope_columns.contains(column_from_joined_table.name)) + table_join->addJoinedColumn(column_from_joined_table); + } + const Block & right_header = right_plan.getCurrentHeader(); - - auto columns_from_left_table = left_header.getNamesAndTypesList(); - auto columns_from_right_table = right_header.getNamesAndTypesList(); - - table_join->setInputColumns(columns_from_left_table, columns_from_right_table); - - for (auto & column_from_joined_table : columns_from_left_table) - { - /// Add columns to output only if they are presented in outer scope, otherwise they can be dropped - if (planner_context->getGlobalPlannerContext()->hasColumnIdentifier(column_from_joined_table.name) && - outer_scope_columns.contains(column_from_joined_table.name)) - table_join->setUsedColumn(column_from_joined_table, JoinTableSide::Left); - } - - for (auto & column_from_joined_table : columns_from_right_table) - { - /// Add columns to output only if they are presented in outer scope, otherwise they can be dropped - if (planner_context->getGlobalPlannerContext()->hasColumnIdentifier(column_from_joined_table.name) && - outer_scope_columns.contains(column_from_joined_table.name)) - table_join->setUsedColumn(column_from_joined_table, JoinTableSide::Right); - } - - if (table_join->getOutputColumns(JoinTableSide::Left).empty() && table_join->getOutputColumns(JoinTableSide::Right).empty()) - { - if (!columns_from_left_table.empty()) - table_join->setUsedColumn(columns_from_left_table.front(), JoinTableSide::Left); - else if (!columns_from_right_table.empty()) - 
table_join->setUsedColumn(columns_from_right_table.front(), JoinTableSide::Right); - } - auto join_algorithm = chooseJoinAlgorithm(table_join, join_node.getRightTableExpression(), left_header, right_header, planner_context); auto result_plan = QueryPlan(); @@ -1681,26 +1615,13 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ } auto join_pipeline_type = join_algorithm->pipelineType(); - - ColumnIdentifierSet outer_scope_columns_nonempty; - if (outer_scope_columns.empty()) - { - if (left_header.columns() > 1) - outer_scope_columns_nonempty.insert(left_header.getByPosition(0).name); - else if (right_header.columns() > 1) - outer_scope_columns_nonempty.insert(right_header.getByPosition(0).name); - } - auto join_step = std::make_unique( left_plan.getCurrentHeader(), right_plan.getCurrentHeader(), std::move(join_algorithm), settings[Setting::max_block_size], settings[Setting::max_threads], - outer_scope_columns.empty() ? outer_scope_columns_nonempty : outer_scope_columns, - false /*optimize_read_in_order*/, - true /*optimize_skip_unused_shards*/); - join_step->inner_table_selection_mode = settings[Setting::query_plan_join_inner_table_selection]; + false /*optimize_read_in_order*/); join_step->setStepDescription(fmt::format("JOIN {}", join_pipeline_type)); @@ -1711,18 +1632,47 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_ result_plan.unitePlans(std::move(join_step), {std::move(plans)}); } - const auto & header_after_join = result_plan.getCurrentHeader(); - if (header_after_join.columns() > outer_scope_columns.size()) + ActionsDAG drop_unused_columns_after_join_actions_dag(result_plan.getCurrentHeader().getColumnsWithTypeAndName()); + ActionsDAG::NodeRawConstPtrs drop_unused_columns_after_join_actions_dag_updated_outputs; + std::unordered_set drop_unused_columns_after_join_actions_dag_updated_outputs_names; + std::optional first_skipped_column_node_index; + + auto & drop_unused_columns_after_join_actions_dag_outputs = drop_unused_columns_after_join_actions_dag.getOutputs(); + size_t drop_unused_columns_after_join_actions_dag_outputs_size = drop_unused_columns_after_join_actions_dag_outputs.size(); + + for (size_t i = 0; i < drop_unused_columns_after_join_actions_dag_outputs_size; ++i) { - auto drop_unused_columns_after_join_actions_dag = createStepToDropColumns(header_after_join, outer_scope_columns, planner_context); - if (drop_unused_columns_after_join_actions_dag) + const auto & output = drop_unused_columns_after_join_actions_dag_outputs[i]; + + const auto & global_planner_context = planner_context->getGlobalPlannerContext(); + if (drop_unused_columns_after_join_actions_dag_updated_outputs_names.contains(output->result_name) + || !global_planner_context->hasColumnIdentifier(output->result_name)) + continue; + + if (!outer_scope_columns.contains(output->result_name)) { - auto drop_unused_columns_after_join_transform_step = std::make_unique(result_plan.getCurrentHeader(), std::move(*drop_unused_columns_after_join_actions_dag)); - drop_unused_columns_after_join_transform_step->setStepDescription("Drop unused columns after JOIN"); - result_plan.addStep(std::move(drop_unused_columns_after_join_transform_step)); + if (!first_skipped_column_node_index) + first_skipped_column_node_index = i; + continue; } + + drop_unused_columns_after_join_actions_dag_updated_outputs.push_back(output); + drop_unused_columns_after_join_actions_dag_updated_outputs_names.insert(output->result_name); } + /** It is expected that JOIN TREE query plan 
will contain at least 1 column, even if there are no columns in outer scope. + * + * Example: SELECT count() FROM test_table_1 AS t1, test_table_2 AS t2; + */ + if (drop_unused_columns_after_join_actions_dag_updated_outputs.empty() && first_skipped_column_node_index) + drop_unused_columns_after_join_actions_dag_updated_outputs.push_back(drop_unused_columns_after_join_actions_dag_outputs[*first_skipped_column_node_index]); + + drop_unused_columns_after_join_actions_dag_outputs = std::move(drop_unused_columns_after_join_actions_dag_updated_outputs); + + auto drop_unused_columns_after_join_transform_step = std::make_unique(result_plan.getCurrentHeader(), std::move(drop_unused_columns_after_join_actions_dag)); + drop_unused_columns_after_join_transform_step->setStepDescription("DROP unused columns after JOIN"); + result_plan.addStep(std::move(drop_unused_columns_after_join_transform_step)); + for (const auto & right_join_tree_query_plan_row_policy : right_join_tree_query_plan.used_row_policies) left_join_tree_query_plan.used_row_policies.insert(right_join_tree_query_plan_row_policy); diff --git a/src/Processors/QueryPlan/JoinStep.cpp b/src/Processors/QueryPlan/JoinStep.cpp index 7ade437822e..018b52a5c68 100644 --- a/src/Processors/QueryPlan/JoinStep.cpp +++ b/src/Processors/QueryPlan/JoinStep.cpp @@ -6,7 +6,6 @@ #include #include #include -#include namespace DB { @@ -37,37 +36,6 @@ std::vector> describeJoinActions(const JoinPtr & join) return description; } -std::vector getPermutationForBlock( - const Block & block, - const Block & lhs_block, - const Block & rhs_block, - const NameSet & name_filter) -{ - std::vector permutation; - permutation.reserve(block.columns()); - Block::NameMap name_map = block.getNamesToIndexesMap(); - - bool is_trivial = true; - for (const auto & other_block : {lhs_block, rhs_block}) - { - for (const auto & col : other_block) - { - if (!name_filter.contains(col.name)) - continue; - if (auto it = name_map.find(col.name); it != name_map.end()) - { - is_trivial = is_trivial && it->second == permutation.size(); - permutation.push_back(it->second); - } - } - } - - if (is_trivial && permutation.size() == block.columns()) - return {}; - - return permutation; -} - } JoinStep::JoinStep( @@ -76,15 +44,8 @@ JoinStep::JoinStep( JoinPtr join_, size_t max_block_size_, size_t max_streams_, - NameSet required_output_, - bool keep_left_read_in_order_, - bool use_new_analyzer_) - : join(std::move(join_)) - , max_block_size(max_block_size_) - , max_streams(max_streams_) - , required_output(std::move(required_output_)) - , keep_left_read_in_order(keep_left_read_in_order_) - , use_new_analyzer(use_new_analyzer_) + bool keep_left_read_in_order_) + : join(std::move(join_)), max_block_size(max_block_size_), max_streams(max_streams_), keep_left_read_in_order(keep_left_read_in_order_) { updateInputHeaders({left_header_, right_header_}); } @@ -94,43 +55,23 @@ QueryPipelineBuilderPtr JoinStep::updatePipeline(QueryPipelineBuilders pipelines if (pipelines.size() != 2) throw Exception(ErrorCodes::LOGICAL_ERROR, "JoinStep expect two input steps"); - Block lhs_header = pipelines[0]->getHeader(); - Block rhs_header = pipelines[1]->getHeader(); - - if (swap_streams) - std::swap(pipelines[0], pipelines[1]); - if (join->pipelineType() == JoinPipelineType::YShaped) { auto joined_pipeline = QueryPipelineBuilder::joinPipelinesYShaped( - std::move(pipelines[0]), std::move(pipelines[1]), join, join_algorithm_header, max_block_size, &processors); + std::move(pipelines[0]), std::move(pipelines[1]), join, 
*output_header, max_block_size, &processors); joined_pipeline->resize(max_streams); return joined_pipeline; } - auto pipeline = QueryPipelineBuilder::joinPipelinesRightLeft( + return QueryPipelineBuilder::joinPipelinesRightLeft( std::move(pipelines[0]), std::move(pipelines[1]), join, - join_algorithm_header, + *output_header, max_block_size, max_streams, keep_left_read_in_order, &processors); - - if (!use_new_analyzer) - return pipeline; - - auto column_permutation = getPermutationForBlock(pipeline->getHeader(), lhs_header, rhs_header, required_output); - if (!column_permutation.empty()) - { - pipeline->addSimpleTransform([&column_permutation](const Block & header) - { - return std::make_shared(header, column_permutation); - }); - } - - return pipeline; } bool JoinStep::allowPushDownToRight() const @@ -149,49 +90,17 @@ void JoinStep::describeActions(FormatSettings & settings) const for (const auto & [name, value] : describeJoinActions(join)) settings.out << prefix << name << ": " << value << '\n'; - if (swap_streams) - settings.out << prefix << "Swapped: true\n"; } void JoinStep::describeActions(JSONBuilder::JSONMap & map) const { for (const auto & [name, value] : describeJoinActions(join)) map.add(name, value); - if (swap_streams) - map.add("Swapped", true); -} - -void JoinStep::setJoin(JoinPtr join_, bool swap_streams_) -{ - join_algorithm_header.clear(); - swap_streams = swap_streams_; - join = std::move(join_); - updateOutputHeader(); } void JoinStep::updateOutputHeader() { - if (join_algorithm_header) - return; - - const auto & header = swap_streams ? input_headers[1] : input_headers[0]; - - Block result_header = JoiningTransform::transformHeader(header, join); - join_algorithm_header = result_header; - - if (!use_new_analyzer) - { - if (swap_streams) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot swap streams without new analyzer"); - output_header = result_header; - return; - } - - auto column_permutation = getPermutationForBlock(result_header, input_headers[0], input_headers[1], required_output); - if (!column_permutation.empty()) - result_header = ColumnPermuteTransform::permute(result_header, column_permutation); - - output_header = result_header; + output_header = JoiningTransform::transformHeader(input_headers.front(), join); } static ITransformingStep::Traits getStorageJoinTraits() diff --git a/src/Processors/QueryPlan/JoinStep.h b/src/Processors/QueryPlan/JoinStep.h index 1eca42c62cf..2793784d633 100644 --- a/src/Processors/QueryPlan/JoinStep.h +++ b/src/Processors/QueryPlan/JoinStep.h @@ -2,7 +2,6 @@ #include #include -#include namespace DB { @@ -20,9 +19,7 @@ public: JoinPtr join_, size_t max_block_size_, size_t max_streams_, - NameSet required_output_, - bool keep_left_read_in_order_, - bool use_new_analyzer_); + bool keep_left_read_in_order_); String getName() const override { return "Join"; } @@ -34,26 +31,16 @@ public: void describeActions(FormatSettings & settings) const override; const JoinPtr & getJoin() const { return join; } - void setJoin(JoinPtr join_, bool swap_streams_ = false); + void setJoin(JoinPtr join_) { join = std::move(join_); } bool allowPushDownToRight() const; - JoinInnerTableSelectionMode inner_table_selection_mode = JoinInnerTableSelectionMode::Right; - private: void updateOutputHeader() override; - /// Header that expected to be returned from IJoin - Block join_algorithm_header; - JoinPtr join; size_t max_block_size; size_t max_streams; - - const NameSet required_output; - std::set columns_to_remove; bool keep_left_read_in_order; - bool 
use_new_analyzer = false; - bool swap_streams = false; }; /// Special step for the case when Join is already filled. diff --git a/src/Processors/QueryPlan/Optimizations/Optimizations.h b/src/Processors/QueryPlan/Optimizations/Optimizations.h index c1c4d1e1635..751d5182dc3 100644 --- a/src/Processors/QueryPlan/Optimizations/Optimizations.h +++ b/src/Processors/QueryPlan/Optimizations/Optimizations.h @@ -113,7 +113,6 @@ void optimizePrimaryKeyConditionAndLimit(const Stack & stack); void optimizePrewhere(Stack & stack, QueryPlan::Nodes & nodes); void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes); void optimizeAggregationInOrder(QueryPlan::Node & node, QueryPlan::Nodes &); -void optimizeJoin(QueryPlan::Node & node, QueryPlan::Nodes &); void optimizeDistinctInOrder(QueryPlan::Node & node, QueryPlan::Nodes &); /// A separate tree traverse to apply sorting properties after *InOrder optimizations. diff --git a/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp b/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp deleted file mode 100644 index c0b31864eac..00000000000 --- a/src/Processors/QueryPlan/Optimizations/optimizeJoin.cpp +++ /dev/null @@ -1,102 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include - -namespace DB::QueryPlanOptimizations -{ - -static std::optional estimateReadRowsCount(QueryPlan::Node & node) -{ - IQueryPlanStep * step = node.step.get(); - if (const auto * reading = typeid_cast(step)) - { - if (auto analyzed_result = reading->getAnalyzedResult()) - return analyzed_result->selected_rows; - if (auto analyzed_result = reading->selectRangesToRead()) - return analyzed_result->selected_rows; - return {}; - } - - if (const auto * reading = typeid_cast(step)) - return reading->getStorage()->totalRows(Settings{}); - - if (node.children.size() != 1) - return {}; - - if (typeid_cast(step) || typeid_cast(step)) - return estimateReadRowsCount(*node.children.front()); - - return {}; -} - -void optimizeJoin(QueryPlan::Node & node, QueryPlan::Nodes &) -{ - auto * join_step = typeid_cast(node.step.get()); - if (!join_step || node.children.size() != 2) - return; - - const auto & join = join_step->getJoin(); - if (join->pipelineType() != JoinPipelineType::FillRightFirst || !join->isCloneSupported()) - return; - - const auto & table_join = join->getTableJoin(); - - /// Algorithms other than HashJoin may not support OUTER JOINs - if (table_join.kind() != JoinKind::Inner && !typeid_cast(join.get())) - return; - - /// fixme: USING clause handled specially in join algorithm, so swap breaks it - /// fixme: Swapping for SEMI and ANTI joins should be alright, need to try to enable it and test - if (table_join.hasUsing() || table_join.strictness() != JoinStrictness::All) - return; - - bool need_swap = false; - if (join_step->inner_table_selection_mode == JoinInnerTableSelectionMode::Auto) - { - auto lhs_extimation = estimateReadRowsCount(*node.children[0]); - auto rhs_extimation = estimateReadRowsCount(*node.children[1]); - LOG_TRACE(getLogger("optimizeJoin"), "Left table estimation: {}, right table estimation: {}", - lhs_extimation.transform(toString).value_or("unknown"), - rhs_extimation.transform(toString).value_or("unknown")); - - if (lhs_extimation && rhs_extimation && *lhs_extimation < *rhs_extimation) - need_swap = true; - } - else if (join_step->inner_table_selection_mode == JoinInnerTableSelectionMode::Left) - { - need_swap = true; - 
} - - if (!need_swap) - return; - - const auto & headers = join_step->getInputHeaders(); - if (headers.size() != 2) - return; - - const auto & left_stream_input_header = headers.front(); - const auto & right_stream_input_header = headers.back(); - - auto updated_table_join = std::make_shared(table_join); - updated_table_join->swapSides(); - auto updated_join = join->clone(updated_table_join, right_stream_input_header, left_stream_input_header); - join_step->setJoin(std::move(updated_join), /* swap_streams= */ true); -} - -} diff --git a/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp b/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp index c034ca79181..03418c752d4 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeTree.cpp @@ -227,9 +227,6 @@ void addStepsToBuildSets(QueryPlan & plan, QueryPlan::Node & root, QueryPlan::No /// NOTE: frame cannot be safely used after stack was modified. auto & frame = stack.back(); - if (frame.next_child == 0) - optimizeJoin(*frame.node, nodes); - /// Traverse all children first. if (frame.next_child < frame.node->children.size()) { diff --git a/src/Processors/QueryPlan/ReadFromMemoryStorageStep.h b/src/Processors/QueryPlan/ReadFromMemoryStorageStep.h index a9c2d2df2c4..238c1a3aad0 100644 --- a/src/Processors/QueryPlan/ReadFromMemoryStorageStep.h +++ b/src/Processors/QueryPlan/ReadFromMemoryStorageStep.h @@ -35,8 +35,6 @@ public: void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override; - const StoragePtr & getStorage() const { return storage; } - private: static constexpr auto name = "ReadFromMemoryStorage"; diff --git a/src/Processors/Transforms/ColumnPermuteTransform.cpp b/src/Processors/Transforms/ColumnPermuteTransform.cpp deleted file mode 100644 index f371689814c..00000000000 --- a/src/Processors/Transforms/ColumnPermuteTransform.cpp +++ /dev/null @@ -1,49 +0,0 @@ -#include - -namespace DB -{ - -namespace -{ - -template -void applyPermutation(std::vector & data, const std::vector & permutation) -{ - std::vector res; - res.reserve(permutation.size()); - for (size_t i : permutation) - res.push_back(data[i]); - data = std::move(res); -} - -void permuteChunk(Chunk & chunk, const std::vector & permutation) -{ - size_t num_rows = chunk.getNumRows(); - auto columns = chunk.detachColumns(); - applyPermutation(columns, permutation); - chunk.setColumns(std::move(columns), num_rows); -} - -} - -Block ColumnPermuteTransform::permute(const Block & block, const std::vector & permutation) -{ - auto columns = block.getColumnsWithTypeAndName(); - applyPermutation(columns, permutation); - return Block(columns); -} - -ColumnPermuteTransform::ColumnPermuteTransform(const Block & header_, const std::vector & permutation_) - : ISimpleTransform(header_, permute(header_, permutation_), false) - , permutation(permutation_) -{ -} - - -void ColumnPermuteTransform::transform(Chunk & chunk) -{ - permuteChunk(chunk, permutation); -} - - -} diff --git a/src/Processors/Transforms/ColumnPermuteTransform.h b/src/Processors/Transforms/ColumnPermuteTransform.h deleted file mode 100644 index 25f3a8d0825..00000000000 --- a/src/Processors/Transforms/ColumnPermuteTransform.h +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -class ColumnPermuteTransform : public ISimpleTransform -{ -public: - ColumnPermuteTransform(const Block & header_, const std::vector & permutation_); - - String 
getName() const override { return "ColumnPermuteTransform"; } - - void transform(Chunk & chunk) override; - - static Block permute(const Block & block, const std::vector & permutation); - -private: - Names column_names; - std::vector permutation; -}; - - -} diff --git a/src/Processors/Transforms/JoiningTransform.cpp b/src/Processors/Transforms/JoiningTransform.cpp index 187f4bf6728..f2fb6327129 100644 --- a/src/Processors/Transforms/JoiningTransform.cpp +++ b/src/Processors/Transforms/JoiningTransform.cpp @@ -19,7 +19,6 @@ Block JoiningTransform::transformHeader(Block header, const JoinPtr & join) join->initialize(header); ExtraBlockPtr tmp; join->joinBlock(header, tmp); - materializeBlockInplace(header); LOG_TEST(getLogger("JoiningTransform"), "After join block: '{}'", header.dumpStructure()); return header; } diff --git a/tests/clickhouse-test b/tests/clickhouse-test index f4c3b368632..9c035b7cc35 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -789,7 +789,6 @@ def get_localzone(): return os.getenv("TZ", "/".join(os.readlink("/etc/localtime").split("/")[-2:])) -# Refer to `tests/integration/helpers/random_settings.py` for integration test random settings class SettingsRandomizer: settings = { "max_insert_threads": lambda: ( @@ -920,9 +919,6 @@ class SettingsRandomizer: "max_parsing_threads": lambda: random.choice([0, 1, 10]), "optimize_functions_to_subcolumns": lambda: random.randint(0, 1), "parallel_replicas_local_plan": lambda: random.randint(0, 1), - "query_plan_join_inner_table_selection": lambda: random.choice( - ["left", "auto", "right"] - ), "output_format_native_write_json_as_string": lambda: random.randint(0, 1), "enable_vertical_final": lambda: random.randint(0, 1), } diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index 6751f205fb8..7c531cdd493 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -67,7 +67,6 @@ DEFAULT_ENV_NAME = ".env" DEFAULT_BASE_CONFIG_DIR = os.environ.get( "CLICKHOUSE_TESTS_BASE_CONFIG_DIR", "/etc/clickhouse-server/" ) -DOCKER_BASE_TAG = os.environ.get("DOCKER_BASE_TAG", "latest") SANITIZER_SIGN = "==================" @@ -504,6 +503,7 @@ class ClickHouseCluster: "CLICKHOUSE_TESTS_DOCKERD_HOST" ) self.docker_api_version = os.environ.get("DOCKER_API_VERSION") + self.docker_base_tag = os.environ.get("DOCKER_BASE_TAG", "latest") self.base_cmd = ["docker", "compose"] if custom_dockerd_host: @@ -1079,7 +1079,7 @@ class ClickHouseCluster: env_variables["keeper_binary"] = binary_path env_variables["keeper_cmd_prefix"] = keeper_cmd_prefix - env_variables["image"] = "clickhouse/integration-test:" + DOCKER_BASE_TAG + env_variables["image"] = "clickhouse/integration-test:" + self.docker_base_tag env_variables["user"] = str(os.getuid()) env_variables["keeper_fs"] = "bind" for i in range(1, 4): @@ -1675,7 +1675,7 @@ class ClickHouseCluster: ) if tag is None: - tag = DOCKER_BASE_TAG + tag = self.docker_base_tag if not env_variables: env_variables = {} self.use_keeper = use_keeper @@ -4538,12 +4538,7 @@ class ClickHouseInstance: if len(self.custom_dictionaries_paths): write_embedded_config("0_common_enable_dictionaries.xml", self.config_d_dir) - if ( - self.randomize_settings - and self.image == "clickhouse/integration-test" - and self.tag == DOCKER_BASE_TAG - and self.base_config_dir == DEFAULT_BASE_CONFIG_DIR - ): + if self.randomize_settings and self.base_config_dir == DEFAULT_BASE_CONFIG_DIR: # If custom main config is used, do not apply random settings to it 
write_random_settings_config(Path(users_d_dir) / "0_random_settings.xml") diff --git a/tests/integration/helpers/random_settings.py b/tests/integration/helpers/random_settings.py index 32cde54d0e7..b2319561fd7 100644 --- a/tests/integration/helpers/random_settings.py +++ b/tests/integration/helpers/random_settings.py @@ -5,8 +5,6 @@ def randomize_settings(): yield "max_joined_block_size_rows", random.randint(8000, 100000) if random.random() < 0.5: yield "max_block_size", random.randint(8000, 100000) - if random.random() < 0.5: - yield "query_plan_join_inner_table_selection", random.choice(["auto", "left"]) def write_random_settings_config(destination): diff --git a/tests/integration/test_peak_memory_usage/test.py b/tests/integration/test_peak_memory_usage/test.py index 69057573173..51268dcf386 100644 --- a/tests/integration/test_peak_memory_usage/test.py +++ b/tests/integration/test_peak_memory_usage/test.py @@ -91,7 +91,7 @@ def test_clickhouse_client_max_peak_memory_usage_distributed(started_cluster): with client(name="client1>", log=client_output, command=command_text) as client1: client1.expect(prompt) client1.send( - "SELECT COUNT(*) FROM distributed_fixed_numbers JOIN fixed_numbers_2 ON distributed_fixed_numbers.number=fixed_numbers_2.number SETTINGS query_plan_join_inner_table_selection = 'right'", + "SELECT COUNT(*) FROM distributed_fixed_numbers JOIN fixed_numbers_2 ON distributed_fixed_numbers.number=fixed_numbers_2.number", ) client1.expect("Peak memory usage", timeout=60) client1.expect(prompt) diff --git a/tests/queries/0_stateless/00826_cross_to_inner_join.sql b/tests/queries/0_stateless/00826_cross_to_inner_join.sql index 5ab7a2d0626..e9f9e13e2d3 100644 --- a/tests/queries/0_stateless/00826_cross_to_inner_join.sql +++ b/tests/queries/0_stateless/00826_cross_to_inner_join.sql @@ -15,9 +15,9 @@ INSERT INTO t2_00826 values (1,1), (1,2); INSERT INTO t2_00826 (a) values (2), (3); SELECT '--- cross ---'; -SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a ORDER BY ALL; +SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a; SELECT '--- cross nullable ---'; -SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.b ORDER BY ALL; +SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.b; SELECT '--- cross nullable vs not nullable ---'; SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.b ORDER BY t1_00826.a; SELECT '--- cross self ---'; @@ -41,15 +41,14 @@ SELECT '--- is null or ---'; SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.a AND (t2_00826.b IS NULL OR t2_00826.b > t2_00826.a) ORDER BY t1_00826.a; SELECT '--- do not rewrite alias ---'; -SELECT a as b FROM t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.a AND b > 0 ORDER BY ALL; +SELECT a as b FROM t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.a AND b > 0; SELECT '--- comma ---'; -SELECT * FROM t1_00826, t2_00826 where t1_00826.a = t2_00826.a ORDER BY ALL; +SELECT * FROM t1_00826, t2_00826 where t1_00826.a = t2_00826.a; SELECT '--- comma nullable ---'; -SELECT * FROM t1_00826, t2_00826 where t1_00826.b = t2_00826.b ORDER BY ALL; +SELECT * FROM t1_00826, t2_00826 where t1_00826.b = t2_00826.b; SELECT '--- comma and or ---'; -SELECT * FROM t1_00826, t2_00826 where t1_00826.a = t2_00826.a AND (t2_00826.b IS NULL OR t2_00826.b < 2) -ORDER BY ALL; +SELECT * FROM t1_00826, t2_00826 where t1_00826.a = t2_00826.a AND (t2_00826.b IS NULL OR t2_00826.b < 2); SELECT '--- cross ---'; diff --git 
a/tests/queries/0_stateless/00847_multiple_join_same_column.sql b/tests/queries/0_stateless/00847_multiple_join_same_column.sql index bbb4eb12466..c7f0c6383c2 100644 --- a/tests/queries/0_stateless/00847_multiple_join_same_column.sql +++ b/tests/queries/0_stateless/00847_multiple_join_same_column.sql @@ -20,42 +20,42 @@ select t.a, s.b, s.a, s.b, y.a, y.b from t left join s on (t.a = s.a and s.b = t.b) left join y on (y.a = s.a and y.b = s.b) order by t.a -format PrettyCompactMonoBlock; +format PrettyCompactNoEscapes; select t.a as t_a from t left join s on s.a = t_a order by t.a -format PrettyCompactMonoBlock; +format PrettyCompactNoEscapes; select t.a, s.a as s_a from t left join s on s.a = t.a left join y on y.b = s.b order by t.a -format PrettyCompactMonoBlock; +format PrettyCompactNoEscapes; select t.a, t.a, t.b as t_b from t left join s on t.a = s.a left join y on y.b = s.b order by t.a -format PrettyCompactMonoBlock; +format PrettyCompactNoEscapes; select s.a, s.a, s.b as s_b, s.b from t left join s on s.a = t.a left join y on s.b = y.b order by t.a -format PrettyCompactMonoBlock; +format PrettyCompactNoEscapes; select y.a, y.a, y.b as y_b, y.b from t left join s on s.a = t.a left join y on y.b = s.b order by t.a -format PrettyCompactMonoBlock; +format PrettyCompactNoEscapes; select t.a, t.a as t_a, s.a, s.a as s_a, y.a, y.a as y_a from t left join s on t.a = s.a left join y on y.b = s.b order by t.a -format PrettyCompactMonoBlock; +format PrettyCompactNoEscapes; drop table t; drop table s; diff --git a/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql.j2 b/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql.j2 index cdbb0542ffb..cdb9d253b9b 100644 --- a/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql.j2 +++ b/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql.j2 @@ -1,7 +1,5 @@ SET joined_subquery_requires_alias = 0; -SET query_plan_join_inner_table_selection = 'auto'; - {% for join_algorithm in ['partial_merge', 'hash'] -%} SET join_algorithm = '{{ join_algorithm }}'; diff --git a/tests/queries/0_stateless/01107_join_right_table_totals.reference b/tests/queries/0_stateless/01107_join_right_table_totals.reference index aa569ff9331..daf503b776d 100644 --- a/tests/queries/0_stateless/01107_join_right_table_totals.reference +++ b/tests/queries/0_stateless/01107_join_right_table_totals.reference @@ -18,35 +18,28 @@ 0 0 0 0 -- 1 1 1 1 0 0 -- 1 1 1 1 0 0 -- 1 1 1 1 0 0 -- 1 1 1 1 0 0 -- 1 1 0 0 -- 1 foo 1 1 300 0 foo 1 0 300 -- 1 100 1970-01-01 1 100 1970-01-01 1 100 1970-01-01 1 200 1970-01-02 1 200 1970-01-02 1 100 1970-01-01 diff --git a/tests/queries/0_stateless/01107_join_right_table_totals.sql b/tests/queries/0_stateless/01107_join_right_table_totals.sql index 7e549282489..ad8954d5d70 100644 --- a/tests/queries/0_stateless/01107_join_right_table_totals.sql +++ b/tests/queries/0_stateless/01107_join_right_table_totals.sql @@ -64,47 +64,39 @@ USING (id); INSERT INTO t VALUES (1, 100, '1970-01-01'), (1, 200, '1970-01-02'); -SELECT '-'; SELECT * FROM (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id) l LEFT JOIN (SELECT item_id FROM t ) r ON l.item_id = r.item_id; -SELECT '-'; SELECT * FROM (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id) l RIGHT JOIN (SELECT item_id FROM t ) r ON l.item_id = r.item_id; -SELECT '-'; SELECT * FROM (SELECT item_id FROM t) l LEFT JOIN (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id ) r ON l.item_id = r.item_id; -SELECT '-'; SELECT * FROM (SELECT 
item_id FROM t) l RIGHT JOIN (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id ) r ON l.item_id = r.item_id; -SELECT '-'; SELECT * FROM (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id) l LEFT JOIN (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id ) r ON l.item_id = r.item_id; -SELECT '-'; SELECT * FROM (SELECT item_id, 'foo' AS key, 1 AS val FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id) l LEFT JOIN (SELECT item_id, sum(price_sold) AS val FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id ) r ON l.item_id = r.item_id; -SELECT '-'; SELECT * FROM (SELECT * FROM t GROUP BY item_id, price_sold, date WITH TOTALS ORDER BY item_id, price_sold, date) l LEFT JOIN (SELECT * FROM t GROUP BY item_id, price_sold, date WITH TOTALS ORDER BY item_id, price_sold, date ) r -ON l.item_id = r.item_id -ORDER BY ALL; +ON l.item_id = r.item_id; DROP TABLE t; diff --git a/tests/queries/0_stateless/01763_filter_push_down_bugs.reference b/tests/queries/0_stateless/01763_filter_push_down_bugs.reference index 229ac6eae09..19018a610b7 100644 --- a/tests/queries/0_stateless/01763_filter_push_down_bugs.reference +++ b/tests/queries/0_stateless/01763_filter_push_down_bugs.reference @@ -26,7 +26,7 @@ Expression ((Projection + Before ORDER BY)) Parts: 1/1 Granules: 1/1 Expression ((Project names + Projection)) - Filter (WHERE) + Filter ((WHERE + DROP unused columns after JOIN)) Join (JOIN FillRightFirst) Expression ReadFromMergeTree (default.t1) diff --git a/tests/queries/0_stateless/01881_join_on_conditions_hash.sql.j2 b/tests/queries/0_stateless/01881_join_on_conditions_hash.sql.j2 index c13722f431a..c2d85cefb18 100644 --- a/tests/queries/0_stateless/01881_join_on_conditions_hash.sql.j2 +++ b/tests/queries/0_stateless/01881_join_on_conditions_hash.sql.j2 @@ -75,7 +75,7 @@ SELECT * FROM t1 INNER ALL JOIN t2 ON t1.id == t2.id AND t2.key; -- { serverErro SELECT * FROM t1 JOIN t2_nullable as t2 ON t2.key == t2.key2 AND (t1.id == t2.id OR isNull(t2.key2)); -- { serverError 403 } SELECT * FROM t1 JOIN t2 ON t2.key == t2.key2 OR t1.id == t2.id; -- { serverError 403 } SELECT * FROM t1 JOIN t2 ON (t2.key == t2.key2 AND (t1.key == t1.key2 AND t1.key != 'XXX' OR t1.id == t2.id)) AND t1.id == t2.id; -- { serverError 403 } -SELECT * FROM t1 JOIN t2 ON t2.key == t2.key2 AND t1.key == t1.key2 AND t1.key != 'XXX' AND t1.id == t2.id OR t2.key == t2.key2 AND t1.id == t2.id AND t1.id == t2.id ORDER BY ALL; +SELECT * FROM t1 JOIN t2 ON t2.key == t2.key2 AND t1.key == t1.key2 AND t1.key != 'XXX' AND t1.id == t2.id OR t2.key == t2.key2 AND t1.id == t2.id AND t1.id == t2.id; -- non-equi condition containing columns from different tables doesn't supported yet SELECT * FROM t1 INNER ALL JOIN t2 ON t1.id == t2.id AND t1.id >= t2.id; -- { serverError 403 } SELECT * FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND t2.key == t2.key2 AND t1.key == t1.key2 AND t1.id >= length(t2.key); -- { serverError 403 } @@ -89,10 +89,10 @@ SELECT 't22', * FROM t1 JOIN t22 ON t1.id == t22.idd and (t1.id == t22.id OR t22 SELECT 't22', * FROM t1 JOIN t22 ON (t22.key == t22.key2 OR t1.id == t22.id) and t1.id == t22.idd; -- { serverError 403 } SELECT 't22', * FROM t1 JOIN t22 ON (t1.id == t22.id OR t22.key == t22.key2) and t1.id == t22.idd; -- { serverError 403 } SELECT 't22', * FROM t1 JOIN t22 ON (t1.id == t22.id OR t22.key == t22.key2) and (t1.id == t22.idd AND (t1.key2 = 'a1' OR t1.key2 = 'a2' OR t1.key2 = 'a3' OR t1.key2 = 'a4' OR t1.key2 = 'a5' OR t1.key2 = 'a6' OR t1.key2 = 'a7' OR t1.key2 = 'a8' 
OR t1.key2 = 'a9' OR t1.key2 = 'a10' OR t1.key2 = 'a11' OR t1.key2 = 'a12' OR t1.key2 = 'a13' OR t1.key2 = 'a14' OR t1.key2 = 'a15' OR t1.key2 = 'a16' OR t1.key2 = 'a17' OR t1.key2 = 'a18' OR t1.key2 = 'a19' OR t1.key2 = '111')); -- { serverError 403 } -SELECT 't22', * FROM t1 JOIN t22 ON t1.id == t22.idd and t22.key == t22.key2 OR t1.id == t22.idd and t1.id == t22.id ORDER BY ALL; -SELECT 't22', * FROM t1 JOIN t22 ON t1.id == t22.idd and t1.id == t22.id OR t1.id == t22.idd and t22.key == t22.key2 ORDER BY ALL; -SELECT 't22', * FROM t1 JOIN t22 ON t22.key == t22.key2 and t1.id == t22.idd OR t1.id == t22.id and t1.id == t22.idd ORDER BY ALL; -SELECT 't22', * FROM t1 JOIN t22 ON t1.id == t22.id and t1.id == t22.idd OR t22.key == t22.key2 and t1.id == t22.idd ORDER BY ALL; +SELECT 't22', * FROM t1 JOIN t22 ON t1.id == t22.idd and t22.key == t22.key2 OR t1.id == t22.idd and t1.id == t22.id; +SELECT 't22', * FROM t1 JOIN t22 ON t1.id == t22.idd and t1.id == t22.id OR t1.id == t22.idd and t22.key == t22.key2; +SELECT 't22', * FROM t1 JOIN t22 ON t22.key == t22.key2 and t1.id == t22.idd OR t1.id == t22.id and t1.id == t22.idd; +SELECT 't22', * FROM t1 JOIN t22 ON t1.id == t22.id and t1.id == t22.idd OR t22.key == t22.key2 and t1.id == t22.idd; {% endfor -%} diff --git a/tests/queries/0_stateless/02000_join_on_const.reference b/tests/queries/0_stateless/02000_join_on_const.reference index f8e46a2b976..3bd1633ce32 100644 --- a/tests/queries/0_stateless/02000_join_on_const.reference +++ b/tests/queries/0_stateless/02000_join_on_const.reference @@ -33,23 +33,23 @@ 2 2 2 2 -- { echoOn } -SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 1 ORDER BY 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS enable_analyzer = 1; 1 0 2 2 -SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 1 ORDER BY 1 SETTINGS enable_analyzer = 1; -0 3 +SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS enable_analyzer = 1; 2 2 -SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 1 ORDER BY 2, 1 SETTINGS enable_analyzer = 1; +0 3 +SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS enable_analyzer = 1; 1 0 2 2 0 3 -SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 2 ORDER BY 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS enable_analyzer = 1; 1 0 2 0 -SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 2 ORDER BY 2 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS enable_analyzer = 1; 0 2 0 3 -SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 2 ORDER BY 2, 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS enable_analyzer = 1; 1 0 2 0 0 2 @@ -59,11 +59,11 @@ SELECT * FROM (SELECT 1 as a) as t1 LEFT JOIN ( SELECT ('b', 256) as b ) AS t2 1 ('',0) SELECT * FROM (SELECT 1 as a) as t1 RIGHT JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; 0 ('b',256) -SELECT * FROM (SELECT 1 as a) as t1 FULL JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL ORDER BY 2; +SELECT * FROM (SELECT 1 as a) as t1 FULL JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; 1 ('',0) 0 ('b',256) SELECT * FROM (SELECT 1 as a) as t1 SEMI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; -SELECT * FROM (SELECT 1 as a) as t1 ANTI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL ORDER BY 2; +SELECT * FROM (SELECT 1 as a) as t1 ANTI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; 1 ('',0) 2 4 2 Nullable(UInt64) UInt8 diff --git 
a/tests/queries/0_stateless/02000_join_on_const.sql b/tests/queries/0_stateless/02000_join_on_const.sql index 33638edafa5..da70973ed87 100644 --- a/tests/queries/0_stateless/02000_join_on_const.sql +++ b/tests/queries/0_stateless/02000_join_on_const.sql @@ -73,20 +73,20 @@ SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 SETTINGS enable_analyzer = 0; -- SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 SETTINGS enable_analyzer = 1; -- { echoOn } -SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 1 ORDER BY 1 SETTINGS enable_analyzer = 1; -SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 1 ORDER BY 1 SETTINGS enable_analyzer = 1; -SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 1 ORDER BY 2, 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 1 SETTINGS enable_analyzer = 1; -SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 2 ORDER BY 1 SETTINGS enable_analyzer = 1; -SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 2 ORDER BY 2 SETTINGS enable_analyzer = 1; -SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 2 ORDER BY 2, 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS enable_analyzer = 1; SELECT * FROM (SELECT 1 as a) as t1 INNER JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; SELECT * FROM (SELECT 1 as a) as t1 LEFT JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; SELECT * FROM (SELECT 1 as a) as t1 RIGHT JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; -SELECT * FROM (SELECT 1 as a) as t1 FULL JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL ORDER BY 2; +SELECT * FROM (SELECT 1 as a) as t1 FULL JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; SELECT * FROM (SELECT 1 as a) as t1 SEMI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; -SELECT * FROM (SELECT 1 as a) as t1 ANTI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL ORDER BY 2; +SELECT * FROM (SELECT 1 as a) as t1 ANTI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; -- { echoOff } diff --git a/tests/queries/0_stateless/02001_join_on_const_bs_long.sql.j2 b/tests/queries/0_stateless/02001_join_on_const_bs_long.sql.j2 index 83548e087bd..1726bcb7062 100644 --- a/tests/queries/0_stateless/02001_join_on_const_bs_long.sql.j2 +++ b/tests/queries/0_stateless/02001_join_on_const_bs_long.sql.j2 @@ -1,8 +1,8 @@ DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; -CREATE TABLE t1 (id Int) ENGINE = TinyLog; -CREATE TABLE t2 (id Int) ENGINE = TinyLog; +CREATE TABLE t1 (id Int) ENGINE = MergeTree ORDER BY id; +CREATE TABLE t2 (id Int) ENGINE = MergeTree ORDER BY id; INSERT INTO t1 VALUES (1), (2); INSERT INTO t2 SELECT number + 5 AS x FROM (SELECT * FROM system.numbers LIMIT 1111); diff --git a/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql b/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql index c3c84ebaded..abc2ee41402 100644 --- a/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql +++ b/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql @@ -12,9 +12,8 @@ CREATE TABLE without_nullable insert into with_nullable values(0,'f'),(0,'usa'); insert into without_nullable values(0,'usa'),(0,'us2a'); 
-select if(t0.country is null ,t2.country,t0.country) "country" -from without_nullable t0 right outer join with_nullable t2 on t0.country=t2.country -ORDER BY 1 DESC; +select if(t0.country is null ,t2.country,t0.country) "country" +from without_nullable t0 right outer join with_nullable t2 on t0.country=t2.country; drop table with_nullable; drop table without_nullable; diff --git a/tests/queries/0_stateless/02282_array_distance.sql b/tests/queries/0_stateless/02282_array_distance.sql index 85abc8fa381..2cca853fd67 100644 --- a/tests/queries/0_stateless/02282_array_distance.sql +++ b/tests/queries/0_stateless/02282_array_distance.sql @@ -48,8 +48,7 @@ SELECT L2SquaredDistance(v1.v, v2.v), cosineDistance(v1.v, v2.v) FROM vec2 v1, vec2 v2 -WHERE length(v1.v) == length(v2.v) -ORDER BY ALL; +WHERE length(v1.v) == length(v2.v); INSERT INTO vec2f VALUES (1, [100, 200, 0]), (2, [888, 777, 666]), (3, range(1, 35, 1)), (4, range(3, 37, 1)), (5, range(1, 135, 1)), (6, range(3, 137, 1)); SELECT @@ -62,8 +61,7 @@ SELECT L2SquaredDistance(v1.v, v2.v), cosineDistance(v1.v, v2.v) FROM vec2f v1, vec2f v2 -WHERE length(v1.v) == length(v2.v) -ORDER BY ALL; +WHERE length(v1.v) == length(v2.v); INSERT INTO vec2d VALUES (1, [100, 200, 0]), (2, [888, 777, 666]), (3, range(1, 35, 1)), (4, range(3, 37, 1)), (5, range(1, 135, 1)), (6, range(3, 137, 1)); SELECT @@ -76,8 +74,7 @@ SELECT L2SquaredDistance(v1.v, v2.v), cosineDistance(v1.v, v2.v) FROM vec2d v1, vec2d v2 -WHERE length(v1.v) == length(v2.v) -ORDER BY ALL; +WHERE length(v1.v) == length(v2.v); SELECT v1.id, @@ -89,8 +86,7 @@ SELECT L2SquaredDistance(v1.v, v2.v), cosineDistance(v1.v, v2.v) FROM vec2f v1, vec2d v2 -WHERE length(v1.v) == length(v2.v) -ORDER BY ALL; +WHERE length(v1.v) == length(v2.v); SELECT L1Distance([0, 0], [1]); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } SELECT L2Distance([1, 2], (3,4)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/tests/queries/0_stateless/02381_join_dup_columns_in_plan.reference b/tests/queries/0_stateless/02381_join_dup_columns_in_plan.reference index 90aab0a0eb2..365725f8ffe 100644 --- a/tests/queries/0_stateless/02381_join_dup_columns_in_plan.reference +++ b/tests/queries/0_stateless/02381_join_dup_columns_in_plan.reference @@ -148,6 +148,7 @@ Header: key String value String Join Header: __table1.key String + __table3.key String __table3.value String Sorting Header: __table1.key String diff --git a/tests/queries/0_stateless/02461_join_lc_issue_42380.sql b/tests/queries/0_stateless/02461_join_lc_issue_42380.sql index 8b5c6846bd0..f0ecbf64e58 100644 --- a/tests/queries/0_stateless/02461_join_lc_issue_42380.sql +++ b/tests/queries/0_stateless/02461_join_lc_issue_42380.sql @@ -9,5 +9,4 @@ CREATE TABLE t2__fuzz_47 (id LowCardinality(Int16)) ENGINE = MergeTree() ORDER B INSERT INTO t1__fuzz_13 VALUES (1); INSERT INTO t2__fuzz_47 VALUES (1); -SELECT * FROM t1__fuzz_13 FULL OUTER JOIN t2__fuzz_47 ON 1 = 2 -ORDER BY ALL; +SELECT * FROM t1__fuzz_13 FULL OUTER JOIN t2__fuzz_47 ON 1 = 2; diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference index c9bf36f88ea..3c68d14fdf2 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference @@ -117,7 +117,7 @@ ORDER BY t1.number, t2.number -- explain Expression (Project names) Sorting (Sorting for ORDER BY) - Expression ((Before ORDER BY + Projection)) + 
Expression ((Before ORDER BY + (Projection + DROP unused columns after JOIN))) Join (JOIN FillRightFirst) Expression ((Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + Change column names to column identifiers))))))))) ReadFromSystemNumbers @@ -161,7 +161,7 @@ ORDER BY t1.number, t2.number -- explain Expression (Project names) Sorting (Sorting for ORDER BY) - Expression ((Before ORDER BY + Projection)) + Expression ((Before ORDER BY + (Projection + DROP unused columns after JOIN))) Join (JOIN FillRightFirst) Expression ((Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Before ORDER BY + (Projection + Change column names to column identifiers))))))))) ReadFromSystemNumbers diff --git a/tests/queries/0_stateless/02500_remove_redundant_distinct_analyzer.reference b/tests/queries/0_stateless/02500_remove_redundant_distinct_analyzer.reference index baa2be9dfdb..867ae394c1f 100644 --- a/tests/queries/0_stateless/02500_remove_redundant_distinct_analyzer.reference +++ b/tests/queries/0_stateless/02500_remove_redundant_distinct_analyzer.reference @@ -79,7 +79,7 @@ Expression (Project names) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) Distinct (Preliminary DISTINCT) - Expression (Projection) + Expression ((Projection + DROP unused columns after JOIN)) Join (JOIN FillRightFirst) Expression ((Change column names to column identifiers + Project names)) Distinct (DISTINCT) @@ -244,7 +244,7 @@ Expression ((Project names + (Projection + (Change column names to column identi Sorting (Sorting for ORDER BY) Expression ((Before ORDER BY + Projection)) Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + Projection)))) + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) Join (JOIN FillRightFirst) Expression (Change column names to column identifiers) ReadFromSystemNumbers @@ -280,7 +280,7 @@ Expression (Project names) Sorting (Sorting for ORDER BY) Expression ((Before ORDER BY + Projection)) Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + Projection)))) + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) Join (JOIN FillRightFirst) Expression (Change column names to column identifiers) ReadFromSystemNumbers @@ -315,7 +315,7 @@ Expression (Project names) Expression ((Before ORDER BY + Projection)) Rollup Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + Projection)))) + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) Join (JOIN FillRightFirst) Expression (Change column names to column identifiers) ReadFromSystemNumbers @@ -348,7 +348,7 @@ Expression ((Project names + (Projection + (Change column names to column identi Expression ((Before ORDER BY + Projection)) Rollup Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + Projection)))) + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused 
columns after JOIN))))) Join (JOIN FillRightFirst) Expression (Change column names to column identifiers) ReadFromSystemNumbers @@ -386,7 +386,7 @@ Expression (Project names) Expression ((Before ORDER BY + Projection)) Cube Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + Projection)))) + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) Join (JOIN FillRightFirst) Expression (Change column names to column identifiers) ReadFromSystemNumbers @@ -419,7 +419,7 @@ Expression ((Project names + (Projection + (Change column names to column identi Expression ((Before ORDER BY + Projection)) Cube Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + Projection)))) + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) Join (JOIN FillRightFirst) Expression (Change column names to column identifiers) ReadFromSystemNumbers @@ -457,7 +457,7 @@ Expression (Project names) Expression ((Before ORDER BY + Projection)) TotalsHaving Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + Projection)))) + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) Join (JOIN FillRightFirst) Expression (Change column names to column identifiers) ReadFromSystemNumbers @@ -491,7 +491,7 @@ Expression ((Project names + (Projection + (Change column names to column identi Expression ((Before ORDER BY + Projection)) TotalsHaving Aggregating - Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + Projection)))) + Expression ((Before GROUP BY + (Change column names to column identifiers + (Project names + (Projection + DROP unused columns after JOIN))))) Join (JOIN FillRightFirst) Expression (Change column names to column identifiers) ReadFromSystemNumbers diff --git a/tests/queries/0_stateless/02514_analyzer_drop_join_on.reference b/tests/queries/0_stateless/02514_analyzer_drop_join_on.reference index bbfdf1ad5f4..2c62e278050 100644 --- a/tests/queries/0_stateless/02514_analyzer_drop_join_on.reference +++ b/tests/queries/0_stateless/02514_analyzer_drop_join_on.reference @@ -8,21 +8,24 @@ Header: count() UInt64 Aggregating Header: __table1.a2 String count() UInt64 - Expression (Before GROUP BY) + Expression ((Before GROUP BY + DROP unused columns after JOIN)) Header: __table1.a2 String Join (JOIN FillRightFirst) Header: __table1.a2 String - Expression (JOIN actions) + __table3.c1 UInt64 + Expression ((JOIN actions + DROP unused columns after JOIN)) Header: __table1.a2 String __table3.c1 UInt64 Join (JOIN FillRightFirst) Header: __table1.a2 String + __table2.b1 UInt64 __table3.c1 UInt64 - Expression (JOIN actions) + Expression ((JOIN actions + DROP unused columns after JOIN)) Header: __table1.a2 String __table2.b1 UInt64 Join (JOIN FillRightFirst) - Header: __table1.a2 String + Header: __table1.a1 UInt64 + __table1.a2 String __table2.b1 UInt64 Expression ((JOIN actions + Change column names to column identifiers)) Header: __table1.a1 UInt64 @@ -45,32 +48,39 @@ Header: count() UInt64 EXPLAIN PLAN header = 1 SELECT a.a2, d.d2 FROM a JOIN b USING (k) JOIN c USING (k) JOIN d USING (k) ; -Expression ((Project names + Projection)) +Expression ((Project names + 
(Projection + DROP unused columns after JOIN))) Header: a2 String d2 String Join (JOIN FillRightFirst) Header: __table1.a2 String + __table1.k UInt64 __table4.d2 String - Join (JOIN FillRightFirst) + Expression (DROP unused columns after JOIN) Header: __table1.a2 String __table1.k UInt64 Join (JOIN FillRightFirst) Header: __table1.a2 String __table1.k UInt64 - Expression (Change column names to column identifiers) + Expression (DROP unused columns after JOIN) Header: __table1.a2 String __table1.k UInt64 - ReadFromMemoryStorage - Header: a2 String - k UInt64 + Join (JOIN FillRightFirst) + Header: __table1.a2 String + __table1.k UInt64 + Expression (Change column names to column identifiers) + Header: __table1.a2 String + __table1.k UInt64 + ReadFromMemoryStorage + Header: a2 String + k UInt64 + Expression (Change column names to column identifiers) + Header: __table2.k UInt64 + ReadFromMemoryStorage + Header: k UInt64 Expression (Change column names to column identifiers) - Header: __table2.k UInt64 + Header: __table3.k UInt64 ReadFromMemoryStorage Header: k UInt64 - Expression (Change column names to column identifiers) - Header: __table3.k UInt64 - ReadFromMemoryStorage - Header: k UInt64 Expression (Change column names to column identifiers) Header: __table4.d2 String __table4.k UInt64 @@ -96,24 +106,27 @@ Header: bx String Header: __table1.a2 String __table2.bx String __table4.c2 String + __table4.c1 UInt64 Expression Header: __table1.a2 String __table2.bx String - __table4.c1 UInt64 __table4.c2 String + __table4.c1 UInt64 Join (JOIN FillRightFirst) Header: __table1.a2 String __table2.bx String - __table4.c1 UInt64 + __table2.b1 UInt64 __table4.c2 String - Expression (JOIN actions) + __table4.c1 UInt64 + Expression ((JOIN actions + DROP unused columns after JOIN)) Header: __table1.a2 String - __table2.b1 UInt64 __table2.bx String + __table2.b1 UInt64 Join (JOIN FillRightFirst) - Header: __table1.a2 String - __table2.b1 UInt64 + Header: __table1.a1 UInt64 + __table1.a2 String __table2.bx String + __table2.b1 UInt64 Expression ((JOIN actions + Change column names to column identifiers)) Header: __table1.a1 UInt64 __table1.a2 String diff --git a/tests/queries/0_stateless/02514_analyzer_drop_join_on.sql b/tests/queries/0_stateless/02514_analyzer_drop_join_on.sql index b10bf38e495..df84e2f50b2 100644 --- a/tests/queries/0_stateless/02514_analyzer_drop_join_on.sql +++ b/tests/queries/0_stateless/02514_analyzer_drop_join_on.sql @@ -16,7 +16,6 @@ CREATE TABLE d (k UInt64, d1 UInt64, d2 String) ENGINE = Memory; INSERT INTO d VALUES (1, 1, 'a'), (2, 2, 'b'), (3, 3, 'c'); SET enable_analyzer = 1; -SET query_plan_join_inner_table_selection = 'right'; -- { echoOn } diff --git a/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.reference b/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.reference index 116c78a15e4..86e7e2a6a49 100644 --- a/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.reference +++ b/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.reference @@ -5,7 +5,7 @@ 1 1 -0 +1 \N 100000000000000000000 diff --git a/tests/queries/0_stateless/02835_join_step_explain.reference b/tests/queries/0_stateless/02835_join_step_explain.reference index bdbc019d4f8..06f4a9cfc99 100644 --- a/tests/queries/0_stateless/02835_join_step_explain.reference +++ b/tests/queries/0_stateless/02835_join_step_explain.reference @@ -1,22 +1,22 @@ -Expression ((Project names + Projection)) +Expression ((Project names + (Projection + DROP unused columns 
after JOIN))) Header: id UInt64 value_1 String rhs.id UInt64 rhs.value_1 String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value_1 String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value_1 String : 3 + INPUT : 2 -> __table2.value_1 String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value_1 :: 1 -> value_1 String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value_1 :: 3 -> rhs.value_1 String : 2 -Positions: 4 0 1 2 + ALIAS __table2.value_1 :: 2 -> rhs.value_1 String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 0 2 1 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value_1 String - __table2.id UInt64 __table2.value_1 String + __table2.id UInt64 Type: INNER Strictness: ALL Algorithm: HashJoin @@ -50,25 +50,29 @@ Positions: 4 0 1 2 Parts: 1 Granules: 1 -- -Expression ((Project names + Projection)) +Expression ((Project names + (Projection + DROP unused columns after JOIN))) Header: id UInt64 value_1 String rhs.id UInt64 rhs.value_1 String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value_1 String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 + INPUT :: 2 -> __table1.value_2 UInt64 : 2 INPUT : 3 -> __table2.value_1 String : 3 - ALIAS __table1.id :: 0 -> id UInt64 : 4 + INPUT :: 4 -> __table2.value_2 UInt64 : 4 + INPUT : 5 -> __table2.id UInt64 : 5 + ALIAS __table1.id :: 0 -> id UInt64 : 6 ALIAS __table1.value_1 :: 1 -> value_1 String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value_1 :: 3 -> rhs.value_1 String : 2 -Positions: 4 0 1 2 + ALIAS __table2.value_1 :: 3 -> rhs.value_1 String : 1 + ALIAS __table2.id :: 5 -> rhs.id UInt64 : 3 +Positions: 6 0 3 1 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value_1 String - __table2.id UInt64 + __table1.value_2 UInt64 __table2.value_1 String + __table2.value_2 UInt64 + __table2.id UInt64 Type: INNER Strictness: ASOF Algorithm: HashJoin diff --git a/tests/queries/0_stateless/02835_join_step_explain.sql b/tests/queries/0_stateless/02835_join_step_explain.sql index b803ddbd911..1cdd3684a0b 100644 --- a/tests/queries/0_stateless/02835_join_step_explain.sql +++ b/tests/queries/0_stateless/02835_join_step_explain.sql @@ -19,8 +19,6 @@ CREATE TABLE test_table_2 INSERT INTO test_table_1 VALUES (0, 'Value', 0); INSERT INTO test_table_2 VALUES (0, 'Value', 0); -SET query_plan_join_inner_table_selection = 'right'; - EXPLAIN header = 1, actions = 1 SELECT lhs.id, lhs.value_1, rhs.id, rhs.value_1 FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id; diff --git a/tests/queries/0_stateless/02962_join_using_bug_57894.reference b/tests/queries/0_stateless/02962_join_using_bug_57894.reference index fc6fe462205..454655081df 100644 --- a/tests/queries/0_stateless/02962_join_using_bug_57894.reference +++ b/tests/queries/0_stateless/02962_join_using_bug_57894.reference @@ -31,7 +31,6 @@ 8 9 \N ---- analyzer --- 0 1 2 diff --git a/tests/queries/0_stateless/02962_join_using_bug_57894.sql b/tests/queries/0_stateless/02962_join_using_bug_57894.sql index e29347beb5e..96190241da5 100644 --- a/tests/queries/0_stateless/02962_join_using_bug_57894.sql +++ b/tests/queries/0_stateless/02962_join_using_bug_57894.sql @@ -21,8 +21,6 @@ SETTINGS join_algorithm = 'partial_merge'; SELECT x FROM t FULL JOIN r USING (x) ORDER BY ALL SETTINGS join_algorithm = 'full_sorting_merge'; -SELECT '--- analyzer ---'; - SET enable_analyzer = 1; SELECT x FROM t FULL JOIN r 
USING (x) ORDER BY ALL diff --git a/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.reference b/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.reference index b7718d926c6..80f4e309505 100644 --- a/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.reference +++ b/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.reference @@ -2,9 +2,7 @@ EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE lhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE lhs.id = 5; Expression ((Project names + (Projection + ))) Header: id UInt64 rhs.id UInt64 @@ -12,18 +10,18 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 1 0 2 + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 2 0 1 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: INNER Strictness: ALL Algorithm: HashJoin @@ -71,9 +69,7 @@ SELECT '--'; -- EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE rhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right'; -; +WHERE rhs.id = 5; Expression ((Project names + (Projection + ))) Header: id UInt64 rhs.id UInt64 @@ -81,18 +77,18 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 1 0 2 + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 2 0 1 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: INNER Strictness: ALL Algorithm: HashJoin @@ -140,9 +136,7 @@ SELECT '--'; -- EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE lhs.id = 5 AND rhs.id = 6 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE lhs.id = 5 AND rhs.id = 6; Expression ((Project names + (Projection + ))) Header: id UInt64 rhs.id UInt64 @@ -150,18 +144,18 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 
- ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 1 0 2 + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 2 0 1 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: INNER Strictness: ALL Algorithm: HashJoin @@ -212,9 +206,7 @@ SELECT '--'; -- EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE lhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE lhs.id = 5; Expression ((Project names + (Projection + ))) Header: id UInt64 rhs.id UInt64 @@ -222,18 +214,18 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 1 0 2 + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 2 0 1 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: LEFT Strictness: ALL Algorithm: HashJoin @@ -281,9 +273,7 @@ SELECT '--'; -- EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE rhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE rhs.id = 5; Expression ((Project names + Projection)) Header: id UInt64 rhs.id UInt64 @@ -291,31 +281,31 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 1 0 2 - Filter (WHERE) + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 2 0 1 + Filter ((WHERE + DROP unused columns after JOIN)) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Filter column: equals(__table2.id, 5_UInt8) (removed) Actions: INPUT :: 0 -> __table1.id UInt64 : 0 INPUT :: 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT :: 3 -> __table2.value String : 3 + INPUT :: 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 4 - FUNCTION equals(__table2.id : 2, 5_UInt8 :: 4) -> equals(__table2.id, 5_UInt8) UInt8 : 5 + FUNCTION equals(__table2.id : 3, 5_UInt8 :: 4) -> equals(__table2.id, 5_UInt8) UInt8 : 5 Positions: 5 0 1 2 3 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: LEFT Strictness: ALL Algorithm: HashJoin @@ -357,9 +347,7 @@ SELECT '--'; -- EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value 
FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE lhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE lhs.id = 5; Expression ((Project names + Projection)) Header: id UInt64 rhs.id UInt64 @@ -367,31 +355,31 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 1 0 2 - Filter (WHERE) + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 2 0 1 + Filter ((WHERE + DROP unused columns after JOIN)) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Filter column: equals(__table1.id, 5_UInt8) (removed) Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT :: 1 -> __table1.value String : 1 - INPUT :: 2 -> __table2.id UInt64 : 2 - INPUT :: 3 -> __table2.value String : 3 + INPUT :: 2 -> __table2.value String : 2 + INPUT :: 3 -> __table2.id UInt64 : 3 COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 4 FUNCTION equals(__table1.id : 0, 5_UInt8 :: 4) -> equals(__table1.id, 5_UInt8) UInt8 : 5 Positions: 5 0 1 2 3 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: RIGHT Strictness: ALL Algorithm: HashJoin @@ -433,9 +421,7 @@ SELECT '--'; -- EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE rhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE rhs.id = 5; Expression ((Project names + (Projection + ))) Header: id UInt64 rhs.id UInt64 @@ -443,18 +429,18 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 1 0 2 + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 2 0 1 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: RIGHT Strictness: ALL Algorithm: HashJoin @@ -502,9 +488,7 @@ SELECT '--'; -- EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE lhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE lhs.id = 5; Expression ((Project names + Projection)) Header: id UInt64 rhs.id UInt64 @@ -512,31 +496,31 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id 
UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 1 0 2 - Filter (WHERE) + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 2 0 1 + Filter ((WHERE + DROP unused columns after JOIN)) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Filter column: equals(__table1.id, 5_UInt8) (removed) Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT :: 1 -> __table1.value String : 1 - INPUT :: 2 -> __table2.id UInt64 : 2 - INPUT :: 3 -> __table2.value String : 3 + INPUT :: 2 -> __table2.value String : 2 + INPUT :: 3 -> __table2.id UInt64 : 3 COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 4 FUNCTION equals(__table1.id : 0, 5_UInt8 :: 4) -> equals(__table1.id, 5_UInt8) UInt8 : 5 Positions: 5 0 1 2 3 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: FULL Strictness: ALL Algorithm: HashJoin @@ -578,9 +562,7 @@ SELECT '--'; -- EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE rhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE rhs.id = 5; Expression ((Project names + Projection)) Header: id UInt64 rhs.id UInt64 @@ -588,31 +570,31 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 1 0 2 - Filter (WHERE) + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 2 0 1 + Filter ((WHERE + DROP unused columns after JOIN)) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Filter column: equals(__table2.id, 5_UInt8) (removed) Actions: INPUT :: 0 -> __table1.id UInt64 : 0 INPUT :: 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT :: 3 -> __table2.value String : 3 + INPUT :: 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 4 - FUNCTION equals(__table2.id : 2, 5_UInt8 :: 4) -> equals(__table2.id, 5_UInt8) UInt8 : 5 + FUNCTION equals(__table2.id : 3, 5_UInt8 :: 4) -> equals(__table2.id, 5_UInt8) UInt8 : 5 Positions: 5 0 1 2 3 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: FULL Strictness: ALL Algorithm: HashJoin @@ -654,9 +636,7 @@ SELECT '--'; -- EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE lhs.id = 5 AND rhs.id = 6 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE lhs.id = 5 AND rhs.id = 6; Expression ((Project names + Projection)) Header: id UInt64 rhs.id UInt64 @@ -664,34 +644,34 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> 
__table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 1 0 2 - Filter (WHERE) + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 2 0 1 + Filter ((WHERE + DROP unused columns after JOIN)) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Filter column: and(equals(__table1.id, 5_UInt8), equals(__table2.id, 6_UInt8)) (removed) Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT :: 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT :: 3 -> __table2.value String : 3 + INPUT :: 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 COLUMN Const(UInt8) -> 5_UInt8 UInt8 : 4 COLUMN Const(UInt8) -> 6_UInt8 UInt8 : 5 FUNCTION equals(__table1.id : 0, 5_UInt8 :: 4) -> equals(__table1.id, 5_UInt8) UInt8 : 6 - FUNCTION equals(__table2.id : 2, 6_UInt8 :: 5) -> equals(__table2.id, 6_UInt8) UInt8 : 4 + FUNCTION equals(__table2.id : 3, 6_UInt8 :: 5) -> equals(__table2.id, 6_UInt8) UInt8 : 4 FUNCTION and(equals(__table1.id, 5_UInt8) :: 6, equals(__table2.id, 6_UInt8) :: 4) -> and(equals(__table1.id, 5_UInt8), equals(__table2.id, 6_UInt8)) UInt8 : 5 Positions: 5 0 1 2 3 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: FULL Strictness: ALL Algorithm: HashJoin diff --git a/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.sql b/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.sql index d6dcc34c796..e1a13d1ce71 100644 --- a/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.sql +++ b/tests/queries/0_stateless/03036_join_filter_push_down_equivalent_sets.sql @@ -22,9 +22,7 @@ INSERT INTO test_table_2 SELECT number, number FROM numbers(10); EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE lhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE lhs.id = 5; SELECT '--'; @@ -35,9 +33,7 @@ SELECT '--'; EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE rhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right'; -; +WHERE rhs.id = 5; SELECT '--'; @@ -48,9 +44,7 @@ SELECT '--'; EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE lhs.id = 5 AND rhs.id = 6 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE lhs.id = 5 AND rhs.id = 6; SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE lhs.id = 5 AND rhs.id = 6; @@ -59,9 +53,7 @@ SELECT '--'; EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE lhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE lhs.id = 5; SELECT '--'; @@ -72,9 +64,7 @@ SELECT '--'; EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, 
rhs.value FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE rhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE rhs.id = 5; SELECT '--'; @@ -85,9 +75,7 @@ SELECT '--'; EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE lhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE lhs.id = 5; SELECT '--'; @@ -98,9 +86,7 @@ SELECT '--'; EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE rhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE rhs.id = 5; SELECT '--'; @@ -111,9 +97,7 @@ SELECT '--'; EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE lhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE lhs.id = 5; SELECT '--'; @@ -124,9 +108,7 @@ SELECT '--'; EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE rhs.id = 5 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE rhs.id = 5; SELECT '--'; @@ -137,9 +119,7 @@ SELECT '--'; EXPLAIN header = 1, actions = 1 SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id -WHERE lhs.id = 5 AND rhs.id = 6 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +WHERE lhs.id = 5 AND rhs.id = 6; SELECT '--'; diff --git a/tests/queries/0_stateless/03038_recursive_cte_postgres_4.reference b/tests/queries/0_stateless/03038_recursive_cte_postgres_4.reference index 7df38e855f6..cf070eebc38 100644 --- a/tests/queries/0_stateless/03038_recursive_cte_postgres_4.reference +++ b/tests/queries/0_stateless/03038_recursive_cte_postgres_4.reference @@ -52,9 +52,7 @@ WITH RECURSIVE search_graph AS ( FROM graph g, search_graph sg WHERE g.f = sg.t AND NOT is_cycle ) -SELECT * FROM search_graph -SETTINGS query_plan_join_inner_table_selection = 'right' -; +SELECT * FROM search_graph; 1 2 arc 1 -> 2 false [(1,2)] 1 3 arc 1 -> 3 false [(1,3)] 2 3 arc 2 -> 3 false [(2,3)] diff --git a/tests/queries/0_stateless/03038_recursive_cte_postgres_4.sql b/tests/queries/0_stateless/03038_recursive_cte_postgres_4.sql index d33ca7b078e..7dad74893b9 100644 --- a/tests/queries/0_stateless/03038_recursive_cte_postgres_4.sql +++ b/tests/queries/0_stateless/03038_recursive_cte_postgres_4.sql @@ -55,9 +55,7 @@ WITH RECURSIVE search_graph AS ( FROM graph g, search_graph sg WHERE g.f = sg.t AND NOT is_cycle ) -SELECT * FROM search_graph -SETTINGS query_plan_join_inner_table_selection = 'right' -; +SELECT * FROM search_graph; -- ordering by the path column has same effect as SEARCH DEPTH FIRST WITH RECURSIVE search_graph AS ( diff --git a/tests/queries/0_stateless/03094_one_thousand_joins.sql b/tests/queries/0_stateless/03094_one_thousand_joins.sql index 69c4fb42a6b..6ae4e4d4d3c 100644 --- a/tests/queries/0_stateless/03094_one_thousand_joins.sql +++ b/tests/queries/0_stateless/03094_one_thousand_joins.sql @@ -3,7 +3,6 @@ SET join_algorithm = 'default'; -- for 'full_sorting_merge' the query is 10x slower SET enable_analyzer = 1; -- old analyzer returns TOO_DEEP_SUBQUERIES -SET query_plan_join_inner_table_selection = 'auto'; -- 'left' is slower -- Bug 33446, marked as 'long' 
because it still runs around 10 sec SELECT * FROM (SELECT 1 AS x) t1 JOIN (SELECT 1 AS x) t2 ON t1.x = t2.x JOIN (SELECT 1 AS x) t3 ON t1.x = t3.x JOIN (SELECT 1 AS x) t4 ON t1.x = t4.x JOIN (SELECT 1 AS x) t5 ON t1.x = t5.x JOIN (SELECT 1 AS x) t6 ON t1.x = t6.x JOIN (SELECT 1 AS x) t7 ON t1.x = t7.x JOIN (SELECT 1 AS x) t8 ON t1.x = t8.x JOIN (SELECT 1 AS x) t9 ON t1.x = t9.x JOIN (SELECT 1 AS x) t10 ON t1.x = t10.x JOIN (SELECT 1 AS x) t11 ON t1.x = t11.x JOIN (SELECT 1 AS x) t12 ON t1.x = t12.x JOIN (SELECT 1 AS x) t13 ON t1.x = t13.x JOIN (SELECT 1 AS x) t14 ON t1.x = t14.x JOIN (SELECT 1 AS x) t15 ON t1.x = t15.x JOIN (SELECT 1 AS x) t16 ON t1.x = t16.x JOIN (SELECT 1 AS x) t17 ON t1.x = t17.x JOIN (SELECT 1 AS x) t18 ON t1.x = t18.x JOIN (SELECT 1 AS x) t19 ON t1.x = t19.x JOIN (SELECT 1 AS x) t20 ON t1.x = t20.x JOIN (SELECT 1 AS x) t21 ON t1.x = t21.x JOIN (SELECT 1 AS x) t22 ON t1.x = t22.x JOIN (SELECT 1 AS x) t23 ON t1.x = t23.x JOIN (SELECT 1 AS x) t24 ON t1.x = t24.x JOIN (SELECT 1 AS x) t25 ON t1.x = t25.x JOIN (SELECT 1 AS x) t26 ON t1.x = t26.x JOIN (SELECT 1 AS x) t27 ON t1.x = t27.x JOIN (SELECT 1 AS x) t28 ON t1.x = t28.x JOIN (SELECT 1 AS x) t29 ON t1.x = t29.x JOIN (SELECT 1 AS x) t30 ON t1.x = t30.x JOIN (SELECT 1 AS x) t31 ON t1.x = t31.x JOIN (SELECT 1 AS x) t32 ON t1.x = t32.x JOIN (SELECT 1 AS x) t33 ON t1.x = t33.x JOIN (SELECT 1 AS x) t34 ON t1.x = t34.x JOIN (SELECT 1 AS x) t35 ON t1.x = t35.x JOIN (SELECT 1 AS x) t36 ON t1.x = t36.x JOIN (SELECT 1 AS x) t37 ON t1.x = t37.x JOIN (SELECT 1 AS x) t38 ON t1.x = t38.x JOIN (SELECT 1 AS x) t39 ON t1.x = t39.x JOIN (SELECT 1 AS x) t40 ON t1.x = t40.x JOIN (SELECT 1 AS x) t41 ON t1.x = t41.x JOIN (SELECT 1 AS x) t42 ON t1.x = t42.x JOIN (SELECT 1 AS x) t43 ON t1.x = t43.x JOIN (SELECT 1 AS x) t44 ON t1.x = t44.x JOIN (SELECT 1 AS x) t45 ON t1.x = t45.x JOIN (SELECT 1 AS x) t46 ON t1.x = t46.x JOIN (SELECT 1 AS x) t47 ON t1.x = t47.x JOIN (SELECT 1 AS x) t48 ON t1.x = t48.x JOIN (SELECT 1 AS x) t49 ON t1.x = t49.x JOIN (SELECT 1 AS x) t50 ON t1.x = t50.x JOIN (SELECT 1 AS x) t51 ON t1.x = t51.x JOIN (SELECT 1 AS x) t52 ON t1.x = t52.x JOIN (SELECT 1 AS x) t53 ON t1.x = t53.x JOIN (SELECT 1 AS x) t54 ON t1.x = t54.x JOIN (SELECT 1 AS x) t55 ON t1.x = t55.x JOIN (SELECT 1 AS x) t56 ON t1.x = t56.x JOIN (SELECT 1 AS x) t57 ON t1.x = t57.x JOIN (SELECT 1 AS x) t58 ON t1.x = t58.x JOIN (SELECT 1 AS x) t59 ON t1.x = t59.x JOIN (SELECT 1 AS x) t60 ON t1.x = t60.x JOIN (SELECT 1 AS x) t61 ON t1.x = t61.x JOIN (SELECT 1 AS x) t62 ON t1.x = t62.x JOIN (SELECT 1 AS x) t63 ON t1.x = t63.x JOIN (SELECT 1 AS x) t64 ON t1.x = t64.x JOIN (SELECT 1 AS x) t65 ON t1.x = t65.x JOIN (SELECT 1 AS x) t66 ON t1.x = t66.x JOIN (SELECT 1 AS x) t67 ON t1.x = t67.x JOIN (SELECT 1 AS x) t68 ON t1.x = t68.x JOIN (SELECT 1 AS x) t69 ON t1.x = t69.x JOIN (SELECT 1 AS x) t70 ON t1.x = t70.x JOIN (SELECT 1 AS x) t71 ON t1.x = t71.x JOIN (SELECT 1 AS x) t72 ON t1.x = t72.x JOIN (SELECT 1 AS x) t73 ON t1.x = t73.x JOIN (SELECT 1 AS x) t74 ON t1.x = t74.x JOIN (SELECT 1 AS x) t75 ON t1.x = t75.x JOIN (SELECT 1 AS x) t76 ON t1.x = t76.x JOIN (SELECT 1 AS x) t77 ON t1.x = t77.x JOIN (SELECT 1 AS x) t78 ON t1.x = t78.x JOIN (SELECT 1 AS x) t79 ON t1.x = t79.x JOIN (SELECT 1 AS x) t80 ON t1.x = t80.x JOIN (SELECT 1 AS x) t81 ON t1.x = t81.x JOIN (SELECT 1 AS x) t82 ON t1.x = t82.x JOIN (SELECT 1 AS x) t83 ON t1.x = t83.x JOIN (SELECT 1 AS x) t84 ON t1.x = t84.x JOIN (SELECT 1 AS x) t85 ON t1.x = t85.x JOIN (SELECT 1 AS x) t86 ON t1.x = t86.x JOIN (SELECT 1 
AS x) t87 ON t1.x = t87.x JOIN (SELECT 1 AS x) t88 ON t1.x = t88.x JOIN (SELECT 1 AS x) t89 ON t1.x = t89.x JOIN (SELECT 1 AS x) t90 ON t1.x = t90.x JOIN (SELECT 1 AS x) t91 ON t1.x = t91.x JOIN (SELECT 1 AS x) t92 ON t1.x = t92.x JOIN (SELECT 1 AS x) t93 ON t1.x = t93.x JOIN (SELECT 1 AS x) t94 ON t1.x = t94.x JOIN (SELECT 1 AS x) t95 ON t1.x = t95.x JOIN (SELECT 1 AS x) t96 ON t1.x = t96.x JOIN (SELECT 1 AS x) t97 ON t1.x = t97.x JOIN (SELECT 1 AS x) t98 ON t1.x = t98.x JOIN (SELECT 1 AS x) t99 ON t1.x = t99.x JOIN (SELECT 1 AS x) t100 ON t1.x = t100.x JOIN (SELECT 1 AS x) t101 ON t1.x = t101.x JOIN (SELECT 1 AS x) t102 ON t1.x = t102.x JOIN (SELECT 1 AS x) t103 ON t1.x = t103.x JOIN (SELECT 1 AS x) t104 ON t1.x = t104.x JOIN (SELECT 1 AS x) t105 ON t1.x = t105.x JOIN (SELECT 1 AS x) t106 ON t1.x = t106.x JOIN (SELECT 1 AS x) t107 ON t1.x = t107.x JOIN (SELECT 1 AS x) t108 ON t1.x = t108.x JOIN (SELECT 1 AS x) t109 ON t1.x = t109.x JOIN (SELECT 1 AS x) t110 ON t1.x = t110.x JOIN (SELECT 1 AS x) t111 ON t1.x = t111.x JOIN (SELECT 1 AS x) t112 ON t1.x = t112.x JOIN (SELECT 1 AS x) t113 ON t1.x = t113.x JOIN (SELECT 1 AS x) t114 ON t1.x = t114.x JOIN (SELECT 1 AS x) t115 ON t1.x = t115.x JOIN (SELECT 1 AS x) t116 ON t1.x = t116.x JOIN (SELECT 1 AS x) t117 ON t1.x = t117.x JOIN (SELECT 1 AS x) t118 ON t1.x = t118.x JOIN (SELECT 1 AS x) t119 ON t1.x = t119.x JOIN (SELECT 1 AS x) t120 ON t1.x = t120.x JOIN (SELECT 1 AS x) t121 ON t1.x = t121.x JOIN (SELECT 1 AS x) t122 ON t1.x = t122.x JOIN (SELECT 1 AS x) t123 ON t1.x = t123.x JOIN (SELECT 1 AS x) t124 ON t1.x = t124.x JOIN (SELECT 1 AS x) t125 ON t1.x = t125.x JOIN (SELECT 1 AS x) t126 ON t1.x = t126.x JOIN (SELECT 1 AS x) t127 ON t1.x = t127.x JOIN (SELECT 1 AS x) t128 ON t1.x = t128.x JOIN (SELECT 1 AS x) t129 ON t1.x = t129.x JOIN (SELECT 1 AS x) t130 ON t1.x = t130.x JOIN (SELECT 1 AS x) t131 ON t1.x = t131.x JOIN (SELECT 1 AS x) t132 ON t1.x = t132.x JOIN (SELECT 1 AS x) t133 ON t1.x = t133.x JOIN (SELECT 1 AS x) t134 ON t1.x = t134.x JOIN (SELECT 1 AS x) t135 ON t1.x = t135.x JOIN (SELECT 1 AS x) t136 ON t1.x = t136.x JOIN (SELECT 1 AS x) t137 ON t1.x = t137.x JOIN (SELECT 1 AS x) t138 ON t1.x = t138.x JOIN (SELECT 1 AS x) t139 ON t1.x = t139.x JOIN (SELECT 1 AS x) t140 ON t1.x = t140.x JOIN (SELECT 1 AS x) t141 ON t1.x = t141.x JOIN (SELECT 1 AS x) t142 ON t1.x = t142.x JOIN (SELECT 1 AS x) t143 ON t1.x = t143.x JOIN (SELECT 1 AS x) t144 ON t1.x = t144.x JOIN (SELECT 1 AS x) t145 ON t1.x = t145.x JOIN (SELECT 1 AS x) t146 ON t1.x = t146.x JOIN (SELECT 1 AS x) t147 ON t1.x = t147.x JOIN (SELECT 1 AS x) t148 ON t1.x = t148.x JOIN (SELECT 1 AS x) t149 ON t1.x = t149.x JOIN (SELECT 1 AS x) t150 ON t1.x = t150.x JOIN (SELECT 1 AS x) t151 ON t1.x = t151.x JOIN (SELECT 1 AS x) t152 ON t1.x = t152.x JOIN (SELECT 1 AS x) t153 ON t1.x = t153.x JOIN (SELECT 1 AS x) t154 ON t1.x = t154.x JOIN (SELECT 1 AS x) t155 ON t1.x = t155.x JOIN (SELECT 1 AS x) t156 ON t1.x = t156.x JOIN (SELECT 1 AS x) t157 ON t1.x = t157.x JOIN (SELECT 1 AS x) t158 ON t1.x = t158.x JOIN (SELECT 1 AS x) t159 ON t1.x = t159.x JOIN (SELECT 1 AS x) t160 ON t1.x = t160.x JOIN (SELECT 1 AS x) t161 ON t1.x = t161.x JOIN (SELECT 1 AS x) t162 ON t1.x = t162.x JOIN (SELECT 1 AS x) t163 ON t1.x = t163.x JOIN (SELECT 1 AS x) t164 ON t1.x = t164.x JOIN (SELECT 1 AS x) t165 ON t1.x = t165.x JOIN (SELECT 1 AS x) t166 ON t1.x = t166.x JOIN (SELECT 1 AS x) t167 ON t1.x = t167.x JOIN (SELECT 1 AS x) t168 ON t1.x = t168.x JOIN (SELECT 1 AS x) t169 ON t1.x = t169.x JOIN (SELECT 1 AS x) t170 
ON t1.x = t170.x JOIN (SELECT 1 AS x) t171 ON t1.x = t171.x JOIN (SELECT 1 AS x) t172 ON t1.x = t172.x JOIN (SELECT 1 AS x) t173 ON t1.x = t173.x JOIN (SELECT 1 AS x) t174 ON t1.x = t174.x JOIN (SELECT 1 AS x) t175 ON t1.x = t175.x JOIN (SELECT 1 AS x) t176 ON t1.x = t176.x JOIN (SELECT 1 AS x) t177 ON t1.x = t177.x JOIN (SELECT 1 AS x) t178 ON t1.x = t178.x JOIN (SELECT 1 AS x) t179 ON t1.x = t179.x JOIN (SELECT 1 AS x) t180 ON t1.x = t180.x JOIN (SELECT 1 AS x) t181 ON t1.x = t181.x JOIN (SELECT 1 AS x) t182 ON t1.x = t182.x JOIN (SELECT 1 AS x) t183 ON t1.x = t183.x JOIN (SELECT 1 AS x) t184 ON t1.x = t184.x JOIN (SELECT 1 AS x) t185 ON t1.x = t185.x JOIN (SELECT 1 AS x) t186 ON t1.x = t186.x JOIN (SELECT 1 AS x) t187 ON t1.x = t187.x JOIN (SELECT 1 AS x) t188 ON t1.x = t188.x JOIN (SELECT 1 AS x) t189 ON t1.x = t189.x JOIN (SELECT 1 AS x) t190 ON t1.x = t190.x JOIN (SELECT 1 AS x) t191 ON t1.x = t191.x JOIN (SELECT 1 AS x) t192 ON t1.x = t192.x JOIN (SELECT 1 AS x) t193 ON t1.x = t193.x JOIN (SELECT 1 AS x) t194 ON t1.x = t194.x JOIN (SELECT 1 AS x) t195 ON t1.x = t195.x JOIN (SELECT 1 AS x) t196 ON t1.x = t196.x JOIN (SELECT 1 AS x) t197 ON t1.x = t197.x JOIN (SELECT 1 AS x) t198 ON t1.x = t198.x JOIN (SELECT 1 AS x) t199 ON t1.x = t199.x JOIN (SELECT 1 AS x) t200 ON t1.x = t200.x JOIN (SELECT 1 AS x) t201 ON t1.x = t201.x JOIN (SELECT 1 AS x) t202 ON t1.x = t202.x JOIN (SELECT 1 AS x) t203 ON t1.x = t203.x JOIN (SELECT 1 AS x) t204 ON t1.x = t204.x JOIN (SELECT 1 AS x) t205 ON t1.x = t205.x JOIN (SELECT 1 AS x) t206 ON t1.x = t206.x JOIN (SELECT 1 AS x) t207 ON t1.x = t207.x JOIN (SELECT 1 AS x) t208 ON t1.x = t208.x JOIN (SELECT 1 AS x) t209 ON t1.x = t209.x JOIN (SELECT 1 AS x) t210 ON t1.x = t210.x JOIN (SELECT 1 AS x) t211 ON t1.x = t211.x JOIN (SELECT 1 AS x) t212 ON t1.x = t212.x JOIN (SELECT 1 AS x) t213 ON t1.x = t213.x JOIN (SELECT 1 AS x) t214 ON t1.x = t214.x JOIN (SELECT 1 AS x) t215 ON t1.x = t215.x JOIN (SELECT 1 AS x) t216 ON t1.x = t216.x JOIN (SELECT 1 AS x) t217 ON t1.x = t217.x JOIN (SELECT 1 AS x) t218 ON t1.x = t218.x JOIN (SELECT 1 AS x) t219 ON t1.x = t219.x JOIN (SELECT 1 AS x) t220 ON t1.x = t220.x JOIN (SELECT 1 AS x) t221 ON t1.x = t221.x JOIN (SELECT 1 AS x) t222 ON t1.x = t222.x JOIN (SELECT 1 AS x) t223 ON t1.x = t223.x JOIN (SELECT 1 AS x) t224 ON t1.x = t224.x JOIN (SELECT 1 AS x) t225 ON t1.x = t225.x JOIN (SELECT 1 AS x) t226 ON t1.x = t226.x JOIN (SELECT 1 AS x) t227 ON t1.x = t227.x JOIN (SELECT 1 AS x) t228 ON t1.x = t228.x JOIN (SELECT 1 AS x) t229 ON t1.x = t229.x JOIN (SELECT 1 AS x) t230 ON t1.x = t230.x JOIN (SELECT 1 AS x) t231 ON t1.x = t231.x JOIN (SELECT 1 AS x) t232 ON t1.x = t232.x JOIN (SELECT 1 AS x) t233 ON t1.x = t233.x JOIN (SELECT 1 AS x) t234 ON t1.x = t234.x JOIN (SELECT 1 AS x) t235 ON t1.x = t235.x JOIN (SELECT 1 AS x) t236 ON t1.x = t236.x JOIN (SELECT 1 AS x) t237 ON t1.x = t237.x JOIN (SELECT 1 AS x) t238 ON t1.x = t238.x JOIN (SELECT 1 AS x) t239 ON t1.x = t239.x JOIN (SELECT 1 AS x) t240 ON t1.x = t240.x JOIN (SELECT 1 AS x) t241 ON t1.x = t241.x JOIN (SELECT 1 AS x) t242 ON t1.x = t242.x JOIN (SELECT 1 AS x) t243 ON t1.x = t243.x JOIN (SELECT 1 AS x) t244 ON t1.x = t244.x JOIN (SELECT 1 AS x) t245 ON t1.x = t245.x JOIN (SELECT 1 AS x) t246 ON t1.x = t246.x JOIN (SELECT 1 AS x) t247 ON t1.x = t247.x JOIN (SELECT 1 AS x) t248 ON t1.x = t248.x JOIN (SELECT 1 AS x) t249 ON t1.x = t249.x JOIN (SELECT 1 AS x) t250 ON t1.x = t250.x JOIN (SELECT 1 AS x) t251 ON t1.x = t251.x JOIN (SELECT 1 AS x) t252 ON t1.x = t252.x JOIN 
(SELECT 1 AS x) t253 ON t1.x = t253.x JOIN (SELECT 1 AS x) t254 ON t1.x = t254.x JOIN (SELECT 1 AS x) t255 ON t1.x = t255.x JOIN (SELECT 1 AS x) t256 ON t1.x = t256.x JOIN (SELECT 1 AS x) t257 ON t1.x = t257.x JOIN (SELECT 1 AS x) t258 ON t1.x = t258.x JOIN (SELECT 1 AS x) t259 ON t1.x = t259.x JOIN (SELECT 1 AS x) t260 ON t1.x = t260.x JOIN (SELECT 1 AS x) t261 ON t1.x = t261.x JOIN (SELECT 1 AS x) t262 ON t1.x = t262.x JOIN (SELECT 1 AS x) t263 ON t1.x = t263.x JOIN (SELECT 1 AS x) t264 ON t1.x = t264.x JOIN (SELECT 1 AS x) t265 ON t1.x = t265.x JOIN (SELECT 1 AS x) t266 ON t1.x = t266.x JOIN (SELECT 1 AS x) t267 ON t1.x = t267.x JOIN (SELECT 1 AS x) t268 ON t1.x = t268.x JOIN (SELECT 1 AS x) t269 ON t1.x = t269.x JOIN (SELECT 1 AS x) t270 ON t1.x = t270.x JOIN (SELECT 1 AS x) t271 ON t1.x = t271.x JOIN (SELECT 1 AS x) t272 ON t1.x = t272.x JOIN (SELECT 1 AS x) t273 ON t1.x = t273.x JOIN (SELECT 1 AS x) t274 ON t1.x = t274.x JOIN (SELECT 1 AS x) t275 ON t1.x = t275.x JOIN (SELECT 1 AS x) t276 ON t1.x = t276.x JOIN (SELECT 1 AS x) t277 ON t1.x = t277.x JOIN (SELECT 1 AS x) t278 ON t1.x = t278.x JOIN (SELECT 1 AS x) t279 ON t1.x = t279.x JOIN (SELECT 1 AS x) t280 ON t1.x = t280.x JOIN (SELECT 1 AS x) t281 ON t1.x = t281.x JOIN (SELECT 1 AS x) t282 ON t1.x = t282.x JOIN (SELECT 1 AS x) t283 ON t1.x = t283.x JOIN (SELECT 1 AS x) t284 ON t1.x = t284.x JOIN (SELECT 1 AS x) t285 ON t1.x = t285.x JOIN (SELECT 1 AS x) t286 ON t1.x = t286.x JOIN (SELECT 1 AS x) t287 ON t1.x = t287.x JOIN (SELECT 1 AS x) t288 ON t1.x = t288.x JOIN (SELECT 1 AS x) t289 ON t1.x = t289.x JOIN (SELECT 1 AS x) t290 ON t1.x = t290.x JOIN (SELECT 1 AS x) t291 ON t1.x = t291.x JOIN (SELECT 1 AS x) t292 ON t1.x = t292.x JOIN (SELECT 1 AS x) t293 ON t1.x = t293.x JOIN (SELECT 1 AS x) t294 ON t1.x = t294.x JOIN (SELECT 1 AS x) t295 ON t1.x = t295.x JOIN (SELECT 1 AS x) t296 ON t1.x = t296.x JOIN (SELECT 1 AS x) t297 ON t1.x = t297.x JOIN (SELECT 1 AS x) t298 ON t1.x = t298.x JOIN (SELECT 1 AS x) t299 ON t1.x = t299.x JOIN (SELECT 1 AS x) t300 ON t1.x = t300.x JOIN (SELECT 1 AS x) t301 ON t1.x = t301.x JOIN (SELECT 1 AS x) t302 ON t1.x = t302.x JOIN (SELECT 1 AS x) t303 ON t1.x = t303.x JOIN (SELECT 1 AS x) t304 ON t1.x = t304.x JOIN (SELECT 1 AS x) t305 ON t1.x = t305.x JOIN (SELECT 1 AS x) t306 ON t1.x = t306.x JOIN (SELECT 1 AS x) t307 ON t1.x = t307.x JOIN (SELECT 1 AS x) t308 ON t1.x = t308.x JOIN (SELECT 1 AS x) t309 ON t1.x = t309.x JOIN (SELECT 1 AS x) t310 ON t1.x = t310.x JOIN (SELECT 1 AS x) t311 ON t1.x = t311.x JOIN (SELECT 1 AS x) t312 ON t1.x = t312.x JOIN (SELECT 1 AS x) t313 ON t1.x = t313.x JOIN (SELECT 1 AS x) t314 ON t1.x = t314.x JOIN (SELECT 1 AS x) t315 ON t1.x = t315.x JOIN (SELECT 1 AS x) t316 ON t1.x = t316.x JOIN (SELECT 1 AS x) t317 ON t1.x = t317.x JOIN (SELECT 1 AS x) t318 ON t1.x = t318.x JOIN (SELECT 1 AS x) t319 ON t1.x = t319.x JOIN (SELECT 1 AS x) t320 ON t1.x = t320.x JOIN (SELECT 1 AS x) t321 ON t1.x = t321.x JOIN (SELECT 1 AS x) t322 ON t1.x = t322.x JOIN (SELECT 1 AS x) t323 ON t1.x = t323.x JOIN (SELECT 1 AS x) t324 ON t1.x = t324.x JOIN (SELECT 1 AS x) t325 ON t1.x = t325.x JOIN (SELECT 1 AS x) t326 ON t1.x = t326.x JOIN (SELECT 1 AS x) t327 ON t1.x = t327.x JOIN (SELECT 1 AS x) t328 ON t1.x = t328.x JOIN (SELECT 1 AS x) t329 ON t1.x = t329.x JOIN (SELECT 1 AS x) t330 ON t1.x = t330.x JOIN (SELECT 1 AS x) t331 ON t1.x = t331.x JOIN (SELECT 1 AS x) t332 ON t1.x = t332.x JOIN (SELECT 1 AS x) t333 ON t1.x = t333.x JOIN (SELECT 1 AS x) t334 ON t1.x = t334.x JOIN (SELECT 1 AS x) t335 ON t1.x 
= t335.x JOIN (SELECT 1 AS x) t336 ON t1.x = t336.x JOIN (SELECT 1 AS x) t337 ON t1.x = t337.x JOIN (SELECT 1 AS x) t338 ON t1.x = t338.x JOIN (SELECT 1 AS x) t339 ON t1.x = t339.x JOIN (SELECT 1 AS x) t340 ON t1.x = t340.x JOIN (SELECT 1 AS x) t341 ON t1.x = t341.x JOIN (SELECT 1 AS x) t342 ON t1.x = t342.x JOIN (SELECT 1 AS x) t343 ON t1.x = t343.x JOIN (SELECT 1 AS x) t344 ON t1.x = t344.x JOIN (SELECT 1 AS x) t345 ON t1.x = t345.x JOIN (SELECT 1 AS x) t346 ON t1.x = t346.x JOIN (SELECT 1 AS x) t347 ON t1.x = t347.x JOIN (SELECT 1 AS x) t348 ON t1.x = t348.x JOIN (SELECT 1 AS x) t349 ON t1.x = t349.x JOIN (SELECT 1 AS x) t350 ON t1.x = t350.x JOIN (SELECT 1 AS x) t351 ON t1.x = t351.x JOIN (SELECT 1 AS x) t352 ON t1.x = t352.x JOIN (SELECT 1 AS x) t353 ON t1.x = t353.x JOIN (SELECT 1 AS x) t354 ON t1.x = t354.x JOIN (SELECT 1 AS x) t355 ON t1.x = t355.x JOIN (SELECT 1 AS x) t356 ON t1.x = t356.x JOIN (SELECT 1 AS x) t357 ON t1.x = t357.x JOIN (SELECT 1 AS x) t358 ON t1.x = t358.x JOIN (SELECT 1 AS x) t359 ON t1.x = t359.x JOIN (SELECT 1 AS x) t360 ON t1.x = t360.x JOIN (SELECT 1 AS x) t361 ON t1.x = t361.x JOIN (SELECT 1 AS x) t362 ON t1.x = t362.x JOIN (SELECT 1 AS x) t363 ON t1.x = t363.x JOIN (SELECT 1 AS x) t364 ON t1.x = t364.x JOIN (SELECT 1 AS x) t365 ON t1.x = t365.x JOIN (SELECT 1 AS x) t366 ON t1.x = t366.x JOIN (SELECT 1 AS x) t367 ON t1.x = t367.x JOIN (SELECT 1 AS x) t368 ON t1.x = t368.x JOIN (SELECT 1 AS x) t369 ON t1.x = t369.x JOIN (SELECT 1 AS x) t370 ON t1.x = t370.x JOIN (SELECT 1 AS x) t371 ON t1.x = t371.x JOIN (SELECT 1 AS x) t372 ON t1.x = t372.x JOIN (SELECT 1 AS x) t373 ON t1.x = t373.x JOIN (SELECT 1 AS x) t374 ON t1.x = t374.x JOIN (SELECT 1 AS x) t375 ON t1.x = t375.x JOIN (SELECT 1 AS x) t376 ON t1.x = t376.x JOIN (SELECT 1 AS x) t377 ON t1.x = t377.x JOIN (SELECT 1 AS x) t378 ON t1.x = t378.x JOIN (SELECT 1 AS x) t379 ON t1.x = t379.x JOIN (SELECT 1 AS x) t380 ON t1.x = t380.x JOIN (SELECT 1 AS x) t381 ON t1.x = t381.x JOIN (SELECT 1 AS x) t382 ON t1.x = t382.x JOIN (SELECT 1 AS x) t383 ON t1.x = t383.x JOIN (SELECT 1 AS x) t384 ON t1.x = t384.x JOIN (SELECT 1 AS x) t385 ON t1.x = t385.x JOIN (SELECT 1 AS x) t386 ON t1.x = t386.x JOIN (SELECT 1 AS x) t387 ON t1.x = t387.x JOIN (SELECT 1 AS x) t388 ON t1.x = t388.x JOIN (SELECT 1 AS x) t389 ON t1.x = t389.x JOIN (SELECT 1 AS x) t390 ON t1.x = t390.x JOIN (SELECT 1 AS x) t391 ON t1.x = t391.x JOIN (SELECT 1 AS x) t392 ON t1.x = t392.x JOIN (SELECT 1 AS x) t393 ON t1.x = t393.x JOIN (SELECT 1 AS x) t394 ON t1.x = t394.x JOIN (SELECT 1 AS x) t395 ON t1.x = t395.x JOIN (SELECT 1 AS x) t396 ON t1.x = t396.x JOIN (SELECT 1 AS x) t397 ON t1.x = t397.x JOIN (SELECT 1 AS x) t398 ON t1.x = t398.x JOIN (SELECT 1 AS x) t399 ON t1.x = t399.x JOIN (SELECT 1 AS x) t400 ON t1.x = t400.x JOIN (SELECT 1 AS x) t401 ON t1.x = t401.x JOIN (SELECT 1 AS x) t402 ON t1.x = t402.x JOIN (SELECT 1 AS x) t403 ON t1.x = t403.x JOIN (SELECT 1 AS x) t404 ON t1.x = t404.x JOIN (SELECT 1 AS x) t405 ON t1.x = t405.x JOIN (SELECT 1 AS x) t406 ON t1.x = t406.x JOIN (SELECT 1 AS x) t407 ON t1.x = t407.x JOIN (SELECT 1 AS x) t408 ON t1.x = t408.x JOIN (SELECT 1 AS x) t409 ON t1.x = t409.x JOIN (SELECT 1 AS x) t410 ON t1.x = t410.x JOIN (SELECT 1 AS x) t411 ON t1.x = t411.x JOIN (SELECT 1 AS x) t412 ON t1.x = t412.x JOIN (SELECT 1 AS x) t413 ON t1.x = t413.x JOIN (SELECT 1 AS x) t414 ON t1.x = t414.x JOIN (SELECT 1 AS x) t415 ON t1.x = t415.x JOIN (SELECT 1 AS x) t416 ON t1.x = t416.x JOIN (SELECT 1 AS x) t417 ON t1.x = t417.x JOIN (SELECT 1 AS 
x) t418 ON t1.x = t418.x JOIN (SELECT 1 AS x) t419 ON t1.x = t419.x JOIN (SELECT 1 AS x) t420 ON t1.x = t420.x JOIN (SELECT 1 AS x) t421 ON t1.x = t421.x JOIN (SELECT 1 AS x) t422 ON t1.x = t422.x JOIN (SELECT 1 AS x) t423 ON t1.x = t423.x JOIN (SELECT 1 AS x) t424 ON t1.x = t424.x JOIN (SELECT 1 AS x) t425 ON t1.x = t425.x JOIN (SELECT 1 AS x) t426 ON t1.x = t426.x JOIN (SELECT 1 AS x) t427 ON t1.x = t427.x JOIN (SELECT 1 AS x) t428 ON t1.x = t428.x JOIN (SELECT 1 AS x) t429 ON t1.x = t429.x JOIN (SELECT 1 AS x) t430 ON t1.x = t430.x JOIN (SELECT 1 AS x) t431 ON t1.x = t431.x JOIN (SELECT 1 AS x) t432 ON t1.x = t432.x JOIN (SELECT 1 AS x) t433 ON t1.x = t433.x JOIN (SELECT 1 AS x) t434 ON t1.x = t434.x JOIN (SELECT 1 AS x) t435 ON t1.x = t435.x JOIN (SELECT 1 AS x) t436 ON t1.x = t436.x JOIN (SELECT 1 AS x) t437 ON t1.x = t437.x JOIN (SELECT 1 AS x) t438 ON t1.x = t438.x JOIN (SELECT 1 AS x) t439 ON t1.x = t439.x JOIN (SELECT 1 AS x) t440 ON t1.x = t440.x JOIN (SELECT 1 AS x) t441 ON t1.x = t441.x JOIN (SELECT 1 AS x) t442 ON t1.x = t442.x JOIN (SELECT 1 AS x) t443 ON t1.x = t443.x JOIN (SELECT 1 AS x) t444 ON t1.x = t444.x JOIN (SELECT 1 AS x) t445 ON t1.x = t445.x JOIN (SELECT 1 AS x) t446 ON t1.x = t446.x JOIN (SELECT 1 AS x) t447 ON t1.x = t447.x JOIN (SELECT 1 AS x) t448 ON t1.x = t448.x JOIN (SELECT 1 AS x) t449 ON t1.x = t449.x JOIN (SELECT 1 AS x) t450 ON t1.x = t450.x JOIN (SELECT 1 AS x) t451 ON t1.x = t451.x JOIN (SELECT 1 AS x) t452 ON t1.x = t452.x JOIN (SELECT 1 AS x) t453 ON t1.x = t453.x JOIN (SELECT 1 AS x) t454 ON t1.x = t454.x JOIN (SELECT 1 AS x) t455 ON t1.x = t455.x JOIN (SELECT 1 AS x) t456 ON t1.x = t456.x JOIN (SELECT 1 AS x) t457 ON t1.x = t457.x JOIN (SELECT 1 AS x) t458 ON t1.x = t458.x JOIN (SELECT 1 AS x) t459 ON t1.x = t459.x JOIN (SELECT 1 AS x) t460 ON t1.x = t460.x JOIN (SELECT 1 AS x) t461 ON t1.x = t461.x JOIN (SELECT 1 AS x) t462 ON t1.x = t462.x JOIN (SELECT 1 AS x) t463 ON t1.x = t463.x JOIN (SELECT 1 AS x) t464 ON t1.x = t464.x JOIN (SELECT 1 AS x) t465 ON t1.x = t465.x JOIN (SELECT 1 AS x) t466 ON t1.x = t466.x JOIN (SELECT 1 AS x) t467 ON t1.x = t467.x JOIN (SELECT 1 AS x) t468 ON t1.x = t468.x JOIN (SELECT 1 AS x) t469 ON t1.x = t469.x JOIN (SELECT 1 AS x) t470 ON t1.x = t470.x JOIN (SELECT 1 AS x) t471 ON t1.x = t471.x JOIN (SELECT 1 AS x) t472 ON t1.x = t472.x JOIN (SELECT 1 AS x) t473 ON t1.x = t473.x JOIN (SELECT 1 AS x) t474 ON t1.x = t474.x JOIN (SELECT 1 AS x) t475 ON t1.x = t475.x JOIN (SELECT 1 AS x) t476 ON t1.x = t476.x JOIN (SELECT 1 AS x) t477 ON t1.x = t477.x JOIN (SELECT 1 AS x) t478 ON t1.x = t478.x JOIN (SELECT 1 AS x) t479 ON t1.x = t479.x JOIN (SELECT 1 AS x) t480 ON t1.x = t480.x JOIN (SELECT 1 AS x) t481 ON t1.x = t481.x JOIN (SELECT 1 AS x) t482 ON t1.x = t482.x JOIN (SELECT 1 AS x) t483 ON t1.x = t483.x JOIN (SELECT 1 AS x) t484 ON t1.x = t484.x JOIN (SELECT 1 AS x) t485 ON t1.x = t485.x JOIN (SELECT 1 AS x) t486 ON t1.x = t486.x JOIN (SELECT 1 AS x) t487 ON t1.x = t487.x JOIN (SELECT 1 AS x) t488 ON t1.x = t488.x JOIN (SELECT 1 AS x) t489 ON t1.x = t489.x JOIN (SELECT 1 AS x) t490 ON t1.x = t490.x JOIN (SELECT 1 AS x) t491 ON t1.x = t491.x JOIN (SELECT 1 AS x) t492 ON t1.x = t492.x JOIN (SELECT 1 AS x) t493 ON t1.x = t493.x JOIN (SELECT 1 AS x) t494 ON t1.x = t494.x JOIN (SELECT 1 AS x) t495 ON t1.x = t495.x JOIN (SELECT 1 AS x) t496 ON t1.x = t496.x JOIN (SELECT 1 AS x) t497 ON t1.x = t497.x JOIN (SELECT 1 AS x) t498 ON t1.x = t498.x JOIN (SELECT 1 AS x) t499 ON t1.x = t499.x JOIN (SELECT 1 AS x) t500 ON t1.x = t500.x 
JOIN (SELECT 1 AS x) t501 ON t1.x = t501.x JOIN (SELECT 1 AS x) t502 ON t1.x = t502.x JOIN (SELECT 1 AS x) t503 ON t1.x = t503.x JOIN (SELECT 1 AS x) t504 ON t1.x = t504.x JOIN (SELECT 1 AS x) t505 ON t1.x = t505.x JOIN (SELECT 1 AS x) t506 ON t1.x = t506.x JOIN (SELECT 1 AS x) t507 ON t1.x = t507.x JOIN (SELECT 1 AS x) t508 ON t1.x = t508.x JOIN (SELECT 1 AS x) t509 ON t1.x = t509.x JOIN (SELECT 1 AS x) t510 ON t1.x = t510.x JOIN (SELECT 1 AS x) t511 ON t1.x = t511.x JOIN (SELECT 1 AS x) t512 ON t1.x = t512.x JOIN (SELECT 1 AS x) t513 ON t1.x = t513.x JOIN (SELECT 1 AS x) t514 ON t1.x = t514.x JOIN (SELECT 1 AS x) t515 ON t1.x = t515.x JOIN (SELECT 1 AS x) t516 ON t1.x = t516.x JOIN (SELECT 1 AS x) t517 ON t1.x = t517.x JOIN (SELECT 1 AS x) t518 ON t1.x = t518.x JOIN (SELECT 1 AS x) t519 ON t1.x = t519.x JOIN (SELECT 1 AS x) t520 ON t1.x = t520.x JOIN (SELECT 1 AS x) t521 ON t1.x = t521.x JOIN (SELECT 1 AS x) t522 ON t1.x = t522.x JOIN (SELECT 1 AS x) t523 ON t1.x = t523.x JOIN (SELECT 1 AS x) t524 ON t1.x = t524.x JOIN (SELECT 1 AS x) t525 ON t1.x = t525.x JOIN (SELECT 1 AS x) t526 ON t1.x = t526.x JOIN (SELECT 1 AS x) t527 ON t1.x = t527.x JOIN (SELECT 1 AS x) t528 ON t1.x = t528.x JOIN (SELECT 1 AS x) t529 ON t1.x = t529.x JOIN (SELECT 1 AS x) t530 ON t1.x = t530.x JOIN (SELECT 1 AS x) t531 ON t1.x = t531.x JOIN (SELECT 1 AS x) t532 ON t1.x = t532.x JOIN (SELECT 1 AS x) t533 ON t1.x = t533.x JOIN (SELECT 1 AS x) t534 ON t1.x = t534.x JOIN (SELECT 1 AS x) t535 ON t1.x = t535.x JOIN (SELECT 1 AS x) t536 ON t1.x = t536.x JOIN (SELECT 1 AS x) t537 ON t1.x = t537.x JOIN (SELECT 1 AS x) t538 ON t1.x = t538.x JOIN (SELECT 1 AS x) t539 ON t1.x = t539.x JOIN (SELECT 1 AS x) t540 ON t1.x = t540.x JOIN (SELECT 1 AS x) t541 ON t1.x = t541.x JOIN (SELECT 1 AS x) t542 ON t1.x = t542.x JOIN (SELECT 1 AS x) t543 ON t1.x = t543.x JOIN (SELECT 1 AS x) t544 ON t1.x = t544.x JOIN (SELECT 1 AS x) t545 ON t1.x = t545.x JOIN (SELECT 1 AS x) t546 ON t1.x = t546.x JOIN (SELECT 1 AS x) t547 ON t1.x = t547.x JOIN (SELECT 1 AS x) t548 ON t1.x = t548.x JOIN (SELECT 1 AS x) t549 ON t1.x = t549.x JOIN (SELECT 1 AS x) t550 ON t1.x = t550.x JOIN (SELECT 1 AS x) t551 ON t1.x = t551.x JOIN (SELECT 1 AS x) t552 ON t1.x = t552.x JOIN (SELECT 1 AS x) t553 ON t1.x = t553.x JOIN (SELECT 1 AS x) t554 ON t1.x = t554.x JOIN (SELECT 1 AS x) t555 ON t1.x = t555.x JOIN (SELECT 1 AS x) t556 ON t1.x = t556.x JOIN (SELECT 1 AS x) t557 ON t1.x = t557.x JOIN (SELECT 1 AS x) t558 ON t1.x = t558.x JOIN (SELECT 1 AS x) t559 ON t1.x = t559.x JOIN (SELECT 1 AS x) t560 ON t1.x = t560.x JOIN (SELECT 1 AS x) t561 ON t1.x = t561.x JOIN (SELECT 1 AS x) t562 ON t1.x = t562.x JOIN (SELECT 1 AS x) t563 ON t1.x = t563.x JOIN (SELECT 1 AS x) t564 ON t1.x = t564.x JOIN (SELECT 1 AS x) t565 ON t1.x = t565.x JOIN (SELECT 1 AS x) t566 ON t1.x = t566.x JOIN (SELECT 1 AS x) t567 ON t1.x = t567.x JOIN (SELECT 1 AS x) t568 ON t1.x = t568.x JOIN (SELECT 1 AS x) t569 ON t1.x = t569.x JOIN (SELECT 1 AS x) t570 ON t1.x = t570.x JOIN (SELECT 1 AS x) t571 ON t1.x = t571.x JOIN (SELECT 1 AS x) t572 ON t1.x = t572.x JOIN (SELECT 1 AS x) t573 ON t1.x = t573.x JOIN (SELECT 1 AS x) t574 ON t1.x = t574.x JOIN (SELECT 1 AS x) t575 ON t1.x = t575.x JOIN (SELECT 1 AS x) t576 ON t1.x = t576.x JOIN (SELECT 1 AS x) t577 ON t1.x = t577.x JOIN (SELECT 1 AS x) t578 ON t1.x = t578.x JOIN (SELECT 1 AS x) t579 ON t1.x = t579.x JOIN (SELECT 1 AS x) t580 ON t1.x = t580.x JOIN (SELECT 1 AS x) t581 ON t1.x = t581.x JOIN (SELECT 1 AS x) t582 ON t1.x = t582.x JOIN (SELECT 1 AS x) t583 ON 
t1.x = t583.x JOIN (SELECT 1 AS x) t584 ON t1.x = t584.x JOIN (SELECT 1 AS x) t585 ON t1.x = t585.x JOIN (SELECT 1 AS x) t586 ON t1.x = t586.x JOIN (SELECT 1 AS x) t587 ON t1.x = t587.x JOIN (SELECT 1 AS x) t588 ON t1.x = t588.x JOIN (SELECT 1 AS x) t589 ON t1.x = t589.x JOIN (SELECT 1 AS x) t590 ON t1.x = t590.x JOIN (SELECT 1 AS x) t591 ON t1.x = t591.x JOIN (SELECT 1 AS x) t592 ON t1.x = t592.x JOIN (SELECT 1 AS x) t593 ON t1.x = t593.x JOIN (SELECT 1 AS x) t594 ON t1.x = t594.x JOIN (SELECT 1 AS x) t595 ON t1.x = t595.x JOIN (SELECT 1 AS x) t596 ON t1.x = t596.x JOIN (SELECT 1 AS x) t597 ON t1.x = t597.x JOIN (SELECT 1 AS x) t598 ON t1.x = t598.x JOIN (SELECT 1 AS x) t599 ON t1.x = t599.x JOIN (SELECT 1 AS x) t600 ON t1.x = t600.x JOIN (SELECT 1 AS x) t601 ON t1.x = t601.x JOIN (SELECT 1 AS x) t602 ON t1.x = t602.x JOIN (SELECT 1 AS x) t603 ON t1.x = t603.x JOIN (SELECT 1 AS x) t604 ON t1.x = t604.x JOIN (SELECT 1 AS x) t605 ON t1.x = t605.x JOIN (SELECT 1 AS x) t606 ON t1.x = t606.x JOIN (SELECT 1 AS x) t607 ON t1.x = t607.x JOIN (SELECT 1 AS x) t608 ON t1.x = t608.x JOIN (SELECT 1 AS x) t609 ON t1.x = t609.x JOIN (SELECT 1 AS x) t610 ON t1.x = t610.x JOIN (SELECT 1 AS x) t611 ON t1.x = t611.x JOIN (SELECT 1 AS x) t612 ON t1.x = t612.x JOIN (SELECT 1 AS x) t613 ON t1.x = t613.x JOIN (SELECT 1 AS x) t614 ON t1.x = t614.x JOIN (SELECT 1 AS x) t615 ON t1.x = t615.x JOIN (SELECT 1 AS x) t616 ON t1.x = t616.x JOIN (SELECT 1 AS x) t617 ON t1.x = t617.x JOIN (SELECT 1 AS x) t618 ON t1.x = t618.x JOIN (SELECT 1 AS x) t619 ON t1.x = t619.x JOIN (SELECT 1 AS x) t620 ON t1.x = t620.x JOIN (SELECT 1 AS x) t621 ON t1.x = t621.x JOIN (SELECT 1 AS x) t622 ON t1.x = t622.x JOIN (SELECT 1 AS x) t623 ON t1.x = t623.x JOIN (SELECT 1 AS x) t624 ON t1.x = t624.x JOIN (SELECT 1 AS x) t625 ON t1.x = t625.x JOIN (SELECT 1 AS x) t626 ON t1.x = t626.x JOIN (SELECT 1 AS x) t627 ON t1.x = t627.x JOIN (SELECT 1 AS x) t628 ON t1.x = t628.x JOIN (SELECT 1 AS x) t629 ON t1.x = t629.x JOIN (SELECT 1 AS x) t630 ON t1.x = t630.x JOIN (SELECT 1 AS x) t631 ON t1.x = t631.x JOIN (SELECT 1 AS x) t632 ON t1.x = t632.x JOIN (SELECT 1 AS x) t633 ON t1.x = t633.x JOIN (SELECT 1 AS x) t634 ON t1.x = t634.x JOIN (SELECT 1 AS x) t635 ON t1.x = t635.x JOIN (SELECT 1 AS x) t636 ON t1.x = t636.x JOIN (SELECT 1 AS x) t637 ON t1.x = t637.x JOIN (SELECT 1 AS x) t638 ON t1.x = t638.x JOIN (SELECT 1 AS x) t639 ON t1.x = t639.x JOIN (SELECT 1 AS x) t640 ON t1.x = t640.x JOIN (SELECT 1 AS x) t641 ON t1.x = t641.x JOIN (SELECT 1 AS x) t642 ON t1.x = t642.x JOIN (SELECT 1 AS x) t643 ON t1.x = t643.x JOIN (SELECT 1 AS x) t644 ON t1.x = t644.x JOIN (SELECT 1 AS x) t645 ON t1.x = t645.x JOIN (SELECT 1 AS x) t646 ON t1.x = t646.x JOIN (SELECT 1 AS x) t647 ON t1.x = t647.x JOIN (SELECT 1 AS x) t648 ON t1.x = t648.x JOIN (SELECT 1 AS x) t649 ON t1.x = t649.x JOIN (SELECT 1 AS x) t650 ON t1.x = t650.x JOIN (SELECT 1 AS x) t651 ON t1.x = t651.x JOIN (SELECT 1 AS x) t652 ON t1.x = t652.x JOIN (SELECT 1 AS x) t653 ON t1.x = t653.x JOIN (SELECT 1 AS x) t654 ON t1.x = t654.x JOIN (SELECT 1 AS x) t655 ON t1.x = t655.x JOIN (SELECT 1 AS x) t656 ON t1.x = t656.x JOIN (SELECT 1 AS x) t657 ON t1.x = t657.x JOIN (SELECT 1 AS x) t658 ON t1.x = t658.x JOIN (SELECT 1 AS x) t659 ON t1.x = t659.x JOIN (SELECT 1 AS x) t660 ON t1.x = t660.x JOIN (SELECT 1 AS x) t661 ON t1.x = t661.x JOIN (SELECT 1 AS x) t662 ON t1.x = t662.x JOIN (SELECT 1 AS x) t663 ON t1.x = t663.x JOIN (SELECT 1 AS x) t664 ON t1.x = t664.x JOIN (SELECT 1 AS x) t665 ON t1.x = t665.x JOIN (SELECT 1 
AS x) t666 ON t1.x = t666.x diff --git a/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.reference b/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.reference index 5fde4f80c5d..d35bdeff98b 100644 --- a/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.reference +++ b/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.reference @@ -5,18 +5,18 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 0 1 2 + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 0 2 1 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: INNER Strictness: ALL Algorithm: HashJoin @@ -75,18 +75,18 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 0 1 2 + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 0 2 1 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: INNER Strictness: ALL Algorithm: HashJoin @@ -145,18 +145,18 @@ Header: id UInt64 rhs.value String Actions: INPUT : 0 -> __table1.id UInt64 : 0 INPUT : 1 -> __table1.value String : 1 - INPUT : 2 -> __table2.id UInt64 : 2 - INPUT : 3 -> __table2.value String : 3 + INPUT : 2 -> __table2.value String : 2 + INPUT : 3 -> __table2.id UInt64 : 3 ALIAS __table1.id :: 0 -> id UInt64 : 4 ALIAS __table1.value :: 1 -> value String : 0 - ALIAS __table2.id :: 2 -> rhs.id UInt64 : 1 - ALIAS __table2.value :: 3 -> rhs.value String : 2 -Positions: 4 0 1 2 + ALIAS __table2.value :: 2 -> rhs.value String : 1 + ALIAS __table2.id :: 3 -> rhs.id UInt64 : 2 +Positions: 4 0 2 1 Join (JOIN FillRightFirst) Header: __table1.id UInt64 __table1.value String - __table2.id UInt64 __table2.value String + __table2.id UInt64 Type: INNER Strictness: ALL Algorithm: HashJoin diff --git a/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql b/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql index ddefc322b4f..b3d1827d98f 100644 --- a/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql +++ b/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql @@ -22,10 +22,7 @@ SETTINGS index_granularity = 16 INSERT INTO test_table_1 VALUES (1, 'Value_1'), (2, 'Value_2'); INSERT INTO test_table_2 VALUES (2, 'Value_2'), (3, 'Value_3'); - -EXPLAIN header = 1, actions = 1 SELECT * FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE rhs.id != 0 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +EXPLAIN header = 1, 
actions = 1 SELECT * FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE rhs.id != 0; SELECT '--'; @@ -33,9 +30,7 @@ SELECT * FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs. SELECT '--'; -EXPLAIN header = 1, actions = 1 SELECT * FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE lhs.id != 0 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +EXPLAIN header = 1, actions = 1 SELECT * FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE lhs.id != 0; SELECT '--'; @@ -43,9 +38,7 @@ SELECT * FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs SELECT '--'; -EXPLAIN header = 1, actions = 1 SELECT * FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE lhs.id != 0 AND rhs.id != 0 -SETTINGS query_plan_join_inner_table_selection = 'right' -; +EXPLAIN header = 1, actions = 1 SELECT * FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE lhs.id != 0 AND rhs.id != 0; SELECT '--'; diff --git a/tests/queries/0_stateless/03152_join_filter_push_down_equivalent_columns.reference b/tests/queries/0_stateless/03152_join_filter_push_down_equivalent_columns.reference index 1c82e76cc65..7058d36aaf9 100644 --- a/tests/queries/0_stateless/03152_join_filter_push_down_equivalent_columns.reference +++ b/tests/queries/0_stateless/03152_join_filter_push_down_equivalent_columns.reference @@ -65,7 +65,8 @@ SELECT name FROM users RIGHT JOIN users2 USING name WHERE users2.name ='Alice'; Expression ((Project names + (Projection + ))) Header: name String Join (JOIN FillRightFirst) - Header: __table2.name String + Header: __table1.name String + __table2.name String Filter (( + Change column names to column identifiers)) Header: __table1.name String ReadFromMergeTree (default.users) diff --git a/tests/queries/0_stateless/03236_squashing_high_memory.sql b/tests/queries/0_stateless/03236_squashing_high_memory.sql index eeb3ae85e84..f6e5dbdef03 100644 --- a/tests/queries/0_stateless/03236_squashing_high_memory.sql +++ b/tests/queries/0_stateless/03236_squashing_high_memory.sql @@ -11,7 +11,6 @@ CREATE TABLE id_values ENGINE MergeTree ORDER BY id1 AS SELECT arrayJoin(range(500000)) AS id1, arrayJoin(range(1000)) AS id2; SET max_memory_usage = '1G'; -SET query_plan_join_inner_table_selection = 'right'; CREATE TABLE test_table ENGINE MergeTree ORDER BY id AS SELECT id_values.id1 AS id, From 4e30cf7e333312968bebe57dc0f6dd381cbccff5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 6 Nov 2024 16:30:16 +0100 Subject: [PATCH 313/353] Cleanup SettingsChangesHistory for revert --- src/Core/SettingsChangesHistory.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index ed87fde8b7e..64964f294bd 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -73,7 +73,6 @@ static std::initializer_list Date: Wed, 6 Nov 2024 10:50:45 +0100 Subject: [PATCH 314/353] Upgrade clickhouse-server and keeper base images --- docker/keeper/Dockerfile | 10 +++++++--- docker/server/Dockerfile.ubuntu | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index bc76bdbb619..4ecc087afb4 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -1,7 +1,7 @@ # The Dockerfile.ubuntu exists for the tests/ci/docker_server.py script # If the image is built from 
Dockerfile.alpine, then the `-alpine` suffix is added automatically, # so the only purpose of Dockerfile.ubuntu is to push `latest`, `head` and so on w/o suffixes -FROM ubuntu:20.04 AS glibc-donor +FROM ubuntu:22.04 AS glibc-donor ARG TARGETARCH RUN arch=${TARGETARCH:-amd64} \ @@ -9,7 +9,11 @@ RUN arch=${TARGETARCH:-amd64} \ amd64) rarch=x86_64 ;; \ arm64) rarch=aarch64 ;; \ esac \ - && ln -s "${rarch}-linux-gnu" /lib/linux-gnu + && ln -s "${rarch}-linux-gnu" /lib/linux-gnu \ + && case $arch in \ + amd64) ln /lib/linux-gnu/ld-linux-x86-64.so.2 /lib/linux-gnu/ld-2.35.so ;; \ + arm64) ln /lib/linux-gnu/ld-linux-aarch64.so.1 /lib/linux-gnu/ld-2.35.so ;; \ + esac FROM alpine @@ -20,7 +24,7 @@ ENV LANG=en_US.UTF-8 \ TZ=UTC \ CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml -COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/ +COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.35.so /lib/ COPY --from=glibc-donor /etc/nsswitch.conf /etc/ COPY entrypoint.sh /entrypoint.sh diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 506a627b11c..0d5c983f5e6 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 +FROM ubuntu:22.04 # see https://github.com/moby/moby/issues/4032#issuecomment-192327844 # It could be removed after we move on a version 23:04+ From 2903227143360795fc4912322de9963ec7f8c3ef Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Wed, 6 Nov 2024 10:58:21 +0100 Subject: [PATCH 315/353] Remove strange wrong named dockerfile --- .../clickhouse-statelest-test-runner.Dockerfile | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 docker/test/stateless/clickhouse-statelest-test-runner.Dockerfile diff --git a/docker/test/stateless/clickhouse-statelest-test-runner.Dockerfile b/docker/test/stateless/clickhouse-statelest-test-runner.Dockerfile deleted file mode 100644 index a9802f6f1da..00000000000 --- a/docker/test/stateless/clickhouse-statelest-test-runner.Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# Since right now we can't set volumes to the docker during build, we split building container in stages: -# 1. build base container -# 2. run base conatiner with mounted volumes -# 3. commit container as image -FROM ubuntu:20.04 as clickhouse-test-runner-base - -# A volume where directory with clickhouse packages to be mounted, -# for later installing. -VOLUME /packages - -CMD apt-get update ;\ - DEBIAN_FRONTEND=noninteractive \ - apt install -y /packages/clickhouse-common-static_*.deb \ - /packages/clickhouse-client_*.deb \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* From 7b1de3fcf792aeae2cc2b197e841afcda9092654 Mon Sep 17 00:00:00 2001 From: "Mikhail f. 
Shiryaev" Date: Wed, 6 Nov 2024 11:12:26 +0100 Subject: [PATCH 316/353] We use `aarch64` everywhere in code, so the vars should reflect it --- tests/ci/ci_config.py | 54 ++++++++++++++++----------------- tests/ci/ci_definitions.py | 30 +++++++++--------- tests/ci/compatibility_check.py | 2 +- tests/ci/test_ci_config.py | 8 ++--- tests/ci/test_ci_options.py | 4 +-- 5 files changed, 49 insertions(+), 49 deletions(-) diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 6d23b594b24..67cdbbdcf6d 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -51,11 +51,11 @@ class CI: TAG_CONFIGS = { Tags.DO_NOT_TEST_LABEL: LabelConfig(run_jobs=[JobNames.STYLE_CHECK]), - Tags.CI_SET_ARM: LabelConfig( + Tags.CI_SET_AARCH64: LabelConfig( run_jobs=[ JobNames.STYLE_CHECK, BuildNames.PACKAGE_AARCH64, - JobNames.INTEGRATION_TEST_ARM, + JobNames.INTEGRATION_TEST_AARCH64, ] ), Tags.CI_SET_REQUIRED: LabelConfig( @@ -95,16 +95,16 @@ class CI: static_binary_name="aarch64", additional_pkgs=True, ), - runner_type=Runners.BUILDER_ARM, + runner_type=Runners.BUILDER_AARCH64, ), - BuildNames.PACKAGE_ARM_ASAN: CommonJobConfigs.BUILD.with_properties( + BuildNames.PACKAGE_AARCH64_ASAN: CommonJobConfigs.BUILD.with_properties( build_config=BuildConfig( - name=BuildNames.PACKAGE_ARM_ASAN, + name=BuildNames.PACKAGE_AARCH64_ASAN, compiler="clang-18-aarch64", sanitizer="address", package_type="deb", ), - runner_type=Runners.BUILDER_ARM, + runner_type=Runners.BUILDER_AARCH64, ), BuildNames.PACKAGE_ASAN: CommonJobConfigs.BUILD.with_properties( build_config=BuildConfig( @@ -276,16 +276,16 @@ class CI: JobNames.INSTALL_TEST_AMD: CommonJobConfigs.INSTALL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE] ), - JobNames.INSTALL_TEST_ARM: CommonJobConfigs.INSTALL_TEST.with_properties( + JobNames.INSTALL_TEST_AARCH64: CommonJobConfigs.INSTALL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], - runner_type=Runners.STYLE_CHECKER_ARM, + runner_type=Runners.STYLE_CHECKER_AARCH64, ), JobNames.STATEFUL_TEST_ASAN: CommonJobConfigs.STATEFUL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_ASAN] ), - JobNames.STATEFUL_TEST_ARM_ASAN: CommonJobConfigs.STATEFUL_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ARM_ASAN], - runner_type=Runners.FUNC_TESTER_ARM, + JobNames.STATEFUL_TEST_AARCH64_ASAN: CommonJobConfigs.STATEFUL_TEST.with_properties( + required_builds=[BuildNames.PACKAGE_AARCH64_ASAN], + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.STATEFUL_TEST_TSAN: CommonJobConfigs.STATEFUL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN] @@ -307,7 +307,7 @@ class CI: ), JobNames.STATEFUL_TEST_AARCH64: CommonJobConfigs.STATEFUL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], - runner_type=Runners.FUNC_TESTER_ARM, + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.STATEFUL_TEST_PARALLEL_REPL_RELEASE: CommonJobConfigs.STATEFUL_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE] @@ -335,10 +335,10 @@ class CI: JobNames.STATELESS_TEST_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_ASAN], num_batches=2 ), - JobNames.STATELESS_TEST_ARM_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( - required_builds=[BuildNames.PACKAGE_ARM_ASAN], + JobNames.STATELESS_TEST_AARCH64_ASAN: CommonJobConfigs.STATELESS_TEST.with_properties( + required_builds=[BuildNames.PACKAGE_AARCH64_ASAN], num_batches=2, - runner_type=Runners.FUNC_TESTER_ARM, + runner_type=Runners.FUNC_TESTER_AARCH64, ), 
JobNames.STATELESS_TEST_TSAN: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_TSAN], num_batches=4 @@ -360,7 +360,7 @@ class CI: ), JobNames.STATELESS_TEST_AARCH64: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], - runner_type=Runners.FUNC_TESTER_ARM, + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.STATELESS_TEST_OLD_ANALYZER_S3_REPLICATED_RELEASE: CommonJobConfigs.STATELESS_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=2 @@ -432,10 +432,10 @@ class CI: num_batches=6, timeout=9000, # the job timed out with default value (7200) ), - JobNames.INTEGRATION_TEST_ARM: CommonJobConfigs.INTEGRATION_TEST.with_properties( + JobNames.INTEGRATION_TEST_AARCH64: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], num_batches=6, - runner_type=Runners.FUNC_TESTER_ARM, + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.INTEGRATION_TEST: CommonJobConfigs.INTEGRATION_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], @@ -453,10 +453,10 @@ class CI: required_builds=[BuildNames.PACKAGE_RELEASE], required_on_release_branch=True, ), - JobNames.COMPATIBILITY_TEST_ARM: CommonJobConfigs.COMPATIBILITY_TEST.with_properties( + JobNames.COMPATIBILITY_TEST_AARCH64: CommonJobConfigs.COMPATIBILITY_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], required_on_release_branch=True, - runner_type=Runners.STYLE_CHECKER_ARM, + runner_type=Runners.STYLE_CHECKER_AARCH64, ), JobNames.UNIT_TEST: CommonJobConfigs.UNIT_TEST.with_properties( required_builds=[BuildNames.BINARY_RELEASE], @@ -499,22 +499,22 @@ class CI: required_builds=[BuildNames.BINARY_RELEASE], run_by_labels=[Labels.JEPSEN_TEST], run_command="jepsen_check.py keeper", - runner_type=Runners.STYLE_CHECKER_ARM, + runner_type=Runners.STYLE_CHECKER_AARCH64, ), JobNames.JEPSEN_SERVER: JobConfig( required_builds=[BuildNames.BINARY_RELEASE], run_by_labels=[Labels.JEPSEN_TEST], run_command="jepsen_check.py server", - runner_type=Runners.STYLE_CHECKER_ARM, + runner_type=Runners.STYLE_CHECKER_AARCH64, ), JobNames.PERFORMANCE_TEST_AMD64: CommonJobConfigs.PERF_TESTS.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], num_batches=4 ), - JobNames.PERFORMANCE_TEST_ARM64: CommonJobConfigs.PERF_TESTS.with_properties( + JobNames.PERFORMANCE_TEST_AARCH64: CommonJobConfigs.PERF_TESTS.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], num_batches=4, run_by_labels=[Labels.PR_PERFORMANCE], - runner_type=Runners.FUNC_TESTER_ARM, + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.SQLANCER: CommonJobConfigs.SQLLANCER_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], @@ -532,9 +532,9 @@ class CI: JobNames.CLICKBENCH_TEST: CommonJobConfigs.CLICKBENCH_TEST.with_properties( required_builds=[BuildNames.PACKAGE_RELEASE], ), - JobNames.CLICKBENCH_TEST_ARM: CommonJobConfigs.CLICKBENCH_TEST.with_properties( + JobNames.CLICKBENCH_TEST_AARCH64: CommonJobConfigs.CLICKBENCH_TEST.with_properties( required_builds=[BuildNames.PACKAGE_AARCH64], - runner_type=Runners.FUNC_TESTER_ARM, + runner_type=Runners.FUNC_TESTER_AARCH64, ), JobNames.LIBFUZZER_TEST: JobConfig( required_builds=[BuildNames.FUZZERS], @@ -572,7 +572,7 @@ class CI: ), JobNames.STYLE_CHECK: JobConfig( run_always=True, - runner_type=Runners.STYLE_CHECKER_ARM, + runner_type=Runners.STYLE_CHECKER_AARCH64, ), JobNames.BUGFIX_VALIDATE: JobConfig( run_by_labels=[Labels.PR_BUGFIX, 
Labels.PR_CRITICAL_BUGFIX], diff --git a/tests/ci/ci_definitions.py b/tests/ci/ci_definitions.py index dd86dc320c2..fb3e55fdbe3 100644 --- a/tests/ci/ci_definitions.py +++ b/tests/ci/ci_definitions.py @@ -58,11 +58,11 @@ class Runners(metaclass=WithIter): """ BUILDER = "builder" - BUILDER_ARM = "builder-aarch64" + BUILDER_AARCH64 = "builder-aarch64" STYLE_CHECKER = "style-checker" - STYLE_CHECKER_ARM = "style-checker-aarch64" + STYLE_CHECKER_AARCH64 = "style-checker-aarch64" FUNC_TESTER = "func-tester" - FUNC_TESTER_ARM = "func-tester-aarch64" + FUNC_TESTER_AARCH64 = "func-tester-aarch64" FUZZER_UNIT_TESTER = "fuzzer-unit-tester" @@ -78,7 +78,7 @@ class Tags(metaclass=WithIter): # to upload all binaries from build jobs UPLOAD_ALL_ARTIFACTS = "upload_all" CI_SET_SYNC = "ci_set_sync" - CI_SET_ARM = "ci_set_arm" + CI_SET_AARCH64 = "ci_set_aarch64" CI_SET_REQUIRED = "ci_set_required" CI_SET_BUILDS = "ci_set_builds" @@ -106,7 +106,7 @@ class BuildNames(metaclass=WithIter): PACKAGE_MSAN = "package_msan" PACKAGE_DEBUG = "package_debug" PACKAGE_AARCH64 = "package_aarch64" - PACKAGE_ARM_ASAN = "package_aarch64_asan" + PACKAGE_AARCH64_ASAN = "package_aarch64_asan" PACKAGE_RELEASE_COVERAGE = "package_release_coverage" BINARY_RELEASE = "binary_release" BINARY_TIDY = "binary_tidy" @@ -134,14 +134,14 @@ class JobNames(metaclass=WithIter): DOCKER_SERVER = "Docker server image" DOCKER_KEEPER = "Docker keeper image" INSTALL_TEST_AMD = "Install packages (release)" - INSTALL_TEST_ARM = "Install packages (aarch64)" + INSTALL_TEST_AARCH64 = "Install packages (aarch64)" STATELESS_TEST_DEBUG = "Stateless tests (debug)" STATELESS_TEST_RELEASE = "Stateless tests (release)" STATELESS_TEST_RELEASE_COVERAGE = "Stateless tests (coverage)" STATELESS_TEST_AARCH64 = "Stateless tests (aarch64)" STATELESS_TEST_ASAN = "Stateless tests (asan)" - STATELESS_TEST_ARM_ASAN = "Stateless tests (aarch64, asan)" + STATELESS_TEST_AARCH64_ASAN = "Stateless tests (aarch64, asan)" STATELESS_TEST_TSAN = "Stateless tests (tsan)" STATELESS_TEST_MSAN = "Stateless tests (msan)" STATELESS_TEST_UBSAN = "Stateless tests (ubsan)" @@ -158,7 +158,7 @@ class JobNames(metaclass=WithIter): STATEFUL_TEST_RELEASE_COVERAGE = "Stateful tests (coverage)" STATEFUL_TEST_AARCH64 = "Stateful tests (aarch64)" STATEFUL_TEST_ASAN = "Stateful tests (asan)" - STATEFUL_TEST_ARM_ASAN = "Stateful tests (aarch64, asan)" + STATEFUL_TEST_AARCH64_ASAN = "Stateful tests (aarch64, asan)" STATEFUL_TEST_TSAN = "Stateful tests (tsan)" STATEFUL_TEST_MSAN = "Stateful tests (msan)" STATEFUL_TEST_UBSAN = "Stateful tests (ubsan)" @@ -181,7 +181,7 @@ class JobNames(metaclass=WithIter): INTEGRATION_TEST_ASAN = "Integration tests (asan)" INTEGRATION_TEST_ASAN_OLD_ANALYZER = "Integration tests (asan, old analyzer)" INTEGRATION_TEST_TSAN = "Integration tests (tsan)" - INTEGRATION_TEST_ARM = "Integration tests (aarch64)" + INTEGRATION_TEST_AARCH64 = "Integration tests (aarch64)" INTEGRATION_TEST_FLAKY = "Integration tests flaky check (asan)" UPGRADE_TEST_DEBUG = "Upgrade check (debug)" @@ -205,7 +205,7 @@ class JobNames(metaclass=WithIter): JEPSEN_SERVER = "ClickHouse Server Jepsen" PERFORMANCE_TEST_AMD64 = "Performance Comparison (release)" - PERFORMANCE_TEST_ARM64 = "Performance Comparison (aarch64)" + PERFORMANCE_TEST_AARCH64 = "Performance Comparison (aarch64)" # SQL_LOGIC_TEST = "Sqllogic test (release)" @@ -214,10 +214,10 @@ class JobNames(metaclass=WithIter): SQLTEST = "SQLTest" COMPATIBILITY_TEST = "Compatibility check (release)" - COMPATIBILITY_TEST_ARM = "Compatibility check 
(aarch64)" + COMPATIBILITY_TEST_AARCH64 = "Compatibility check (aarch64)" CLICKBENCH_TEST = "ClickBench (release)" - CLICKBENCH_TEST_ARM = "ClickBench (aarch64)" + CLICKBENCH_TEST_AARCH64 = "ClickBench (aarch64)" LIBFUZZER_TEST = "libFuzzer tests" @@ -387,7 +387,7 @@ class CommonJobConfigs: "./tests/ci/upload_result_helper.py", ], ), - runner_type=Runners.STYLE_CHECKER_ARM, + runner_type=Runners.STYLE_CHECKER_AARCH64, disable_await=True, ) COMPATIBILITY_TEST = JobConfig( @@ -634,8 +634,8 @@ REQUIRED_CHECKS = [ JobNames.STATEFUL_TEST_RELEASE, JobNames.STATELESS_TEST_RELEASE, JobNames.STATELESS_TEST_ASAN, - JobNames.STATELESS_TEST_ARM_ASAN, - JobNames.STATEFUL_TEST_ARM_ASAN, + JobNames.STATELESS_TEST_AARCH64_ASAN, + JobNames.STATEFUL_TEST_AARCH64_ASAN, JobNames.STATELESS_TEST_FLAKY_ASAN, JobNames.STATEFUL_TEST_ASAN, JobNames.STYLE_CHECK, diff --git a/tests/ci/compatibility_check.py b/tests/ci/compatibility_check.py index bb0c717160e..38fb2eceb28 100644 --- a/tests/ci/compatibility_check.py +++ b/tests/ci/compatibility_check.py @@ -131,7 +131,7 @@ def main(): check_name = args.check_name or os.getenv("CHECK_NAME") assert check_name check_glibc = True - # currently hardcoded to x86, don't enable for ARM + # currently hardcoded to x86, don't enable for AARCH64 check_distributions = ( "aarch64" not in check_name.lower() and "arm64" not in check_name.lower() ) diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 0e396b827ea..03f28983262 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -36,11 +36,11 @@ class TestCIConfig(unittest.TestCase): elif "binary_" in job.lower() or "package_" in job.lower(): if job.lower() in ( CI.BuildNames.PACKAGE_AARCH64, - CI.BuildNames.PACKAGE_ARM_ASAN, + CI.BuildNames.PACKAGE_AARCH64_ASAN, ): self.assertTrue( - CI.JOB_CONFIGS[job].runner_type in (CI.Runners.BUILDER_ARM,), - f"Job [{job}] must have [{CI.Runners.BUILDER_ARM}] runner", + CI.JOB_CONFIGS[job].runner_type in (CI.Runners.BUILDER_AARCH64,), + f"Job [{job}] must have [{CI.Runners.BUILDER_AARCH64}] runner", ) else: self.assertTrue( @@ -96,7 +96,7 @@ class TestCIConfig(unittest.TestCase): else: self.assertTrue(CI.JOB_CONFIGS[job].build_config is None) if "asan" in job and "aarch" in job: - expected_builds = [CI.BuildNames.PACKAGE_ARM_ASAN] + expected_builds = [CI.BuildNames.PACKAGE_AARCH64_ASAN] elif "asan" in job: expected_builds = [CI.BuildNames.PACKAGE_ASAN] elif "msan" in job: diff --git a/tests/ci/test_ci_options.py b/tests/ci/test_ci_options.py index 536e18758f8..e1b780387e7 100644 --- a/tests/ci/test_ci_options.py +++ b/tests/ci/test_ci_options.py @@ -10,7 +10,7 @@ from ci_settings import CiSettings _TEST_BODY_1 = """ #### Run only: - [ ] Some Set -- [x] Integration tests (arm64) +- [x] Integration tests (aarch64) - [x] Integration tests - [x] Integration tests - [ ] Integration tests @@ -150,7 +150,7 @@ class TestCIOptions(unittest.TestCase): self.assertFalse(ci_options.no_ci_cache) self.assertTrue(ci_options.no_merge_commit) self.assertTrue(ci_options.woolen_wolfdog) - self.assertEqual(ci_options.ci_sets, ["ci_set_arm"]) + self.assertEqual(ci_options.ci_sets, ["ci_set_aarch64"]) self.assertCountEqual(ci_options.include_keywords, ["foo", "foo_bar"]) self.assertCountEqual(ci_options.exclude_keywords, ["foo", "foo_bar"]) From 52dfad190dc2bb938f68464d42f69bd80ea1b422 Mon Sep 17 00:00:00 2001 From: robot-clickhouse Date: Wed, 6 Nov 2024 15:46:58 +0000 Subject: [PATCH 317/353] Automatic style fix --- tests/ci/test_ci_config.py | 3 ++- 1 file changed, 2 
insertions(+), 1 deletion(-) diff --git a/tests/ci/test_ci_config.py b/tests/ci/test_ci_config.py index 03f28983262..65418310c31 100644 --- a/tests/ci/test_ci_config.py +++ b/tests/ci/test_ci_config.py @@ -39,7 +39,8 @@ class TestCIConfig(unittest.TestCase): CI.BuildNames.PACKAGE_AARCH64_ASAN, ): self.assertTrue( - CI.JOB_CONFIGS[job].runner_type in (CI.Runners.BUILDER_AARCH64,), + CI.JOB_CONFIGS[job].runner_type + in (CI.Runners.BUILDER_AARCH64,), f"Job [{job}] must have [{CI.Runners.BUILDER_AARCH64}] runner", ) else: From 8bb656ddec205c9836db55c8a459a6b9c2cbf3d1 Mon Sep 17 00:00:00 2001 From: divanik Date: Wed, 6 Nov 2024 15:55:41 +0000 Subject: [PATCH 318/353] Add context manager for partition manager --- tests/integration/test_quorum_inserts/test.py | 81 ++++++++++--------- 1 file changed, 43 insertions(+), 38 deletions(-) diff --git a/tests/integration/test_quorum_inserts/test.py b/tests/integration/test_quorum_inserts/test.py index a646319c5f9..5e4a960acdf 100644 --- a/tests/integration/test_quorum_inserts/test.py +++ b/tests/integration/test_quorum_inserts/test.py @@ -379,50 +379,55 @@ def test_insert_quorum_with_keeper_loss_connection(started_cluster): ) ) - pm = PartitionManager() - pm.drop_instance_zk_connections(zero) + with PartitionManager() as pm: + pm.drop_instance_zk_connections(zero) - retries = 0 - zk = cluster.get_kazoo_client("zoo1") - while True: - if ( - zk.exists(f"/clickhouse/tables/{table_name}/replicas/zero/is_active") - is None - ): - break - print("replica is still active") - time.sleep(1) - retries += 1 - if retries == 120: - raise Exception("Can not wait cluster replica inactive") + retries = 0 + zk = cluster.get_kazoo_client("zoo1") + while True: + if ( + zk.exists( + f"/clickhouse/tables/{table_name}/replicas/zero/is_active" + ) + is None + ): + break + print("replica is still active") + time.sleep(1) + retries += 1 + if retries == 120: + raise Exception("Can not wait cluster replica inactive") - first.query("SYSTEM ENABLE FAILPOINT finish_set_quorum_failed_parts") - quorum_fail_future = executor.submit( - lambda: first.query( - "SYSTEM WAIT FAILPOINT finish_set_quorum_failed_parts", timeout=300 + first.query("SYSTEM ENABLE FAILPOINT finish_set_quorum_failed_parts") + quorum_fail_future = executor.submit( + lambda: first.query( + "SYSTEM WAIT FAILPOINT finish_set_quorum_failed_parts", timeout=300 + ) ) - ) - first.query(f"SYSTEM START FETCHES {table_name}") + first.query(f"SYSTEM START FETCHES {table_name}") - concurrent.futures.wait([quorum_fail_future]) + concurrent.futures.wait([quorum_fail_future]) - assert quorum_fail_future.exception() is None + assert quorum_fail_future.exception() is None - zero.query("SYSTEM ENABLE FAILPOINT finish_clean_quorum_failed_parts") - clean_quorum_fail_parts_future = executor.submit( - lambda: first.query( - "SYSTEM WAIT FAILPOINT finish_clean_quorum_failed_parts", timeout=300 + zero.query("SYSTEM ENABLE FAILPOINT finish_clean_quorum_failed_parts") + clean_quorum_fail_parts_future = executor.submit( + lambda: first.query( + "SYSTEM WAIT FAILPOINT finish_clean_quorum_failed_parts", + timeout=300, + ) ) - ) - pm.restore_instance_zk_connections(zero) - concurrent.futures.wait([clean_quorum_fail_parts_future]) + pm.restore_instance_zk_connections(zero) + concurrent.futures.wait([clean_quorum_fail_parts_future]) - assert clean_quorum_fail_parts_future.exception() is None + assert clean_quorum_fail_parts_future.exception() is None - zero.query("SYSTEM DISABLE FAILPOINT replicated_merge_tree_insert_retry_pause") - 
concurrent.futures.wait([insert_future]) - assert insert_future.exception() is not None - assert not zero.contains_in_log("LOGICAL_ERROR") - assert zero.contains_in_log( - "fails to commit and will not retry or clean garbage" - ) + zero.query( + "SYSTEM DISABLE FAILPOINT replicated_merge_tree_insert_retry_pause" + ) + concurrent.futures.wait([insert_future]) + assert insert_future.exception() is not None + assert not zero.contains_in_log("LOGICAL_ERROR") + assert zero.contains_in_log( + "fails to commit and will not retry or clean garbage" + ) From e8a8a4f62eabf854ebabff367d500bcc52456e83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 6 Nov 2024 17:31:57 +0100 Subject: [PATCH 319/353] Add test to check that accessing system.functions does not populate query_log used_functions --- ...nctions_should_not_fill_query_log_functions.reference | 1 + ...tem_functions_should_not_fill_query_log_functions.sql | 9 +++++++++ 2 files changed, 10 insertions(+) create mode 100644 tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.reference create mode 100644 tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.sql diff --git a/tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.reference b/tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.reference new file mode 100644 index 00000000000..021c06382c8 --- /dev/null +++ b/tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.reference @@ -0,0 +1 @@ +[] ['equals'] [] diff --git a/tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.sql b/tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.sql new file mode 100644 index 00000000000..7e6f384c0a8 --- /dev/null +++ b/tests/queries/0_stateless/03262_system_functions_should_not_fill_query_log_functions.sql @@ -0,0 +1,9 @@ +SELECT * FROM system.functions WHERE name = 'bitShiftLeft' format Null; +SYSTEM FLUSH LOGS; +SELECT used_aggregate_functions, used_functions, used_table_functions +FROM system.query_log +WHERE + event_date >= yesterday() + AND type = 'QueryFinish' + AND current_database = currentDatabase() + AND query LIKE '%bitShiftLeft%'; From 530c04413eaf2839fb3fbdef3619628916e63405 Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Wed, 6 Nov 2024 19:59:41 +0300 Subject: [PATCH 320/353] Analyzer materialized view IN with CTE fix --- src/Analyzer/QueryNode.h | 12 ++++ src/Analyzer/Resolve/QueryAnalyzer.cpp | 48 +++++++++----- src/Analyzer/UnionNode.cpp | 21 +++++++ src/Analyzer/UnionNode.h | 3 + ...er_materialized_view_in_with_cte.reference | 1 + ...analyzer_materialized_view_in_with_cte.sql | 63 +++++++++++++++++++ ...zer_materialized_view_cte_nested.reference | 0 ..._analyzer_materialized_view_cte_nested.sql | 19 ++++++ 8 files changed, 150 insertions(+), 17 deletions(-) create mode 100644 tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.reference create mode 100644 tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.sql create mode 100644 tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference create mode 100644 tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.sql diff --git a/src/Analyzer/QueryNode.h b/src/Analyzer/QueryNode.h index aef0c8805bb..2333fc56218 100644 --- a/src/Analyzer/QueryNode.h +++ b/src/Analyzer/QueryNode.h @@ -602,9 +602,21 @@ public: return 
projection_columns; } + /// Returns true if query node is resolved, false otherwise + bool isResolved() const + { + return !projection_columns.empty(); + } + /// Resolve query node projection columns void resolveProjectionColumns(NamesAndTypes projection_columns_value); + /// Clear query node projection columns + void clearProjectionColumns() + { + projection_columns.clear(); + } + /// Remove unused projection columns void removeUnusedProjectionColumns(const std::unordered_set & used_projection_columns); diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index cb3087af707..c0a2de0f125 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -2958,27 +2958,28 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi /// Replace storage with values storage of insertion block if (StoragePtr storage = scope.context->getViewSource()) { - QueryTreeNodePtr table_expression; - /// Process possibly nested sub-selects - for (auto * query_node = in_second_argument->as(); query_node; query_node = table_expression->as()) - table_expression = extractLeftTableExpression(query_node->getJoinTree()); + QueryTreeNodePtr table_expression = in_second_argument; - if (table_expression) + /// Process possibly nested sub-selects + while (table_expression) { - if (auto * query_table_node = table_expression->as()) - { - if (query_table_node->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted()) - { - auto replacement_table_expression = std::make_shared(storage, scope.context); - if (std::optional table_expression_modifiers = query_table_node->getTableExpressionModifiers()) - replacement_table_expression->setTableExpressionModifiers(*table_expression_modifiers); - in_second_argument = in_second_argument->cloneAndReplace(table_expression, std::move(replacement_table_expression)); - } - } + if (auto * query_node = table_expression->as()) + table_expression = extractLeftTableExpression(query_node->getJoinTree()); + else if (auto * union_node = table_expression->as()) + table_expression = union_node->getQueries().getNodes().at(0); + else + break; + } + + auto * table_expression_table_node = table_expression->as(); + if (table_expression_table_node && + table_expression_table_node->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted()) + { + auto replacement_table_expression_table_node = table_expression_table_node->clone(); + replacement_table_expression_table_node->as().updateStorage(storage, scope.context); + in_second_argument = in_second_argument->cloneAndReplace(table_expression, std::move(replacement_table_expression_table_node)); } } - - resolveExpressionNode(in_second_argument, scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/); } /// Edge case when the first argument of IN is scalar subquery. @@ -5310,6 +5311,16 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier auto & query_node_typed = query_node->as(); + /** It is unsafe to call resolveQuery on already resolved query node, because during identifier resolution process + * we replace identifiers with expressions without aliases, also at the end of resolveQuery all aliases from all nodes will be removed. + * For subsequent resolveQuery executions it is possible to have wrong projection header, because for nodes + * with aliases projection name is alias. 
+ * + * If for client it is necessary to resolve query node after clone, client must clear projection columns from query node before resolve. + */ + if (query_node_typed.isResolved()) + return; + if (query_node_typed.isCTE()) ctes_in_resolve_process.insert(query_node_typed.getCTEName()); @@ -5675,6 +5686,9 @@ void QueryAnalyzer::resolveUnion(const QueryTreeNodePtr & union_node, Identifier { auto & union_node_typed = union_node->as(); + if (union_node_typed.isResolved()) + return; + if (union_node_typed.isCTE()) ctes_in_resolve_process.insert(union_node_typed.getCTEName()); diff --git a/src/Analyzer/UnionNode.cpp b/src/Analyzer/UnionNode.cpp index 6f70f01e519..545a6b2195b 100644 --- a/src/Analyzer/UnionNode.cpp +++ b/src/Analyzer/UnionNode.cpp @@ -35,6 +35,7 @@ namespace ErrorCodes { extern const int TYPE_MISMATCH; extern const int BAD_ARGUMENTS; + extern const int LOGICAL_ERROR; } UnionNode::UnionNode(ContextMutablePtr context_, SelectUnionMode union_mode_) @@ -50,6 +51,26 @@ UnionNode::UnionNode(ContextMutablePtr context_, SelectUnionMode union_mode_) children[queries_child_index] = std::make_shared(); } +bool UnionNode::isResolved() const +{ + for (const auto & query_node : getQueries().getNodes()) + { + bool is_resolved = false; + + if (auto * query_node_typed = query_node->as()) + is_resolved = query_node_typed->isResolved(); + else if (auto * union_node_typed = query_node->as()) + is_resolved = union_node_typed->isResolved(); + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected query tree node type in UNION node"); + + if (!is_resolved) + return false; + } + + return true; +} + NamesAndTypes UnionNode::computeProjectionColumns() const { if (recursive_cte_table) diff --git a/src/Analyzer/UnionNode.h b/src/Analyzer/UnionNode.h index 40baad1ad57..85d6afb1e47 100644 --- a/src/Analyzer/UnionNode.h +++ b/src/Analyzer/UnionNode.h @@ -163,6 +163,9 @@ public: return children[queries_child_index]; } + /// Returns true if union node is resolved, false otherwise + bool isResolved() const; + /// Compute union node projection columns NamesAndTypes computeProjectionColumns() const; diff --git a/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.reference b/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.reference new file mode 100644 index 00000000000..5ddf8439af5 --- /dev/null +++ b/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.reference @@ -0,0 +1 @@ +1 2 \N test diff --git a/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.sql b/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.sql new file mode 100644 index 00000000000..4543d336d14 --- /dev/null +++ b/tests/queries/0_stateless/03262_analyzer_materialized_view_in_with_cte.sql @@ -0,0 +1,63 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS mv_test; +DROP TABLE IF EXISTS mv_test_target; +DROP VIEW IF EXISTS mv_test_mv; + +CREATE TABLE mv_test +( + `id` UInt64, + `ref_id` UInt64, + `final_id` Nullable(UInt64), + `display` String +) +ENGINE = Log; + +CREATE TABLE mv_test_target +( + `id` UInt64, + `ref_id` UInt64, + `final_id` Nullable(UInt64), + `display` String +) +ENGINE = Log; + +CREATE MATERIALIZED VIEW mv_test_mv TO mv_test_target +( + `id` UInt64, + `ref_id` UInt64, + `final_id` Nullable(UInt64), + `display` String +) +AS WITH + tester AS + ( + SELECT + id, + ref_id, + final_id, + display + FROM mv_test + ), + id_set AS + ( + SELECT + display, + max(id) AS max_id + FROM mv_test + GROUP BY display + ) +SELECT 
* +FROM tester +WHERE id IN ( + SELECT max_id + FROM id_set +); + +INSERT INTO mv_test ( id, ref_id, display) values ( 1, 2, 'test'); + +SELECT * FROM mv_test_target; + +DROP VIEW mv_test_mv; +DROP TABLE mv_test_target; +DROP TABLE mv_test; diff --git a/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.sql b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.sql new file mode 100644 index 00000000000..4ea853a7c22 --- /dev/null +++ b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.sql @@ -0,0 +1,19 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +DROP VIEW IF EXISTS test_mv; + +CREATE TABLE test_table ENGINE = MergeTree ORDER BY tuple() AS SELECT 1 as col1; + +CREATE MATERIALIZED VIEW test_mv ENGINE = MergeTree ORDER BY tuple() AS +WITH + subquery_on_source AS (SELECT col1 AS aliased FROM test_table), + output AS (SELECT * FROM test_table WHERE col1 IN (SELECT aliased FROM subquery_on_source)) +SELECT * FROM output; + +INSERT INTO test_table VALUES (2); + +SELECT * FROM test_mv; + +DROP VIEW test_mv; +DROP TABLE test_table; From ea3f9e582184b024bf0cb83c637bed20de5f3cda Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 6 Nov 2024 17:48:04 +0000 Subject: [PATCH 321/353] Add missing reference file --- .../0_stateless/02354_vector_search_multiple_indexes.reference | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference diff --git a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference new file mode 100644 index 00000000000..e69de29bb2d From de21dde4cfac2c2fcb7257d018afda9e99c19a11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Wed, 6 Nov 2024 19:26:39 +0100 Subject: [PATCH 322/353] Avoid crash when using UDF in a constraint --- .../UserDefinedSQLFunctionVisitor.cpp | 99 +++---------------- src/Parsers/ASTColumnDeclaration.cpp | 10 ++ src/Parsers/ASTColumnDeclaration.h | 3 + .../03262_udf_in_constraint.reference | 2 + .../0_stateless/03262_udf_in_constraint.sh | 17 ++++ 5 files changed, 45 insertions(+), 86 deletions(-) create mode 100644 tests/queries/0_stateless/03262_udf_in_constraint.reference create mode 100755 tests/queries/0_stateless/03262_udf_in_constraint.sh diff --git a/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp b/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp index ebd65471449..a04b8d7b998 100644 --- a/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp +++ b/src/Functions/UserDefined/UserDefinedSQLFunctionVisitor.cpp @@ -24,92 +24,7 @@ namespace ErrorCodes void UserDefinedSQLFunctionVisitor::visit(ASTPtr & ast) { - if (!ast) - { - chassert(false); - return; - } - - /// FIXME: this helper should use updatePointerToChild(), but - /// forEachPointerToChild() is not implemented for ASTColumnDeclaration - /// (and also some members should be adjusted for this). 
- const auto visit_child_with_shared_ptr = [&](ASTPtr & child) - { - if (!child) - return; - - auto * old_value = child.get(); - visit(child); - - // child did not change - if (old_value == child.get()) - return; - - // child changed, we need to modify it in the list of children of the parent also - for (auto & current_child : ast->children) - { - if (current_child.get() == old_value) - current_child = child; - } - }; - - if (auto * col_decl = ast->as()) - { - visit_child_with_shared_ptr(col_decl->default_expression); - visit_child_with_shared_ptr(col_decl->ttl); - return; - } - - if (auto * storage = ast->as()) - { - const auto visit_child = [&](IAST * & child) - { - if (!child) - return; - - if (const auto * function = child->template as()) - { - std::unordered_set udf_in_replace_process; - auto replace_result = tryToReplaceFunction(*function, udf_in_replace_process); - if (replace_result) - ast->setOrReplace(child, replace_result); - } - - visit(child); - }; - - visit_child(storage->partition_by); - visit_child(storage->primary_key); - visit_child(storage->order_by); - visit_child(storage->sample_by); - visit_child(storage->ttl_table); - - return; - } - - if (auto * alter = ast->as()) - { - /// It is OK to use updatePointerToChild() because ASTAlterCommand implements forEachPointerToChild() - const auto visit_child_update_parent = [&](ASTPtr & child) - { - if (!child) - return; - - auto * old_ptr = child.get(); - visit(child); - auto * new_ptr = child.get(); - - /// Some AST classes have naked pointers to children elements as members. - /// We have to replace them if the child was replaced. - if (new_ptr != old_ptr) - ast->updatePointerToChild(old_ptr, new_ptr); - }; - - for (auto & children : alter->children) - visit_child_update_parent(children); - - return; - } + chassert(ast); if (const auto * function = ast->template as()) { @@ -120,7 +35,19 @@ void UserDefinedSQLFunctionVisitor::visit(ASTPtr & ast) } for (auto & child : ast->children) + { + if (!child) + return; + + auto * old_ptr = child.get(); visit(child); + auto * new_ptr = child.get(); + + /// Some AST classes have naked pointers to children elements as members. + /// We have to replace them if the child was replaced. 
+ if (new_ptr != old_ptr) + ast->updatePointerToChild(old_ptr, new_ptr); + } } void UserDefinedSQLFunctionVisitor::visit(IAST * ast) diff --git a/src/Parsers/ASTColumnDeclaration.cpp b/src/Parsers/ASTColumnDeclaration.cpp index e7c3fdbb548..1c7d72bafcc 100644 --- a/src/Parsers/ASTColumnDeclaration.cpp +++ b/src/Parsers/ASTColumnDeclaration.cpp @@ -128,4 +128,14 @@ void ASTColumnDeclaration::formatImpl(const FormatSettings & format_settings, Fo } } +void ASTColumnDeclaration::forEachPointerToChild(std::function f) +{ + f(reinterpret_cast(&default_expression)); + f(reinterpret_cast(&comment)); + f(reinterpret_cast(&codec)); + f(reinterpret_cast(&statistics_desc)); + f(reinterpret_cast(&ttl)); + f(reinterpret_cast(&collation)); + f(reinterpret_cast(&settings)); +} } diff --git a/src/Parsers/ASTColumnDeclaration.h b/src/Parsers/ASTColumnDeclaration.h index 914916d5074..0c5076f0201 100644 --- a/src/Parsers/ASTColumnDeclaration.h +++ b/src/Parsers/ASTColumnDeclaration.h @@ -29,6 +29,9 @@ public: ASTPtr clone() const override; void formatImpl(const FormatSettings & format_settings, FormatState & state, FormatStateStacked frame) const override; + +protected: + void forEachPointerToChild(std::function f) override; }; } diff --git a/tests/queries/0_stateless/03262_udf_in_constraint.reference b/tests/queries/0_stateless/03262_udf_in_constraint.reference new file mode 100644 index 00000000000..29d403b85a8 --- /dev/null +++ b/tests/queries/0_stateless/03262_udf_in_constraint.reference @@ -0,0 +1,2 @@ +CREATE TABLE default.t0\n(\n `c0` Int32,\n CONSTRAINT c1 CHECK c0 > 5\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +10 diff --git a/tests/queries/0_stateless/03262_udf_in_constraint.sh b/tests/queries/0_stateless/03262_udf_in_constraint.sh new file mode 100755 index 00000000000..3c36e7caeb4 --- /dev/null +++ b/tests/queries/0_stateless/03262_udf_in_constraint.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q " + CREATE FUNCTION ${CLICKHOUSE_DATABASE}_function AS (x) -> x > 5; + CREATE TABLE t0 (c0 Int, CONSTRAINT c1 CHECK ${CLICKHOUSE_DATABASE}_function(c0)) ENGINE = MergeTree() ORDER BY tuple(); + SHOW CREATE TABLE t0; + INSERT INTO t0(c0) VALUES (10); + INSERT INTO t0(c0) VALUES (3); -- {serverError VIOLATED_CONSTRAINT} + SELECT * FROM t0; + + DROP TABLE t0; + DROP FUNCTION ${CLICKHOUSE_DATABASE}_function; +" From c55840794195689299ccb1b9f838fdb3d1a7edfa Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 6 Nov 2024 19:53:01 +0000 Subject: [PATCH 323/353] Remove duplicate test (same as 02354_vector_search_bugs_multiple_indexes.sql) --- ...02354_vector_search_multiple_indexes.reference | 0 .../02354_vector_search_multiple_indexes.sql | 15 --------------- 2 files changed, 15 deletions(-) delete mode 100644 tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference delete mode 100644 tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql diff --git a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql deleted file mode 100644 index aedba286a9f..00000000000 --- a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql +++ /dev/null @@ -1,15 +0,0 @@ --- Tags: no-fasttest, no-ordinary-database - --- Tests that multiple vector similarity indexes can be created on the same column (even if that makes no sense) - -SET allow_experimental_vector_similarity_index = 1; - -DROP TABLE IF EXISTS tab; - -CREATE TABLE tab (id Int32, vec Array(Float32), PRIMARY KEY id, INDEX vec_idx(vec) TYPE vector_similarity('hnsw', 'L2Distance')); - -ALTER TABLE tab ADD INDEX idx(vec) TYPE minmax; -ALTER TABLE tab ADD INDEX vec_idx1(vec) TYPE vector_similarity('hnsw', 'cosineDistance'); -ALTER TABLE tab ADD INDEX vec_idx2(vec) TYPE vector_similarity('hnsw', 'L2Distance'); -- silly but creating the same index also works for non-vector indexes ... - -DROP TABLE tab; From 26f0ba2c4ceb4b6d52f159943de63d4f2ca10520 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Wed, 6 Nov 2024 21:23:06 +0100 Subject: [PATCH 324/353] Update compatibility section for clickhouse-server docker image --- docker/server/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/server/README.md b/docker/server/README.md index 65239126790..1dc636414ac 100644 --- a/docker/server/README.md +++ b/docker/server/README.md @@ -20,6 +20,7 @@ For more information and documentation see https://clickhouse.com/. - The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3. - The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A). +- Since the Clickhouse 24.11 Ubuntu images started using `ubuntu:22.04` as its base image. 
It requires Docker version >= `20.10.10`, which contains this [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround, you could use `docker run [--privileged | --security-opt seccomp=unconfined]` instead; however, that has security implications. ## How to use this image From 157f745136094eb2eaeae72f17d103928194fd52 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Wed, 6 Nov 2024 22:09:12 +0100 Subject: [PATCH 325/353] Write a simple troubleshooting for an old docker and clickhouse-server --- docs/en/operations/_troubleshooting.md | 28 ++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/docs/en/operations/_troubleshooting.md b/docs/en/operations/_troubleshooting.md index 77389782675..f0ee1ca1d29 100644 --- a/docs/en/operations/_troubleshooting.md +++ b/docs/en/operations/_troubleshooting.md @@ -65,6 +65,34 @@ sudo rm -f /etc/yum.repos.d/clickhouse.repo After that follow the [install guide](../getting-started/install.md#from-rpm-packages) +### You Can't Run Docker Container + +You are running a simple `docker run clickhouse/clickhouse-server` and it crashes with a stack trace similar to the following: + +``` +$ docker run -it clickhouse/clickhouse-server +........ +2024.11.06 21:04:48.912036 [ 1 ] {} SentryWriter: Sending crash reports is disabled +Poco::Exception. Code: 1000, e.code() = 0, System exception: cannot start thread, Stack trace (when copying this message, always include the lines below): + +0. Poco::ThreadImpl::startImpl(Poco::SharedPtr>) @ 0x00000000157c7b34 +1. Poco::Thread::start(Poco::Runnable&) @ 0x00000000157c8a0e +2. BaseDaemon::initializeTerminationAndSignalProcessing() @ 0x000000000d267a14 +3. BaseDaemon::initialize(Poco::Util::Application&) @ 0x000000000d2652cb +4. DB::Server::initialize(Poco::Util::Application&) @ 0x000000000d128b38 +5. Poco::Util::Application::run() @ 0x000000001581cfda +6. DB::Server::run() @ 0x000000000d1288f0 +7. Poco::Util::ServerApplication::run(int, char**) @ 0x0000000015825e27 +8. mainEntryClickHouseServer(int, char**) @ 0x000000000d125b38 +9. main @ 0x0000000007ea4eee +10. ? @ 0x00007f67ff946d90 +11. ? @ 0x00007f67ff946e40 +12. _start @ 0x00000000062e802e + (version 24.10.1.2812 (official build)) +``` + +The reason is an old Docker daemon with a version lower than `20.10.10`. A way to fix it is either to upgrade the daemon or to run `docker run [--privileged | --security-opt seccomp=unconfined]`. The latter has security implications.
+ ## Connecting to the Server {#troubleshooting-accepts-no-connections} Possible issues: From 29aed6a58629dadca25840e976a4e680ac55a963 Mon Sep 17 00:00:00 2001 From: Michael Kolupaev Date: Wed, 6 Nov 2024 23:38:56 +0000 Subject: [PATCH 326/353] Fix compatibility with refreshable materialized views created by old clickhouse servers --- src/Storages/StorageMaterializedView.cpp | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index d047b28e076..d56b09eec67 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -228,10 +228,20 @@ StorageMaterializedView::StorageMaterializedView( if (!fixed_uuid) { - if (to_inner_uuid != UUIDHelpers::Nil) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "TO INNER UUID is not allowed for materialized views with REFRESH without APPEND"); - if (to_table_id.hasUUID()) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "explicit UUID is not allowed for target table of materialized view with REFRESH without APPEND"); + if (mode >= LoadingStrictnessLevel::ATTACH) + { + /// Old versions of ClickHouse (when refreshable MV was experimental) could add useless + /// UUIDs to attach queries. + to_table_id.uuid = UUIDHelpers::Nil; + to_inner_uuid = UUIDHelpers::Nil; + } + else + { + if (to_inner_uuid != UUIDHelpers::Nil) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "TO INNER UUID is not allowed for materialized views with REFRESH without APPEND"); + if (to_table_id.hasUUID()) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "explicit UUID is not allowed for target table of materialized view with REFRESH without APPEND"); + } } if (!has_inner_table) From 8fb52b72b5bc1a4324cedaf2171e1af4e777f1af Mon Sep 17 00:00:00 2001 From: cangyin Date: Fri, 14 Jun 2024 12:58:46 +0000 Subject: [PATCH 327/353] Fix use-after-dtor logic in hashtable destroyElements --- src/Common/HashTable/HashTable.h | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/src/Common/HashTable/HashTable.h b/src/Common/HashTable/HashTable.h index f4374a0f2ca..d379c3f6a87 100644 --- a/src/Common/HashTable/HashTable.h +++ b/src/Common/HashTable/HashTable.h @@ -658,16 +658,11 @@ protected: { if (!std::is_trivially_destructible_v) { - for (iterator it = begin(), it_end = end(); it != it_end; ++it) + for (iterator it = begin(), it_end = end(); it != it_end;) { - it.ptr->~Cell(); - /// In case of poison_in_dtor=1 it will be poisoned, - /// but it maybe used later, during iteration. - /// - /// NOTE, that technically this is UB [1], but OK for now. 
- /// - /// [1]: https://github.com/google/sanitizers/issues/854#issuecomment-329661378 - __msan_unpoison(it.ptr, sizeof(*it.ptr)); + auto ptr = it.ptr; + ++it; + ptr->~Cell(); } /// Everything had been destroyed in the loop above, reset the flag From 042e82c6a9cbfa97d68cebb10e88c412c435cd3b Mon Sep 17 00:00:00 2001 From: Maksim Kita Date: Thu, 7 Nov 2024 13:10:51 +0300 Subject: [PATCH 328/353] Fix tests --- src/Analyzer/Resolve/QueryAnalyzer.cpp | 3 ++- .../03263_analyzer_materialized_view_cte_nested.reference | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index c0a2de0f125..c2eac8d008b 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -2971,7 +2971,8 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi break; } - auto * table_expression_table_node = table_expression->as(); + TableNode * table_expression_table_node = table_expression ? table_expression->as() : nullptr; + if (table_expression_table_node && table_expression_table_node->getStorageID().getFullNameNotQuoted() == storage->getStorageID().getFullNameNotQuoted()) { diff --git a/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference index e69de29bb2d..0cfbf08886f 100644 --- a/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference +++ b/tests/queries/0_stateless/03263_analyzer_materialized_view_cte_nested.reference @@ -0,0 +1 @@ +2 From e7ad525e0033e1a42cfe6ba35e2a9f0ecd2088b0 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Wed, 6 Nov 2024 20:03:14 +0000 Subject: [PATCH 329/353] Re-introduce support for legacy index creation syntax --- .../table-engines/mergetree-family/annindexes.md | 6 +++--- .../MergeTree/MergeTreeIndexVectorSimilarity.cpp | 6 ++++-- ...or_search_legacy_index_creation_syntax.reference | 0 ...4_vector_search_legacy_index_creation_syntax.sql | 13 +++++++++++++ 4 files changed, 20 insertions(+), 5 deletions(-) create mode 100644 tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.reference create mode 100644 tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.sql diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index dc12a60e8ef..fcdc16637e6 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -54,7 +54,7 @@ Parameters: - `distance_function`: either `L2Distance` (the [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) - the length of a line between two points in Euclidean space), or `cosineDistance` (the [cosine distance](https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance)- the angle between two non-zero vectors). 
-- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing the vector with reduced precision (optional, default: `bf16`) +- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing vectors with reduced precision (optional, default: `bf16`) - `hnsw_max_connections_per_layer`: the number of neighbors per HNSW graph node, also known as `M` in the [HNSW paper](https://doi.org/10.1109/TPAMI.2018.2889473) (optional, default: 32) - `hnsw_candidate_list_size_for_construction`: the size of the dynamic candidate list when constructing the HNSW graph, also known as @@ -92,8 +92,8 @@ Vector similarity indexes currently support two distance functions: - `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors ([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)). -Vector similarity indexes allows storing the vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16` or `i8`. -If no scalar kind was specified during index creation, `f16` is used as default. +Vector similarity indexes allow storing the vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16`, `bf16`, +and `i8`. If no scalar kind was specified during index creation, `bf16` is used as default. For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no distance function was specified during index creation, `L2Distance` is used as default. diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index f95b840e223..cca3ca6ce3b 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -531,15 +531,17 @@ void vectorSimilarityIndexValidator(const IndexDescription & index, bool /* atta { const bool has_two_args = (index.arguments.size() == 2); const bool has_five_args = (index.arguments.size() == 5); + const bool has_six_args = (index.arguments.size() == 6); /// Legacy index creation syntax before #70616. Supported only to be able to load old tables, can be removed mid-2025. + /// The 6th argument (ef_search) is ignored.
/// Check number and type of arguments - if (!has_two_args && !has_five_args) + if (!has_two_args && !has_five_args && !has_six_args) throw Exception(ErrorCodes::INCORRECT_QUERY, "Vector similarity index must have two or five arguments"); if (index.arguments[0].getType() != Field::Types::String) throw Exception(ErrorCodes::INCORRECT_QUERY, "First argument of vector similarity index (method) must be of type String"); if (index.arguments[1].getType() != Field::Types::String) throw Exception(ErrorCodes::INCORRECT_QUERY, "Second argument of vector similarity index (metric) must be of type String"); - if (has_five_args) + if (has_five_args || has_six_args) { if (index.arguments[2].getType() != Field::Types::String) throw Exception(ErrorCodes::INCORRECT_QUERY, "Third argument of vector similarity index (quantization) must be of type String"); diff --git a/tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.reference b/tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.sql b/tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.sql new file mode 100644 index 00000000000..e5dbc6aa6a9 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_legacy_index_creation_syntax.sql @@ -0,0 +1,13 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests the legacy syntax to create vector similarity indexes before #70616. +-- Support for this syntax can be removed after mid-2025. + +SET allow_experimental_vector_similarity_index = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f32', 42, 99, 113)) ENGINE = MergeTree ORDER BY id; -- Note the 6th parameter: 133 + +DROP TABLE tab; + From cf594010c862a568b07a440c4d70f9d59319b1a7 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 7 Nov 2024 09:43:42 +0000 Subject: [PATCH 330/353] Rename some tests for more consistency --- ...=> 02354_vector_search_adaptive_index_granularity.reference} | 0 ...y.sql => 02354_vector_search_adaptive_index_granularity.sql} | 0 ...=> 02354_vector_search_and_other_skipping_indexes.reference} | 0 ...1.sql => 02354_vector_search_and_other_skipping_indexes.sql} | 2 +- ...ence => 02354_vector_search_different_array_sizes.reference} | 0 ..._sizes.sql => 02354_vector_search_different_array_sizes.sql} | 0 ...2354_vector_search_empty_arrays_or_default_values.reference} | 0 ...l => 02354_vector_search_empty_arrays_or_default_values.sql} | 2 +- ...reference => 02354_vector_search_multiple_indexes.reference} | 0 ...ple_indexes.sql => 02354_vector_search_multiple_indexes.sql} | 0 ...s.reference => 02354_vector_search_multiple_marks.reference} | 0 ...ultiple_marks.sql => 02354_vector_search_multiple_marks.sql} | 0 ...g_69085.reference => 02354_vector_search_subquery.reference} | 0 ...or_search_bug_69085.sql => 02354_vector_search_subquery.sql} | 2 +- 14 files changed, 3 insertions(+), 3 deletions(-) rename tests/queries/0_stateless/{02354_vector_search_bug_52282.reference => 02354_vector_search_adaptive_index_granularity.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_adaptive_index_granularity.sql => 02354_vector_search_adaptive_index_granularity.sql} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_71381.reference => 02354_vector_search_and_other_skipping_indexes.reference} 
(100%) rename tests/queries/0_stateless/{02354_vector_search_bug_71381.sql => 02354_vector_search_and_other_skipping_indexes.sql} (79%) rename tests/queries/0_stateless/{02354_vector_search_bug_adaptive_index_granularity.reference => 02354_vector_search_different_array_sizes.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_different_array_sizes.sql => 02354_vector_search_different_array_sizes.sql} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_different_array_sizes.reference => 02354_vector_search_empty_arrays_or_default_values.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_52282.sql => 02354_vector_search_empty_arrays_or_default_values.sql} (80%) rename tests/queries/0_stateless/{02354_vector_search_bug_multiple_indexes.reference => 02354_vector_search_multiple_indexes.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_multiple_indexes.sql => 02354_vector_search_multiple_indexes.sql} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_multiple_marks.reference => 02354_vector_search_multiple_marks.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_multiple_marks.sql => 02354_vector_search_multiple_marks.sql} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_69085.reference => 02354_vector_search_subquery.reference} (100%) rename tests/queries/0_stateless/{02354_vector_search_bug_69085.sql => 02354_vector_search_subquery.sql} (93%) diff --git a/tests/queries/0_stateless/02354_vector_search_bug_52282.reference b/tests/queries/0_stateless/02354_vector_search_adaptive_index_granularity.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_52282.reference rename to tests/queries/0_stateless/02354_vector_search_adaptive_index_granularity.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.sql b/tests/queries/0_stateless/02354_vector_search_adaptive_index_granularity.sql similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.sql rename to tests/queries/0_stateless/02354_vector_search_adaptive_index_granularity.sql diff --git a/tests/queries/0_stateless/02354_vector_search_bug_71381.reference b/tests/queries/0_stateless/02354_vector_search_and_other_skipping_indexes.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_71381.reference rename to tests/queries/0_stateless/02354_vector_search_and_other_skipping_indexes.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_71381.sql b/tests/queries/0_stateless/02354_vector_search_and_other_skipping_indexes.sql similarity index 79% rename from tests/queries/0_stateless/02354_vector_search_bug_71381.sql rename to tests/queries/0_stateless/02354_vector_search_and_other_skipping_indexes.sql index 9e3246700b8..386d3b6e26e 100644 --- a/tests/queries/0_stateless/02354_vector_search_bug_71381.sql +++ b/tests/queries/0_stateless/02354_vector_search_and_other_skipping_indexes.sql @@ -2,7 +2,7 @@ SET allow_experimental_vector_similarity_index = 1; --- Issue #71381: Usage of vector similarity index and further skipping indexes on the same table +-- Usage of vector similarity index and further skipping indexes on the same table (issue #71381) DROP TABLE IF EXISTS tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.reference 
b/tests/queries/0_stateless/02354_vector_search_different_array_sizes.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_adaptive_index_granularity.reference rename to tests/queries/0_stateless/02354_vector_search_different_array_sizes.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.sql b/tests/queries/0_stateless/02354_vector_search_different_array_sizes.sql similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.sql rename to tests/queries/0_stateless/02354_vector_search_different_array_sizes.sql diff --git a/tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.reference b/tests/queries/0_stateless/02354_vector_search_empty_arrays_or_default_values.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_different_array_sizes.reference rename to tests/queries/0_stateless/02354_vector_search_empty_arrays_or_default_values.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_52282.sql b/tests/queries/0_stateless/02354_vector_search_empty_arrays_or_default_values.sql similarity index 80% rename from tests/queries/0_stateless/02354_vector_search_bug_52282.sql rename to tests/queries/0_stateless/02354_vector_search_empty_arrays_or_default_values.sql index b8066ce278a..e24b1a527be 100644 --- a/tests/queries/0_stateless/02354_vector_search_bug_52282.sql +++ b/tests/queries/0_stateless/02354_vector_search_empty_arrays_or_default_values.sql @@ -2,7 +2,7 @@ SET allow_experimental_vector_similarity_index = 1; --- Issue #52258: Vector similarity indexes must reject empty Arrays or Arrays with default values +-- Vector similarity indexes must reject empty Arrays or Arrays with default values (issue #52258) DROP TABLE IF EXISTS tab; diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.reference b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.reference rename to tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.sql b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_multiple_indexes.sql rename to tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.reference b/tests/queries/0_stateless/02354_vector_search_multiple_marks.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.reference rename to tests/queries/0_stateless/02354_vector_search_multiple_marks.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.sql b/tests/queries/0_stateless/02354_vector_search_multiple_marks.sql similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_multiple_marks.sql rename to tests/queries/0_stateless/02354_vector_search_multiple_marks.sql diff --git a/tests/queries/0_stateless/02354_vector_search_bug_69085.reference b/tests/queries/0_stateless/02354_vector_search_subquery.reference similarity index 100% rename from tests/queries/0_stateless/02354_vector_search_bug_69085.reference rename to 
tests/queries/0_stateless/02354_vector_search_subquery.reference diff --git a/tests/queries/0_stateless/02354_vector_search_bug_69085.sql b/tests/queries/0_stateless/02354_vector_search_subquery.sql similarity index 93% rename from tests/queries/0_stateless/02354_vector_search_bug_69085.sql rename to tests/queries/0_stateless/02354_vector_search_subquery.sql index 4dbcdf66e36..65ad0dbcd97 100644 --- a/tests/queries/0_stateless/02354_vector_search_bug_69085.sql +++ b/tests/queries/0_stateless/02354_vector_search_subquery.sql @@ -3,7 +3,7 @@ SET allow_experimental_vector_similarity_index = 1; SET enable_analyzer = 0; --- Issue #69085: Reference vector for vector search is computed by a subquery +-- Reference vector for vector search is computed by a subquery (issue #69085) DROP TABLE IF EXISTS tab; From be10aba49aca0d3253e4c714eabed196fe6411e2 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 7 Nov 2024 10:42:51 +0000 Subject: [PATCH 331/353] Minor cleanup --- .../MergeTree/MergeTreeIndexVectorSimilarity.cpp | 9 +++------ src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h | 3 --- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp index cca3ca6ce3b..0b17fa05072 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -178,23 +178,20 @@ String USearchIndexWithSerialization::Statistics::toString() const } MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, - const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_, UsearchHnswParams usearch_hnsw_params_) - : MergeTreeIndexGranuleVectorSimilarity(index_name_, index_sample_block_, metric_kind_, scalar_kind_, usearch_hnsw_params_, nullptr) + : MergeTreeIndexGranuleVectorSimilarity(index_name_, metric_kind_, scalar_kind_, usearch_hnsw_params_, nullptr) { } MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, - const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_, UsearchHnswParams usearch_hnsw_params_, USearchIndexWithSerializationPtr index_) : index_name(index_name_) - , index_sample_block(index_sample_block_) , metric_kind(metric_kind_) , scalar_kind(scalar_kind_) , usearch_hnsw_params(usearch_hnsw_params_) @@ -261,7 +258,7 @@ MergeTreeIndexAggregatorVectorSimilarity::MergeTreeIndexAggregatorVectorSimilari MergeTreeIndexGranulePtr MergeTreeIndexAggregatorVectorSimilarity::getGranuleAndReset() { - auto granule = std::make_shared(index_name, index_sample_block, metric_kind, scalar_kind, usearch_hnsw_params, index); + auto granule = std::make_shared(index_name, metric_kind, scalar_kind, usearch_hnsw_params, index); index = nullptr; return granule; } @@ -490,7 +487,7 @@ MergeTreeIndexVectorSimilarity::MergeTreeIndexVectorSimilarity( MergeTreeIndexGranulePtr MergeTreeIndexVectorSimilarity::createIndexGranule() const { - return std::make_shared(index.name, index.sample_block, metric_kind, scalar_kind, usearch_hnsw_params); + return std::make_shared(index.name, metric_kind, scalar_kind, usearch_hnsw_params); } MergeTreeIndexAggregatorPtr MergeTreeIndexVectorSimilarity::createIndexAggregator(const MergeTreeWriterSettings & /*settings*/) const diff --git 
a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h index 9a81e168393..fe5049daf77 100644 --- a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h @@ -69,14 +69,12 @@ struct MergeTreeIndexGranuleVectorSimilarity final : public IMergeTreeIndexGranu { MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, - const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_, UsearchHnswParams usearch_hnsw_params_); MergeTreeIndexGranuleVectorSimilarity( const String & index_name_, - const Block & index_sample_block_, unum::usearch::metric_kind_t metric_kind_, unum::usearch::scalar_kind_t scalar_kind_, UsearchHnswParams usearch_hnsw_params_, @@ -90,7 +88,6 @@ struct MergeTreeIndexGranuleVectorSimilarity final : public IMergeTreeIndexGranu bool empty() const override { return !index || index->size() == 0; } const String index_name; - const Block index_sample_block; const unum::usearch::metric_kind_t metric_kind; const unum::usearch::scalar_kind_t scalar_kind; const UsearchHnswParams usearch_hnsw_params; From d8ff6f868fe6cb346ac751b468b462b857399480 Mon Sep 17 00:00:00 2001 From: Pablo Marcos Date: Thu, 7 Nov 2024 12:36:21 +0000 Subject: [PATCH 332/353] bitShift: return 0 instead of throwing an exception if overflow --- src/Functions/bitShiftLeft.cpp | 20 +++++++++++-------- src/Functions/bitShiftRight.cpp | 20 +++++++++++-------- .../02766_bitshift_with_const_arguments.sql | 2 +- ...t_throws_error_for_out_of_bounds.reference | 6 ++++++ ...t_shift_throws_error_for_out_of_bounds.sql | 12 +++++------ 5 files changed, 37 insertions(+), 23 deletions(-) diff --git a/src/Functions/bitShiftLeft.cpp b/src/Functions/bitShiftLeft.cpp index 0eb0d82ef0f..7fd0f7cf631 100644 --- a/src/Functions/bitShiftLeft.cpp +++ b/src/Functions/bitShiftLeft.cpp @@ -25,8 +25,10 @@ struct BitShiftLeftImpl { if constexpr (is_big_int_v) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "BitShiftLeft is not implemented for big integers as second argument"); - else if (b < 0 || static_cast(b) > 8 * sizeof(A)) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value and less or equal to the bit width of the value to shift"); + else if (b < 0) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value"); + else if (static_cast(b) > 8 * sizeof(A)) + return static_cast(0); else if constexpr (is_big_int_v) return static_cast(a) << static_cast(b); else @@ -43,9 +45,10 @@ struct BitShiftLeftImpl const UInt8 word_size = 8 * sizeof(*pos); size_t n = end - pos; const UInt128 bit_limit = static_cast(word_size) * n; - if (b < 0 || static_cast(b) > bit_limit) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value and less or equal to the bit width of the value to shift"); - if (b == bit_limit) + if (b < 0) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value"); + + if (b == bit_limit || static_cast(b) > bit_limit) { // insert default value out_vec.push_back(0); @@ -111,9 +114,10 @@ struct BitShiftLeftImpl const UInt8 word_size = 8; size_t n = end - pos; const UInt128 bit_limit = static_cast(word_size) * n; - if (b < 0 || static_cast(b) > bit_limit) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of 
shift positions needs to be a non-negative value and less or equal to the bit width of the value to shift"); - if (b == bit_limit) + if (b < 0) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value"); + + if (b == bit_limit || static_cast(b) > bit_limit) { // insert default value out_vec.resize_fill(out_vec.size() + n); diff --git a/src/Functions/bitShiftRight.cpp b/src/Functions/bitShiftRight.cpp index 16032b32f68..19ea7b8c751 100644 --- a/src/Functions/bitShiftRight.cpp +++ b/src/Functions/bitShiftRight.cpp @@ -26,8 +26,10 @@ struct BitShiftRightImpl { if constexpr (is_big_int_v) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "BitShiftRight is not implemented for big integers as second argument"); - else if (b < 0 || static_cast(b) > 8 * sizeof(A)) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value and less or equal to the bit width of the value to shift"); + else if (b < 0) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value"); + else if (static_cast(b) > 8 * sizeof(A)) + return static_cast(0); else if constexpr (is_big_int_v) return static_cast(a) >> static_cast(b); else @@ -59,9 +61,10 @@ struct BitShiftRightImpl const UInt8 word_size = 8; size_t n = end - pos; const UInt128 bit_limit = static_cast(word_size) * n; - if (b < 0 || static_cast(b) > bit_limit) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value and less or equal to the bit width of the value to shift"); - if (b == bit_limit) + if (b < 0) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value"); + + if (b == bit_limit || static_cast(b) > bit_limit) { /// insert default value out_vec.push_back(0); @@ -99,9 +102,10 @@ struct BitShiftRightImpl const UInt8 word_size = 8; size_t n = end - pos; const UInt128 bit_limit = static_cast(word_size) * n; - if (b < 0 || static_cast(b) > bit_limit) - throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value and less or equal to the bit width of the value to shift"); - if (b == bit_limit) + if (b < 0) + throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "The number of shift positions needs to be a non-negative value"); + + if (b == bit_limit || static_cast(b) > bit_limit) { // insert default value out_vec.resize_fill(out_vec.size() + n); diff --git a/tests/queries/0_stateless/02766_bitshift_with_const_arguments.sql b/tests/queries/0_stateless/02766_bitshift_with_const_arguments.sql index 91e8624057c..6b2961f0555 100644 --- a/tests/queries/0_stateless/02766_bitshift_with_const_arguments.sql +++ b/tests/queries/0_stateless/02766_bitshift_with_const_arguments.sql @@ -10,7 +10,7 @@ DROP TABLE IF EXISTS t1; CREATE TABLE t0 (vkey UInt32, pkey UInt32, c0 UInt32) engine = TinyLog; CREATE TABLE t1 (vkey UInt32) ENGINE = AggregatingMergeTree ORDER BY vkey; INSERT INTO t0 VALUES (15, 25000, 58); -SELECT ref_5.pkey AS c_2_c2392_6 FROM t0 AS ref_5 WHERE 'J[' < multiIf(ref_5.pkey IN ( SELECT 1 ), bitShiftLeft(multiIf(ref_5.c0 > NULL, '1', ')'), 40), NULL); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT ref_5.pkey AS c_2_c2392_6 FROM t0 AS ref_5 WHERE 'J[' < multiIf(ref_5.pkey IN ( SELECT 1 ), bitShiftLeft(multiIf(ref_5.c0 > NULL, '1', ')'), 40), NULL); DROP TABLE t0; DROP TABLE t1; diff --git 
a/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.reference b/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.reference index 33b8cd6ee26..1fda82a9747 100644 --- a/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.reference +++ b/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.reference @@ -1,3 +1,9 @@ -- bitShiftRight +0 + +\0\0\0\0\0\0\0\0 -- bitShiftLeft +0 + +\0\0\0\0\0\0\0\0 OK diff --git a/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.sql b/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.sql index aec01753673..340cc1292e4 100644 --- a/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.sql +++ b/tests/queries/0_stateless/03198_bit_shift_throws_error_for_out_of_bounds.sql @@ -1,17 +1,17 @@ SELECT '-- bitShiftRight'; SELECT bitShiftRight(1, -1); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT bitShiftRight(toUInt8(1), 8 + 1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftRight(toUInt8(1), 8 + 1); SELECT bitShiftRight('hola', -1); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT bitShiftRight('hola', 4 * 8 + 1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftRight('hola', 4 * 8 + 1); SELECT bitShiftRight(toFixedString('hola', 8), -1); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT bitShiftRight(toFixedString('hola', 8), 8 * 8 + 1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftRight(toFixedString('hola', 8), 8 * 8 + 1); SELECT '-- bitShiftLeft'; SELECT bitShiftLeft(1, -1); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT bitShiftLeft(toUInt8(1), 8 + 1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftLeft(toUInt8(1), 8 + 1); SELECT bitShiftLeft('hola', -1); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT bitShiftLeft('hola', 4 * 8 + 1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftLeft('hola', 4 * 8 + 1); SELECT bitShiftLeft(toFixedString('hola', 8), -1); -- { serverError ARGUMENT_OUT_OF_BOUND } -SELECT bitShiftLeft(toFixedString('hola', 8), 8 * 8 + 1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftLeft(toFixedString('hola', 8), 8 * 8 + 1); SELECT 'OK'; \ No newline at end of file From f727a3931bfa0d7b3945bfb8703665aef3fc0695 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 7 Nov 2024 12:41:48 +0000 Subject: [PATCH 333/353] Clarify query cache docs and remove obsolete setting --- docs/en/operations/query-cache.md | 23 +++++++++++------------ src/Core/Settings.cpp | 1 - 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/docs/en/operations/query-cache.md b/docs/en/operations/query-cache.md index 955cec0234e..f0941aa28aa 100644 --- a/docs/en/operations/query-cache.md +++ b/docs/en/operations/query-cache.md @@ -25,9 +25,10 @@ Query caches can generally be viewed as transactionally consistent or inconsiste slowly enough that the database only needs to compute the report once (represented by the first `SELECT` query). Further queries can be served directly from the query cache. In this example, a reasonable validity period could be 30 min. -Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result, -the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side. -This reduces maintenance effort and avoids redundancy. 
+Transactionally inconsistent caching is traditionally provided by client tools or proxy packages (e.g. +[chproxy](https://www.chproxy.org/configuration/caching/)) interacting with the database. As a result, the same caching logic and +configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side. This reduces maintenance +effort and avoids redundancy. ## Configuration Settings and Usage @@ -138,7 +139,10 @@ is only cached if the query runs longer than 5 seconds. It is also possible to s cached - for that use setting [query_cache_min_query_runs](settings/settings.md#query-cache-min-query-runs). Entries in the query cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds but a different -value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl). +value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl). The query +cache evicts entries "lazily", i.e. when an entry becomes stale, it is not immediately removed from the cache. Instead, when a new entry +is to be inserted into the query cache, the database checks whether the cache has enough free space for the new entry. If this is not the +case, the database tries to remove all stale entries. If the cache still has not enough free space, the new entry is not inserted. Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries). @@ -188,14 +192,9 @@ Also, results of queries with non-deterministic functions are not cached by defa To force caching of results of queries with non-deterministic functions regardless, use setting [query_cache_nondeterministic_function_handling](settings/settings.md#query-cache-nondeterministic-function-handling). -Results of queries that involve system tables, e.g. `system.processes` or `information_schema.tables`, are not cached by default. To force -caching of results of queries with system tables regardless, use setting -[query_cache_system_table_handling](settings/settings.md#query-cache-system-table-handling). - -:::note -Prior to ClickHouse v23.11, setting 'query_cache_store_results_of_queries_with_nondeterministic_functions = 0 / 1' controlled whether -results of queries with non-deterministic results were cached. In newer ClickHouse versions, this setting is obsolete and has no effect. -::: +Results of queries that involve system tables (e.g. [system.processes](system-tables/processes.md)` or +[information_schema.tables](system-tables/information_schema.md)) are not cached by default. To force caching of results of queries with +system tables regardless, use setting [query_cache_system_table_handling](settings/settings.md#query-cache-system-table-handling). Finally, entries in the query cache are not shared between users due to security reasons. For example, user A must not be able to bypass a row policy on a table by running the same query as another user B for whom no such policy exists. 
However, if necessary, cache entries can diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index c2ffc2ddf0e..3bfa58e4f98 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -5916,7 +5916,6 @@ Experimental data deduplication for SELECT queries based on part UUIDs MAKE_OBSOLETE(M, UInt64, parallel_replicas_min_number_of_granules_to_enable, 0) \ MAKE_OBSOLETE(M, ParallelReplicasCustomKeyFilterType, parallel_replicas_custom_key_filter_type, ParallelReplicasCustomKeyFilterType::DEFAULT) \ MAKE_OBSOLETE(M, Bool, query_plan_optimize_projection, true) \ - MAKE_OBSOLETE(M, Bool, query_cache_store_results_of_queries_with_nondeterministic_functions, false) \ MAKE_OBSOLETE(M, Bool, allow_experimental_annoy_index, false) \ MAKE_OBSOLETE(M, UInt64, max_threads_for_annoy_index_creation, 4) \ MAKE_OBSOLETE(M, Int64, annoy_index_search_k_nodes, -1) \ From d43329f254eaaddaece94d4f96631b3307be23bb Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Tue, 5 Nov 2024 13:31:10 +0100 Subject: [PATCH 334/353] UX: slightly improve cache await interface --- tests/ci/ci_cache.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/ci/ci_cache.py b/tests/ci/ci_cache.py index 6f2e3e70736..5ebed827926 100644 --- a/tests/ci/ci_cache.py +++ b/tests/ci/ci_cache.py @@ -795,11 +795,12 @@ class CiCache: # start waiting for the next TIMEOUT seconds if there are more than X(=4) jobs to wait # wait TIMEOUT seconds in rounds. Y(=5) is the max number of rounds expired_sec = 0 - start_at = int(time.time()) + start_at = time.time() while expired_sec < TIMEOUT and self.jobs_to_wait: await_finished: Set[str] = set() if not dry_run: - time.sleep(poll_interval_sec) + # Do not sleep longer than required + time.sleep(min(poll_interval_sec, TIMEOUT - expired_sec)) self.update() for job_name, job_config in self.jobs_to_wait.items(): num_batches = job_config.num_batches @@ -844,7 +845,8 @@ class CiCache: del self.jobs_to_wait[job] if not dry_run: - expired_sec = int(time.time()) - start_at + # Avoid `seconds left [-3]` + expired_sec = min(int(time.time() - start_at), TIMEOUT) print( f"...awaiting continues... seconds left [{TIMEOUT - expired_sec}]" ) From ccaa66963dfa937f6a2562ff22d9b90254fefea3 Mon Sep 17 00:00:00 2001 From: "Mikhail f. Shiryaev" Date: Tue, 5 Nov 2024 13:37:35 +0100 Subject: [PATCH 335/353] Print a proper message for finished awaiting --- tests/ci/ci_cache.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/ci/ci_cache.py b/tests/ci/ci_cache.py index 5ebed827926..c271339db8b 100644 --- a/tests/ci/ci_cache.py +++ b/tests/ci/ci_cache.py @@ -845,11 +845,12 @@ class CiCache: del self.jobs_to_wait[job] if not dry_run: - # Avoid `seconds left [-3]` - expired_sec = min(int(time.time() - start_at), TIMEOUT) - print( - f"...awaiting continues... seconds left [{TIMEOUT - expired_sec}]" - ) + expired_sec = int(time.time() - start_at) + msg = f"...awaiting continues... 
seconds left [{TIMEOUT - expired_sec}]" + if expired_sec >= TIMEOUT: + # Avoid `seconds left [-3]` + msg = f"awaiting for round {round_cnt} is finished" + print(msg) else: # make up for 2 iterations in dry_run expired_sec += int(TIMEOUT / 2) + 1 From 5cc42571f326ac409abdf612278042c84c4e3a74 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 7 Nov 2024 14:57:24 +0000 Subject: [PATCH 336/353] Revert obsolete settings removal --- src/Core/Settings.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 3bfa58e4f98..0d322f107de 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -5859,7 +5859,7 @@ Experimental data deduplication for SELECT queries based on part UUIDs // Please add settings related to formats in Core/FormatFactorySettings.h, move obsolete settings to OBSOLETE_SETTINGS and obsolete format settings to OBSOLETE_FORMAT_SETTINGS. #define OBSOLETE_SETTINGS(M, ALIAS) \ - /** Obsolete settings that do nothing but left for compatibility reasons. Remove each one after half a year of obsolescence. */ \ + /** Obsolete settings which are kept around for compatibility reasons. They have no effect anymore. */ \ MAKE_OBSOLETE(M, Bool, update_insert_deduplication_token_in_dependent_materialized_views, 0) \ MAKE_OBSOLETE(M, UInt64, max_memory_usage_for_all_queries, 0) \ MAKE_OBSOLETE(M, UInt64, multiple_joins_rewriter_version, 0) \ @@ -5916,6 +5916,7 @@ Experimental data deduplication for SELECT queries based on part UUIDs MAKE_OBSOLETE(M, UInt64, parallel_replicas_min_number_of_granules_to_enable, 0) \ MAKE_OBSOLETE(M, ParallelReplicasCustomKeyFilterType, parallel_replicas_custom_key_filter_type, ParallelReplicasCustomKeyFilterType::DEFAULT) \ MAKE_OBSOLETE(M, Bool, query_plan_optimize_projection, true) \ + MAKE_OBSOLETE(M, Bool, query_cache_store_results_of_queries_with_nondeterministic_functions, false) \ MAKE_OBSOLETE(M, Bool, allow_experimental_annoy_index, false) \ MAKE_OBSOLETE(M, UInt64, max_threads_for_annoy_index_creation, 4) \ MAKE_OBSOLETE(M, Int64, annoy_index_search_k_nodes, -1) \ From de03a5dae75b06520ab19a5fd34a561f83ae74e2 Mon Sep 17 00:00:00 2001 From: Robert Schulze Date: Thu, 7 Nov 2024 15:04:53 +0000 Subject: [PATCH 337/353] Fix test which used an obsolete setting --- tests/queries/0_stateless/02494_query_cache_normalize_ast.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql b/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql index 1dbb3ef8158..cb53c4db7de 100644 --- a/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql +++ b/tests/queries/0_stateless/02494_query_cache_normalize_ast.sql @@ -7,7 +7,7 @@ SYSTEM DROP QUERY CACHE; -- Run query whose result gets cached in the query cache. -- Besides "use_query_cache", pass two more knobs (one QC-specific knob and one non-QC-specific knob). We just care -- *that* they are passed and not about their effect. 
-SELECT 1 SETTINGS use_query_cache = true, query_cache_store_results_of_queries_with_nondeterministic_functions = true, max_threads = 16; +SELECT 1 SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'save', max_threads = 16; -- Check that entry in QC exists SELECT COUNT(*) FROM system.query_cache; From a01c2e3f8c265aceb3042cdee1abafeed4f68485 Mon Sep 17 00:00:00 2001 From: Pervakov Grigorii Date: Thu, 7 Nov 2024 16:51:53 +0300 Subject: [PATCH 338/353] Keep materialized view security overriden context until end of query --- src/Processors/Sinks/SinkToStorage.h | 4 ++++ src/Storages/StorageMaterializedView.cpp | 2 ++ ...67_materialized_view_keeps_security_context.reference | 1 + .../03267_materialized_view_keeps_security_context.sql | 9 +++++++++ 4 files changed, 16 insertions(+) create mode 100644 tests/queries/0_stateless/03267_materialized_view_keeps_security_context.reference create mode 100644 tests/queries/0_stateless/03267_materialized_view_keeps_security_context.sql diff --git a/src/Processors/Sinks/SinkToStorage.h b/src/Processors/Sinks/SinkToStorage.h index c728fa87b1e..4bdcb2fe855 100644 --- a/src/Processors/Sinks/SinkToStorage.h +++ b/src/Processors/Sinks/SinkToStorage.h @@ -5,6 +5,8 @@ namespace DB { +class Context; + /// Sink which is returned from Storage::write. class SinkToStorage : public ExceptionKeepingTransform { @@ -16,12 +18,14 @@ public: const Block & getHeader() const { return inputs.front().getHeader(); } void addTableLock(const TableLockHolder & lock) { table_locks.push_back(lock); } + void addInterpreterContext(std::shared_ptr context) { interpreter_context.emplace_back(std::move(context)); } protected: virtual void consume(Chunk & chunk) = 0; private: std::vector table_locks; + std::vector> interpreter_context; void onConsume(Chunk chunk) override; GenerateResult onGenerate() override; diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index d047b28e076..3289ff1ae25 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -382,6 +382,7 @@ void StorageMaterializedView::read( } query_plan.addStorageHolder(storage); + query_plan.addInterpreterContext(context); query_plan.addTableLock(std::move(lock)); } } @@ -405,6 +406,7 @@ SinkToStoragePtr StorageMaterializedView::write(const ASTPtr & query, const Stor auto sink = storage->write(query, metadata_snapshot, context, async_insert); + sink->addInterpreterContext(context); sink->addTableLock(lock); return sink; } diff --git a/tests/queries/0_stateless/03267_materialized_view_keeps_security_context.reference b/tests/queries/0_stateless/03267_materialized_view_keeps_security_context.reference new file mode 100644 index 00000000000..d00491fd7e5 --- /dev/null +++ b/tests/queries/0_stateless/03267_materialized_view_keeps_security_context.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/03267_materialized_view_keeps_security_context.sql b/tests/queries/0_stateless/03267_materialized_view_keeps_security_context.sql new file mode 100644 index 00000000000..bb44e4920af --- /dev/null +++ b/tests/queries/0_stateless/03267_materialized_view_keeps_security_context.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.rview; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.wview; + +-- Read from view +CREATE MATERIALIZED VIEW rview ENGINE = File(CSV) POPULATE AS SELECT 1 AS c0; +SELECT 1 FROM rview; + +-- Write through view populate +CREATE MATERIALIZED VIEW wview 
ENGINE = Join(ALL, INNER, c0) POPULATE AS SELECT 1 AS c0; From 96b59a2ef679b6b23ffcecafd59c05a0ea784ada Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 7 Nov 2024 13:43:58 +0100 Subject: [PATCH 339/353] Avoid port clash in CoordinationTest/0.TestSummingRaft1 --- src/Coordination/tests/gtest_coordination.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Coordination/tests/gtest_coordination.cpp b/src/Coordination/tests/gtest_coordination.cpp index 9648fdd4530..c56e698766a 100644 --- a/src/Coordination/tests/gtest_coordination.cpp +++ b/src/Coordination/tests/gtest_coordination.cpp @@ -330,7 +330,7 @@ TYPED_TEST(CoordinationTest, TestSummingRaft1) this->setLogDirectory("./logs"); this->setStateFileDirectory("."); - SummingRaftServer s1(1, "localhost", 44444, this->keeper_context); + SummingRaftServer s1(1, "localhost", 0, this->keeper_context); SCOPE_EXIT(if (std::filesystem::exists("./state")) std::filesystem::remove("./state");); /// Single node is leader From bfad05ac60b90bf7b4000cf6f87b54730ce108a5 Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 7 Nov 2024 17:35:10 +0100 Subject: [PATCH 340/353] Shrink to fit index granularity array in memory to reduce memory footprint --- src/Storages/MergeTree/IMergeTreeDataPart.cpp | 2 ++ src/Storages/MergeTree/MergeTreeIndexGranularity.cpp | 6 ++++++ src/Storages/MergeTree/MergeTreeIndexGranularity.h | 2 ++ 3 files changed, 10 insertions(+) diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 41783ffddb0..7453d609fa9 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -735,7 +735,9 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks loadUUID(); loadColumns(require_columns_checksums); loadChecksums(require_columns_checksums); + loadIndexGranularity(); + index_granularity.shrinkToFitInMemory(); if (!(*storage.getSettings())[MergeTreeSetting::primary_key_lazy_load]) getIndex(); diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp index d69a00643f0..c3e740bde84 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.cpp @@ -122,4 +122,10 @@ std::string MergeTreeIndexGranularity::describe() const { return fmt::format("initialized: {}, marks_rows_partial_sums: [{}]", initialized, fmt::join(marks_rows_partial_sums, ", ")); } + +void MergeTreeIndexGranularity::shrinkToFitInMemory() +{ + marks_rows_partial_sums.shrink_to_fit(); +} + } diff --git a/src/Storages/MergeTree/MergeTreeIndexGranularity.h b/src/Storages/MergeTree/MergeTreeIndexGranularity.h index f66e721ec1e..9b8375dd2d8 100644 --- a/src/Storages/MergeTree/MergeTreeIndexGranularity.h +++ b/src/Storages/MergeTree/MergeTreeIndexGranularity.h @@ -100,6 +100,8 @@ public: void resizeWithFixedGranularity(size_t size, size_t fixed_granularity); std::string describe() const; + + void shrinkToFitInMemory(); }; } From 95d821549106ecff95e6e42e19b014aa6ac0e669 Mon Sep 17 00:00:00 2001 From: kssenii Date: Thu, 7 Nov 2024 17:34:52 +0100 Subject: [PATCH 341/353] Fix --- src/Interpreters/Cache/FileCache.cpp | 21 +++++++++++++++++++-- tests/config/config.d/storage_conf.xml | 1 + 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/src/Interpreters/Cache/FileCache.cpp b/src/Interpreters/Cache/FileCache.cpp index f7b7ffc5aea..7de3f7af78d 100644 --- 
a/src/Interpreters/Cache/FileCache.cpp +++ b/src/Interpreters/Cache/FileCache.cpp @@ -37,6 +37,11 @@ namespace ProfileEvents extern const Event FilesystemCacheFailToReserveSpaceBecauseOfCacheResize; } +namespace CurrentMetrics +{ + extern const Metric FilesystemCacheDownloadQueueElements; +} + namespace DB { @@ -918,7 +923,13 @@ bool FileCache::tryReserve( if (!query_priority->collectCandidatesForEviction( size, required_elements_num, reserve_stat, eviction_candidates, {}, user.user_id, cache_lock)) { - failure_reason = "cannot evict enough space for query limit"; + const auto & stat = reserve_stat.total_stat; + failure_reason = fmt::format( + "cannot evict enough space for query limit " + "(non-releasable count: {}, non-releasable size: {}, " + "releasable count: {}, releasable size: {}, background download elements: {})", + stat.non_releasable_count, stat.non_releasable_size, stat.releasable_count, stat.releasable_size, + CurrentMetrics::get(CurrentMetrics::FilesystemCacheDownloadQueueElements)); return false; } @@ -933,7 +944,13 @@ bool FileCache::tryReserve( if (!main_priority->collectCandidatesForEviction( size, required_elements_num, reserve_stat, eviction_candidates, queue_iterator, user.user_id, cache_lock)) { - failure_reason = "cannot evict enough space"; + const auto & stat = reserve_stat.total_stat; + failure_reason = fmt::format( + "cannot evict enough space " + "(non-releasable count: {}, non-releasable size: {}, " + "releasable count: {}, releasable size: {}, background download elements: {})", + stat.non_releasable_count, stat.non_releasable_size, stat.releasable_count, stat.releasable_size, + CurrentMetrics::get(CurrentMetrics::FilesystemCacheDownloadQueueElements)); return false; } diff --git a/tests/config/config.d/storage_conf.xml b/tests/config/config.d/storage_conf.xml index 74bad7528c8..fee7ce841a6 100644 --- a/tests/config/config.d/storage_conf.xml +++ b/tests/config/config.d/storage_conf.xml @@ -27,6 +27,7 @@ 0.3 0.15 0.15 + 50 0 From 2c59fce5b488c9ddd2d99e0dcbaaf84d2f36ef04 Mon Sep 17 00:00:00 2001 From: Kseniia Sumarokova <54203879+kssenii@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:44:41 +0100 Subject: [PATCH 342/353] Update test.py --- tests/integration/test_storage_s3_queue/test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index c495fc1d44f..284b304c632 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -1403,8 +1403,8 @@ def test_shards_distributed(started_cluster, mode, processing_threads): # A unique path is necessary for repeatable tests keeper_path = f"/clickhouse/test_{table_name}_{generate_random_string()}" files_path = f"{table_name}_data" - files_to_generate = 300 - row_num = 300 + files_to_generate = 600 + row_num = 1000 total_rows = row_num * files_to_generate shards_num = 2 From 45aaebc41a73131c4ceee63214afbc88104dd59f Mon Sep 17 00:00:00 2001 From: alesapin Date: Thu, 7 Nov 2024 18:24:36 +0100 Subject: [PATCH 343/353] Review fix --- src/Storages/MergeTree/MergedBlockOutputStream.cpp | 2 ++ src/Storages/MergeTree/MutateTask.cpp | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/Storages/MergeTree/MergedBlockOutputStream.cpp b/src/Storages/MergeTree/MergedBlockOutputStream.cpp index 77c34aae30a..39096718b5c 100644 --- a/src/Storages/MergeTree/MergedBlockOutputStream.cpp +++ b/src/Storages/MergeTree/MergedBlockOutputStream.cpp @@ -207,6 +207,8 @@ 
MergedBlockOutputStream::Finalizer MergedBlockOutputStream::finalizePartAsync( new_part->setBytesOnDisk(checksums.getTotalSizeOnDisk()); new_part->setBytesUncompressedOnDisk(checksums.getTotalSizeUncompressedOnDisk()); new_part->index_granularity = writer->getIndexGranularity(); + /// Just in case + new_part->index_granularity.shrinkToFitInMemory(); new_part->calculateColumnsAndSecondaryIndicesSizesOnDisk(); /// In mutation, existing_rows_count is already calculated in PartMergerWriter diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 936df7b0275..7f6588fc632 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -984,6 +984,8 @@ void finalizeMutatedPart( new_data_part->rows_count = source_part->rows_count; new_data_part->index_granularity = source_part->index_granularity; + /// Just in case + new_data_part->index_granularity.shrinkToFitInMemory(); new_data_part->setIndex(*source_part->getIndex()); new_data_part->minmax_idx = source_part->minmax_idx; new_data_part->modification_time = time(nullptr); From 2fa357f3747a9436acdeefd4c255e5333c461c3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Mar=C3=ADn?= Date: Thu, 7 Nov 2024 20:51:39 +0100 Subject: [PATCH 344/353] Revert "Enable enable_job_stack_trace by default" --- src/Core/Settings.cpp | 2 +- src/Core/SettingsChangesHistory.cpp | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 01339226c2d..6f0109fa300 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -2869,7 +2869,7 @@ Limit on size of multipart/form-data content. This setting cannot be parsed from DECLARE(Bool, calculate_text_stack_trace, true, R"( Calculate text stack trace in case of exceptions during query execution. This is the default. It requires symbol lookups that may slow down fuzzing tests when a huge amount of wrong queries are executed. In normal cases, you should not disable this option. )", 0) \ - DECLARE(Bool, enable_job_stack_trace, true, R"( + DECLARE(Bool, enable_job_stack_trace, false, R"( Output stack trace of a job creator when job results in exception )", 0) \ DECLARE(Bool, allow_ddl, true, R"( diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index edf4e60706b..c6223bef2b2 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -80,7 +80,6 @@ static std::initializer_list Date: Thu, 7 Nov 2024 21:48:11 +0000 Subject: [PATCH 345/353] Update version_date.tsv and changelogs after v24.3.13.40-lts --- docs/changelogs/v24.3.13.40-lts.md | 31 ++++++++++++++++++++++++++++ utils/list-versions/version_date.tsv | 1 + 2 files changed, 32 insertions(+) create mode 100644 docs/changelogs/v24.3.13.40-lts.md diff --git a/docs/changelogs/v24.3.13.40-lts.md b/docs/changelogs/v24.3.13.40-lts.md new file mode 100644 index 00000000000..cec96e16292 --- /dev/null +++ b/docs/changelogs/v24.3.13.40-lts.md @@ -0,0 +1,31 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.3.13.40-lts (7acabd77389) FIXME as compared to v24.3.12.75-lts (7cb5dff8019) + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#63976](https://github.com/ClickHouse/ClickHouse/issues/63976): Fix intersect parts when restart after drop range. [#63202](https://github.com/ClickHouse/ClickHouse/pull/63202) ([Han Fei](https://github.com/hanfei1991)). 
+* Backported in [#71482](https://github.com/ClickHouse/ClickHouse/issues/71482): Fix `Content-Encoding` not sent in some compressed responses. [#64802](https://github.com/ClickHouse/ClickHouse/issues/64802). [#68975](https://github.com/ClickHouse/ClickHouse/pull/68975) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#70451](https://github.com/ClickHouse/ClickHouse/issues/70451): Fix vrash during insertion into FixedString column in PostgreSQL engine. [#69584](https://github.com/ClickHouse/ClickHouse/pull/69584) ([Pavel Kruglov](https://github.com/Avogar)). +* Backported in [#70619](https://github.com/ClickHouse/ClickHouse/issues/70619): Fix server segfault on creating a materialized view with two selects and an `INTERSECT`, e.g. `CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1);`. [#70264](https://github.com/ClickHouse/ClickHouse/pull/70264) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#70877](https://github.com/ClickHouse/ClickHouse/issues/70877): Fix table creation with `CREATE ... AS table_function()` with database `Replicated` and unavailable table function source on secondary replica. [#70511](https://github.com/ClickHouse/ClickHouse/pull/70511) ([Kseniia Sumarokova](https://github.com/kssenii)). +* Backported in [#70571](https://github.com/ClickHouse/ClickHouse/issues/70571): Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)). +* Backported in [#71146](https://github.com/ClickHouse/ClickHouse/issues/71146): Ignore frozen_metadata.txt while traversing shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)). +* Backported in [#70682](https://github.com/ClickHouse/ClickHouse/issues/70682): Fix creation of stateful window functions on misaligned memory. [#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#71113](https://github.com/ClickHouse/ClickHouse/issues/71113): `GroupArraySortedData` uses a PODArray with non-POD elements, manually calling constructors and destructors for the elements as needed. But it wasn't careful enough: in two places it forgot to call destructor, in one place it left elements uninitialized if an exception is thrown when deserializing previous elements. Then `GroupArraySortedData`'s destructor called destructors on uninitialized elements and crashed: ``` 2024.10.17 22:58:23.523790 [ 5233 ] {} BaseDaemon: ########## Short fault info ############ 2024.10.17 22:58:23.523834 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) Received signal 11 2024.10.17 22:58:23.523862 [ 5233 ] {} BaseDaemon: Signal description: Segmentation fault 2024.10.17 22:58:23.523883 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 
2024.10.17 22:58:23.523908 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.523936 [ 5233 ] {} BaseDaemon: ######################################## 2024.10.17 22:58:23.523959 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) (query_id: 6c8a33a2-f45a-4a3b-bd71-ded6a1c9ccd3::202410_534066_534078_2) (query: ) Received signal Segmentation fault (11) 2024.10.17 22:58:23.523977 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 2024.10.17 22:58:23.523993 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.524817 [ 5233 ] {} BaseDaemon: 0. signalHandler(int, siginfo_t*, void*) @ 0x000000000c6f8308 2024.10.17 22:58:23.524917 [ 5233 ] {} BaseDaemon: 1. ? @ 0x0000ffffb7701850 2024.10.17 22:58:23.524962 [ 5233 ] {} BaseDaemon: 2. DB::Field::~Field() @ 0x0000000007c84855 2024.10.17 22:58:23.525012 [ 5233 ] {} BaseDaemon: 3. DB::Field::~Field() @ 0x0000000007c848a0 2024.10.17 22:58:23.526626 [ 5233 ] {} BaseDaemon: 4. DB::IAggregateFunctionDataHelper, DB::(anonymous namespace)::GroupArraySorted, DB::Field>>::destroy(char*) const (.5a6a451027f732f9fd91c13f4a13200c) @ 0x000000000cb9e84c 2024.10.17 22:58:23.527322 [ 5233 ] {} BaseDaemon: 5. DB::SerializationAggregateFunction::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const @ 0x000000000f7d10d0 2024.10.17 22:58:23.528470 [ 5233 ] {} BaseDaemon: 6. DB::ISerialization::deserializeBinaryBulkWithMultipleStreams(COW::immutable_ptr&, unsigned long, DB::ISerialization::DeserializeBinaryBulkSettings&, std::shared_ptr&, std::unordered_map::immutable_ptr, std::hash, std::equal_to, std::allocator::immutable_ptr>>>*) const @ 0x000000000f7cba20 2024.10.17 22:58:23.529213 [ 5233 ] {} BaseDaemon: 7. DB::MergeTreeReaderCompact::readData(DB::NameAndTypePair const&, COW::immutable_ptr&, unsigned long, std::function const&) @ 0x000000001120bbfc 2024.10.17 22:58:23.529277 [ 5233 ] {} BaseDaemon: 8. DB::MergeTreeReaderCompactSingleBuffer::readRows(unsigned long, unsigned long, bool, unsigned long, std::vector::immutable_ptr, std::allocator::immutable_ptr>>&) @ 0x000000001120fab0 2024.10.17 22:58:23.529319 [ 5233 ] {} BaseDaemon: 9. DB::MergeTreeSequentialSource::generate() @ 0x000000001121bf50 2024.10.17 22:58:23.529346 [ 5233 ] {} BaseDaemon: 10. DB::ISource::tryGenerate() @ 0x00000000116f520c 2024.10.17 22:58:23.529653 [ 5233 ] {} BaseDaemon: 11. 
DB::ISource::work() @ 0x00000000116f4c74 2024.10.17 22:58:23.529679 [ 5233 ] {} BaseDaemon: 12. DB::ExecutionThreadContext::executeTask() @ 0x000000001170a150 2024.10.17 22:58:23.529733 [ 5233 ] {} BaseDaemon: 13. DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x00000000117009f0 2024.10.17 22:58:23.529763 [ 5233 ] {} BaseDaemon: 14. DB::PipelineExecutor::executeStep(std::atomic*) @ 0x0000000011700574 2024.10.17 22:58:23.530089 [ 5233 ] {} BaseDaemon: 15. DB::PullingPipelineExecutor::pull(DB::Chunk&) @ 0x000000001170e364 2024.10.17 22:58:23.530277 [ 5233 ] {} BaseDaemon: 16. DB::PullingPipelineExecutor::pull(DB::Block&) @ 0x000000001170e4fc 2024.10.17 22:58:23.530295 [ 5233 ] {} BaseDaemon: 17. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() @ 0x0000000011074328 2024.10.17 22:58:23.530318 [ 5233 ] {} BaseDaemon: 18. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::execute() @ 0x000000001107428c 2024.10.17 22:58:23.530339 [ 5233 ] {} BaseDaemon: 19. DB::MergeTask::execute() @ 0x0000000011077df0 2024.10.17 22:58:23.530362 [ 5233 ] {} BaseDaemon: 20. DB::SharedMergeMutateTaskBase::executeStep() @ 0x0000000011435a3c 2024.10.17 22:58:23.530384 [ 5233 ] {} BaseDaemon: 21. DB::MergeTreeBackgroundExecutor::threadFunction() @ 0x000000001108b234 2024.10.17 22:58:23.530410 [ 5233 ] {} BaseDaemon: 22. ThreadPoolImpl>::worker(std::__list_iterator, void*>) @ 0x000000000c52e264 2024.10.17 22:58:23.530448 [ 5233 ] {} BaseDaemon: 23. void std::__function::__policy_invoker::__call_impl::ThreadFromGlobalPoolImpl>::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>(void&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x000000000c531dd0 2024.10.17 22:58:23.530476 [ 5233 ] {} BaseDaemon: 24. void* std::__thread_proxy[abi:v15000]>, void ThreadPoolImpl::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>>(void*) @ 0x000000000c530a80 2024.10.17 22:58:23.530514 [ 5233 ] {} BaseDaemon: 25. ? @ 0x000000000007d5c8 2024.10.17 22:58:23.530534 [ 5233 ] {} BaseDaemon: 26. ? @ 0x00000000000e5edc 2024.10.17 22:58:23.530551 [ 5233 ] {} BaseDaemon: Integrity check of the executable skipped because the reference checksum could not be read. 
2024.10.17 22:58:23.531083 [ 5233 ] {} BaseDaemon: Report this error to https://github.com/ClickHouse/ClickHouse/issues 2024.10.17 22:58:23.531294 [ 5233 ] {} BaseDaemon: Changed settings: max_insert_threads = 4, max_threads = 42, use_hedged_requests = false, distributed_foreground_insert = true, alter_sync = 0, enable_memory_bound_merging_of_aggregation_results = true, cluster_for_parallel_replicas = 'default', do_not_merge_across_partitions_select_final = false, log_queries = true, log_queries_probability = 1., max_http_get_redirects = 10, enable_deflate_qpl_codec = false, enable_zstd_qat_codec = false, query_profiler_real_time_period_ns = 0, query_profiler_cpu_time_period_ns = 0, max_bytes_before_external_group_by = 90194313216, max_bytes_before_external_sort = 90194313216, max_memory_usage = 180388626432, backup_restore_keeper_retry_max_backoff_ms = 60000, cancel_http_readonly_queries_on_client_close = true, max_table_size_to_drop = 1000000000000, max_partition_size_to_drop = 1000000000000, default_table_engine = 'ReplicatedMergeTree', mutations_sync = 0, optimize_trivial_insert_select = false, database_replicated_allow_only_replicated_engine = true, cloud_mode = true, cloud_mode_engine = 2, distributed_ddl_output_mode = 'none_only_active', distributed_ddl_entry_format_version = 6, async_insert_max_data_size = 10485760, async_insert_busy_timeout_max_ms = 1000, enable_filesystem_cache_on_write_operations = true, load_marks_asynchronously = true, allow_prefetched_read_pool_for_remote_filesystem = true, filesystem_prefetch_max_memory_usage = 18038862643, filesystem_prefetches_limit = 200, compatibility = '24.6', insert_keeper_max_retries = 20, allow_experimental_materialized_postgresql_table = false, date_time_input_format = 'best_effort' ```. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)). +* Backported in [#70990](https://github.com/ClickHouse/ClickHouse/issues/70990): Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#71246](https://github.com/ClickHouse/ClickHouse/issues/71246): Fixed named sessions not being closed and hanging on forever under certain circumstances. [#70998](https://github.com/ClickHouse/ClickHouse/pull/70998) ([Márcio Martins](https://github.com/marcio-absmartly)). +* Backported in [#71371](https://github.com/ClickHouse/ClickHouse/issues/71371): Add try/catch to data parts destructors to avoid terminate. [#71364](https://github.com/ClickHouse/ClickHouse/pull/71364) ([alesapin](https://github.com/alesapin)). +* Backported in [#71594](https://github.com/ClickHouse/ClickHouse/issues/71594): Prevent crash in SortCursor with 0 columns (old analyzer). [#71494](https://github.com/ClickHouse/ClickHouse/pull/71494) ([Raúl Marín](https://github.com/Algunenano)). + +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Backported in [#71022](https://github.com/ClickHouse/ClickHouse/issues/71022): Fix dropping of file cache in CHECK query in case of enabled transactions. [#69256](https://github.com/ClickHouse/ClickHouse/pull/69256) ([Anton Popov](https://github.com/CurtizJ)). +* Backported in [#70384](https://github.com/ClickHouse/ClickHouse/issues/70384): CI: Enable Integration Tests for backport PRs. 
[#70329](https://github.com/ClickHouse/ClickHouse/pull/70329) ([Max Kainov](https://github.com/maxknv)). +* Backported in [#70538](https://github.com/ClickHouse/ClickHouse/issues/70538): Remove slow poll() logs in keeper. [#70508](https://github.com/ClickHouse/ClickHouse/pull/70508) ([Raúl Marín](https://github.com/Algunenano)). +* Backported in [#70971](https://github.com/ClickHouse/ClickHouse/issues/70971): Limiting logging some lines about configs. [#70879](https://github.com/ClickHouse/ClickHouse/pull/70879) ([Yarik Briukhovetskyi](https://github.com/yariks5s)). + diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index cf28db5d49a..fab562a8cbb 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -31,6 +31,7 @@ v24.4.4.113-stable 2024-08-02 v24.4.3.25-stable 2024-06-14 v24.4.2.141-stable 2024-06-07 v24.4.1.2088-stable 2024-05-01 +v24.3.13.40-lts 2024-11-07 v24.3.12.75-lts 2024-10-08 v24.3.11.7-lts 2024-09-06 v24.3.10.33-lts 2024-09-03 From f71b00c5136bec4fe40393a45310c1f85a50e5d0 Mon Sep 17 00:00:00 2001 From: Konstantin Bogdanov Date: Thu, 7 Nov 2024 22:52:27 +0100 Subject: [PATCH 346/353] Lint --- docs/changelogs/v24.3.13.40-lts.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/changelogs/v24.3.13.40-lts.md b/docs/changelogs/v24.3.13.40-lts.md index cec96e16292..bce45e88710 100644 --- a/docs/changelogs/v24.3.13.40-lts.md +++ b/docs/changelogs/v24.3.13.40-lts.md @@ -16,7 +16,7 @@ sidebar_label: 2024 * Backported in [#70571](https://github.com/ClickHouse/ClickHouse/issues/70571): Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)). * Backported in [#71146](https://github.com/ClickHouse/ClickHouse/issues/71146): Ignore frozen_metadata.txt while traversing shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)). * Backported in [#70682](https://github.com/ClickHouse/ClickHouse/issues/70682): Fix creation of stateful window functions on misaligned memory. [#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)). -* Backported in [#71113](https://github.com/ClickHouse/ClickHouse/issues/71113): `GroupArraySortedData` uses a PODArray with non-POD elements, manually calling constructors and destructors for the elements as needed. But it wasn't careful enough: in two places it forgot to call destructor, in one place it left elements uninitialized if an exception is thrown when deserializing previous elements. Then `GroupArraySortedData`'s destructor called destructors on uninitialized elements and crashed: ``` 2024.10.17 22:58:23.523790 [ 5233 ] {} BaseDaemon: ########## Short fault info ############ 2024.10.17 22:58:23.523834 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) Received signal 11 2024.10.17 22:58:23.523862 [ 5233 ] {} BaseDaemon: Signal description: Segmentation fault 2024.10.17 22:58:23.523883 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 
2024.10.17 22:58:23.523908 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.523936 [ 5233 ] {} BaseDaemon: ######################################## 2024.10.17 22:58:23.523959 [ 5233 ] {} BaseDaemon: (version 24.6.1.4609 (official build), build id: 5423339A6571004018D55BBE05D464AFA35E6718, git hash: fa6cdfda8a94890eb19bc7f22f8b0b56292f7a26) (from thread 682) (query_id: 6c8a33a2-f45a-4a3b-bd71-ded6a1c9ccd3::202410_534066_534078_2) (query: ) Received signal Segmentation fault (11) 2024.10.17 22:58:23.523977 [ 5233 ] {} BaseDaemon: Address: 0x8f. Access: . Address not mapped to object. 2024.10.17 22:58:23.523993 [ 5233 ] {} BaseDaemon: Stack trace: 0x0000aaaac4b78308 0x0000ffffb7701850 0x0000aaaac0104855 0x0000aaaac01048a0 0x0000aaaac501e84c 0x0000aaaac7c510d0 0x0000aaaac7c4ba20 0x0000aaaac968bbfc 0x0000aaaac968fab0 0x0000aaaac969bf50 0x0000aaaac9b7520c 0x0000aaaac9b74c74 0x0000aaaac9b8a150 0x0000aaaac9b809f0 0x0000aaaac9b80574 0x0000aaaac9b8e364 0x0000aaaac9b8e4fc 0x0000aaaac94f4328 0x0000aaaac94f428c 0x0000aaaac94f7df0 0x0000aaaac98b5a3c 0x0000aaaac950b234 0x0000aaaac49ae264 0x0000aaaac49b1dd0 0x0000aaaac49b0a80 0x0000ffffb755d5c8 0x0000ffffb75c5edc 2024.10.17 22:58:23.524817 [ 5233 ] {} BaseDaemon: 0. signalHandler(int, siginfo_t*, void*) @ 0x000000000c6f8308 2024.10.17 22:58:23.524917 [ 5233 ] {} BaseDaemon: 1. ? @ 0x0000ffffb7701850 2024.10.17 22:58:23.524962 [ 5233 ] {} BaseDaemon: 2. DB::Field::~Field() @ 0x0000000007c84855 2024.10.17 22:58:23.525012 [ 5233 ] {} BaseDaemon: 3. DB::Field::~Field() @ 0x0000000007c848a0 2024.10.17 22:58:23.526626 [ 5233 ] {} BaseDaemon: 4. DB::IAggregateFunctionDataHelper, DB::(anonymous namespace)::GroupArraySorted, DB::Field>>::destroy(char*) const (.5a6a451027f732f9fd91c13f4a13200c) @ 0x000000000cb9e84c 2024.10.17 22:58:23.527322 [ 5233 ] {} BaseDaemon: 5. DB::SerializationAggregateFunction::deserializeBinaryBulk(DB::IColumn&, DB::ReadBuffer&, unsigned long, double) const @ 0x000000000f7d10d0 2024.10.17 22:58:23.528470 [ 5233 ] {} BaseDaemon: 6. DB::ISerialization::deserializeBinaryBulkWithMultipleStreams(COW::immutable_ptr&, unsigned long, DB::ISerialization::DeserializeBinaryBulkSettings&, std::shared_ptr&, std::unordered_map::immutable_ptr, std::hash, std::equal_to, std::allocator::immutable_ptr>>>*) const @ 0x000000000f7cba20 2024.10.17 22:58:23.529213 [ 5233 ] {} BaseDaemon: 7. DB::MergeTreeReaderCompact::readData(DB::NameAndTypePair const&, COW::immutable_ptr&, unsigned long, std::function const&) @ 0x000000001120bbfc 2024.10.17 22:58:23.529277 [ 5233 ] {} BaseDaemon: 8. DB::MergeTreeReaderCompactSingleBuffer::readRows(unsigned long, unsigned long, bool, unsigned long, std::vector::immutable_ptr, std::allocator::immutable_ptr>>&) @ 0x000000001120fab0 2024.10.17 22:58:23.529319 [ 5233 ] {} BaseDaemon: 9. DB::MergeTreeSequentialSource::generate() @ 0x000000001121bf50 2024.10.17 22:58:23.529346 [ 5233 ] {} BaseDaemon: 10. DB::ISource::tryGenerate() @ 0x00000000116f520c 2024.10.17 22:58:23.529653 [ 5233 ] {} BaseDaemon: 11. 
DB::ISource::work() @ 0x00000000116f4c74 2024.10.17 22:58:23.529679 [ 5233 ] {} BaseDaemon: 12. DB::ExecutionThreadContext::executeTask() @ 0x000000001170a150 2024.10.17 22:58:23.529733 [ 5233 ] {} BaseDaemon: 13. DB::PipelineExecutor::executeStepImpl(unsigned long, std::atomic*) @ 0x00000000117009f0 2024.10.17 22:58:23.529763 [ 5233 ] {} BaseDaemon: 14. DB::PipelineExecutor::executeStep(std::atomic*) @ 0x0000000011700574 2024.10.17 22:58:23.530089 [ 5233 ] {} BaseDaemon: 15. DB::PullingPipelineExecutor::pull(DB::Chunk&) @ 0x000000001170e364 2024.10.17 22:58:23.530277 [ 5233 ] {} BaseDaemon: 16. DB::PullingPipelineExecutor::pull(DB::Block&) @ 0x000000001170e4fc 2024.10.17 22:58:23.530295 [ 5233 ] {} BaseDaemon: 17. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() @ 0x0000000011074328 2024.10.17 22:58:23.530318 [ 5233 ] {} BaseDaemon: 18. DB::MergeTask::ExecuteAndFinalizeHorizontalPart::execute() @ 0x000000001107428c 2024.10.17 22:58:23.530339 [ 5233 ] {} BaseDaemon: 19. DB::MergeTask::execute() @ 0x0000000011077df0 2024.10.17 22:58:23.530362 [ 5233 ] {} BaseDaemon: 20. DB::SharedMergeMutateTaskBase::executeStep() @ 0x0000000011435a3c 2024.10.17 22:58:23.530384 [ 5233 ] {} BaseDaemon: 21. DB::MergeTreeBackgroundExecutor::threadFunction() @ 0x000000001108b234 2024.10.17 22:58:23.530410 [ 5233 ] {} BaseDaemon: 22. ThreadPoolImpl>::worker(std::__list_iterator, void*>) @ 0x000000000c52e264 2024.10.17 22:58:23.530448 [ 5233 ] {} BaseDaemon: 23. void std::__function::__policy_invoker::__call_impl::ThreadFromGlobalPoolImpl>::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>(void&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x000000000c531dd0 2024.10.17 22:58:23.530476 [ 5233 ] {} BaseDaemon: 24. void* std::__thread_proxy[abi:v15000]>, void ThreadPoolImpl::scheduleImpl(std::function, Priority, std::optional, bool)::'lambda0'()>>(void*) @ 0x000000000c530a80 2024.10.17 22:58:23.530514 [ 5233 ] {} BaseDaemon: 25. ? @ 0x000000000007d5c8 2024.10.17 22:58:23.530534 [ 5233 ] {} BaseDaemon: 26. ? @ 0x00000000000e5edc 2024.10.17 22:58:23.530551 [ 5233 ] {} BaseDaemon: Integrity check of the executable skipped because the reference checksum could not be read. 
2024.10.17 22:58:23.531083 [ 5233 ] {} BaseDaemon: Report this error to https://github.com/ClickHouse/ClickHouse/issues 2024.10.17 22:58:23.531294 [ 5233 ] {} BaseDaemon: Changed settings: max_insert_threads = 4, max_threads = 42, use_hedged_requests = false, distributed_foreground_insert = true, alter_sync = 0, enable_memory_bound_merging_of_aggregation_results = true, cluster_for_parallel_replicas = 'default', do_not_merge_across_partitions_select_final = false, log_queries = true, log_queries_probability = 1., max_http_get_redirects = 10, enable_deflate_qpl_codec = false, enable_zstd_qat_codec = false, query_profiler_real_time_period_ns = 0, query_profiler_cpu_time_period_ns = 0, max_bytes_before_external_group_by = 90194313216, max_bytes_before_external_sort = 90194313216, max_memory_usage = 180388626432, backup_restore_keeper_retry_max_backoff_ms = 60000, cancel_http_readonly_queries_on_client_close = true, max_table_size_to_drop = 1000000000000, max_partition_size_to_drop = 1000000000000, default_table_engine = 'ReplicatedMergeTree', mutations_sync = 0, optimize_trivial_insert_select = false, database_replicated_allow_only_replicated_engine = true, cloud_mode = true, cloud_mode_engine = 2, distributed_ddl_output_mode = 'none_only_active', distributed_ddl_entry_format_version = 6, async_insert_max_data_size = 10485760, async_insert_busy_timeout_max_ms = 1000, enable_filesystem_cache_on_write_operations = true, load_marks_asynchronously = true, allow_prefetched_read_pool_for_remote_filesystem = true, filesystem_prefetch_max_memory_usage = 18038862643, filesystem_prefetches_limit = 200, compatibility = '24.6', insert_keeper_max_retries = 20, allow_experimental_materialized_postgresql_table = false, date_time_input_format = 'best_effort' ```. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)). +* Backported in [#71113](https://github.com/ClickHouse/ClickHouse/issues/71113): Fix a crash and a leak in AggregateFunctionGroupArraySorted. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)). * Backported in [#70990](https://github.com/ClickHouse/ClickHouse/issues/70990): Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)). * Backported in [#71246](https://github.com/ClickHouse/ClickHouse/issues/71246): Fixed named sessions not being closed and hanging on forever under certain circumstances. [#70998](https://github.com/ClickHouse/ClickHouse/pull/70998) ([Márcio Martins](https://github.com/marcio-absmartly)). * Backported in [#71371](https://github.com/ClickHouse/ClickHouse/issues/71371): Add try/catch to data parts destructors to avoid terminate. [#71364](https://github.com/ClickHouse/ClickHouse/pull/71364) ([alesapin](https://github.com/alesapin)). 
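The backported fix referenced just above ([#70820], "Fix a crash and a leak in AggregateFunctionGroupArraySorted") addresses the segfault shown in this report: the crash occurs while a background merge reads serialized `groupArraySorted` state back from a part (`SerializationAggregateFunction::deserializeBinaryBulk` calling into the `GroupArraySorted` `destroy` frame in the trace). The following is a minimal, purely hypothetical sketch of a schema that exercises the same code path — it is not the reproducer from the report, and the table and column names are invented for illustration:

```sql
-- Hypothetical illustration only: a table that persists groupArraySorted
-- aggregation state. Background merges have to deserialize this state again,
-- which is the code path visible in the stack trace above.
CREATE TABLE repo_top_stars
(
    repo      String,
    top_stars AggregateFunction(groupArraySorted(5), UInt64)
)
ENGINE = AggregatingMergeTree
ORDER BY repo;

-- Write aggregate state using the -State combinator.
INSERT INTO repo_top_stars
SELECT 'ClickHouse/ClickHouse', groupArraySortedState(5)(number)
FROM numbers(100);

-- Forcing a merge makes the server read the serialized state back
-- (SerializationAggregateFunction::deserializeBinaryBulk in the trace).
OPTIMIZE TABLE repo_top_stars FINAL;

-- Read the result with the -Merge combinator.
SELECT repo, groupArraySortedMerge(5)(top_stars)
FROM repo_top_stars
GROUP BY repo;
```

Forcing the merge with `OPTIMIZE ... FINAL` is simply a convenient way to trigger the same deserialization that otherwise happens during normal background merges.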
From 0f945cadc74aed12e6a1f05d7cde98aa02e369b7 Mon Sep 17 00:00:00 2001 From: Derek Chia Date: Fri, 8 Nov 2024 17:34:53 +0800 Subject: [PATCH 347/353] Update settings.md Remove duplicated `background_pool_size` description --- .../server-configuration-parameters/settings.md | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 02fa5a8ca58..c5f92ccdf68 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -131,16 +131,6 @@ Type: UInt64 Default: 8 -## background_pool_size - -Sets the number of threads performing background merges and mutations for tables with MergeTree engines. You can only increase the number of threads at runtime. To lower the number of threads you have to restart the server. By adjusting this setting, you manage CPU and disk load. Smaller pool size utilizes less CPU and disk resources, but background processes advance slower which might eventually impact query performance. - -Before changing it, please also take a look at related MergeTree settings, such as `number_of_free_entries_in_pool_to_lower_max_size_of_merge` and `number_of_free_entries_in_pool_to_execute_mutation`. - -Type: UInt64 - -Default: 16 - ## background_schedule_pool_size The maximum number of threads that will be used for constantly executing some lightweight periodic operations for replicated tables, Kafka streaming, and DNS cache updates. From 87b9f5cb4ef65bd8c7313bd4f2563e41b974e951 Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 8 Nov 2024 12:24:29 +0100 Subject: [PATCH 348/353] Add min_parts_to_merge_at_once setting --- .../MergeTree/MergeSelectors/SimpleMergeSelector.cpp | 5 ++++- src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.h | 2 ++ src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp | 2 ++ src/Storages/MergeTree/MergeTreeSettings.cpp | 1 + 4 files changed, 9 insertions(+), 1 deletion(-) diff --git a/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.cpp b/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.cpp index c393349ef32..4f786215cbe 100644 --- a/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.cpp +++ b/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.cpp @@ -116,7 +116,7 @@ bool allow( double sum_size, double max_size, double min_age, - double range_size, + size_t range_size, double partition_size, double min_size_to_lower_base_log, double max_size_to_lower_base_log, @@ -125,6 +125,9 @@ bool allow( if (settings.min_age_to_force_merge && min_age >= settings.min_age_to_force_merge) return true; + if (settings.min_parts_to_merge_at_once && range_size < settings.min_parts_to_merge_at_once) + return false; + /// Map size to 0..1 using logarithmic scale /// Use log(1 + x) instead of log1p(x) because our sum_size is always integer. /// Also log1p seems to be slow and significantly affect performance of merges assignment. diff --git a/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.h b/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.h index 2d4129b8bf8..1e7676c6aed 100644 --- a/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.h +++ b/src/Storages/MergeTree/MergeSelectors/SimpleMergeSelector.h @@ -90,6 +90,8 @@ public: { /// Zero means unlimited. Can be overridden by the same merge tree setting. size_t max_parts_to_merge_at_once = 100; + /// Zero means no minimum. 
Can be overridden by the same merge tree setting. + size_t min_parts_to_merge_at_once = 0; /// Some sort of a maximum number of parts in partition. Can be overridden by the same merge tree setting. size_t parts_to_throw_insert = 3000; diff --git a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp index 37b6539755c..488f4b2390d 100644 --- a/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp +++ b/src/Storages/MergeTree/MergeTreeDataMergerMutator.cpp @@ -82,6 +82,7 @@ namespace MergeTreeSetting extern const MergeTreeSettingsMergeSelectorAlgorithm merge_selector_algorithm; extern const MergeTreeSettingsBool merge_selector_enable_heuristic_to_remove_small_parts_at_right; extern const MergeTreeSettingsFloat merge_selector_base; + extern const MergeTreeSettingsUInt64 min_parts_to_merge_at_once; } namespace ErrorCodes @@ -566,6 +567,7 @@ SelectPartsDecision MergeTreeDataMergerMutator::selectPartsToMergeFromRanges( simple_merge_settings.max_parts_to_merge_at_once = (*data_settings)[MergeTreeSetting::max_parts_to_merge_at_once]; simple_merge_settings.enable_heuristic_to_remove_small_parts_at_right = (*data_settings)[MergeTreeSetting::merge_selector_enable_heuristic_to_remove_small_parts_at_right]; simple_merge_settings.base = (*data_settings)[MergeTreeSetting::merge_selector_base]; + simple_merge_settings.min_parts_to_merge_at_once = (*data_settings)[MergeTreeSetting::min_parts_to_merge_at_once]; if (!(*data_settings)[MergeTreeSetting::min_age_to_force_merge_on_partition_only]) simple_merge_settings.min_age_to_force_merge = (*data_settings)[MergeTreeSetting::min_age_to_force_merge_seconds]; diff --git a/src/Storages/MergeTree/MergeTreeSettings.cpp b/src/Storages/MergeTree/MergeTreeSettings.cpp index 33910d1048d..fcd4e05cf00 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeSettings.cpp @@ -102,6 +102,7 @@ namespace ErrorCodes DECLARE(MergeSelectorAlgorithm, merge_selector_algorithm, MergeSelectorAlgorithm::SIMPLE, "The algorithm to select parts for merges assignment", EXPERIMENTAL) \ DECLARE(Bool, merge_selector_enable_heuristic_to_remove_small_parts_at_right, true, "Enable heuristic for selecting parts for merge which removes parts from right side of range, if their size is less than specified ratio (0.01) of sum_size. Works for Simple and StochasticSimple merge selectors", 0) \ DECLARE(Float, merge_selector_base, 5.0, "Affects write amplification of assigned merges (expert level setting, don't change if you don't understand what it is doing). Works for Simple and StochasticSimple merge selectors", 0) \ + DECLARE(UInt64, min_parts_to_merge_at_once, 0, "Minimal amount of data parts which merge selector can pick to merge at once (expert level setting, don't change if you don't understand what it is doing). 0 - disabled. Works for Simple and StochasticSimple merge selectors.", 0) \ \ /** Inserts settings. */ \ DECLARE(UInt64, parts_to_delay_insert, 1000, "If table contains at least that many active parts in single partition, artificially slow down insert into table. 
Disabled if set to 0", 0) \ From b6cad9c913b304052939cd100ba4e9d35b44c47a Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 8 Nov 2024 12:25:26 +0100 Subject: [PATCH 349/353] Add test --- ...03267_min_parts_to_merge_at_once.reference | 4 ++ .../03267_min_parts_to_merge_at_once.sh | 43 +++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 tests/queries/0_stateless/03267_min_parts_to_merge_at_once.reference create mode 100755 tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh diff --git a/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.reference b/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.reference new file mode 100644 index 00000000000..966a0980e59 --- /dev/null +++ b/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.reference @@ -0,0 +1,4 @@ +2 +3 +4 +1 diff --git a/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh b/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh new file mode 100755 index 00000000000..e069b57bf86 --- /dev/null +++ b/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS t;" + +$CLICKHOUSE_CLIENT --query "CREATE TABLE t (key UInt64) ENGINE = MergeTree() ORDER BY tuple() SETTINGS min_parts_to_merge_at_once=5, merge_selector_base=1" + +$CLICKHOUSE_CLIENT --query "INSERT INTO t VALUES (1)" +$CLICKHOUSE_CLIENT --query "INSERT INTO t VALUES (2);" + +# doesn't make test flaky +sleep 1 + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.parts WHERE active and database = currentDatabase() and table = 't'" + +$CLICKHOUSE_CLIENT --query "INSERT INTO t VALUES (3)" + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.parts WHERE active and database = currentDatabase() and table = 't'" + +$CLICKHOUSE_CLIENT --query "INSERT INTO t VALUES (4)" + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.parts WHERE active and database = currentDatabase() and table = 't'" + +$CLICKHOUSE_CLIENT --query "INSERT INTO t VALUES (5)" + +counter=0 retries=60 + +I=0 +while [[ $counter -lt $retries ]]; do + result=$($CLICKHOUSE_CLIENT --query "SELECT count() FROM system.parts WHERE active and database = currentDatabase() and table = 't'") + if [ "$result" -eq "1" ];then + break; + fi + sleep 0.5 + counter=$((counter + 1)) +done + +$CLICKHOUSE_CLIENT --query "SELECT count() FROM system.parts WHERE active and database = currentDatabase() and table = 't'" + +$CLICKHOUSE_CLIENT --query "DROP TABLE IF EXISTS t" From 4c644a98f5985a540ee75dc5a1f5ae31be39cc15 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Fri, 8 Nov 2024 12:29:04 +0100 Subject: [PATCH 350/353] Fix broken 03247_ghdata_string_to_json_alter --- .../queries/0_stateless/03247_ghdata_string_to_json_alter.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh index 931d106120c..a2d1788cb5d 100755 --- a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh +++ b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh @@ -18,12 +18,12 @@ ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)" ${CLICKHOUSE_CLIENT} -q \ "SELECT data.repo.name, count() AS stars FROM ghdata \ - WHERE data.type = 'WatchEvent' 
GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" + WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" --allow_suspicious_types_in_group_by=1, --allow_suspicious_types_in_order_by=1 ${CLICKHOUSE_CLIENT} --enable_analyzer=1 -q \ "SELECT data.payload.commits[].author.name AS name, count() AS c FROM ghdata \ ARRAY JOIN data.payload.commits[].author.name \ - GROUP BY name ORDER BY c DESC, name LIMIT 5" + GROUP BY name ORDER BY c DESC, name LIMIT 5" --allow_suspicious_types_in_group_by=1, --allow_suspicious_types_in_order_by=1 ${CLICKHOUSE_CLIENT} -q "SELECT max(data.payload.pull_request.assignees[].size0) FROM ghdata" From 1bd6b9df95792e8917e1da744a0d8e7d586949ed Mon Sep 17 00:00:00 2001 From: alesapin Date: Fri, 8 Nov 2024 12:47:48 +0100 Subject: [PATCH 351/353] Fix style check --- tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh b/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh index e069b57bf86..90b9d0339cf 100755 --- a/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh +++ b/tests/queries/0_stateless/03267_min_parts_to_merge_at_once.sh @@ -28,7 +28,6 @@ $CLICKHOUSE_CLIENT --query "INSERT INTO t VALUES (5)" counter=0 retries=60 -I=0 while [[ $counter -lt $retries ]]; do result=$($CLICKHOUSE_CLIENT --query "SELECT count() FROM system.parts WHERE active and database = currentDatabase() and table = 't'") if [ "$result" -eq "1" ];then From da0e267278efa2f42e0f18bf5a4b78a5d16dbe99 Mon Sep 17 00:00:00 2001 From: Pavel Kruglov <48961922+Avogar@users.noreply.github.com> Date: Fri, 8 Nov 2024 13:30:21 +0100 Subject: [PATCH 352/353] Fix typo --- .../queries/0_stateless/03247_ghdata_string_to_json_alter.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh index a2d1788cb5d..e8368b6702a 100755 --- a/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh +++ b/tests/queries/0_stateless/03247_ghdata_string_to_json_alter.sh @@ -18,12 +18,12 @@ ${CLICKHOUSE_CLIENT} -q "SELECT count() FROM ghdata WHERE NOT ignore(*)" ${CLICKHOUSE_CLIENT} -q \ "SELECT data.repo.name, count() AS stars FROM ghdata \ - WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" --allow_suspicious_types_in_group_by=1, --allow_suspicious_types_in_order_by=1 + WHERE data.type = 'WatchEvent' GROUP BY data.repo.name ORDER BY stars DESC, data.repo.name LIMIT 5" --allow_suspicious_types_in_group_by=1 --allow_suspicious_types_in_order_by=1 ${CLICKHOUSE_CLIENT} --enable_analyzer=1 -q \ "SELECT data.payload.commits[].author.name AS name, count() AS c FROM ghdata \ ARRAY JOIN data.payload.commits[].author.name \ - GROUP BY name ORDER BY c DESC, name LIMIT 5" --allow_suspicious_types_in_group_by=1, --allow_suspicious_types_in_order_by=1 + GROUP BY name ORDER BY c DESC, name LIMIT 5" --allow_suspicious_types_in_group_by=1 --allow_suspicious_types_in_order_by=1 ${CLICKHOUSE_CLIENT} -q "SELECT max(data.payload.pull_request.assignees[].size0) FROM ghdata" From fe39c4b65bfee09d9c7d5327963983fbd4cdd234 Mon Sep 17 00:00:00 2001 From: Tanya Bragin Date: Fri, 8 Nov 2024 08:55:20 -0800 Subject: [PATCH 353/353] Update README.md - Update meetups Add Stockholm --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md 
index dcaeda13acd..abaf27abf11 100644
--- a/README.md
+++ b/README.md
@@ -47,6 +47,7 @@ Upcoming meetups
 * [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
 * [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
 * [Amsterdam Meetup](https://www.meetup.com/clickhouse-netherlands-user-group/events/303638814) - December 3
+* [Stockholm Meetup](https://www.meetup.com/clickhouse-stockholm-user-group/events/304382411) - December 9
 * [New York Meetup](https://www.meetup.com/clickhouse-new-york-user-group/events/304268174) - December 9
 * [San Francisco Meetup](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/304286951/) - December 12
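As a usage note for the `min_parts_to_merge_at_once` MergeTree setting added in the patches above, here is a condensed SQL sketch of the behaviour that the new shell test `03267_min_parts_to_merge_at_once.sh` verifies. It is an illustration rather than part of the patch set; the table name is invented, and the final count only drops to 1 once a background merge has actually run:

```sql
-- Illustrative sketch: with min_parts_to_merge_at_once = 5 the simple merge
-- selector skips candidate ranges of fewer than five parts, so four
-- single-row inserts are expected to stay as four separate parts.
CREATE TABLE t_min_parts (key UInt64)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS min_parts_to_merge_at_once = 5, merge_selector_base = 1;

INSERT INTO t_min_parts VALUES (1);
INSERT INTO t_min_parts VALUES (2);
INSERT INTO t_min_parts VALUES (3);
INSERT INTO t_min_parts VALUES (4);

SELECT count() FROM system.parts
WHERE active AND database = currentDatabase() AND table = 't_min_parts';
-- expected to stay at 4: no merge is assigned yet

INSERT INTO t_min_parts VALUES (5);
-- once the fifth part appears the selector may pick the range;
-- after the background merge finishes the same query returns 1
```

As in the bundled test, `merge_selector_base = 1` keeps the selector eager, so the new minimum is the only thing preventing the first four parts from being merged.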