Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-27 01:51:59 +00:00
Merge remote-tracking branch 'upstream/master' into lower-memory-usage
Commit: ef133bfd47
@@ -1,8 +1,5 @@
if (NOT ENABLE_LIBRARIES)
set(DEFAULT_ENABLE_RUST FALSE)
elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "aarch64"))
message(STATUS "Rust is not available on aarch64-apple-darwin")
set(DEFAULT_ENABLE_RUST FALSE)
else()
list (APPEND CMAKE_MODULE_PATH "${ClickHouse_SOURCE_DIR}/contrib/corrosion/cmake")
find_package(Rust)
@@ -19,7 +16,9 @@ message(STATUS "Checking Rust toolchain for current target")

# See https://doc.rust-lang.org/nightly/rustc/platform-support.html

if((CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64") AND (CMAKE_TOOLCHAIN_FILE MATCHES "musl"))
if(CMAKE_TOOLCHAIN_FILE MATCHES "ppc64le")
set(Rust_CARGO_TARGET "powerpc64le-unknown-linux-gnu")
elseif((CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64") AND (CMAKE_TOOLCHAIN_FILE MATCHES "musl"))
set(Rust_CARGO_TARGET "x86_64-unknown-linux-musl")
elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-x86_64")
set(Rust_CARGO_TARGET "x86_64-unknown-linux-gnu")
@@ -29,14 +28,14 @@ elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-aarch64")
set(Rust_CARGO_TARGET "aarch64-unknown-linux-gnu")
elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "x86_64"))
set(Rust_CARGO_TARGET "x86_64-apple-darwin")
elseif((CMAKE_TOOLCHAIN_FILE MATCHES "darwin") AND (CMAKE_TOOLCHAIN_FILE MATCHES "darwin"))
set(Rust_CARGO_TARGET "aarch64-apple-darwin")
elseif((CMAKE_TOOLCHAIN_FILE MATCHES "freebsd") AND (CMAKE_TOOLCHAIN_FILE MATCHES "x86_64"))
set(Rust_CARGO_TARGET "x86_64-unknown-freebsd")
elseif(CMAKE_TOOLCHAIN_FILE MATCHES "linux/toolchain-riscv64")
set(Rust_CARGO_TARGET "riscv64gc-unknown-linux-gnu")
endif()

if(CMAKE_TOOLCHAIN_FILE MATCHES "ppc64le")
set(Rust_CARGO_TARGET "powerpc64le-unknown-linux-gnu")
else()
message(FATAL_ERROR "Unsupported rust target")
endif()

message(STATUS "Switched Rust target to ${Rust_CARGO_TARGET}")

@@ -1805,6 +1805,7 @@ Example of settings:
``` xml
<source>
    <postgresql>
        <host>postgresql-hostname</host>
        <port>5432</port>
        <user>clickhouse</user>
        <password>qwerty</password>

@@ -8,8 +8,6 @@ sidebar_label: VIEW

You can modify the `SELECT` query that was specified when a [materialized view](../create/view.md#materialized) was created using the `ALTER TABLE … MODIFY QUERY` statement, without interrupting the ingestion process.

The `allow_experimental_alter_materialized_view_structure` setting must be enabled.

This command is intended for materialized views created with the `TO [db.]name` clause. It does not change the structure of the underlying storage table, nor the column definitions of the materialized view; because of this, its applicability is very limited for materialized views created without the `TO [db.]name` clause.

**Example with TO table**

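Since the hunk cuts off right before the documented example, here is a minimal sketch of the flow described above; the table and view names (`events`, `events_agg`, `events_mv`) are hypothetical and not taken from the diff.

```sql
-- Hypothetical schema; illustrates ALTER TABLE ... MODIFY QUERY on a
-- materialized view that writes into an explicit TO table.
SET allow_experimental_alter_materialized_view_structure = 1;

CREATE TABLE events (ts DateTime, value UInt64) ENGINE = MergeTree ORDER BY ts;
CREATE TABLE events_agg (d Date, total UInt64) ENGINE = SummingMergeTree ORDER BY d;

CREATE MATERIALIZED VIEW events_mv TO events_agg AS
    SELECT toDate(ts) AS d, sum(value) AS total FROM events GROUP BY d;

-- Replace the stored SELECT; events_agg and ongoing ingestion are untouched.
ALTER TABLE events_mv MODIFY QUERY
    SELECT toDate(ts) AS d, sum(value * 2) AS total FROM events GROUP BY d;
```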
@@ -97,7 +97,7 @@ This feature is deprecated and will be removed in the future.

For your convenience, the old documentation is located [here](https://pastila.nl/?00f32652/fdf07272a7b54bda7e13b919264e449f.md)

## Refreshable Materialized View {#refreshable-materialized-view}
## Refreshable Materialized View [Experimental] {#refreshable-materialized-view}

```sql
CREATE MATERIALIZED VIEW [IF NOT EXISTS] [db.]table_name
@@ -120,7 +120,8 @@ Differences from regular non-refreshable materialized views:

:::note
Refreshable materialized views are a work in progress. Setting `allow_experimental_refreshable_materialized_view = 1` is required for creating one. Current limitations:
* not compatible with Replicated database or table engines,
* not compatible with Replicated database or table engines
* It is not supported in ClickHouse Cloud
* require [Atomic database engine](../../../engines/database-engines/atomic.md),
* no retries for failed refresh - we just skip to the next scheduled refresh time,
* no limit on number of concurrent refreshes.

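To make the `REFRESH` syntax shown above concrete, a minimal sketch; the schedule and object names (`dst`, `mv_daily`) are illustrative assumptions, not part of the diff.

```sql
-- Requires the experimental setting named in the note above.
SET allow_experimental_refreshable_materialized_view = 1;

CREATE TABLE dst (d Date, queries UInt64) ENGINE = MergeTree ORDER BY d;

-- A refreshable view re-runs its whole SELECT on a schedule
-- instead of reacting to inserts into the source tables.
CREATE MATERIALIZED VIEW mv_daily
REFRESH EVERY 1 HOUR
TO dst
AS SELECT toDate(event_time) AS d, count() AS queries
   FROM system.query_log
   GROUP BY d;
```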
src/Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.cpp (new file, 124 lines)
@@ -0,0 +1,124 @@
#include <Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.h>
#include <Analyzer/ConstantNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/IQueryTreeNode.h>
#include <DataTypes/IDataType.h>
#include <Interpreters/ExternalDictionariesLoader.h>

namespace DB
{

namespace
{

const std::unordered_set<String> possibly_injective_function_names
{
    "dictGet",
    "dictGetString",
    "dictGetUInt8",
    "dictGetUInt16",
    "dictGetUInt32",
    "dictGetUInt64",
    "dictGetInt8",
    "dictGetInt16",
    "dictGetInt32",
    "dictGetInt64",
    "dictGetFloat32",
    "dictGetFloat64",
    "dictGetDate",
    "dictGetDateTime"
};

class OptimizeGroupByInjectiveFunctionsVisitor : public InDepthQueryTreeVisitorWithContext<OptimizeGroupByInjectiveFunctionsVisitor>
{
    using Base = InDepthQueryTreeVisitorWithContext<OptimizeGroupByInjectiveFunctionsVisitor>;
public:
    explicit OptimizeGroupByInjectiveFunctionsVisitor(ContextPtr context)
        : Base(std::move(context))
    {}

    void enterImpl(QueryTreeNodePtr & node)
    {
        if (!getSettings().optimize_injective_functions_in_group_by)
            return;

        auto * query = node->as<QueryNode>();
        if (!query)
            return;

        if (!query->hasGroupBy())
            return;

        if (query->isGroupByWithCube() || query->isGroupByWithRollup())
            return;

        auto & group_by = query->getGroupBy().getNodes();
        if (query->isGroupByWithGroupingSets())
        {
            for (auto & set : group_by)
            {
                auto & grouping_set = set->as<ListNode>()->getNodes();
                optimizeGroupingSet(grouping_set);
            }
        }
        else
            optimizeGroupingSet(group_by);
    }

private:
    void optimizeGroupingSet(QueryTreeNodes & grouping_set)
    {
        auto context = getContext();

        QueryTreeNodes new_group_by_keys;
        new_group_by_keys.reserve(grouping_set.size());
        for (auto & group_by_elem : grouping_set)
        {
            std::queue<QueryTreeNodePtr> nodes_to_process;
            nodes_to_process.push(group_by_elem);

            while (!nodes_to_process.empty())
            {
                auto node_to_process = nodes_to_process.front();
                nodes_to_process.pop();

                auto const * function_node = node_to_process->as<FunctionNode>();
                if (!function_node)
                {
                    // Constant aggregation keys are removed in PlannerExpressionAnalysis.cpp
                    new_group_by_keys.push_back(node_to_process);
                    continue;
                }

                // Aggregate functions are not allowed in GROUP BY clause
                auto function = function_node->getFunctionOrThrow();
                bool can_be_eliminated = function->isInjective(function_node->getArgumentColumns());

                if (can_be_eliminated)
                {
                    for (auto const & argument : function_node->getArguments())
                    {
                        // We can skip constants here because aggregation key is already not a constant.
                        if (argument->getNodeType() != QueryTreeNodeType::CONSTANT)
                            nodes_to_process.push(argument);
                    }
                }
                else
                    new_group_by_keys.push_back(node_to_process);
            }
        }

        grouping_set = std::move(new_group_by_keys);
    }
};

}

void OptimizeGroupByInjectiveFunctionsPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
    OptimizeGroupByInjectiveFunctionsVisitor visitor(std::move(context));
    visitor.visit(query_tree_node);
}

}

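A quick way to observe the new pass, using the setting introduced later in this diff; the query itself is illustrative (the bundled tests use `toString(toString(number + 1))`).

```sql
-- With the analyzer on, the injective toString() call is removed from the
-- GROUP BY key list and grouping happens on `number` directly.
SET allow_experimental_analyzer = 1;
SET optimize_injective_functions_in_group_by = 1;

EXPLAIN QUERY TREE
SELECT toString(number) AS s, count()
FROM numbers(3)
GROUP BY s;
```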
src/Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.h (new file, 20 lines)
@@ -0,0 +1,20 @@
#pragma once

#include <Analyzer/IQueryTreePass.h>

namespace DB
{

/* Eliminates injective functions in GROUP BY section.
 */
class OptimizeGroupByInjectiveFunctionsPass final : public IQueryTreePass
{
public:
    String getName() override { return "OptimizeGroupByInjectiveFunctionsPass"; }

    String getDescription() override { return "Replaces injective functions with their arguments in GROUP BY section."; }

    void run(QueryTreeNodePtr query_tree_node, ContextPtr context) override;
};

}

@@ -2321,11 +2321,15 @@ std::pair<bool, UInt64> QueryAnalyzer::recursivelyCollectMaxOrdinaryExpressions(
 */
void QueryAnalyzer::expandGroupByAll(QueryNode & query_tree_node_typed)
{
    if (!query_tree_node_typed.isGroupByAll())
        return;

    auto & group_by_nodes = query_tree_node_typed.getGroupBy().getNodes();
    auto & projection_list = query_tree_node_typed.getProjection();

    for (auto & node : projection_list.getNodes())
        recursivelyCollectMaxOrdinaryExpressions(node, group_by_nodes);
    query_tree_node_typed.setIsGroupByAll(false);
}

void QueryAnalyzer::expandOrderByAll(QueryNode & query_tree_node_typed)
@@ -7422,7 +7426,6 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
        node->removeAlias();
    }

    if (query_node_typed.isGroupByAll())
        expandGroupByAll(query_node_typed);

    validateFilters(query_node);

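`expandGroupByAll` is what turns `GROUP BY ALL` into an explicit key list before the new pass runs; the test added at the bottom of this diff exercises exactly this, for example:

```sql
-- GROUP BY ALL expands to the non-aggregate projection expressions,
-- which the injective-function pass can then simplify further.
SELECT toString(toString(number + 1)) AS val, count()
FROM numbers(2)
GROUP BY ALL
ORDER BY val;
```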
@@ -3,6 +3,7 @@
#include <memory>

#include <Common/Exception.h>
#include "Analyzer/Passes/OptimizeGroupByInjectiveFunctionsPass.h"

#include <IO/WriteHelpers.h>
#include <IO/Operators.h>
@@ -163,8 +164,6 @@ private:

/** ClickHouse query tree pass manager.
 *
 * TODO: Support setting optimize_substitute_columns.
 * TODO: Support GROUP BY injective function elimination.
 * TODO: Support setting optimize_aggregators_of_group_by_keys.
 * TODO: Support setting optimize_monotonous_functions_in_order_by.
 * TODO: Add optimizations based on function semantics. Example: SELECT * FROM test_table WHERE id != id. (id is not nullable column).
@@ -268,6 +267,7 @@ void addQueryTreePasses(QueryTreePassManager & manager)
    manager.addPass(std::make_unique<AggregateFunctionsArithmericOperationsPass>());
    manager.addPass(std::make_unique<UniqInjectiveFunctionsEliminationPass>());
    manager.addPass(std::make_unique<OptimizeGroupByFunctionKeysPass>());
    manager.addPass(std::make_unique<OptimizeGroupByInjectiveFunctionsPass>());

    manager.addPass(std::make_unique<MultiIfToIfPass>());
    manager.addPass(std::make_unique<IfConstantConditionPass>());

@@ -699,6 +699,7 @@ class IColumn;
    M(SetOperationMode, intersect_default_mode, SetOperationMode::ALL, "Set default mode in INTERSECT query. Possible values: empty string, 'ALL', 'DISTINCT'. If empty, query without mode will throw exception.", 0) \
    M(SetOperationMode, except_default_mode, SetOperationMode::ALL, "Set default mode in EXCEPT query. Possible values: empty string, 'ALL', 'DISTINCT'. If empty, query without mode will throw exception.", 0) \
    M(Bool, optimize_aggregators_of_group_by_keys, true, "Eliminates min/max/any/anyLast aggregators of GROUP BY keys in SELECT section", 0) \
    M(Bool, optimize_injective_functions_in_group_by, true, "Replaces injective functions with their arguments in GROUP BY section", 0) \
    M(Bool, optimize_group_by_function_keys, true, "Eliminates functions of other keys in GROUP BY section", 0) \
    M(Bool, optimize_group_by_constant_keys, true, "Optimize GROUP BY when all keys in block are constant", 0) \
    M(Bool, legacy_column_name_of_tuple_literal, false, "List all names of element of large tuple literals in their column names instead of hash. This settings exists only for compatibility reasons. It makes sense to set to 'true', while doing rolling update of cluster from version lower than 21.7 to higher.", 0) \

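The new setting defaults to true; it can be disabled per query to compare plans. A small illustrative check (the query is mine, not from the diff):

```sql
-- With the optimization off, toString() stays in the GROUP BY key list;
-- with it on (the default), grouping is done on `number` directly.
SELECT toString(number) AS s, count()
FROM numbers(3)
GROUP BY s
SETTINGS allow_experimental_analyzer = 1,
         optimize_injective_functions_in_group_by = 0;
```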
@@ -99,7 +99,8 @@ static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> sett
{"output_format_pretty_color", true, "auto", "Setting is changed to allow also for auto value, disabling ANSI escapes if output is not a tty"},
{"function_visible_width_behavior", 0, 1, "We changed the default behavior of `visibleWidth` to be more precise"},
{"max_estimated_execution_time", 0, 0, "Separate max_execution_time and max_estimated_execution_time"},
{"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"}}},
{"iceberg_engine_ignore_schema_evolution", false, false, "Allow to ignore schema evolution in Iceberg table engine"},
{"optimize_injective_functions_in_group_by", false, true, "Replace injective functions with their arguments in GROUP BY section in analyzer"}}},
{"23.12", {{"allow_suspicious_ttl_expressions", true, false, "It is a new setting, and in previous versions the behavior was equivalent to allowing."},
{"input_format_parquet_allow_missing_columns", false, true, "Allow missing columns in Parquet files by default"},
{"input_format_orc_allow_missing_columns", false, true, "Allow missing columns in ORC files by default"},

@@ -716,7 +716,7 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti
    setEngine(create);

    /// We have to check access rights again (in case engine was changed).
    if (create.storage)
    if (create.storage && create.storage->engine)
    {
        auto source_access_type = StorageFactory::instance().getSourceAccessType(create.storage->engine->name);
        if (source_access_type != AccessType::NONE)

@@ -1096,6 +1096,8 @@ void StorageReplicatedMergeTree::drop()
    /// Table can be shut down, restarting thread is not active
    /// and calling StorageReplicatedMergeTree::getZooKeeper()/getAuxiliaryZooKeeper() won't suffice.
    zookeeper = getZooKeeperIfTableShutDown();
    /// Update zookeeper client, since existing may be expired, while ZooKeeper is required inside dropAllData().
    current_zookeeper = zookeeper;

    /// If probably there is metadata in ZooKeeper, we don't allow to drop the table.
    if (!zookeeper)

@@ -444,10 +444,10 @@ create view query_logs as
create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-arrays.tsv')
as
with (
-- sumMapState with the list of all keys with '-0.' values. Negative zero is because
-- sumMap removes keys with positive zeros.
-- sumMapState with the list of all keys with 'nan' values. 'nan' is because
-- sumMap removes keys with positive/negative zeros.
with (select groupUniqArrayArray(mapKeys(ProfileEvents)) from query_logs) as all_names
select arrayReduce('sumMapState', [(all_names, arrayMap(x->-0., all_names))])
select arrayReduce('sumMapState', [(all_names, arrayMap(x->nan, all_names))])
) as all_metrics
select test, query_index, version, query_id,
(finalizeAggregation(
@@ -460,13 +460,13 @@ create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-
),
arrayReduce('sumMapState', [(
['client_time', 'server_time', 'memory_usage'],
arrayMap(x->if(x != 0., x, -0.), [
arrayMap(x->if(x != 0., x, nan), [
toFloat64(query_runs.time),
toFloat64(query_duration_ms / 1000.),
toFloat64(memory_usage)]))])
]
)) as metrics_tuple).1 metric_names,
metrics_tuple.2 metric_values
arrayMap(x->if(isNaN(x),0,x), metrics_tuple.2) metric_values
from query_logs
right join query_runs
on query_logs.query_id = query_runs.query_id

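The change from `-0.` to `nan` sentinels rests on the property the comments above describe: sumMap drops keys whose accumulated value is zero. A hedged sketch of that behavior and the `isNaN` cleanup used in the script (the literal values are illustrative):

```sql
-- sumMap drops keys whose summed value is zero, so -0. cannot be used as a
-- "key exists but no data" placeholder; nan survives the aggregation and is
-- mapped back to 0 afterwards.
SELECT arrayReduce('sumMap', [(['a', 'b'], [0., 1.])]);   -- only 'b' remains
SELECT arrayMap(x -> if(isNaN(x), 0, x), [nan, 1.]);      -- [0, 1]
```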
@@ -1 +1,24 @@
1.1
SELECT dictGet(\'dictdb_01376.dict_exists\', \'value\', toUInt64(1)) AS val
FROM numbers(2)
GROUP BY toUInt64(1)
QUERY id: 0
PROJECTION COLUMNS
val Float64
PROJECTION
LIST id: 1, nodes: 1
FUNCTION id: 2, function_name: dictGet, function_type: ordinary, result_type: Float64
ARGUMENTS
LIST id: 3, nodes: 3
CONSTANT id: 4, constant_value: \'dictdb_01376.dict_exists\', constant_value_type: String
CONSTANT id: 5, constant_value: \'value\', constant_value_type: String
COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7
JOIN TREE
TABLE_FUNCTION id: 7, alias: __table1, table_function_name: numbers
ARGUMENTS
LIST id: 8, nodes: 1
CONSTANT id: 9, constant_value: UInt64_2, constant_value_type: UInt8
GROUP BY
LIST id: 10, nodes: 1
COLUMN id: 6, column_name: number, result_type: UInt64, source_id: 7
SETTINGS allow_experimental_analyzer=1

@@ -23,7 +23,7 @@ INSERT INTO dictdb_01376.table_for_dict VALUES (1, 1.1);
CREATE DICTIONARY IF NOT EXISTS dictdb_01376.dict_exists
(
    key_column UInt64,
    value Float64 DEFAULT 77.77
    value Float64 DEFAULT 77.77 INJECTIVE
)
PRIMARY KEY key_column
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' DB 'dictdb_01376'))
@@ -32,6 +32,14 @@ LAYOUT(FLAT());

SELECT dictGet('dictdb_01376.dict_exists', 'value', toUInt64(1)) as val FROM numbers(2) GROUP BY val;

EXPLAIN SYNTAX SELECT dictGet('dictdb_01376.dict_exists', 'value', toUInt64(1)) as val FROM numbers(2) GROUP BY val;

EXPLAIN QUERY TREE
SELECT dictGet('dictdb_01376.dict_exists', 'value', number) as val
FROM numbers(2)
GROUP BY val
SETTINGS allow_experimental_analyzer = 1;

DROP DICTIONARY dictdb_01376.dict_exists;
DROP TABLE dictdb_01376.table_for_dict;
DROP DATABASE dictdb_01376;

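The switch to `value Float64 DEFAULT 77.77 INJECTIVE` above is what lets the analyzer treat `dictGet` on this attribute as injective; my reading is that without the hint the `dictGet` call stays in the GROUP BY key list. A hedged restatement of the effect being tested:

```sql
-- With the attribute declared INJECTIVE, grouping by dictGet(...) is equivalent
-- to grouping by its key argument, so the optimizer rewrites the key to `number`
-- (see the GROUP BY list in the reference output above).
SELECT dictGet('dictdb_01376.dict_exists', 'value', number) AS val, count()
FROM numbers(2)
GROUP BY val
SETTINGS allow_experimental_analyzer = 1;
```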
@@ -20,17 +20,17 @@ clickhouse-client --allow_experimental_analyzer=1 --query_kind initial_query -q
Expression ((Project names + Projection))
Header: dummy String
Aggregating
Header: toString(__table1.dummy) String
Header: __table1.dummy UInt8
Expression ((Before GROUP BY + Change column names to column identifiers))
Header: toString(__table1.dummy) String
Header: __table1.dummy UInt8
ReadFromStorage (SystemOne)
Header: dummy UInt8
clickhouse-local --allow_experimental_analyzer=1 --query_kind initial_query -q explain plan header=1 select toString(dummy) as dummy from system.one group by dummy
Expression ((Project names + Projection))
Header: dummy String
Aggregating
Header: toString(__table1.dummy) String
Header: __table1.dummy UInt8
Expression ((Before GROUP BY + Change column names to column identifiers))
Header: toString(__table1.dummy) String
Header: __table1.dummy UInt8
ReadFromStorage (SystemOne)
Header: dummy UInt8

@@ -0,0 +1,142 @@
QUERY id: 0
PROJECTION COLUMNS
val String
count() UInt64
PROJECTION
LIST id: 1, nodes: 2
FUNCTION id: 2, function_name: toString, function_type: ordinary, result_type: String
ARGUMENTS
LIST id: 3, nodes: 1
FUNCTION id: 4, function_name: toString, function_type: ordinary, result_type: String
ARGUMENTS
LIST id: 5, nodes: 1
FUNCTION id: 6, function_name: plus, function_type: ordinary, result_type: UInt64
ARGUMENTS
LIST id: 7, nodes: 2
COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9
CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8
FUNCTION id: 11, function_name: count, function_type: aggregate, result_type: UInt64
JOIN TREE
TABLE_FUNCTION id: 9, alias: __table1, table_function_name: numbers
ARGUMENTS
LIST id: 12, nodes: 1
CONSTANT id: 13, constant_value: UInt64_2, constant_value_type: UInt8
GROUP BY
LIST id: 14, nodes: 1
FUNCTION id: 6, function_name: plus, function_type: ordinary, result_type: UInt64
ARGUMENTS
LIST id: 7, nodes: 2
COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9
CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8
ORDER BY
LIST id: 15, nodes: 1
SORT id: 16, sort_direction: ASCENDING, with_fill: 0
EXPRESSION
FUNCTION id: 2, function_name: toString, function_type: ordinary, result_type: String
ARGUMENTS
LIST id: 3, nodes: 1
FUNCTION id: 4, function_name: toString, function_type: ordinary, result_type: String
ARGUMENTS
LIST id: 5, nodes: 1
FUNCTION id: 6, function_name: plus, function_type: ordinary, result_type: UInt64
ARGUMENTS
LIST id: 7, nodes: 2
COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9
CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8
1 1
2 1
QUERY id: 0
PROJECTION COLUMNS
val String
count() UInt64
PROJECTION
LIST id: 1, nodes: 2
FUNCTION id: 2, function_name: toString, function_type: ordinary, result_type: String
ARGUMENTS
LIST id: 3, nodes: 1
FUNCTION id: 4, function_name: toString, function_type: ordinary, result_type: String
ARGUMENTS
LIST id: 5, nodes: 1
FUNCTION id: 6, function_name: plus, function_type: ordinary, result_type: UInt64
ARGUMENTS
LIST id: 7, nodes: 2
COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9
CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8
FUNCTION id: 11, function_name: count, function_type: aggregate, result_type: UInt64
JOIN TREE
TABLE_FUNCTION id: 9, alias: __table1, table_function_name: numbers
ARGUMENTS
LIST id: 12, nodes: 1
CONSTANT id: 13, constant_value: UInt64_2, constant_value_type: UInt8
GROUP BY
LIST id: 14, nodes: 1
FUNCTION id: 6, function_name: plus, function_type: ordinary, result_type: UInt64
ARGUMENTS
LIST id: 7, nodes: 2
COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9
CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8
ORDER BY
LIST id: 15, nodes: 1
SORT id: 16, sort_direction: ASCENDING, with_fill: 0
EXPRESSION
FUNCTION id: 2, function_name: toString, function_type: ordinary, result_type: String
ARGUMENTS
LIST id: 3, nodes: 1
FUNCTION id: 4, function_name: toString, function_type: ordinary, result_type: String
ARGUMENTS
LIST id: 5, nodes: 1
FUNCTION id: 6, function_name: plus, function_type: ordinary, result_type: UInt64
ARGUMENTS
LIST id: 7, nodes: 2
COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9
CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8
CHECK WITH TOTALS
QUERY id: 0, is_group_by_with_totals: 1
PROJECTION COLUMNS
val String
count() UInt64
PROJECTION
LIST id: 1, nodes: 2
FUNCTION id: 2, function_name: toString, function_type: ordinary, result_type: String
ARGUMENTS
LIST id: 3, nodes: 1
FUNCTION id: 4, function_name: toString, function_type: ordinary, result_type: String
ARGUMENTS
LIST id: 5, nodes: 1
FUNCTION id: 6, function_name: plus, function_type: ordinary, result_type: UInt64
ARGUMENTS
LIST id: 7, nodes: 2
COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9
CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8
FUNCTION id: 11, function_name: count, function_type: aggregate, result_type: UInt64
JOIN TREE
TABLE_FUNCTION id: 9, alias: __table1, table_function_name: numbers
ARGUMENTS
LIST id: 12, nodes: 1
CONSTANT id: 13, constant_value: UInt64_2, constant_value_type: UInt8
GROUP BY
LIST id: 14, nodes: 1
FUNCTION id: 6, function_name: plus, function_type: ordinary, result_type: UInt64
ARGUMENTS
LIST id: 7, nodes: 2
COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9
CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8
ORDER BY
LIST id: 15, nodes: 1
SORT id: 16, sort_direction: ASCENDING, with_fill: 0
EXPRESSION
FUNCTION id: 2, function_name: toString, function_type: ordinary, result_type: String
ARGUMENTS
LIST id: 3, nodes: 1
FUNCTION id: 4, function_name: toString, function_type: ordinary, result_type: String
ARGUMENTS
LIST id: 5, nodes: 1
FUNCTION id: 6, function_name: plus, function_type: ordinary, result_type: UInt64
ARGUMENTS
LIST id: 7, nodes: 2
COLUMN id: 8, column_name: number, result_type: UInt64, source_id: 9
CONSTANT id: 10, constant_value: UInt64_1, constant_value_type: UInt8
1 1
2 1

0 2

@@ -0,0 +1,31 @@
set allow_experimental_analyzer = 1;

EXPLAIN QUERY TREE
SELECT toString(toString(number + 1)) as val, count()
FROM numbers(2)
GROUP BY val
ORDER BY val;

SELECT toString(toString(number + 1)) as val, count()
FROM numbers(2)
GROUP BY ALL
ORDER BY val;

EXPLAIN QUERY TREE
SELECT toString(toString(number + 1)) as val, count()
FROM numbers(2)
GROUP BY ALL
ORDER BY val;

SELECT 'CHECK WITH TOTALS';

EXPLAIN QUERY TREE
SELECT toString(toString(number + 1)) as val, count()
FROM numbers(2)
GROUP BY val WITH TOTALS
ORDER BY val;

SELECT toString(toString(number + 1)) as val, count()
FROM numbers(2)
GROUP BY val WITH TOTALS
ORDER BY val;