Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-23 16:12:01 +00:00

Commit 4fd5884698: Merge branch 'master' into grant_bug_fixed
@@ -174,7 +174,7 @@ Parameters:
</table_hits>

<!-- Next table to copy. It is not copied until previous table is copying. -->
</table_visits>
<table_visits>
...
</table_visits>
...
@@ -6,7 +6,7 @@ toc_title: Overview

# ClickHouse Utility {#clickhouse-utility}

- [clickhouse-local](../../operations/utilities/clickhouse-local.md) — Allows running SQL queries on data without stopping the ClickHouse server, similar to how `awk` does this.
- [clickhouse-local](../../operations/utilities/clickhouse-local.md) — Allows running SQL queries on data without starting the ClickHouse server, similar to how `awk` does this.
- [clickhouse-copier](../../operations/utilities/clickhouse-copier.md) — Copies (and reshards) data from one cluster to another cluster.
- [clickhouse-benchmark](../../operations/utilities/clickhouse-benchmark.md) — Loads the server with custom queries and settings.
- [clickhouse-format](../../operations/utilities/clickhouse-format.md) — Enables formatting input queries.
@@ -10,7 +10,12 @@ Shows the execution plan of a statement.
Syntax:

```sql
EXPLAIN [AST | SYNTAX | PLAN | PIPELINE] [setting = value, ...] SELECT ... [FORMAT ...]
EXPLAIN [AST | SYNTAX | PLAN | PIPELINE | TABLE OVERRIDE] [setting = value, ...]
    [
        SELECT ... |
        tableFunction(...) [COLUMNS (...)] [ORDER BY ...] [PARTITION BY ...] [PRIMARY KEY] [SAMPLE BY ...] [TTL ...]
    ]
    [FORMAT ...]
```

Example:
@@ -412,4 +417,37 @@ Result:
└──────────┴───────┴───────┴──────┴───────┘
```

### EXPLAIN TABLE OVERRIDE {#explain-table-override}

Shows the result of a table override on a table schema accessed through a table function.
Also does some validation, throwing an exception if the override would have caused some kind of failure.

**Example**

Assume you have a remote MySQL table like this:

```sql
CREATE TABLE db.tbl (
    id INT PRIMARY KEY,
    created DATETIME DEFAULT now()
)
```

```sql
EXPLAIN TABLE OVERRIDE mysql('127.0.0.1:3306', 'db', 'tbl', 'root', 'clickhouse')
PARTITION BY toYYYYMM(assumeNotNull(created))
```

Result:

```text
┌─explain─────────────────────────────────────────────────┐
│ PARTITION BY uses columns: `created` Nullable(DateTime) │
└─────────────────────────────────────────────────────────┘
```
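
If an override refers to a Nullable column directly in a key expression, the validation fails instead of producing a result. A minimal sketch, assuming the same `db.tbl` as above (where `created` maps to `Nullable(DateTime)`):

```sql
-- Fails with an INVALID_TABLE_OVERRIDE error:
-- "PARTITION BY override refers to nullable column `created`
--  (use assumeNotNull() if the column does not in fact contain NULL values)"
EXPLAIN TABLE OVERRIDE mysql('127.0.0.1:3306', 'db', 'tbl', 'root', 'clickhouse')
PARTITION BY toYYYYMM(created)
```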

!!! note "Note"
    The validation is not complete, so a successful query does not guarantee that the override would
    not cause issues.
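
Outside of `EXPLAIN`, a table override is normally attached to a table of a `MaterializedMySQL` database. A minimal sketch of that usage (the connection parameters and names are illustrative):

```sql
CREATE DATABASE db ENGINE = MaterializedMySQL('127.0.0.1:3306', 'db', 'root', 'clickhouse')
TABLE OVERRIDE tbl (PARTITION BY toYYYYMM(assumeNotNull(created)))
```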

[Original article](https://clickhouse.com/docs/en/sql-reference/statements/explain/) <!--hide-->
@@ -163,7 +163,7 @@ $ clickhouse-copier copier --daemon --config zookeeper.xml --task-path /task/pat
</table_hits>

<!-- Next table to copy. It is not copied until previous table is copying. -->
</table_visits>
<table_visits>
...
</table_visits>
...
@@ -40,10 +40,10 @@ ClickHouse does not run or build on 32-bit

Run in a terminal:

git clone git@github.com:your_github_username/ClickHouse.git
git clone git@github.com:your_github_username/ClickHouse.git --recursive
cd ClickHouse

Replace the first occurrence of the word `ClickHouse` in the git command with the name of your GitHub account.
Replace the word `your_github_username` in the git command with the name of your GitHub account.

This command will create a ClickHouse directory containing a working copy of the project.
@@ -171,7 +171,7 @@ $ clickhouse-copier --daemon --config zookeeper.xml --task-path /task/path --bas
</table_hits>

<!-- Next table to copy. It is not copied until previous table is copying. -->
</table_visits>
<table_visits>
...
</table_visits>
...
@@ -1,18 +1,13 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

## system.asynchronous_metric_log {#system-tables-async-log}

Contains the historical values of `system.asynchronous_metrics`, recorded once per minute. Enabled by default.

Columns:
- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
- `event_time_microseconds` ([DateTime64](../../sql-reference/data-types/datetime64.md)) — Event time in microseconds.
- `name` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.

**Example**
``` sql
@@ -1,8 +1,3 @@
---
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
---

# system.asynchronous_metrics {#system_tables-asynchronous_metrics}

Contains metrics that are calculated periodically in the background. For example, the amount of RAM in use.
@@ -33,8 +28,8 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
└─────────────────────────────────────────┴────────────┘
```

**See Also**
- [Monitoring](../../operations/monitoring.md) — Basic concepts of ClickHouse monitoring.
- [system.metrics](../../operations/system-tables/metrics.md#system_tables-metrics) — Contains instantly calculated metrics.
- [system.events](../../operations/system-tables/events.md#system_tables-events) — Contains the number of events that have occurred.
- [system.metric_log](../../operations/system-tables/metric_log.md#system_tables-metric_log) — Contains historical values of metrics from the `system.metrics` and `system.events` tables.
@@ -158,7 +158,7 @@ clickhouse-copier --daemon --config zookeeper.xml --task-path /task/path --base-
</table_hits>

<!-- Next table to copy. It is not copied until previous table is copying. -->
</table_visits>
<table_visits>
...
</table_visits>
...
@@ -91,6 +91,25 @@ T execute(nanodbc::ConnectionHolderPtr connection_holder, std::function<T(nanodb
        connection_holder->updateConnection();
        return query_func(connection_holder->get());
    }

    /// psqlodbc driver error handling is incomplete and under some scenarios
    /// it doesn't propagate correct errors to the caller.
    /// As a quick workaround we run a quick "ping" query over the connection
    /// on generic errors.
    /// If "ping" fails, recycle the connection and try the query once more.
    if (e.state().starts_with("HY00"))
    {
        try
        {
            just_execute(connection_holder->get(), "SELECT 1");
        }
        catch (...)
        {
            connection_holder->updateConnection();
            return query_func(connection_holder->get());
        }
    }

    throw;
}
}
@@ -605,6 +605,7 @@
M(634, MONGODB_ERROR) \
M(635, CANNOT_POLL) \
M(636, CANNOT_EXTRACT_TABLE_STRUCTURE) \
M(637, INVALID_TABLE_OVERRIDE) \
\
M(999, KEEPER_EXCEPTION) \
M(1000, POCO_EXCEPTION) \
@@ -7,10 +7,12 @@
#include <Interpreters/InterpreterSelectWithUnionQuery.h>
#include <Interpreters/InterpreterSelectQuery.h>
#include <Interpreters/Context.h>
#include <Interpreters/TableOverrideUtils.h>
#include <Formats/FormatFactory.h>
#include <Parsers/DumpASTNode.h>
#include <Parsers/queryToString.h>
#include <Parsers/ASTExplainQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTSelectQuery.h>
#include <Parsers/ASTSelectWithUnionQuery.h>
@@ -250,117 +252,141 @@ QueryPipeline InterpreterExplainQuery::executeImpl()

WriteBufferFromOwnString buf;
bool single_line = false;
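/// When false, rows are written directly into res_columns instead of being dumped from buf at the end (used by EXPLAIN ESTIMATE).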
bool insert_buf = true;

if (ast.getKind() == ASTExplainQuery::ParsedAST)
switch (ast.getKind())
{
    if (ast.getSettings())
        throw Exception("Settings are not supported for EXPLAIN AST query.", ErrorCodes::UNKNOWN_SETTING);

    dumpAST(*ast.getExplainedQuery(), buf);
}
else if (ast.getKind() == ASTExplainQuery::AnalyzedSyntax)
{
    if (ast.getSettings())
        throw Exception("Settings are not supported for EXPLAIN SYNTAX query.", ErrorCodes::UNKNOWN_SETTING);

    ExplainAnalyzedSyntaxVisitor::Data data(getContext());
    ExplainAnalyzedSyntaxVisitor(data).visit(query);

    ast.getExplainedQuery()->format(IAST::FormatSettings(buf, false));
}
else if (ast.getKind() == ASTExplainQuery::QueryPlan)
{
    if (!dynamic_cast<const ASTSelectWithUnionQuery *>(ast.getExplainedQuery().get()))
        throw Exception("Only SELECT is supported for EXPLAIN query", ErrorCodes::INCORRECT_QUERY);

    auto settings = checkAndGetSettings<QueryPlanSettings>(ast.getSettings());
    QueryPlan plan;

    InterpreterSelectWithUnionQuery interpreter(ast.getExplainedQuery(), getContext(), SelectQueryOptions());
    interpreter.buildQueryPlan(plan);

    if (settings.optimize)
        plan.optimize(QueryPlanOptimizationSettings::fromContext(getContext()));

    if (settings.json)
    case ASTExplainQuery::ParsedAST:
    {
        /// Add extra layers to make plan look more like from postgres.
        auto plan_map = std::make_unique<JSONBuilder::JSONMap>();
        plan_map->add("Plan", plan.explainPlan(settings.query_plan_options));
        auto plan_array = std::make_unique<JSONBuilder::JSONArray>();
        plan_array->add(std::move(plan_map));
        if (ast.getSettings())
            throw Exception("Settings are not supported for EXPLAIN AST query.", ErrorCodes::UNKNOWN_SETTING);

        auto format_settings = getFormatSettings(getContext());
        format_settings.json.quote_64bit_integers = false;

        JSONBuilder::FormatSettings json_format_settings{.settings = format_settings};
        JSONBuilder::FormatContext format_context{.out = buf};

        plan_array->format(json_format_settings, format_context);

        single_line = true;
        dumpAST(*ast.getExplainedQuery(), buf);
        break;
    }
    else
        plan.explainPlan(buf, settings.query_plan_options);
}
else if (ast.getKind() == ASTExplainQuery::QueryPipeline)
{
    if (dynamic_cast<const ASTSelectWithUnionQuery *>(ast.getExplainedQuery().get()))
    case ASTExplainQuery::AnalyzedSyntax:
    {
        auto settings = checkAndGetSettings<QueryPipelineSettings>(ast.getSettings());
        if (ast.getSettings())
            throw Exception("Settings are not supported for EXPLAIN SYNTAX query.", ErrorCodes::UNKNOWN_SETTING);

        ExplainAnalyzedSyntaxVisitor::Data data(getContext());
        ExplainAnalyzedSyntaxVisitor(data).visit(query);

        ast.getExplainedQuery()->format(IAST::FormatSettings(buf, false));
        break;
    }
    case ASTExplainQuery::QueryPlan:
    {
        if (!dynamic_cast<const ASTSelectWithUnionQuery *>(ast.getExplainedQuery().get()))
            throw Exception("Only SELECT is supported for EXPLAIN query", ErrorCodes::INCORRECT_QUERY);

        auto settings = checkAndGetSettings<QueryPlanSettings>(ast.getSettings());
        QueryPlan plan;

        InterpreterSelectWithUnionQuery interpreter(ast.getExplainedQuery(), getContext(), SelectQueryOptions());
        interpreter.buildQueryPlan(plan);
        auto pipeline = plan.buildQueryPipeline(

        if (settings.optimize)
            plan.optimize(QueryPlanOptimizationSettings::fromContext(getContext()));

        if (settings.json)
        {
            /// Add extra layers to make plan look more like from postgres.
            auto plan_map = std::make_unique<JSONBuilder::JSONMap>();
            plan_map->add("Plan", plan.explainPlan(settings.query_plan_options));
            auto plan_array = std::make_unique<JSONBuilder::JSONArray>();
            plan_array->add(std::move(plan_map));

            auto format_settings = getFormatSettings(getContext());
            format_settings.json.quote_64bit_integers = false;

            JSONBuilder::FormatSettings json_format_settings{.settings = format_settings};
            JSONBuilder::FormatContext format_context{.out = buf};

            plan_array->format(json_format_settings, format_context);

            single_line = true;
        }
        else
            plan.explainPlan(buf, settings.query_plan_options);
        break;
    }
    case ASTExplainQuery::QueryPipeline:
    {
        if (dynamic_cast<const ASTSelectWithUnionQuery *>(ast.getExplainedQuery().get()))
        {
            auto settings = checkAndGetSettings<QueryPipelineSettings>(ast.getSettings());
            QueryPlan plan;

            InterpreterSelectWithUnionQuery interpreter(ast.getExplainedQuery(), getContext(), SelectQueryOptions());
            interpreter.buildQueryPlan(plan);
            auto pipeline = plan.buildQueryPipeline(
                QueryPlanOptimizationSettings::fromContext(getContext()),
                BuildQueryPipelineSettings::fromContext(getContext()));

            if (settings.graph)
            {
                /// Pipe holds QueryPlan, should not go out-of-scope
                auto pipe = QueryPipelineBuilder::getPipe(std::move(*pipeline));
                const auto & processors = pipe.getProcessors();

                if (settings.compact)
                    printPipelineCompact(processors, buf, settings.query_pipeline_options.header);
                else
                    printPipeline(processors, buf);
            }
            else
            {
                plan.explainPipeline(buf, settings.query_pipeline_options);
            }
        }
        else if (dynamic_cast<const ASTInsertQuery *>(ast.getExplainedQuery().get()))
        {
            InterpreterInsertQuery insert(ast.getExplainedQuery(), getContext());
            auto io = insert.execute();
            printPipeline(io.pipeline.getProcessors(), buf);
        }
        else
            throw Exception("Only SELECT and INSERT is supported for EXPLAIN PIPELINE query", ErrorCodes::INCORRECT_QUERY);
        break;
    }
    case ASTExplainQuery::QueryEstimates:
    {
        if (!dynamic_cast<const ASTSelectWithUnionQuery *>(ast.getExplainedQuery().get()))
            throw Exception("Only SELECT is supported for EXPLAIN ESTIMATE query", ErrorCodes::INCORRECT_QUERY);

        auto settings = checkAndGetSettings<QueryPlanSettings>(ast.getSettings());
        QueryPlan plan;

        InterpreterSelectWithUnionQuery interpreter(ast.getExplainedQuery(), getContext(), SelectQueryOptions());
        interpreter.buildQueryPlan(plan);
        // collect the selected marks, rows, parts during build query pipeline.
        plan.buildQueryPipeline(
            QueryPlanOptimizationSettings::fromContext(getContext()),
            BuildQueryPipelineSettings::fromContext(getContext()));

        if (settings.graph)
        {
            /// Pipe holds QueryPlan, should not go out-of-scope
            auto pipe = QueryPipelineBuilder::getPipe(std::move(*pipeline));
            const auto & processors = pipe.getProcessors();

            if (settings.compact)
                printPipelineCompact(processors, buf, settings.query_pipeline_options.header);
            else
                printPipeline(processors, buf);
        }
        else
        {
            plan.explainPipeline(buf, settings.query_pipeline_options);
        }
        if (settings.optimize)
            plan.optimize(QueryPlanOptimizationSettings::fromContext(getContext()));
        plan.explainEstimate(res_columns);
        insert_buf = false;
        break;
    }
    else if (dynamic_cast<const ASTInsertQuery *>(ast.getExplainedQuery().get()))
    case ASTExplainQuery::TableOverride:
    {
        InterpreterInsertQuery insert(ast.getExplainedQuery(), getContext());
        auto io = insert.execute();
        printPipeline(io.pipeline.getProcessors(), buf);
        if (auto * table_function = ast.getTableFunction()->as<ASTFunction>(); !table_function || table_function->name != "mysql")
        {
            throw Exception(ErrorCodes::INCORRECT_QUERY, "EXPLAIN TABLE OVERRIDE is not supported for the {}() table function", table_function->name);
        }
        auto storage = getContext()->getQueryContext()->executeTableFunction(ast.getTableFunction());
        auto metadata_snapshot = storage->getInMemoryMetadata();
        TableOverrideAnalyzer::Result override_info;
        TableOverrideAnalyzer override_analyzer(ast.getTableOverride());
        override_analyzer.analyze(metadata_snapshot, override_info);
        override_info.appendTo(buf);
        break;
    }
    else
        throw Exception("Only SELECT and INSERT is supported for EXPLAIN PIPELINE query", ErrorCodes::INCORRECT_QUERY);
}
else if (ast.getKind() == ASTExplainQuery::QueryEstimates)
{
    if (!dynamic_cast<const ASTSelectWithUnionQuery *>(ast.getExplainedQuery().get()))
        throw Exception("Only SELECT is supported for EXPLAIN ESTIMATE query", ErrorCodes::INCORRECT_QUERY);

    auto settings = checkAndGetSettings<QueryPlanSettings>(ast.getSettings());
    QueryPlan plan;

    InterpreterSelectWithUnionQuery interpreter(ast.getExplainedQuery(), getContext(), SelectQueryOptions());
    interpreter.buildQueryPlan(plan);
    // collect the selected marks, rows, parts during build query pipeline.
    plan.buildQueryPipeline(
        QueryPlanOptimizationSettings::fromContext(getContext()),
        BuildQueryPipelineSettings::fromContext(getContext()));

    if (settings.optimize)
        plan.optimize(QueryPlanOptimizationSettings::fromContext(getContext()));
    plan.explainEstimate(res_columns);
}
if (ast.getKind() != ASTExplainQuery::QueryEstimates)
if (insert_buf)
{
    if (single_line)
        res_columns[0]->insertData(buf.str().data(), buf.str().size());
@@ -401,7 +401,7 @@ InterpreterSelectQuery::InterpreterSelectQuery(
    view = nullptr;
}

if (try_move_to_prewhere && storage && storage->supportsPrewhere() && query.where() && !query.prewhere())
if (try_move_to_prewhere && storage && storage->canMoveConditionsToPrewhere() && query.where() && !query.prewhere())
{
    /// PREWHERE optimization: transfer some condition from WHERE to PREWHERE if enabled and viable
    if (const auto & column_sizes = storage->getColumnSizes(); !column_sizes.empty())
src/Interpreters/TableOverrideUtils.cpp (new file, 174 lines)
@@ -0,0 +1,174 @@
#include <Interpreters/TableOverrideUtils.h>

#include <Common/quoteString.h>
#include <DataTypes/IDataType.h>
#include <DataTypes/DataTypeFactory.h>
#include <IO/WriteBufferFromString.h>
#include <Interpreters/InDepthNodeVisitor.h>
#include <Interpreters/RequiredSourceColumnsVisitor.h>
#include <Parsers/ASTColumnDeclaration.h>
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ASTTableOverrides.h>
#include <Parsers/IAST.h>
#include <Storages/ColumnDefault.h>
#include <Storages/StorageInMemoryMetadata.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int INVALID_TABLE_OVERRIDE;
}

namespace
{

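/// Collects the source columns an expression depends on, like RequiredSourceColumnsMatcher,
/// but skips subtrees wrapped in assumeNotNull(); used below to find override key
/// expressions that may still evaluate to NULL.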
class MaybeNullableColumnsMatcher
{
public:
    using Visitor = ConstInDepthNodeVisitor<MaybeNullableColumnsMatcher, false>;
    using Data = RequiredSourceColumnsData;

    static bool needChildVisit(const ASTPtr & node, const ASTPtr & child)
    {
        if (const auto * f = node->as<ASTFunction>(); f && f->name == "assumeNotNull")
            return false;
        return RequiredSourceColumnsMatcher::needChildVisit(node, child);
    }

    static void visit(const ASTPtr & ast, Data & data)
    {
        RequiredSourceColumnsMatcher::visit(ast, data);
    }
};

using MaybeNullableColumnsVisitor = MaybeNullableColumnsMatcher::Visitor;

}

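/// Verifies that every column referenced by `ast` exists in `existing_types` and,
/// unless `allow_nulls` is set, that no referenced column outside assumeNotNull()
/// is Nullable; the referenced columns and their types are appended to `used_columns`.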
static void checkRequiredColumns(const IAST * ast, const NameToTypeMap & existing_types, NamesAndTypes & used_columns, const String & what, bool allow_nulls = false)
{
    if (!ast)
        return;
    RequiredSourceColumnsData columns_data;
    RequiredSourceColumnsVisitor(columns_data).visit(ast->clone());
    auto required_columns = columns_data.requiredColumns();
    for (const auto & column : required_columns)
    {
        auto type = existing_types.find(column);
        if (type == existing_types.end())
            throw Exception(ErrorCodes::INVALID_TABLE_OVERRIDE, "{} override refers to unknown column {}", what, backQuote(column));
    }
    if (!allow_nulls)
    {
        RequiredSourceColumnsData nullable_data;
        MaybeNullableColumnsVisitor(nullable_data).visit(ast->clone());
        for (const auto & column : nullable_data.requiredColumns())
        {
            if (existing_types.find(column)->second->isNullable())
                throw Exception(
                    ErrorCodes::INVALID_TABLE_OVERRIDE,
                    "{} override refers to nullable column {} (use assumeNotNull() if the column does not in fact contain NULL values)",
                    what,
                    backQuote(column));
        }
    }
    for (const auto & col : required_columns)
    {
        used_columns.push_back({col, existing_types.find(col)->second});
    }
}

void TableOverrideAnalyzer::analyze(const StorageInMemoryMetadata & metadata, Result & result) const
{
    for (const auto & column : metadata.columns)
        result.existing_types[column.name] = column.type;
    checkRequiredColumns(override->storage->order_by, result.existing_types, result.order_by_columns, "ORDER BY");
    checkRequiredColumns(override->storage->primary_key, result.existing_types, result.primary_key_columns, "PRIMARY KEY");
    checkRequiredColumns(override->storage->partition_by, result.existing_types, result.partition_by_columns, "PARTITION BY");
    checkRequiredColumns(override->storage->sample_by, result.existing_types, result.sample_by_columns, "SAMPLE BY");
    checkRequiredColumns(override->storage->ttl_table, result.existing_types, result.ttl_columns, "TTL");
    if (override->columns && override->columns->columns)
    {
        for (const auto & column_ast : override->columns->columns->children)
        {
            auto * override_column = column_ast->as<ASTColumnDeclaration>();
            auto override_type = DataTypeFactory::instance().get(override_column->type);
            auto found = metadata.columns.tryGetColumnOrSubcolumn(ColumnsDescription::GetFlags::All, override_column->name);
            std::optional<ColumnDefaultKind> override_default_kind;
            if (!override_column->default_specifier.empty())
                override_default_kind = columnDefaultKindFromString(override_column->default_specifier);
            if (found)
            {
                std::optional<ColumnDefaultKind> existing_default_kind;
                if (auto col_default = metadata.columns.getDefault(found->name))
                    existing_default_kind = col_default->kind;
                if (existing_default_kind != override_default_kind)
                    throw Exception(ErrorCodes::INVALID_TABLE_OVERRIDE, "column {}: modifying default specifier is not allowed", backQuote(override_column->name));
                result.modified_columns.push_back({found->name, override_type});
                /// TODO: validate that the original type can be converted to the overridden type
            }
            else
            {
                if (override_default_kind && *override_default_kind == ColumnDefaultKind::Alias)
                    result.added_columns.push_back({override_column->name, override_type});
                else
                    throw Exception(ErrorCodes::INVALID_TABLE_OVERRIDE, "column {}: can only add ALIAS columns", backQuote(override_column->name));
            }
            /// TODO: validate default and materialized expressions (use checkRequiredColumns, allowing nulls)
        }
    }
}

void TableOverrideAnalyzer::Result::appendTo(WriteBuffer & ostr)
{
    const auto & format_names = [&](const NamesAndTypes & names) -> String
    {
        WriteBufferFromOwnString buf;
        bool first = true;
        for (const auto & name : names)
        {
            if (!first)
                buf << ", ";
            first = false;
            buf << backQuote(name.name) << " ";
            auto old_type = existing_types.find(name.name);
            if (old_type != existing_types.end() && old_type->second != name.type)
                buf << old_type->second->getName() << " -> ";
            buf << name.type->getName();
        }
        return buf.str();
    };
    if (!modified_columns.empty())
    {
        ostr << "Modified columns: " << format_names(modified_columns) << "\n";
    }
    if (!added_columns.empty())
    {
        ostr << "Added columns: " << format_names(added_columns) << "\n";
    }
    if (!order_by_columns.empty())
    {
        ostr << "ORDER BY uses columns: " << format_names(order_by_columns) << "\n";
    }
    if (!primary_key_columns.empty())
    {
        ostr << "PRIMARY KEY uses columns: " << format_names(primary_key_columns) << "\n";
    }
    if (!partition_by_columns.empty())
    {
        ostr << "PARTITION BY uses columns: " << format_names(partition_by_columns) << "\n";
    }
    if (!sample_by_columns.empty())
    {
        ostr << "SAMPLE BY uses columns: " << format_names(sample_by_columns) << "\n";
    }
    if (!ttl_columns.empty())
    {
        ostr << "TTL uses columns: " << format_names(ttl_columns) << "\n";
    }
}

}
src/Interpreters/TableOverrideUtils.h (new file, 38 lines)
@@ -0,0 +1,38 @@
#pragma once

#include <Common/assert_cast.h>
#include <Core/Names.h>
#include <Core/NamesAndTypes.h>
#include <Parsers/ASTTableOverrides.h>

namespace DB
{

struct StorageInMemoryMetadata;

using NameToTypeMap = std::map<String, DataTypePtr>;

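/// Validates a TABLE OVERRIDE against a table's current metadata and records
/// which existing columns each overridden clause uses, plus any added or modified columns.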
struct TableOverrideAnalyzer
{
    struct Result
    {
        NameToTypeMap existing_types;
        NamesAndTypes order_by_columns;
        NamesAndTypes primary_key_columns;
        NamesAndTypes partition_by_columns;
        NamesAndTypes sample_by_columns;
        NamesAndTypes ttl_columns;
        NamesAndTypes added_columns;
        NamesAndTypes modified_columns;

        void appendTo(WriteBuffer &);
    };

    ASTTableOverride * override;

    explicit TableOverrideAnalyzer(ASTPtr ast) : override(assert_cast<ASTTableOverride *>(ast.get())) { }

    void analyze(const StorageInMemoryMetadata & metadata, Result & result) const;
};

}
@@ -18,6 +18,7 @@ public:
QueryPlan, /// 'EXPLAIN SELECT ...'
QueryPipeline, /// 'EXPLAIN PIPELINE ...'
QueryEstimates, /// 'EXPLAIN ESTIMATE ...'
TableOverride, /// 'EXPLAIN TABLE OVERRIDE ...'
};

explicit ASTExplainQuery(ExplainKind kind_) : kind(kind_) {}
@@ -45,8 +46,22 @@ public:
    ast_settings = std::move(settings_);
}

void setTableFunction(ASTPtr table_function_)
{
    children.emplace_back(table_function_);
    table_function = std::move(table_function_);
}

void setTableOverride(ASTPtr table_override_)
{
    children.emplace_back(table_override_);
    table_override = std::move(table_override_);
}

const ASTPtr & getExplainedQuery() const { return query; }
const ASTPtr & getSettings() const { return ast_settings; }
const ASTPtr & getTableFunction() const { return table_function; }
const ASTPtr & getTableOverride() const { return table_override; }

protected:
void formatQueryImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override
@@ -59,8 +74,21 @@ protected:
    ast_settings->formatImpl(settings, state, frame);
}

settings.ostr << settings.nl_or_ws;
query->formatImpl(settings, state, frame);
if (query)
{
    settings.ostr << settings.nl_or_ws;
    query->formatImpl(settings, state, frame);
}
if (table_function)
{
    settings.ostr << settings.nl_or_ws;
    table_function->formatImpl(settings, state, frame);
}
if (table_override)
{
    settings.ostr << settings.nl_or_ws;
    table_override->formatImpl(settings, state, frame);
}
}

private:
@@ -69,6 +97,10 @@ private:
ASTPtr query;
ASTPtr ast_settings;

/// Used by EXPLAIN TABLE OVERRIDE
ASTPtr table_function;
ASTPtr table_override;

static String toString(ExplainKind kind)
{
    switch (kind)
@@ -78,6 +110,7 @@ private:
        case QueryPlan: return "EXPLAIN";
        case QueryPipeline: return "EXPLAIN PIPELINE";
        case QueryEstimates: return "EXPLAIN ESTIMATE";
        case TableOverride: return "EXPLAIN TABLE OVERRIDE";
    }

    __builtin_unreachable();
@@ -31,13 +31,19 @@ void ASTTableOverride::formatImpl(const FormatSettings & settings_, FormatState
String hl_keyword = settings.hilite ? hilite_keyword : "";
String hl_none = settings.hilite ? hilite_none : "";

settings.ostr << hl_keyword << "TABLE OVERRIDE " << hl_none;
ASTIdentifier(table_name).formatImpl(settings, state, frame);
if (is_standalone)
{
    settings.ostr << hl_keyword << "TABLE OVERRIDE " << hl_none;
    ASTIdentifier(table_name).formatImpl(settings, state, frame);
}
if (!columns && (!storage || storage->children.empty()))
    return;
auto override_frame = frame;
++override_frame.indent;
settings.ostr << nl_or_ws << '(' << nl_or_nothing;
if (is_standalone)
{
    ++override_frame.indent;
    settings.ostr << nl_or_ws << '(' << nl_or_nothing;
}
String indent_str = settings.one_line ? "" : String(4 * override_frame.indent, ' ');
size_t override_elems = 0;
if (columns)
@@ -68,7 +74,8 @@ void ASTTableOverride::formatImpl(const FormatSettings & settings_, FormatState
    format_storage_elem(storage->ttl_table, "TTL");
}

settings.ostr << nl_or_nothing << ')';
if (is_standalone)
    settings.ostr << nl_or_nothing << ')';
}

ASTPtr ASTTableOverrideList::clone() const
@@ -15,7 +15,7 @@ class ASTStorage;

/// Storage and column overrides for a single table, for example:
///
/// TABLE OVERRIDE `foo` PARTITION BY toYYYYMM(`createtime`)
/// TABLE OVERRIDE `foo` (PARTITION BY toYYYYMM(`createtime`))
///
class ASTTableOverride : public IAST
{
@@ -23,6 +23,7 @@ public:
String table_name;
ASTColumns * columns = nullptr;
ASTStorage * storage = nullptr;
bool is_standalone = true;
String getID(char) const override { return "TableOverride " + table_name; }
ASTPtr clone() const override;
void formatImpl(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
@@ -969,14 +969,15 @@ bool ParserTableOverrideDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expecte
ASTPtr sample_by;
ASTPtr ttl_table;

if (!s_table_override.ignore(pos, expected))
    return false;

if (!table_name_p.parse(pos, table_name, expected))
    return false;

if (!lparen_p.ignore(pos, expected))
    return false;
if (is_standalone)
{
    if (!s_table_override.ignore(pos, expected))
        return false;
    if (!table_name_p.parse(pos, table_name, expected))
        return false;
    if (!lparen_p.ignore(pos, expected))
        return false;
}

while (true)
{
@@ -1034,7 +1035,7 @@ bool ParserTableOverrideDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expecte
    break;
}

if (!rparen_p.ignore(pos, expected))
if (is_standalone && !rparen_p.ignore(pos, expected))
    return false;

auto storage = std::make_shared<ASTStorage>();
@@ -1045,7 +1046,9 @@ bool ParserTableOverrideDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expecte
storage->set(storage->ttl_table, ttl_table);

auto res = std::make_shared<ASTTableOverride>();
res->table_name = table_name->as<ASTIdentifier>()->name();
if (table_name)
    res->table_name = table_name->as<ASTIdentifier>()->name();
res->is_standalone = is_standalone;
res->set(res->storage, storage);
if (columns)
    res->set(res->columns, columns);
@@ -389,6 +389,10 @@ protected:

class ParserTableOverrideDeclaration : public IParserBase
{
public:
    const bool is_standalone;
    ParserTableOverrideDeclaration(bool is_standalone_ = true) : is_standalone(is_standalone_) { }

protected:
    const char * getName() const override { return "table override declaration"; }
    bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
@@ -21,6 +21,7 @@ bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
ParserKeyword s_pipeline("PIPELINE");
ParserKeyword s_plan("PLAN");
ParserKeyword s_estimates("ESTIMATE");
ParserKeyword s_table_override("TABLE OVERRIDE");

if (s_explain.ignore(pos, expected))
{
@@ -36,6 +37,8 @@ bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
    kind = ASTExplainQuery::ExplainKind::QueryPlan; //-V1048
else if (s_estimates.ignore(pos, expected))
    kind = ASTExplainQuery::ExplainKind::QueryEstimates; //-V1048
else if (s_table_override.ignore(pos, expected))
    kind = ASTExplainQuery::ExplainKind::TableOverride;
}
else
    return false;
@@ -65,6 +68,17 @@ bool ParserExplainQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected
    else
        return false;
}
else if (kind == ASTExplainQuery::ExplainKind::TableOverride)
{
    ASTPtr table_function;
    if (!ParserFunction(true, true).parse(pos, table_function, expected))
        return false;
    ASTPtr table_override;
    if (!ParserTableOverrideDeclaration(false).parse(pos, table_override, expected))
        return false;
    explain_query->setTableFunction(table_function);
    explain_query->setTableOverride(table_override);
}
else if (select_p.parse(pos, query, expected) ||
    create_p.parse(pos, query, expected) ||
    insert_p.parse(pos, query, expected))
@@ -135,6 +135,9 @@ public:
/// Returns true if the storage supports queries with the PREWHERE section.
virtual bool supportsPrewhere() const { return false; }

/// Returns true if the storage supports optimization of moving conditions to PREWHERE section.
virtual bool canMoveConditionsToPrewhere() const { return supportsPrewhere(); }

/// Returns true if the storage replicates SELECT, INSERT and ALTER commands among replicas.
virtual bool supportsReplication() const { return false; }
@@ -53,6 +53,10 @@ public:
bool supportsSubcolumns() const override { return true; }
StoragePolicyPtr getStoragePolicy() const override;

/// Do not apply moving to PREWHERE optimization for distributed tables,
/// because we can't be sure that underlying table supports PREWHERE.
bool canMoveConditionsToPrewhere() const override { return false; }

bool isRemote() const override { return true; }

QueryProcessingStage::Enum
@@ -116,6 +116,15 @@ StoragePtr StorageMerge::getFirstTable(F && predicate) const
    return {};
}

template <typename F>
void StorageMerge::forEachTable(F && func) const
{
    getFirstTable([&func](const auto & table)
    {
        func(table);
        return false;
    });
}

bool StorageMerge::isRemote() const
{
@@ -123,6 +132,16 @@ bool StorageMerge::isRemote() const
    return first_remote_table != nullptr;
}

bool StorageMerge::canMoveConditionsToPrewhere() const
{
    /// NOTE: This check is used during query analysis as a condition for applying
    /// the "move to PREWHERE" optimization. However, it contains a logical race:
    /// if a new table that matches the regexp for the current storage but doesn't support PREWHERE
    /// appears after this check and before the "read" method is called, the optimized query may fail.
    /// Since this is quite a rare case, we just ignore the possibility.

    return getFirstTable([](const auto & table) { return !table->canMoveConditionsToPrewhere(); }) == nullptr;
}

bool StorageMerge::mayBenefitFromIndexForIn(const ASTPtr & left_in_operand, ContextPtr query_context, const StorageMetadataPtr & /*metadata_snapshot*/) const
{
@@ -771,10 +790,15 @@ void StorageMerge::convertingSourceStream(

IStorage::ColumnSizeByName StorageMerge::getColumnSizes() const
{
    auto first_materialized_mysql = getFirstTable([](const StoragePtr & table) { return table && table->getName() == "MaterializedMySQL"; });
    if (!first_materialized_mysql)
        return {};
    return first_materialized_mysql->getColumnSizes();
    ColumnSizeByName column_sizes;

    forEachTable([&](const auto & table)
    {
        for (const auto & [name, size] : table->getColumnSizes())
            column_sizes[name].add(size);
    });

    return column_sizes;
}
@@ -22,10 +22,12 @@ public:

/// The check is delayed to the read method. It checks the support of the tables used.
bool supportsSampling() const override { return true; }
bool supportsPrewhere() const override { return true; }
bool supportsFinal() const override { return true; }
bool supportsIndexForIn() const override { return true; }
bool supportsSubcolumns() const override { return true; }
bool supportsPrewhere() const override { return true; }

bool canMoveConditionsToPrewhere() const override;

QueryProcessingStage::Enum
getQueryProcessingStage(ContextPtr, QueryProcessingStage::Enum, const StorageMetadataPtr &, SelectQueryInfo &) const override;
@@ -75,6 +77,9 @@ private:
template <typename F>
StoragePtr getFirstTable(F && predicate) const;

template <typename F>
void forEachTable(F && func) const;

DatabaseTablesIteratorPtr getDatabaseIterator(const String & database_name, ContextPtr context) const;

DatabaseTablesIterators getDatabaseIterators(ContextPtr context) const;
@@ -11,6 +11,7 @@
#include <Interpreters/InterpreterAlterQuery.h>
#include <Interpreters/InterpreterDropQuery.h>
#include <Interpreters/QueryAliasesVisitor.h>
#include <Interpreters/QueryNormalizer.h>
#include <Interpreters/getTableExpressions.h>
#include <Parsers/ASTAlterQuery.h>
#include <Parsers/ASTAsterisk.h>
@@ -91,10 +92,6 @@ namespace
data.is_hop = t->name == "hop";
auto temp_node = t->clone();
temp_node->setAlias("");
if (startsWith(t->arguments->children[0]->getColumnName(), "toDateTime"))
    throw Exception(
        "The first argument of time window function should not be a constant value.",
        ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW);
if (!data.window_function)
{
    data.serialized_window_function = serializeAST(*temp_node);
@@ -565,7 +562,13 @@ std::shared_ptr<ASTCreateQuery> StorageWindowView::getInnerTableCreateQuery(
inner_create_query->setDatabase(database_name);
inner_create_query->setTable(table_name);

auto inner_select_query = std::static_pointer_cast<ASTSelectQuery>(inner_query);
Aliases aliases;
QueryAliasesVisitor(aliases).visit(inner_query);
auto inner_query_normalized = inner_query->clone();
QueryNormalizer::Data normalizer_data(aliases, {}, false, getContext()->getSettingsRef(), false);
QueryNormalizer(normalizer_data).visit(inner_query_normalized);

auto inner_select_query = std::static_pointer_cast<ASTSelectQuery>(inner_query_normalized);

auto t_sample_block
    = InterpreterSelectQuery(
@@ -582,6 +585,8 @@ std::shared_ptr<ASTCreateQuery> StorageWindowView::getInnerTableCreateQuery(
    columns_list->children.push_back(column_window);
}

bool has_window_id = false;

for (const auto & column : t_sample_block.getColumnsWithTypeAndName())
{
    ParserIdentifierWithOptionalParameters parser;
@@ -591,8 +596,18 @@ std::shared_ptr<ASTCreateQuery> StorageWindowView::getInnerTableCreateQuery(
    column_dec->name = column.name;
    column_dec->type = ast;
    columns_list->children.push_back(column_dec);
    if (!is_time_column_func_now && !has_window_id)
    {
        if (startsWith(column.name, "windowID"))
            has_window_id = true;
    }
}

if (!is_time_column_func_now && !has_window_id)
    throw Exception(
        "The first argument of time window function should not be a constant value.",
        ErrorCodes::QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW);

ToIdentifierMatcher::Data query_data;
query_data.window_id_name = window_id_name;
query_data.window_id_alias = window_id_alias;
@@ -634,10 +649,15 @@ std::shared_ptr<ASTCreateQuery> StorageWindowView::getInnerTableCreateQuery(
    /// tumble/hop -> windowID
    func_window_visitor.visit(node);
    to_identifier_visitor.visit(node);
    QueryNormalizer(normalizer_data).visit(node);
    node->setAlias("");
    new_storage->set(field, node);
}
};

for (auto & [alias_name, ast] : aliases)
    ast = std::make_shared<ASTIdentifier>(ast->getColumnName());

visit(storage->partition_by, new_storage->partition_by);
visit(storage->primary_key, new_storage->primary_key);
visit(storage->order_by, new_storage->order_by);
@@ -1079,9 +1079,41 @@ def table_overrides(clickhouse_node, mysql_node, service_name):
    check_query(clickhouse_node, "SELECT count() FROM table_overrides.t1", "1000\n")
    mysql_node.query("INSERT INTO table_overrides.t1 VALUES(1001, '2021-10-01 00:00:00', 42.0)")
    check_query(clickhouse_node, "SELECT count() FROM table_overrides.t1", "1001\n")

    explain_with_table_func = f"EXPLAIN TABLE OVERRIDE mysql('{service_name}:3306', 'table_overrides', 't1', 'root', 'clickhouse')"

    for what in ['ORDER BY', 'PRIMARY KEY', 'SAMPLE BY', 'PARTITION BY', 'TTL']:
        with pytest.raises(QueryRuntimeException) as exc:
            clickhouse_node.query(f"{explain_with_table_func} {what} temperature")
        assert f'{what} override refers to nullable column `temperature`' in \
            str(exc.value)
        assert f"{what} uses columns: `temperature` Nullable(Float32)" in \
            clickhouse_node.query(f"{explain_with_table_func} {what} assumeNotNull(temperature)")

    for testcase in [
        ('COLUMNS (temperature Nullable(Float32) MATERIALIZED 1.0)',
         'column `temperature`: modifying default specifier is not allowed'),
        ('COLUMNS (sensor_id UInt64 ALIAS 42)',
         'column `sensor_id`: modifying default specifier is not allowed')
    ]:
        with pytest.raises(QueryRuntimeException) as exc:
            clickhouse_node.query(f"{explain_with_table_func} {testcase[0]}")
        assert testcase[1] in str(exc.value)

    for testcase in [
        ('COLUMNS (temperature Nullable(Float64))',
         'Modified columns: `temperature` Nullable(Float32) -> Nullable(Float64)'),
        ('COLUMNS (temp_f Nullable(Float32) ALIAS if(temperature IS NULL, NULL, (temperature * 9.0 / 5.0) + 32),\
            temp_k Nullable(Float32) ALIAS if(temperature IS NULL, NULL, temperature + 273.15))',
         'Added columns: `temp_f` Nullable(Float32), `temp_k` Nullable(Float32)')
    ]:
        assert testcase[1] in clickhouse_node.query(
            f"{explain_with_table_func} {testcase[0]}")

    clickhouse_node.query("DROP DATABASE IF EXISTS table_overrides")
    mysql_node.query("DROP DATABASE IF EXISTS table_overrides")


def materialized_database_support_all_kinds_of_mysql_datatype(clickhouse_node, mysql_node, service_name):
    mysql_node.query("DROP DATABASE IF EXISTS test_database_datatype")
    clickhouse_node.query("DROP DATABASE IF EXISTS test_database_datatype")
@@ -338,6 +338,8 @@ def test_postgres_odbc_hashed_dictionary_with_schema(started_cluster):
    cursor.execute("truncate table clickhouse.test_table")
    cursor.execute("insert into clickhouse.test_table values(1, 1, 'hello'),(2, 2, 'world')")
    node1.query("SYSTEM RELOAD DICTIONARY postgres_odbc_hashed")
    node1.exec_in_container(["ss", "-K", "dport", "postgresql"], privileged=True, user='root')
    node1.query("SYSTEM RELOAD DICTIONARY postgres_odbc_hashed")
    assert_eq_with_retry(node1, "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(1))", "hello")
    assert_eq_with_retry(node1, "select dictGetString('postgres_odbc_hashed', 'column2', toUInt64(2))", "world")
@@ -1,8 +1,12 @@
---TUMBLE---
||---DEFAULT ENGINE WITH DATA COLUMN ALIAS---
CREATE TABLE test_01047.`.inner.wv`\n(\n `b` Int32,\n `windowID(timestamp, toIntervalSecond(\'1\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nPRIMARY KEY `windowID(timestamp, toIntervalSecond(\'1\'))`\nORDER BY (`windowID(timestamp, toIntervalSecond(\'1\'))`, b)\nSETTINGS index_granularity = 8192
||---WINDOW COLUMN NAME---
CREATE TABLE test_01047.`.inner.wv`\n(\n `windowID(timestamp, toIntervalSecond(\'1\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nORDER BY `windowID(timestamp, toIntervalSecond(\'1\'))`\nSETTINGS index_granularity = 8192
||---WINDOW COLUMN ALIAS---
CREATE TABLE test_01047.`.inner.wv`\n(\n `windowID(timestamp, toIntervalSecond(\'1\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nORDER BY `windowID(timestamp, toIntervalSecond(\'1\'))`\nSETTINGS index_granularity = 8192
||---DATA COLUMN ALIAS---
CREATE TABLE test_01047.`.inner.wv`\n(\n `b` Int32,\n `windowID(timestamp, toIntervalSecond(\'1\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nORDER BY b\nSETTINGS index_granularity = 8192
||---IDENTIFIER---
CREATE TABLE test_01047.`.inner.wv`\n(\n `b` Int32,\n `windowID(timestamp, toIntervalSecond(\'1\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nPRIMARY KEY `windowID(timestamp, toIntervalSecond(\'1\'))`\nORDER BY (`windowID(timestamp, toIntervalSecond(\'1\'))`, b)\nSETTINGS index_granularity = 8192
||---FUNCTION---
@@ -10,10 +14,14 @@ CREATE TABLE test_01047.`.inner.wv`\n(\n `plus(a, b)` Int64,\n `windowID(t
||---PARTITION---
CREATE TABLE test_01047.`.inner.wv`\n(\n `windowID(____timestamp, toIntervalSecond(\'1\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nPARTITION BY `windowID(____timestamp, toIntervalSecond(\'1\'))`\nORDER BY `windowID(____timestamp, toIntervalSecond(\'1\'))`\nSETTINGS index_granularity = 8192
---HOP---
||---DEFAULT ENGINE WITH DATA COLUMN ALIAS---
CREATE TABLE test_01047.`.inner.wv`\n(\n `b` Int32,\n `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nPRIMARY KEY `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`\nORDER BY (`windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`, b)\nSETTINGS index_granularity = 8192
||---WINDOW COLUMN NAME---
CREATE TABLE test_01047.`.inner.wv`\n(\n `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nORDER BY `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`\nSETTINGS index_granularity = 8192
||---WINDOW COLUMN ALIAS---
CREATE TABLE test_01047.`.inner.wv`\n(\n `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nORDER BY `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`\nSETTINGS index_granularity = 8192
||---DATA COLUMN ALIAS---
CREATE TABLE test_01047.`.inner.wv`\n(\n `b` Int32,\n `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nORDER BY b\nSETTINGS index_granularity = 8192
||---IDENTIFIER---
CREATE TABLE test_01047.`.inner.wv`\n(\n `b` Int32,\n `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))` UInt32,\n `count(a)` AggregateFunction(count, Int32)\n)\nENGINE = AggregatingMergeTree\nPRIMARY KEY `windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`\nORDER BY (`windowID(timestamp, toIntervalSecond(\'1\'), toIntervalSecond(\'3\'))`, b)\nSETTINGS index_granularity = 8192
||---FUNCTION---
@@ -9,6 +9,12 @@ DROP TABLE IF EXISTS test_01047.mt;
CREATE TABLE test_01047.mt(a Int32, b Int32, timestamp DateTime) ENGINE=MergeTree ORDER BY tuple();

SELECT '---TUMBLE---';
SELECT '||---DEFAULT ENGINE WITH DATA COLUMN ALIAS---';
DROP TABLE IF EXISTS test_01047.wv;
DROP TABLE IF EXISTS test_01047.`.inner.wv`;
CREATE WINDOW VIEW test_01047.wv AS SELECT count(a) AS count, b as id FROM test_01047.mt GROUP BY id, tumble(timestamp, INTERVAL '1' SECOND);
SHOW CREATE TABLE test_01047.`.inner.wv`;

SELECT '||---WINDOW COLUMN NAME---';
DROP TABLE IF EXISTS test_01047.wv;
DROP TABLE IF EXISTS test_01047.`.inner.wv`;
@@ -21,6 +27,12 @@ DROP TABLE IF EXISTS test_01047.`.inner.wv`;
CREATE WINDOW VIEW test_01047.wv ENGINE AggregatingMergeTree ORDER BY wid AS SELECT count(a) AS count, tumble(timestamp, INTERVAL '1' SECOND) AS wid FROM test_01047.mt GROUP BY wid;
SHOW CREATE TABLE test_01047.`.inner.wv`;

SELECT '||---DATA COLUMN ALIAS---';
DROP TABLE IF EXISTS test_01047.wv;
DROP TABLE IF EXISTS test_01047.`.inner.wv`;
CREATE WINDOW VIEW test_01047.wv ENGINE AggregatingMergeTree ORDER BY id AS SELECT count(a) AS count, b as id FROM test_01047.mt GROUP BY id, tumble(timestamp, INTERVAL '1' SECOND);
SHOW CREATE TABLE test_01047.`.inner.wv`;

SELECT '||---IDENTIFIER---';
DROP TABLE IF EXISTS test_01047.wv;
DROP TABLE IF EXISTS test_01047.`.inner.wv`;
@@ -41,6 +53,12 @@ SHOW CREATE TABLE test_01047.`.inner.wv`;

SELECT '---HOP---';
SELECT '||---DEFAULT ENGINE WITH DATA COLUMN ALIAS---';
DROP TABLE IF EXISTS test_01047.wv;
DROP TABLE IF EXISTS test_01047.`.inner.wv`;
CREATE WINDOW VIEW test_01047.wv AS SELECT count(a) AS count, b as id FROM test_01047.mt GROUP BY id, hop(timestamp, INTERVAL '1' SECOND, INTERVAL '3' SECOND);
SHOW CREATE TABLE test_01047.`.inner.wv`;

SELECT '||---WINDOW COLUMN NAME---';
DROP TABLE IF EXISTS test_01047.wv;
DROP TABLE IF EXISTS test_01047.`.inner.wv`;
@@ -53,6 +71,12 @@ DROP TABLE IF EXISTS test_01047.`.inner.wv`;
CREATE WINDOW VIEW test_01047.wv ENGINE AggregatingMergeTree ORDER BY wid AS SELECT count(a) AS count, hop(timestamp, INTERVAL '1' SECOND, INTERVAL '3' SECOND) AS wid FROM test_01047.mt GROUP BY wid;
SHOW CREATE TABLE test_01047.`.inner.wv`;

SELECT '||---DATA COLUMN ALIAS---';
DROP TABLE IF EXISTS test_01047.wv;
DROP TABLE IF EXISTS test_01047.`.inner.wv`;
CREATE WINDOW VIEW test_01047.wv ENGINE AggregatingMergeTree ORDER BY id AS SELECT count(a) AS count, b as id FROM test_01047.mt GROUP BY id, hop(timestamp, INTERVAL '1' SECOND, INTERVAL '3' SECOND);
SHOW CREATE TABLE test_01047.`.inner.wv`;

SELECT '||---IDENTIFIER---';
DROP TABLE IF EXISTS test_01047.wv;
DROP TABLE IF EXISTS test_01047.`.inner.wv`;
@@ -0,0 +1,13 @@
SELECT count()
FROM t_02156_merge1
PREWHERE k = 1
WHERE (k = 1) AND notEmpty(v)
2
SELECT count()
FROM t_02156_merge2
WHERE (k = 1) AND notEmpty(v)
2
SELECT count()
FROM t_02156_merge3
WHERE (k = 1) AND notEmpty(v)
2
tests/queries/0_stateless/02156_storage_merge_prewhere.sql (new file, 38 lines)
@@ -0,0 +1,38 @@
DROP TABLE IF EXISTS t_02156_mt1;
DROP TABLE IF EXISTS t_02156_mt2;
DROP TABLE IF EXISTS t_02156_log;
DROP TABLE IF EXISTS t_02156_dist;
DROP TABLE IF EXISTS t_02156_merge1;
DROP TABLE IF EXISTS t_02156_merge2;
DROP TABLE IF EXISTS t_02156_merge3;

CREATE TABLE t_02156_mt1 (k UInt32, v String) ENGINE = MergeTree ORDER BY k;
CREATE TABLE t_02156_mt2 (k UInt32, v String) ENGINE = MergeTree ORDER BY k;
CREATE TABLE t_02156_log (k UInt32, v String) ENGINE = Log;

CREATE TABLE t_02156_dist (k UInt32, v String) ENGINE = Distributed(test_shard_localhost, currentDatabase(), t_02156_mt1);

CREATE TABLE t_02156_merge1 (k UInt32, v String) ENGINE = Merge(currentDatabase(), 't_02156_mt1|t_02156_mt2');
CREATE TABLE t_02156_merge2 (k UInt32, v String) ENGINE = Merge(currentDatabase(), 't_02156_mt1|t_02156_log');
CREATE TABLE t_02156_merge3 (k UInt32, v String) ENGINE = Merge(currentDatabase(), 't_02156_mt2|t_02156_dist');

INSERT INTO t_02156_mt1 SELECT number, toString(number) FROM numbers(10000);
INSERT INTO t_02156_mt2 SELECT number, toString(number) FROM numbers(10000);
INSERT INTO t_02156_log SELECT number, toString(number) FROM numbers(10000);

EXPLAIN SYNTAX SELECT count() FROM t_02156_merge1 WHERE k = 1 AND notEmpty(v);
SELECT count() FROM t_02156_merge1 WHERE k = 1 AND notEmpty(v);

EXPLAIN SYNTAX SELECT count() FROM t_02156_merge2 WHERE k = 1 AND notEmpty(v);
SELECT count() FROM t_02156_merge2 WHERE k = 1 AND notEmpty(v);

EXPLAIN SYNTAX SELECT count() FROM t_02156_merge3 WHERE k = 1 AND notEmpty(v);
SELECT count() FROM t_02156_merge3 WHERE k = 1 AND notEmpty(v);

DROP TABLE IF EXISTS t_02156_mt1;
DROP TABLE IF EXISTS t_02156_mt2;
DROP TABLE IF EXISTS t_02156_log;
DROP TABLE IF EXISTS t_02156_dist;
DROP TABLE IF EXISTS t_02156_merge1;
DROP TABLE IF EXISTS t_02156_merge2;
DROP TABLE IF EXISTS t_02156_merge3;