Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 07:31:57 +00:00)

Commit dd54e5afe8: Merge branch 'master' into in_memory_raft
@@ -851,7 +851,7 @@ public:
     }

     /// Saturation can occur if 29 Feb is mapped to non-leap year.
-    inline time_t addYears(time_t t, Int64 delta) const
+    inline NO_SANITIZE_UNDEFINED time_t addYears(time_t t, Int64 delta) const
    {
         DayNum result_day = addYears(toDayNum(t), delta);

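To make the saturation noted in the comment above concrete, here is a small, hypothetical SQL-level illustration (using ClickHouse's `addYears` function; the result shown is what saturation implies rather than verified output):

``` sql
-- 2020-02-29 plus one year saturates to 2021-02-28, since 2021 is not a leap year.
SELECT addYears(toDate('2020-02-29'), 1);
```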
@@ -104,8 +104,3 @@ template <> struct is_big_int<wUInt256> { static constexpr bool value = true; };
 template <typename T>
 inline constexpr bool is_big_int_v = is_big_int<T>::value;
-
-template <typename To, typename From>
-inline To bigint_cast(const From & x [[maybe_unused]])
-{
-    return static_cast<To>(x);
-}
contrib/cassandra (vendored submodule)
@@ -1 +1 @@
-Subproject commit 9cbc1a806df5d40fddbf84533b9873542c6513d8
+Subproject commit b446d7eb68e6962f431e2b3771313bfe9a2bbd93
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3

 import argparse
 import clickhouse_driver
@@ -114,6 +114,10 @@ CREATE TABLE big_table (name String, value UInt32) ENGINE = S3('https://storage.
 - `_path` — Path to the file.
 - `_file` — Name of the file.

+**See Also**
+
+- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
+
 ## S3-related settings {#settings}

 The following settings can be set before query execution or placed into configuration file.
@@ -124,8 +128,29 @@ The following settings can be set before query execution or placed into configuration file.

 Security consideration: if malicious user can specify arbitrary S3 URLs, `s3_max_redirects` must be set to zero to avoid [SSRF](https://en.wikipedia.org/wiki/Server-side_request_forgery) attacks; or alternatively, `remote_host_filter` must be specified in server configuration.

-**See Also**
+### Endpoint-based settings {#endpointsettings}

-- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
+The following settings can be specified in configuration file for given endpoint (which will be matched by exact prefix of a URL):
+
+- `endpoint` — Mandatory. Specifies prefix of an endpoint.
+- `access_key_id` and `secret_access_key` — Optional. Specifies credentials to use with given endpoint.
+- `use_environment_credentials` — Optional, default value is `false`. If set to `true`, S3 client will try to obtain credentials from environment variables and Amazon EC2 metadata for given endpoint.
+- `header` — Optional, can be specified multiple times. Adds specified HTTP header to a request to given endpoint.
+
+This configuration also applies to S3 disks in `MergeTree` table engine family.
+
+Example:
+
+```
+<s3>
+    <endpoint-name>
+        <endpoint>https://storage.yandexcloud.net/my-test-bucket-768/</endpoint>
+        <!-- <access_key_id>ACCESS_KEY_ID</access_key_id> -->
+        <!-- <secret_access_key>SECRET_ACCESS_KEY</secret_access_key> -->
+        <!-- <use_environment_credentials>false</use_environment_credentials> -->
+        <!-- <header>Authorization: Bearer SOME-TOKEN</header> -->
+    </endpoint-name>
+</s3>
+```

 [Original article](https://clickhouse.tech/docs/en/operations/table_engines/s3/) <!--hide-->
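As a minimal sketch of how the endpoint-based settings above would be picked up, a table whose URL matches the configured `endpoint` prefix could look like this (the table name and the `data.csv` object are hypothetical):

``` sql
CREATE TABLE s3_endpoint_example (name String, value UInt32)
    ENGINE = S3('https://storage.yandexcloud.net/my-test-bucket-768/data.csv', 'CSV');
```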
@@ -254,7 +254,6 @@ ENGINE = MergeTree()
 PARTITION BY toYYYYMM(EventDate)
 ORDER BY (CounterID, EventDate, intHash32(UserID))
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 ``` sql
@@ -450,7 +449,6 @@ ENGINE = CollapsingMergeTree(Sign)
 PARTITION BY toYYYYMM(StartDate)
 ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.
@@ -2489,7 +2489,6 @@ Possible values:

 Default value: `0`.

-
 ## aggregate_functions_null_for_empty {#aggregate_functions_null_for_empty}

 Enables or disables rewriting all aggregate functions in a query, adding [-OrNull](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-ornull) suffix to them. Enable it for SQL standard compatibility.
@@ -2523,7 +2522,6 @@ With `aggregate_functions_null_for_empty = 1` the result would be:
 └───────────────┴──────────────┘
 ```

-
 ## union_default_mode {#union-default-mode}

 Sets a mode for combining `SELECT` query results. The setting is only used when shared with [UNION](../../sql-reference/statements/select/union.md) without explicitly specifying the `UNION ALL` or `UNION DISTINCT`.
@@ -2538,7 +2536,6 @@ Default value: `''`.

 See examples in [UNION](../../sql-reference/statements/select/union.md).

-
 ## data_type_default_nullable {#data_type_default_nullable}

 Allows data types without explicit modifiers [NULL or NOT NULL](../../sql-reference/statements/create/table.md#null-modifiers) in column definition will be [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable).
@@ -2550,7 +2547,6 @@ Possible values:

 Default value: `0`.

-
 ## execute_merges_on_single_replica_time_threshold {#execute-merges-on-single-replica-time-threshold}

 Enables special logic to perform merges on replicas.
@@ -2570,4 +2566,15 @@ High values for that threshold may lead to replication delays.

 It can be useful when merges are CPU bounded not IO bounded (performing heavy data compression, calculating aggregate functions or default expressions that require a large amount of calculations, or just very high number of tiny merges).

+## max_final_threads {#max-final-threads}
+
+Sets the maximum number of parallel threads for the `SELECT` query data read phase with the [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier.
+
+Possible values:
+
+- Positive integer.
+- 0 or 1 — Disabled. `SELECT` queries are executed in a single thread.
+
+Default value: `16`.
+
 [Original article](https://clickhouse.tech/docs/en/operations/settings/settings/) <!-- hide -->
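A short, hypothetical illustration of the new `max_final_threads` setting (the table and column names are made up; any `MergeTree`-family table supporting `FINAL` would do):

``` sql
SELECT key, max(value)
FROM replacing_table FINAL
GROUP BY key
SETTINGS max_final_threads = 8;
```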
@@ -413,4 +413,68 @@ Result:

 - [log(x)](../../sql-reference/functions/math-functions.md#logx-lnx)

+## sign(x) {#signx}
+
+The `sign` function can extract the sign of a real number.
+
+**Syntax**
+
+``` sql
+sign(x)
+```
+
+**Parameters**
+
+- `x` — Values from `-∞` to `+∞`. Supports all numeric types in ClickHouse.
+
+**Returned value**
+
+- -1 for `x < 0`
+- 0 for `x = 0`
+- 1 for `x > 0`
+
+**Example**
+
+Query:
+
+``` sql
+SELECT sign(0);
+```
+
+Result:
+
+``` text
+┌─sign(0)─┐
+│       0 │
+└─────────┘
+```
+
+Query:
+
+``` sql
+SELECT sign(1);
+```
+
+Result:
+
+``` text
+┌─sign(1)─┐
+│       1 │
+└─────────┘
+```
+
+Query:
+
+``` sql
+SELECT sign(-1);
+```
+
+Result:
+
+``` text
+┌─sign(-1)─┐
+│      -1 │
+└──────────┘
+```
+
 [Original article](https://clickhouse.tech/docs/en/query_language/functions/math_functions/) <!--hide-->
@@ -45,7 +45,7 @@ Creates a table with the same structure as another table. You can specify a diff
 CREATE TABLE [IF NOT EXISTS] [db.]table_name AS table_function()
 ```

-Creates a table with the structure and data returned by a [table function](../../../sql-reference/table-functions/index.md#table-functions).
+Creates a table with the same result as that of the [table function](../../../sql-reference/table-functions/index.md#table-functions) specified. The created table will also work in the same way as the corresponding table function that was specified.

 ``` sql
 CREATE TABLE [IF NOT EXISTS] [db.]table_name ENGINE = engine AS SELECT ...
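A hedged sketch of the documented `AS table_function()` form, using the built-in `numbers` table function (the table name is illustrative):

``` sql
CREATE TABLE first_ten AS numbers(10);
SELECT count() FROM first_ten;
```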
@@ -13,7 +13,7 @@ Basic query format:
 INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
 ```

 You can specify a list of columns to insert using the `(c1, c2, c3)`. You can also use an expression with column [matcher](../../sql-reference/statements/select/index.md#asterisk) such as `*` and/or [modifiers](../../sql-reference/statements/select/index.md#select-modifiers) such as [APPLY](../../sql-reference/statements/select/index.md#apply-modifier), [EXCEPT](../../sql-reference/statements/select/index.md#apply-modifier), [REPLACE](../../sql-reference/statements/select/index.md#replace-modifier).

 For example, consider the table:

@@ -30,7 +30,6 @@ CREATE TABLE insert_select_testtable
 )
 ENGINE = MergeTree()
 ORDER BY a
-SETTINGS index_granularity = 8192
 ```

 ``` sql
@@ -55,7 +54,7 @@ SELECT * FROM insert_select_testtable;
 │ 1 │ a │ 1 │
 └───┴───┴───┘
 ```

 In this example, we see that the second inserted row has `a` and `c` columns filled by the passed values, and `b` filled with value by default.

 If a list of columns doesn't include all existing columns, the rest of the columns are filled with:
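For illustration, one plausible use of the column matcher and modifiers described above against `insert_select_testtable` (assuming its columns `a`, `b`, `c`; `b` is skipped and filled with its default):

``` sql
INSERT INTO insert_select_testtable (* EXCEPT(b)) VALUES (2, 2);
```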
@@ -25,6 +25,8 @@ It is applicable when selecting data from tables that use the [MergeTree](../../
 - [Replicated](../../../engines/table-engines/mergetree-family/replication.md) versions of `MergeTree` engines.
 - [View](../../../engines/table-engines/special/view.md), [Buffer](../../../engines/table-engines/special/buffer.md), [Distributed](../../../engines/table-engines/special/distributed.md), and [MaterializedView](../../../engines/table-engines/special/materializedview.md) engines that operate over other engines, provided they were created over `MergeTree`-engine tables.

+Now `SELECT` queries with `FINAL` are executed in parallel and slightly faster. But there are drawbacks (see below). The [max_final_threads](../../../operations/settings/settings.md#max-final-threads) setting limits the number of threads used.
+
 ### Drawbacks {#drawbacks}

 Queries that use `FINAL` are executed slightly slower than similar queries that don’t, because:
@@ -256,7 +256,6 @@ ENGINE = MergeTree()
 PARTITION BY toYYYYMM(EventDate)
 ORDER BY (CounterID, EventDate, intHash32(UserID))
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 ``` sql
@@ -452,7 +451,6 @@ ENGINE = CollapsingMergeTree(Sign)
 PARTITION BY toYYYYMM(StartDate)
 ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.
@@ -256,7 +256,6 @@ ENGINE = MergeTree()
 PARTITION BY toYYYYMM(EventDate)
 ORDER BY (CounterID, EventDate, intHash32(UserID))
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 ``` sql
@@ -452,7 +451,6 @@ ENGINE = CollapsingMergeTree(Sign)
 PARTITION BY toYYYYMM(StartDate)
 ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.
@@ -262,7 +262,6 @@ ENGINE = MergeTree()
 PARTITION BY toYYYYMM(EventDate)
 ORDER BY (CounterID, EventDate, intHash32(UserID))
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 ``` sql
@@ -458,7 +457,6 @@ ENGINE = CollapsingMergeTree(Sign)
 PARTITION BY toYYYYMM(StartDate)
 ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.
@@ -254,7 +254,6 @@ ENGINE = MergeTree()
 PARTITION BY toYYYYMM(EventDate)
 ORDER BY (CounterID, EventDate, intHash32(UserID))
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 ``` sql
@@ -450,7 +449,6 @@ ENGINE = CollapsingMergeTree(Sign)
 PARTITION BY toYYYYMM(StartDate)
 ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance) or try some [alternative interface](../interfaces/index.md) if you want.
@@ -2437,4 +2437,15 @@ SELECT SUM(-1), MAX(0) FROM system.one WHERE 0;

 This setting is useful when merge speed is limited by CPU rather than by I/O (when performing heavy data compression, calculating aggregate functions or default expressions that require a large amount of computation, or simply when there is a very large number of tiny merges).

+## max_final_threads {#max-final-threads}
+
+Sets the maximum number of parallel threads for the data read phase of a `SELECT` query with the [FINAL](../../sql-reference/statements/select/from.md#select-from-final) modifier.
+
+Possible values:
+
+- Positive integer.
+- 0 or 1 — Disabled. `SELECT` queries are executed in a single thread.
+
+Default value: `16`.
+
 [Original article](https://clickhouse.tech/docs/ru/operations/settings/settings/) <!--hide-->
@@ -13,7 +13,7 @@ toc_title: INSERT INTO
 INSERT INTO [db.]table [(c1, c2, c3)] VALUES (v11, v12, v13), (v21, v22, v23), ...
 ```

 You can specify a list of columns to insert using the `(c1, c2, c3)` syntax. You can also use an expression with an [asterisk](../../sql-reference/statements/select/index.md#asterisk) and/or modifiers such as `APPLY`, `EXCEPT`, `REPLACE`.

 As an example, consider the table:

@@ -30,13 +30,12 @@ SHOW CREATE insert_select_testtable
 `c` Int8
 )
 ENGINE = MergeTree()
-ORDER BY a
-SETTINGS index_granularity = 8192 │
+ORDER BY a │
 └──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
 ```

 ``` sql
 INSERT INTO insert_select_testtable (*) VALUES (1, 'a', 1)
 ```

 If you want to insert data into all columns except 'b', you need to pass as many values as the number of columns you specified in brackets:
@@ -25,6 +25,8 @@ toc_title: FROM
 - [Replicated](../../../engines/table-engines/mergetree-family/replication.md) versions of `MergeTree` engines.
 - [View](../../../engines/table-engines/special/view.md), [Buffer](../../../engines/table-engines/special/buffer.md), [Distributed](../../../engines/table-engines/special/distributed.md), and [MaterializedView](../../../engines/table-engines/special/materializedview.md) engines that operate over other engines, provided they were created over tables with `MergeTree`-family engines.

+Now `SELECT` queries with `FINAL` are executed in parallel and therefore slightly faster. But there are serious drawbacks to using it (see below). The [max_final_threads](../../../operations/settings/settings.md#max-final-threads) setting sets the maximum number of threads.
+
 ### Drawbacks {#drawbacks}

 Queries that use `FINAL` are executed slightly slower than similar queries without it, because:
@@ -254,7 +254,6 @@ ENGINE = MergeTree()
 PARTITION BY toYYYYMM(EventDate)
 ORDER BY (CounterID, EventDate, intHash32(UserID))
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 ``` sql
@@ -450,7 +449,6 @@ ENGINE = CollapsingMergeTree(Sign)
 PARTITION BY toYYYYMM(StartDate)
 ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
 SAMPLE BY intHash32(UserID)
-SETTINGS index_granularity = 8192
 ```

 You can execute those queries using the interactive mode of `clickhouse-client` (just launch it in a terminal without specifying a query in advance), or try some [alternative interface](../interfaces/index.md) if you want.
@@ -25,7 +25,6 @@ CREATE TABLE insert_select_testtable
 )
 ENGINE = MergeTree()
 ORDER BY a
-SETTINGS index_granularity = 8192
 ```

 ``` sql
@@ -138,6 +138,7 @@ void Connection::connect(const ConnectionTimeouts & timeouts)

 void Connection::disconnect()
 {
+    maybe_compressed_out = nullptr;
     in = nullptr;
     last_input_packet_type.reset();
     out = nullptr; // can write to socket
@@ -4,7 +4,6 @@
 #include <Common/assert_cast.h>
 #include <Common/WeakHash.h>
 #include <Common/HashTable/Hash.h>
-#include <Core/BigInt.h>

 #include <common/unaligned.h>
 #include <common/sort.h>
@@ -37,33 +37,16 @@ namespace ErrorCodes
 template <typename T>
 StringRef ColumnVector<T>::serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const
 {
-    if constexpr (is_big_int_v<T>)
-    {
-        static constexpr size_t bytesize = BigInt<T>::size;
-        char * pos = arena.allocContinue(bytesize, begin);
-        return BigInt<T>::serialize(data[n], pos);
-    }
-    else
-    {
-        auto * pos = arena.allocContinue(sizeof(T), begin);
-        unalignedStore<T>(pos, data[n]);
-        return StringRef(pos, sizeof(T));
-    }
+    auto * pos = arena.allocContinue(sizeof(T), begin);
+    unalignedStore<T>(pos, data[n]);
+    return StringRef(pos, sizeof(T));
 }

 template <typename T>
 const char * ColumnVector<T>::deserializeAndInsertFromArena(const char * pos)
 {
-    if constexpr (is_big_int_v<T>)
-    {
-        data.emplace_back(BigInt<T>::deserialize(pos));
-        return pos + BigInt<T>::size;
-    }
-    else
-    {
-        data.emplace_back(unalignedLoad<T>(pos));
-        return pos + sizeof(T);
-    }
+    data.emplace_back(unalignedLoad<T>(pos));
+    return pos + sizeof(T);
 }

 template <typename T>
@@ -299,18 +282,10 @@ MutableColumnPtr ColumnVector<T>::cloneResized(size_t size) const
         new_col.data.resize(size);

         size_t count = std::min(this->size(), size);
-        if constexpr (is_POD)
-        {
-            memcpy(new_col.data.data(), data.data(), count * sizeof(data[0]));
+        memcpy(new_col.data.data(), data.data(), count * sizeof(data[0]));

-            if (size > count)
-                memset(static_cast<void *>(&new_col.data[count]), static_cast<int>(ValueType()), (size - count) * sizeof(ValueType));
-        }
-        else
-        {
-            for (size_t i = 0; i < count; i++)
-                new_col.data[i] = data[i];
-        }
+        if (size > count)
+            memset(static_cast<void *>(&new_col.data[count]), static_cast<int>(ValueType()), (size - count) * sizeof(ValueType));
     }

     return res;
@@ -348,15 +323,7 @@ void ColumnVector<T>::insertRangeFrom(const IColumn & src, size_t start, size_t

     size_t old_size = data.size();
     data.resize(old_size + length);
-    if constexpr (is_POD)
-    {
-        memcpy(data.data() + old_size, &src_vec.data[start], length * sizeof(data[0]));
-    }
-    else
-    {
-        for (size_t i = 0; i < length; i++)
-            data[old_size + i] = src_vec.data[start + i];
-    }
+    memcpy(data.data() + old_size, &src_vec.data[start], length * sizeof(data[0]));
 }

 template <typename T>
@@ -372,70 +339,52 @@ ColumnPtr ColumnVector<T>::filter(const IColumn::Filter & filt, ssize_t result_s
     if (result_size_hint)
         res_data.reserve(result_size_hint > 0 ? result_size_hint : size);

-    if constexpr (is_POD)
-    {
     const UInt8 * filt_pos = filt.data();
     const UInt8 * filt_end = filt_pos + size;
     const T * data_pos = data.data();

 #ifdef __SSE2__
     /** A slightly more optimized version.
     * Based on the assumption that often pieces of consecutive values
     * completely pass or do not pass the filter.
     * Therefore, we will optimistically check the parts of `SIMD_BYTES` values.
     */

     static constexpr size_t SIMD_BYTES = 16;
     const __m128i zero16 = _mm_setzero_si128();
     const UInt8 * filt_end_sse = filt_pos + size / SIMD_BYTES * SIMD_BYTES;

     while (filt_pos < filt_end_sse)
     {
         int mask = _mm_movemask_epi8(_mm_cmpgt_epi8(_mm_loadu_si128(reinterpret_cast<const __m128i *>(filt_pos)), zero16));

         if (0 == mask)
         {
             /// Nothing is inserted.
         }
         else if (0xFFFF == mask)
         {
             res_data.insert(data_pos, data_pos + SIMD_BYTES);
         }
         else
         {
             for (size_t i = 0; i < SIMD_BYTES; ++i)
                 if (filt_pos[i])
                     res_data.push_back(data_pos[i]);
         }

         filt_pos += SIMD_BYTES;
         data_pos += SIMD_BYTES;
     }
 #endif

     while (filt_pos < filt_end)
     {
         if (*filt_pos)
             res_data.push_back(*data_pos);

         ++filt_pos;
         ++data_pos;
     }
-    }
-    else
-    {
-        const auto * filt_pos = filt.begin();
-        const auto * filt_end = filt.end();
-        auto data_pos = data.begin();
-
-        while (filt_pos < filt_end)
-        {
-            if (*filt_pos)
-                res_data.push_back(*data_pos);
-
-            ++filt_pos;
-            ++data_pos;
-        }
-    }

     return res;
@@ -6,7 +6,6 @@
 #include <Columns/ColumnVectorHelper.h>
 #include <common/unaligned.h>
 #include <Core/Field.h>
-#include <Core/BigInt.h>
 #include <Common/assert_cast.h>


@@ -107,10 +106,7 @@ private:

 public:
     using ValueType = T;
-    static constexpr bool is_POD = !is_big_int_v<T>;
-    using Container = std::conditional_t<is_POD,
-        PaddedPODArray<ValueType>,
-        std::vector<ValueType>>;
+    using Container = PaddedPODArray<ValueType>;

 private:
     ColumnVector() {}
@@ -136,10 +132,7 @@ public:

     void insertData(const char * pos, size_t) override
     {
-        if constexpr (is_POD)
-            data.emplace_back(unalignedLoad<T>(pos));
-        else
-            data.emplace_back(BigInt<T>::deserialize(pos));
+        data.emplace_back(unalignedLoad<T>(pos));
     }

     void insertDefault() override
@@ -149,18 +142,12 @@ public:

     void insertManyDefaults(size_t length) override
     {
-        if constexpr (is_POD)
-            data.resize_fill(data.size() + length, T());
-        else
-            data.resize(data.size() + length, T());
+        data.resize_fill(data.size() + length, T());
     }

     void popBack(size_t n) override
     {
-        if constexpr (is_POD)
-            data.resize_assume_reserved(data.size() - n);
-        else
-            data.resize(data.size() - n);
+        data.resize_assume_reserved(data.size() - n);
     }

     StringRef serializeValueIntoArena(size_t n, Arena & arena, char const *& begin) const override;
@@ -185,16 +172,12 @@ public:

     size_t allocatedBytes() const override
     {
-        if constexpr (is_POD)
-            return data.allocated_bytes();
-        else
-            return data.capacity() * sizeof(data[0]);
+        return data.allocated_bytes();
     }

     void protect() override
     {
-        if constexpr (is_POD)
-            data.protect();
+        data.protect();
     }

     void insertValue(const T value)
@@ -235,7 +235,7 @@ public:
         else if constexpr (std::is_same_v<T, UInt128>)
             throw Exception("No conversion to old UInt128 from " + demangle(typeid(U).name()), ErrorCodes::NOT_IMPLEMENTED);
         else
-            return bigint_cast<T>(x);
+            return static_cast<T>(x);
     }
 };

@@ -1,7 +1,6 @@
 #pragma once

 #include <common/types.h>
-#include <Core/BigInt.h>
 #include <Common/UInt128.h>
 #include <common/unaligned.h>

@@ -18,7 +18,7 @@
 #include <string>
 #include <type_traits>
 #include <Core/Defines.h>
-#include <Core/BigInt.h>

 #define ROTL(x, b) static_cast<UInt64>(((x) << (b)) | ((x) >> (64 - (b))))

@@ -136,23 +136,11 @@ public:
     }

     template <typename T>
-    std::enable_if_t<std::has_unique_object_representations_v<T>, void> update(const T & x)
-    {
-        update(reinterpret_cast<const char *>(&x), sizeof(x));
-    }
-
-    template <typename T>
-    std::enable_if_t<(std::is_floating_point_v<T> || std::is_same_v<T, CityHash_v1_0_2::uint128>), void> update(const T & x)
-    {
-        update(reinterpret_cast<const char *>(&x), sizeof(x));
-    }
-
-    template <typename T>
-    std::enable_if_t<is_big_int_v<T> && !std::has_unique_object_representations_v<T>, void> update(const T & x)
-    {
-        update(DB::BigInt<T>::serialize(x));
-    }
+    void update(const T & x)
+    {
+        update(reinterpret_cast<const char *>(&x), sizeof(x));
+    }

     void update(const std::string & x)
     {
         update(x.data(), x.length());
@@ -205,27 +193,13 @@ inline UInt64 sipHash64(const char * data, const size_t size)
 }

 template <typename T>
-std::enable_if_t<std::has_unique_object_representations_v<T>, UInt64> sipHash64(const T & x)
+UInt64 sipHash64(const T & x)
 {
     SipHash hash;
     hash.update(x);
     return hash.get64();
 }

-template <typename T>
-std::enable_if_t<(std::is_floating_point_v<T> || (is_big_int_v<T> && !std::has_unique_object_representations_v<T>)), UInt64> sipHash64(const T & x)
-{
-    SipHash hash;
-    hash.update(x);
-    return hash.get64();
-}
-
-template <typename T>
-std::enable_if_t<DB::IsDecimalNumber<T>, UInt64> sipHash64(const T & x)
-{
-    return sipHash64(x.value);
-}
-
 inline UInt64 sipHash64(const std::string & s)
 {
     return sipHash64(s.data(), s.size());
src/Common/ZooKeeper/TestKeeperStorageDispatcher.cpp (new file, 139 lines)
@@ -0,0 +1,139 @@
+#include <Common/ZooKeeper/TestKeeperStorageDispatcher.h>
+#include <Common/setThreadName.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+
+    extern const int LOGICAL_ERROR;
+    extern const int TIMEOUT_EXCEEDED;
+}
+
+}
+namespace zkutil
+{
+
+void TestKeeperStorageDispatcher::processingThread()
+{
+    setThreadName("TestKeeperSProc");
+
+    while (!shutdown)
+    {
+        RequestInfo info;
+
+        UInt64 max_wait = UInt64(operation_timeout.totalMilliseconds());
+
+        if (requests_queue.tryPop(info, max_wait))
+        {
+            if (shutdown)
+                break;
+
+            try
+            {
+                auto responses = storage.processRequest(info.request, info.session_id);
+                for (const auto & response_for_session : responses)
+                    setResponse(response_for_session.session_id, response_for_session.response);
+            }
+            catch (...)
+            {
+                tryLogCurrentException(__PRETTY_FUNCTION__);
+            }
+        }
+    }
+}
+
+void TestKeeperStorageDispatcher::setResponse(int64_t session_id, const Coordination::ZooKeeperResponsePtr & response)
+{
+    std::lock_guard lock(session_to_response_callback_mutex);
+    auto session_writer = session_to_response_callback.find(session_id);
+    if (session_writer == session_to_response_callback.end())
+        return;
+
+    session_writer->second(response);
+    /// Session closed, no more writes
+    if (response->xid != Coordination::WATCH_XID && response->getOpNum() == Coordination::OpNum::Close)
+        session_to_response_callback.erase(session_writer);
+}
+
+void TestKeeperStorageDispatcher::finalize()
+{
+    {
+        std::lock_guard lock(push_request_mutex);
+
+        if (shutdown)
+            return;
+
+        shutdown = true;
+
+        if (processing_thread.joinable())
+            processing_thread.join();
+    }
+
+    RequestInfo info;
+    TestKeeperStorage::RequestsForSessions expired_requests;
+    while (requests_queue.tryPop(info))
+        expired_requests.push_back(TestKeeperStorage::RequestForSession{info.session_id, info.request});
+
+    auto expired_responses = storage.finalize(expired_requests);
+
+    for (const auto & response_for_session : expired_responses)
+        setResponse(response_for_session.session_id, response_for_session.response);
+}
+
+void TestKeeperStorageDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id)
+{
+
+    {
+        std::lock_guard lock(session_to_response_callback_mutex);
+        if (session_to_response_callback.count(session_id) == 0)
+            throw Exception(DB::ErrorCodes::LOGICAL_ERROR, "Unknown session id {}", session_id);
+    }
+
+    RequestInfo request_info;
+    request_info.time = clock::now();
+    request_info.request = request;
+    request_info.session_id = session_id;
+
+    std::lock_guard lock(push_request_mutex);
+    /// Put close requests without timeouts
+    if (request->getOpNum() == Coordination::OpNum::Close)
+        requests_queue.push(std::move(request_info));
+    else if (!requests_queue.tryPush(std::move(request_info), operation_timeout.totalMilliseconds()))
+        throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED);
+}
+
+TestKeeperStorageDispatcher::TestKeeperStorageDispatcher()
+{
+    processing_thread = ThreadFromGlobalPool([this] { processingThread(); });
+}
+
+TestKeeperStorageDispatcher::~TestKeeperStorageDispatcher()
+{
+    try
+    {
+        finalize();
+    }
+    catch (...)
+    {
+        tryLogCurrentException(__PRETTY_FUNCTION__);
+    }
+}
+
+void TestKeeperStorageDispatcher::registerSession(int64_t session_id, ZooKeeperResponseCallback callback)
+{
+    std::lock_guard lock(session_to_response_callback_mutex);
+    if (!session_to_response_callback.try_emplace(session_id, callback).second)
+        throw Exception(DB::ErrorCodes::LOGICAL_ERROR, "Session with id {} already registered in dispatcher", session_id);
+}
+
+void TestKeeperStorageDispatcher::finishSession(int64_t session_id)
+{
+    std::lock_guard lock(session_to_response_callback_mutex);
+    auto session_it = session_to_response_callback.find(session_id);
+    if (session_it != session_to_response_callback.end())
+        session_to_response_callback.erase(session_it);
+}
+
+}
@@ -1,6 +1,6 @@
 #pragma once

-#include <common/types.h>
+#include <Core/Types.h>
 #include <Compression/ICompressionCodec.h>


@@ -93,7 +93,7 @@ using bool_if_gt_int_vs_uint = std::enable_if_t<is_gt_int_vs_uint<TInt, TUInt>,
 template <typename TInt, typename TUInt>
 inline bool_if_gt_int_vs_uint<TInt, TUInt> greaterOpTmpl(TInt a, TUInt b)
 {
-    return bigint_cast<TInt>(a) > bigint_cast<TInt>(b);
+    return static_cast<TInt>(a) > static_cast<TInt>(b);
 }

 template <typename TInt, typename TUInt>
@@ -101,19 +101,19 @@ inline bool_if_gt_int_vs_uint<TInt, TUInt> greaterOpTmpl(TUInt a, TInt b)
 {
     using CastA = std::conditional_t<is_big_int_v<TInt> && std::is_same_v<TUInt, DB::UInt128>, DB::UInt256, TInt>;

-    return bigint_cast<CastA>(a) > b;
+    return static_cast<CastA>(a) > b;
 }

 template <typename TInt, typename TUInt>
 inline bool_if_gt_int_vs_uint<TInt, TUInt> equalsOpTmpl(TInt a, TUInt b)
 {
-    return bigint_cast<TInt>(a) == bigint_cast<TInt>(b);
+    return static_cast<TInt>(a) == static_cast<TInt>(b);
 }

 template <typename TInt, typename TUInt>
 inline bool_if_gt_int_vs_uint<TInt, TUInt> equalsOpTmpl(TUInt a, TInt b)
 {
-    return bigint_cast<TInt>(a) == bigint_cast<TInt>(b);
+    return static_cast<TInt>(a) == static_cast<TInt>(b);
 }


@@ -196,7 +196,7 @@ inline bool_if_safe_conversion<A, B> greaterOp(A a, B b)
     using CastB = std::conditional_t<is_big_int_v<A> && std::is_same_v<B, DB::UInt128>, A, CastB1>;

     if constexpr (is_big_int_v<A> || is_big_int_v<B>)
-        return bigint_cast<CastA>(a) > bigint_cast<CastB>(b);
+        return static_cast<CastA>(a) > static_cast<CastB>(b);
     else
         return a > b;
 }
@@ -306,7 +306,7 @@ inline bool_if_safe_conversion<A, B> equalsOp(A a, B b)
 {
     using LargestType = std::conditional_t<(sizeof(A) > sizeof(B)) || ((sizeof(A) == sizeof(B)) && !std::is_same_v<A, DB::UInt128>), A, B>;

-    return bigint_cast<LargestType>(a) == bigint_cast<LargestType>(b);
+    return static_cast<LargestType>(a) == static_cast<LargestType>(b);
 }

 template <>
@@ -429,7 +429,7 @@ inline bool_if_safe_conversion<A, B> notEqualsOp(A a, B b)
     using CastB = std::conditional_t<is_big_int_v<A> && std::is_same_v<B, DB::UInt128>, A, CastB1>;

     if constexpr (is_big_int_v<A> || is_big_int_v<B>)
-        return bigint_cast<CastA>(a) != bigint_cast<CastB>(b);
+        return static_cast<CastA>(a) != static_cast<CastB>(b);
     else
         return a != b;
 }
@@ -451,7 +451,7 @@ inline bool_if_safe_conversion<A, B> lessOp(A a, B b)
     using CastB = std::conditional_t<is_big_int_v<A> && std::is_same_v<B, DB::UInt128>, A, CastB1>;

     if constexpr (is_big_int_v<A> || is_big_int_v<B>)
-        return bigint_cast<CastA>(a) < bigint_cast<CastB>(b);
+        return static_cast<CastA>(a) < static_cast<CastB>(b);
     else
         return a < b;
 }
@@ -475,7 +475,7 @@ inline bool_if_safe_conversion<A, B> lessOrEqualsOp(A a, B b)
     using CastB = std::conditional_t<is_big_int_v<A> && std::is_same_v<B, DB::UInt128>, A, CastB1>;

     if constexpr (is_big_int_v<A> || is_big_int_v<B>)
-        return bigint_cast<CastA>(a) <= bigint_cast<CastB>(b);
+        return static_cast<CastA>(a) <= static_cast<CastB>(b);
     else
         return a <= b;
 }
@@ -499,7 +499,7 @@ inline bool_if_safe_conversion<A, B> greaterOrEqualsOp(A a, B b)
     using CastB = std::conditional_t<is_big_int_v<A> && std::is_same_v<B, DB::UInt128>, A, CastB1>;

     if constexpr (is_big_int_v<A> || is_big_int_v<B>)
-        return bigint_cast<CastA>(a) >= bigint_cast<CastB>(b);
+        return static_cast<CastA>(a) >= static_cast<CastB>(b);
     else
         return a >= b;
 }
@@ -1,36 +0,0 @@
-#pragma once
-
-#include <common/StringRef.h>
-#include <common/unaligned.h>
-#include <Core/Types.h>
-
-
-namespace DB
-{
-
-template <typename T>
-struct BigInt
-{
-    static_assert(sizeof(T) == 32);
-    static constexpr size_t size = 32;
-
-    static StringRef serialize(const T & x, char * pos)
-    {
-        unalignedStore<T>(pos, x);
-        return StringRef(pos, size);
-    }
-
-    static String serialize(const T & x)
-    {
-        String str(size, '\0');
-        serialize(x, str.data());
-        return str;
-    }
-
-    static T deserialize(const char * pos)
-    {
-        return unalignedLoad<T>(pos);
-    }
-};
-
-}
@@ -233,9 +233,9 @@ private:
         bool overflow = false;

         if constexpr (sizeof(A) > sizeof(CompareInt))
-            overflow |= (bigint_cast<A>(x) != a);
+            overflow |= (static_cast<A>(x) != a);
         if constexpr (sizeof(B) > sizeof(CompareInt))
-            overflow |= (bigint_cast<B>(y) != b);
+            overflow |= (static_cast<B>(y) != b);
         if constexpr (is_unsigned_v<A>)
             overflow |= (x < 0);
         if constexpr (is_unsigned_v<B>)
@@ -326,6 +326,7 @@ class IColumn;
     M(Bool, log_profile_events, true, "Log query performance statistics into the query_log and query_thread_log.", 0) \
     M(Bool, log_query_settings, true, "Log query settings into the query_log.", 0) \
     M(Bool, log_query_threads, true, "Log query threads into system.query_thread_log table. This setting have effect only when 'log_queries' is true.", 0) \
+    M(String, log_comment, "", "Log comment into system.query_log table and server log. It can be set to arbitrary string no longer than max_query_size.", 0) \
     M(LogsLevel, send_logs_level, LogsLevel::fatal, "Send server text logs with specified minimum level to client. Valid values: 'trace', 'debug', 'information', 'warning', 'error', 'fatal', 'none'", 0) \
     M(Bool, enable_optimize_predicate_expression, 1, "If it is set to true, optimize predicates to subqueries.", 0) \
     M(Bool, enable_optimize_predicate_expression_to_final_subquery, 1, "Allow push predicate to final subquery.", 0) \
@@ -404,7 +405,7 @@ class IColumn;
     M(MySQLDataTypesSupport, mysql_datatypes_support_level, 0, "Which MySQL types should be converted to corresponding ClickHouse types (rather than being represented as String). Can be empty or any combination of 'decimal' or 'datetime64'. When empty MySQL's DECIMAL and DATETIME/TIMESTAMP with non-zero precision are seen as String on ClickHouse's side.", 0) \
     M(Bool, optimize_trivial_insert_select, true, "Optimize trivial 'INSERT INTO table SELECT ... FROM TABLES' query", 0) \
     M(Bool, allow_non_metadata_alters, true, "Allow to execute alters which affects not only tables metadata, but also data on disk", 0) \
-    M(Bool, enable_global_with_statement, false, "Propagate WITH statements to UNION queries and all subqueries", 0) \
+    M(Bool, enable_global_with_statement, true, "Propagate WITH statements to UNION queries and all subqueries", 0) \
     M(Bool, aggregate_functions_null_for_empty, false, "Rewrite all aggregate functions in a query, adding -OrNull suffix to them", 0) \
     M(Bool, flatten_nested, true, "If true, columns of type Nested will be flatten to separate array columns instead of one array of tuples", 0) \
     M(Bool, asterisk_include_materialized_columns, false, "Include MATERIALIZED columns for wildcard query", 0) \
@@ -158,7 +158,7 @@ struct Decimal
             return convertTo<typename U::NativeType>();
         }
         else
-            return bigint_cast<U>(value);
+            return static_cast<U>(value);
     }

     const Decimal<T> & operator += (const T & x) { value += x; return *this; }
@@ -597,6 +597,7 @@ inline bool isEnum(const DataTypePtr & data_type) { return WhichDataType(data_ty
 inline bool isDecimal(const DataTypePtr & data_type) { return WhichDataType(data_type).isDecimal(); }
 inline bool isTuple(const DataTypePtr & data_type) { return WhichDataType(data_type).isTuple(); }
 inline bool isArray(const DataTypePtr & data_type) { return WhichDataType(data_type).isArray(); }
+inline bool isMap(const DataTypePtr & data_type) {return WhichDataType(data_type).isMap(); }

 template <typename T>
 inline bool isUInt8(const T & data_type)
@@ -218,7 +218,7 @@ using ResultOfGreatest = std::conditional_t<LeastGreatestSpecialCase<A, B>,
 template <typename T>
 static inline auto littleBits(const T & x)
 {
-    return bigint_cast<UInt8>(x);
+    return static_cast<UInt8>(x);
 }

 }
@@ -45,7 +45,7 @@ DataTypePtr convertMySQLDataType(MultiEnum<MySQLDataTypesSupport> type_support,
     // 4. type_with_params(param1, param2, ...) options
     // The options can be unsigned, zerofill, or some other strings.
     auto data_type = std::string_view(mysql_data_type);
-    const auto type_end_pos = data_type.find_first_of("(\x20"); // FIXME: fix style-check script instead
+    const auto type_end_pos = data_type.find_first_of(R"(( )"); // FIXME: fix style-check script instead
     const auto type_name = data_type.substr(0, type_end_pos);

     DataTypePtr res;
@@ -344,7 +344,9 @@ std::vector<DictionaryAttribute> DictionaryStructure::getAttributes(
             }
             catch (Exception & e)
             {
-                e.addMessage("error parsing null_value");
+                String dictionary_name = config.getString(".dictionary.name", "");
+                e.addMessage("While parsing null_value for attribute with name " + name
+                    + " in dictionary " + dictionary_name);
                 throw;
             }
         }
@@ -51,9 +51,9 @@ inline auto checkedDivision(A a, B b)
     throwIfDivisionLeadsToFPE(a, b);

     if constexpr (is_big_int_v<A> && std::is_floating_point_v<B>)
-        return bigint_cast<B>(a) / b;
+        return static_cast<B>(a) / b;
     else if constexpr (is_big_int_v<B> && std::is_floating_point_v<A>)
-        return a / bigint_cast<A>(b);
+        return a / static_cast<A>(b);
     else if constexpr (is_big_int_v<A> && is_big_int_v<B>)
         return static_cast<A>(a / b);
     else if constexpr (!is_big_int_v<A> && is_big_int_v<B>)
@@ -84,10 +84,10 @@ struct DivideIntegralImpl
             using SignedCastA = make_signed_t<CastA>;
             using SignedCastB = std::conditional_t<sizeof(A) <= sizeof(B), make_signed_t<CastB>, SignedCastA>;

-            return bigint_cast<Result>(checkedDivision(bigint_cast<SignedCastA>(a), bigint_cast<SignedCastB>(b)));
+            return static_cast<Result>(checkedDivision(static_cast<SignedCastA>(a), static_cast<SignedCastB>(b)));
         }
         else
-            return bigint_cast<Result>(checkedDivision(CastA(a), CastB(b)));
+            return static_cast<Result>(checkedDivision(CastA(a), CastB(b)));
     }

 #if USE_EMBEDDED_COMPILER
@@ -110,7 +110,7 @@ struct ModuloImpl
         if constexpr (std::is_floating_point_v<ResultType>)
         {
             /// This computation is similar to `fmod` but the latter is not inlined and has 40 times worse performance.
-            return bigint_cast<ResultType>(a) - trunc(bigint_cast<ResultType>(a) / bigint_cast<ResultType>(b)) * bigint_cast<ResultType>(b);
+            return static_cast<ResultType>(a) - trunc(static_cast<ResultType>(a) / static_cast<ResultType>(b)) * static_cast<ResultType>(b);
         }
         else
         {
@@ -125,9 +125,9 @@ struct ModuloImpl
             CastB int_b(b);

             if constexpr (is_big_int_v<IntegerBType> && sizeof(IntegerAType) <= sizeof(IntegerBType))
-                return bigint_cast<Result>(bigint_cast<CastB>(int_a) % int_b);
+                return static_cast<Result>(static_cast<CastB>(int_a) % int_b);
             else
-                return bigint_cast<Result>(int_a % bigint_cast<CastA>(int_b));
+                return static_cast<Result>(int_a % static_cast<CastA>(int_b));
         }
         else
             return IntegerAType(a) % IntegerBType(b);
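The floating-point branch above keeps the explicit identity a - trunc(a / b) * b instead of calling fmod. A self-contained sketch (plain C++, hypothetical names) showing the two agree on finite inputs:

#include <cmath>
#include <iostream>

// Same formula as in the hunk: a - trunc(a / b) * b yields the remainder
// with the sign of the dividend, matching fmod() for finite inputs.
static double modulo(double a, double b)
{
    return a - std::trunc(a / b) * b;
}

int main()
{
    std::cout << modulo(7.5, 2.0) << ' ' << std::fmod(7.5, 2.0) << '\n';    // 1.5 1.5
    std::cout << modulo(-7.5, 2.0) << ' ' << std::fmod(-7.5, 2.0) << '\n';  // -1.5 -1.5
}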
@@ -7,6 +7,7 @@
 #include <Columns/ColumnString.h>
 #include <Columns/ColumnFixedString.h>
 #include <Columns/ColumnArray.h>
+#include <Columns/ColumnMap.h>


 namespace DB
@@ -42,7 +43,7 @@ public:
     DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
     {
         if (!isStringOrFixedString(arguments[0])
-            && !isArray(arguments[0]))
+            && !isArray(arguments[0]) && !isMap(arguments[0]))
             throw Exception("Illegal type " + arguments[0]->getName() + " of argument of function " + getName(), ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

         return std::make_shared<DataTypeNumber<ResultType>>();
@@ -93,6 +94,16 @@ public:

             return col_res;
         }
+        else if (const ColumnMap * col_map = checkAndGetColumn<ColumnMap>(column.get()))
+        {
+            auto col_res = ColumnVector<ResultType>::create();
+            typename ColumnVector<ResultType>::Container & vec_res = col_res->getData();
+            vec_res.resize(col_map->size());
+            const auto & col_nested = col_map->getNestedColumn();
+
+            Impl::array(col_nested.getOffsets(), vec_res);
+            return col_res;
+        }
         else
             throw Exception("Illegal column " + arguments[0].column->getName() + " of argument of function " + getName(),
                 ErrorCodes::ILLEGAL_COLUMN);
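For Map (as for Array) columns the per-row length is not stored explicitly: it is the difference of adjacent cumulative offsets of the nested column, which is what Impl::array computes over getOffsets() in the hunk above. A minimal sketch of that offsets-to-lengths step using standard containers (hypothetical names):

#include <cstdint>
#include <iostream>
#include <vector>

// Lengths are differences of adjacent cumulative offsets; the implicit
// offset before the first row is 0.
static std::vector<uint64_t> lengths_from_offsets(const std::vector<uint64_t> & offsets)
{
    std::vector<uint64_t> res(offsets.size());
    uint64_t prev = 0;
    for (size_t i = 0; i < offsets.size(); ++i)
    {
        res[i] = offsets[i] - prev;
        prev = offsets[i];
    }
    return res;
}

int main()
{
    for (auto len : lengths_from_offsets({2, 2, 5}))  // rows of size 2, 0, 3
        std::cout << len << ' ';
    std::cout << '\n';
}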
@@ -79,8 +79,9 @@ struct InvalidType;
 template <template <typename> class Op, typename Name, bool is_injective>
 class FunctionUnaryArithmetic : public IFunction
 {
-    static constexpr bool allow_decimal = IsUnaryOperation<Op>::negate || IsUnaryOperation<Op>::abs;
+    static constexpr bool allow_decimal = IsUnaryOperation<Op>::negate || IsUnaryOperation<Op>::abs || IsUnaryOperation<Op>::sign;
     static constexpr bool allow_fixed_string = Op<UInt8>::allow_fixed_string;
+    static constexpr bool is_sign_function = IsUnaryOperation<Op>::sign;

     template <typename F>
     static bool castType(const IDataType * type, F && f)
@@ -137,7 +138,7 @@ public:
         {
             using T0 = typename DataType::FieldType;

-            if constexpr (IsDataTypeDecimal<DataType>)
+            if constexpr (IsDataTypeDecimal<DataType> && !is_sign_function)
             {
                 if constexpr (!allow_decimal)
                     return false;
@@ -183,12 +184,24 @@ public:
         {
             if (auto col = checkAndGetColumn<ColumnDecimal<T0>>(arguments[0].column.get()))
             {
-                auto col_res = ColumnDecimal<typename Op<T0>::ResultType>::create(0, type.getScale());
-                auto & vec_res = col_res->getData();
-                vec_res.resize(col->getData().size());
-                UnaryOperationImpl<T0, Op<T0>>::vector(col->getData(), vec_res);
-                result_column = std::move(col_res);
-                return true;
+                if constexpr (is_sign_function)
+                {
+                    auto col_res = ColumnVector<typename Op<T0>::ResultType>::create();
+                    auto & vec_res = col_res->getData();
+                    vec_res.resize(col->getData().size());
+                    UnaryOperationImpl<T0, Op<T0>>::vector(col->getData(), vec_res);
+                    result_column = std::move(col_res);
+                    return true;
+                }
+                else
+                {
+                    auto col_res = ColumnDecimal<typename Op<T0>::ResultType>::create(0, type.getScale());
+                    auto & vec_res = col_res->getData();
+                    vec_res.resize(col->getData().size());
+                    UnaryOperationImpl<T0, Op<T0>>::vector(col->getData(), vec_res);
+                    result_column = std::move(col_res);
+                    return true;
+                }
             }
         }
     }
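The hunk above selects the result column type at compile time: a sign-like operation always produces a plain integer column even for Decimal input, while other unary operations keep a Decimal result. A stripped-down sketch of that if constexpr dispatch, with std::vector standing in for the column types (all names hypothetical):

#include <cstdint>
#include <iostream>
#include <vector>

struct NegateOp { static constexpr bool is_sign = false; };
struct SignOp   { static constexpr bool is_sign = true;  };

// Only the taken branch is instantiated, so the deduced return type differs
// per operation: int8_t signs for SignOp, the input element type otherwise.
template <typename Op, typename T>
auto makeResult(const std::vector<T> & input)
{
    if constexpr (Op::is_sign)
    {
        std::vector<int8_t> res(input.size());
        for (size_t i = 0; i < input.size(); ++i)
            res[i] = input[i] < T(0) ? -1 : input[i] == T(0) ? 0 : 1;
        return res;
    }
    else
    {
        std::vector<T> res(input.size());
        for (size_t i = 0; i < input.size(); ++i)
            res[i] = -input[i];
        return res;
    }
}

int main()
{
    auto signs = makeResult<SignOp>(std::vector<double>{-2.5, 0.0, 3.0});
    for (int v : signs)
        std::cout << v << ' ';  // -1 0 1
    std::cout << '\n';
}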
@@ -81,8 +81,8 @@ namespace ErrorCodes
 template <typename A, typename B, typename Op>
 struct NumComparisonImpl
 {
-    using ContainerA = std::conditional_t<!is_big_int_v<A>, PaddedPODArray<A>, std::vector<A>>;
-    using ContainerB = std::conditional_t<!is_big_int_v<B>, PaddedPODArray<B>, std::vector<B>>;
+    using ContainerA = PaddedPODArray<A>;
+    using ContainerB = PaddedPODArray<B>;

     /// If you don't specify NO_INLINE, the compiler will inline this function, but we don't need this as this function contains tight loop inside.
     static void NO_INLINE vectorVector(const ContainerA & a, const ContainerB & b, PaddedPODArray<UInt8> & c)
@@ -806,16 +806,7 @@ private:
         size_t size = vec_from.size();
         for (size_t i = 0; i < size; ++i)
         {
-            ToType h;
-            if constexpr (OverBigInt<FromType>)
-            {
-                using NativeT = typename NativeType<FromType>::Type;
-
-                std::string buffer = BigInt<NativeT>::serialize(vec_from[i]);
-                h = Impl::apply(buffer.data(), buffer.size());
-            }
-            else
-                h = Impl::apply(reinterpret_cast<const char *>(&vec_from[i]), sizeof(vec_from[i]));
+            ToType h = Impl::apply(reinterpret_cast<const char *>(&vec_from[i]), sizeof(vec_from[i]));

             if constexpr (first)
                 vec_to[i] = h;
@@ -827,16 +818,7 @@ private:
         {
             auto value = col_from_const->template getValue<FromType>();

-            ToType h;
-            if constexpr (OverBigInt<FromType>)
-            {
-                using NativeT = typename NativeType<FromType>::Type;
-
-                std::string buffer = BigInt<NativeT>::serialize(value);
-                h = Impl::apply(buffer.data(), buffer.size());
-            }
-            else
-                h = Impl::apply(reinterpret_cast<const char *>(&value), sizeof(value));
+            ToType h = Impl::apply(reinterpret_cast<const char *>(&value), sizeof(value));

             size_t size = vec_to.size();
             if constexpr (first)
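With the dedicated big-integer serialization gone, every fixed-width value is hashed directly from its in-memory bytes. A standalone sketch of that approach (FNV-1a is used here only as a placeholder hash, not the hash from this file):

#include <cstdint>
#include <cstring>
#include <iostream>
#include <type_traits>

// Hash the object representation of any trivially copyable value, the same
// "feed the raw bytes to the hash" idea as in the simplified code above.
template <typename T>
uint64_t hash_pod(const T & value)
{
    static_assert(std::is_trivially_copyable_v<T>);
    unsigned char bytes[sizeof(T)];
    std::memcpy(bytes, &value, sizeof(T));

    uint64_t h = 1469598103934665603ULL;  // FNV-1a offset basis
    for (unsigned char b : bytes)
    {
        h ^= b;
        h *= 1099511628211ULL;            // FNV-1a prime
    }
    return h;
}

int main()
{
    std::cout << hash_pod(12345) << '\n';
    std::cout << hash_pod(3.14) << '\n';
}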
src/Functions/GCDLCMImpl.h (new file, 67 lines)
@@ -0,0 +1,67 @@
+#pragma once
+
+#include <DataTypes/NumberTraits.h>
+#include <Common/Exception.h>
+#include <numeric>
+#include <limits>
+#include <type_traits>
+
+#if !defined(ARCADIA_BUILD)
+#    include "config_core.h"
+#endif
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int NOT_IMPLEMENTED;
+    extern const int DECIMAL_OVERFLOW;
+}
+
+template <class T>
+inline constexpr bool is_gcd_lcm_implemeted = !(is_big_int_v<T> || std::is_floating_point_v<T>);
+
+template <typename A, typename B, typename Impl, typename Name>
+struct GCDLCMImpl
+{
+    using ResultType = typename NumberTraits::ResultOfAdditionMultiplication<A, B>::Type;
+    static const constexpr bool allow_fixed_string = false;
+
+    template <typename Result = ResultType>
+    static inline std::enable_if_t<!is_gcd_lcm_implemeted<Result>, Result>
+    apply(A, B)
+    {
+        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "{} is not implemented for big integers and floats", Name::name);
+    }
+
+    template <typename Result = ResultType>
+    static inline std::enable_if_t<is_gcd_lcm_implemeted<Result>, Result>
+    apply(A a, B b)
+    {
+        throwIfDivisionLeadsToFPE(typename NumberTraits::ToInteger<A>::Type(a), typename NumberTraits::ToInteger<B>::Type(b));
+        throwIfDivisionLeadsToFPE(typename NumberTraits::ToInteger<B>::Type(b), typename NumberTraits::ToInteger<A>::Type(a));
+
+        using Int = typename NumberTraits::ToInteger<Result>::Type;
+
+        if constexpr (is_signed_v<Result>)
+        {
+            /// gcd() internally uses std::abs()
+            Int a_s = static_cast<Int>(a);
+            Int b_s = static_cast<Int>(b);
+            Int min = std::numeric_limits<Int>::min();
+            Int max = std::numeric_limits<Int>::max();
+            if (unlikely((a_s == min || a_s == max) || (b_s == min || b_s == max)))
+                throw Exception(ErrorCodes::DECIMAL_OVERFLOW, "Intermediate result overflow (signed a = {}, signed b = {}, min = {}, max = {})", a_s, b_s, min, max);
+        }
+
+        return Impl::applyImpl(a, b);
+    }
+
+#if USE_EMBEDDED_COMPILER
+    static constexpr bool compilable = false; /// exceptions (and a non-trivial algorithm)
+#endif
+};
+
+}
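The signed-overflow guard in GCDLCMImpl exists because std::gcd takes absolute values internally, and std::abs of the minimum value of a signed type is undefined. A minimal standalone illustration of the same guard (int64_t only, hypothetical names):

#include <cstdint>
#include <iostream>
#include <limits>
#include <numeric>
#include <stdexcept>

// Reject operands at the numeric limits before calling std::gcd, which
// would otherwise take std::abs(INT64_MIN) and hit undefined behaviour.
static int64_t checked_gcd(int64_t a, int64_t b)
{
    constexpr int64_t min = std::numeric_limits<int64_t>::min();
    constexpr int64_t max = std::numeric_limits<int64_t>::max();
    if (a == min || a == max || b == min || b == max)
        throw std::overflow_error("intermediate result overflow in gcd");
    return std::gcd(a, b);
}

int main()
{
    std::cout << checked_gcd(12, 18) << '\n';  // 6
    try
    {
        checked_gcd(std::numeric_limits<int64_t>::min(), 2);
    }
    catch (const std::overflow_error & e)
    {
        std::cout << e.what() << '\n';
    }
}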
@@ -50,9 +50,9 @@ void writeSlice(const NumericArraySlice<T> & slice, NumericArraySink<U> & sink)
             throw Exception("No conversion between UInt128 and " + demangle(typeid(T).name()), ErrorCodes::NOT_IMPLEMENTED);
         }
         else if constexpr (IsDecimalNumber<T>)
-            dst = bigint_cast<NativeU>(src.value);
+            dst = static_cast<NativeU>(src.value);
         else
-            dst = bigint_cast<NativeU>(src);
+            dst = static_cast<NativeU>(src);
     }
     else
         dst = static_cast<NativeU>(src);
@@ -20,6 +20,9 @@ template <typename, typename> struct NotEqualsOp;
 template <typename, typename> struct LessOrEqualsOp;
 template <typename, typename> struct GreaterOrEqualsOp;

+template <typename>
+struct SignImpl;
+
 template <template <typename, typename> typename Op1, template <typename, typename> typename Op2>
 struct IsSameOperation
 {
@@ -31,6 +34,7 @@ struct IsUnaryOperation
 {
     static constexpr bool abs = std::is_same_v<Op<Int8>, AbsImpl<Int8>>;
     static constexpr bool negate = std::is_same_v<Op<Int8>, NegateImpl<Int8>>;
+    static constexpr bool sign = std::is_same_v<Op<Int8>, SignImpl<Int8>>;
 };

 template <template <typename, typename> typename Op>
@@ -21,7 +21,7 @@ struct BitAndImpl
     template <typename Result = ResultType>
     static inline Result apply(A a, B b)
     {
-        return bigint_cast<Result>(a) & bigint_cast<Result>(b);
+        return static_cast<Result>(a) & static_cast<Result>(b);
     }

 #if USE_EMBEDDED_COMPILER
@@ -20,7 +20,7 @@ struct BitOrImpl
     template <typename Result = ResultType>
     static inline Result apply(A a, B b)
     {
-        return bigint_cast<Result>(a) | bigint_cast<Result>(b);
+        return static_cast<Result>(a) | static_cast<Result>(b);
     }

 #if USE_EMBEDDED_COMPILER
@@ -24,7 +24,7 @@ struct BitShiftLeftImpl
         if constexpr (is_big_int_v<B>)
             throw Exception("BitShiftLeft is not implemented for big integers as second argument", ErrorCodes::NOT_IMPLEMENTED);
         else if constexpr (is_big_int_v<A>)
-            return bigint_cast<Result>(a) << bigint_cast<UInt32>(b);
+            return static_cast<Result>(a) << static_cast<UInt32>(b);
         else
             return static_cast<Result>(a) << static_cast<Result>(b);
     }
@@ -24,7 +24,7 @@ struct BitShiftRightImpl
         if constexpr (is_big_int_v<B>)
             throw Exception("BitShiftRight is not implemented for big integers as second argument", ErrorCodes::NOT_IMPLEMENTED);
         else if constexpr (is_big_int_v<A>)
-            return bigint_cast<Result>(a) >> bigint_cast<UInt32>(b);
+            return static_cast<Result>(a) >> static_cast<UInt32>(b);
         else
             return static_cast<Result>(a) >> static_cast<Result>(b);
     }
@@ -20,7 +20,7 @@ struct BitXorImpl
     template <typename Result = ResultType>
     static inline Result apply(A a, B b)
     {
-        return bigint_cast<Result>(a) ^ bigint_cast<Result>(b);
+        return static_cast<Result>(a) ^ static_cast<Result>(b);
     }

 #if USE_EMBEDDED_COMPILER
@@ -1,45 +1,28 @@
 #include <Functions/FunctionFactory.h>
 #include <Functions/FunctionBinaryArithmetic.h>
-#include <numeric>
+#include <Functions/GCDLCMImpl.h>


 namespace DB
 {
-namespace ErrorCodes
-{
-    extern const int NOT_IMPLEMENTED;
-}

 namespace
 {

+struct NameGCD { static constexpr auto name = "gcd"; };
+
 template <typename A, typename B>
-struct GCDImpl
+struct GCDImpl : public GCDLCMImpl<A, B, GCDImpl<A, B>, NameGCD>
 {
-    using ResultType = typename NumberTraits::ResultOfAdditionMultiplication<A, B>::Type;
-    static const constexpr bool allow_fixed_string = false;
+    using ResultType = typename GCDLCMImpl<A, B, GCDImpl, NameGCD>::ResultType;

-    template <typename Result = ResultType>
-    static inline Result apply([[maybe_unused]] A a, [[maybe_unused]] B b)
+    static ResultType applyImpl(A a, B b)
     {
-        if constexpr (is_big_int_v<A> || is_big_int_v<B> || is_big_int_v<Result>)
-            throw Exception("GCD is not implemented for big integers", ErrorCodes::NOT_IMPLEMENTED);
-        else
-        {
-            throwIfDivisionLeadsToFPE(typename NumberTraits::ToInteger<A>::Type(a), typename NumberTraits::ToInteger<B>::Type(b));
-            throwIfDivisionLeadsToFPE(typename NumberTraits::ToInteger<B>::Type(b), typename NumberTraits::ToInteger<A>::Type(a));
-            return std::gcd(
-                typename NumberTraits::ToInteger<Result>::Type(a),
-                typename NumberTraits::ToInteger<Result>::Type(b));
-        }
+        using Int = typename NumberTraits::ToInteger<ResultType>::Type;
+        return std::gcd(Int(a), Int(b));
     }
-
-#if USE_EMBEDDED_COMPILER
-    static constexpr bool compilable = false; /// exceptions (and a non-trivial algorithm)
-#endif
 };

-struct NameGCD { static constexpr auto name = "gcd"; };
 using FunctionGCD = BinaryArithmeticOverloadResolver<GCDImpl, NameGCD, false>;

 }
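After this refactoring gcd.cpp only supplies applyImpl, while the shared checks live in the GCDLCMImpl base. A toy sketch of that base/derived (CRTP) split; the zero check below is only a simplified stand-in for the real FPE and overflow checks:

#include <iostream>
#include <numeric>
#include <stdexcept>

// The base performs common validation, then delegates the arithmetic to the
// derived class, so gcd and lcm each contribute only applyImpl.
template <typename Impl>
struct GcdLcmBase
{
    static long long apply(long long a, long long b)
    {
        if (a == 0 || b == 0)
            throw std::domain_error("zero operand in gcd/lcm check");
        return Impl::applyImpl(a, b);
    }
};

struct Gcd : GcdLcmBase<Gcd>
{
    static long long applyImpl(long long a, long long b) { return std::gcd(a, b); }
};

struct Lcm : GcdLcmBase<Lcm>
{
    static long long applyImpl(long long a, long long b) { return std::lcm(a, b); }
};

int main()
{
    std::cout << Gcd::apply(12, 18) << ' ' << Lcm::apply(4, 6) << '\n';  // 6 12
}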
@@ -16,8 +16,8 @@ struct GreatestBaseImpl
     template <typename Result = ResultType>
     static inline Result apply(A a, B b)
     {
-        return bigint_cast<Result>(a) > bigint_cast<Result>(b) ?
-            bigint_cast<Result>(a) : bigint_cast<Result>(b);
+        return static_cast<Result>(a) > static_cast<Result>(b) ?
+            static_cast<Result>(a) : static_cast<Result>(b);
     }

 #if USE_EMBEDDED_COMPILER
@@ -60,7 +60,7 @@ struct NumIfImpl
         typename ColVecResult::Container & res = col_res->getData();

         for (size_t i = 0; i < size; ++i)
-            res[i] = cond[i] ? bigint_cast<ResultType>(a[i]) : bigint_cast<ResultType>(b[i]);
+            res[i] = cond[i] ? static_cast<ResultType>(a[i]) : static_cast<ResultType>(b[i]);
         return col_res;
     }

@@ -71,7 +71,7 @@ struct NumIfImpl
         typename ColVecResult::Container & res = col_res->getData();

         for (size_t i = 0; i < size; ++i)
-            res[i] = cond[i] ? bigint_cast<ResultType>(a[i]) : bigint_cast<ResultType>(b);
+            res[i] = cond[i] ? static_cast<ResultType>(a[i]) : static_cast<ResultType>(b);
         return col_res;
     }

@@ -82,7 +82,7 @@ struct NumIfImpl
         typename ColVecResult::Container & res = col_res->getData();

         for (size_t i = 0; i < size; ++i)
-            res[i] = cond[i] ? bigint_cast<ResultType>(a) : bigint_cast<ResultType>(b[i]);
+            res[i] = cond[i] ? static_cast<ResultType>(a) : static_cast<ResultType>(b[i]);
         return col_res;
     }

@@ -93,7 +93,7 @@ struct NumIfImpl
         typename ColVecResult::Container & res = col_res->getData();

         for (size_t i = 0; i < size; ++i)
-            res[i] = cond[i] ? bigint_cast<ResultType>(a) : bigint_cast<ResultType>(b);
+            res[i] = cond[i] ? static_cast<ResultType>(a) : static_cast<ResultType>(b);
         return col_res;
     }
 };
@@ -1,10 +1,6 @@
 #include <Functions/FunctionFactory.h>
 #include <Functions/FunctionBinaryArithmetic.h>
+#include <Functions/GCDLCMImpl.h>
-#include <numeric>
-#include <limits>
-#include <type_traits>


 namespace
 {
@@ -27,33 +23,21 @@ constexpr T abs(T value) noexcept

 namespace DB
 {
-namespace ErrorCodes
-{
-    extern const int NOT_IMPLEMENTED;
-}

 namespace
 {

+struct NameLCM { static constexpr auto name = "lcm"; };
+
 template <typename A, typename B>
-struct LCMImpl
+struct LCMImpl : public GCDLCMImpl<A, B, LCMImpl<A, B>, NameLCM>
 {
-    using ResultType = typename NumberTraits::ResultOfAdditionMultiplication<A, B>::Type;
-    static const constexpr bool allow_fixed_string = false;
+    using ResultType = typename GCDLCMImpl<A, B, LCMImpl<A, B>, NameLCM>::ResultType;

-    template <typename Result = ResultType>
-    static inline std::enable_if_t<is_big_int_v<A> || is_big_int_v<B> || is_big_int_v<Result>, Result>
-    apply([[maybe_unused]] A a, [[maybe_unused]] B b)
+    static ResultType applyImpl(A a, B b)
     {
-        throw Exception("LCM is not implemented for big integers", ErrorCodes::NOT_IMPLEMENTED);
-    }
-
-    template <typename Result = ResultType>
-    static inline std::enable_if_t<!is_big_int_v<A> && !is_big_int_v<B> && !is_big_int_v<Result>, Result>
-    apply([[maybe_unused]] A a, [[maybe_unused]] B b)
-    {
-        throwIfDivisionLeadsToFPE(typename NumberTraits::ToInteger<A>::Type(a), typename NumberTraits::ToInteger<B>::Type(b));
-        throwIfDivisionLeadsToFPE(typename NumberTraits::ToInteger<B>::Type(b), typename NumberTraits::ToInteger<A>::Type(a));
+        using Int = typename NumberTraits::ToInteger<ResultType>::Type;
+        using Unsigned = make_unsigned_t<Int>;

         /** It's tempting to use std::lcm function.
           * But it has undefined behaviour on overflow.
@@ -62,22 +46,14 @@ struct LCMImpl
           * (example: throw an exception or overflow in implementation specific way).
           */

-        using Int = typename NumberTraits::ToInteger<Result>::Type;
-        using Unsigned = make_unsigned_t<Int>;
-
         Unsigned val1 = abs<Int>(a) / std::gcd(Int(a), Int(b));
         Unsigned val2 = abs<Int>(b);

         /// Overflow in implementation specific way.
-        return Result(val1 * val2);
+        return ResultType(val1 * val2);
     }
-
-#if USE_EMBEDDED_COMPILER
-    static constexpr bool compilable = false; /// exceptions (and a non-trivial algorithm)
-#endif
 };

-struct NameLCM { static constexpr auto name = "lcm"; };
 using FunctionLCM = BinaryArithmeticOverloadResolver<LCMImpl, NameLCM, false>;

 }
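The lcm computation keeps the formula |a| / gcd(a, b) * |b| but performs the multiplication on unsigned values, so an oversized result wraps instead of invoking the undefined behaviour std::lcm has on overflow. A standalone sketch for 64-bit integers (hypothetical names):

#include <cstdint>
#include <iostream>
#include <numeric>

// |a| / gcd(a, b) * |b| in the unsigned domain: an out-of-range result wraps
// in an implementation-specific way, it is never undefined behaviour.
static int64_t lcm_wrapping(int64_t a, int64_t b)
{
    uint64_t ua = a < 0 ? 0 - static_cast<uint64_t>(a) : static_cast<uint64_t>(a);
    uint64_t ub = b < 0 ? 0 - static_cast<uint64_t>(b) : static_cast<uint64_t>(b);
    uint64_t val1 = ua / std::gcd(ua, ub);
    uint64_t val2 = ub;
    return static_cast<int64_t>(val1 * val2);  // may wrap, never UB
}

int main()
{
    std::cout << lcm_wrapping(4, 6) << '\n';   // 12
    std::cout << lcm_wrapping(-4, 6) << '\n';  // 12
}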
@@ -17,7 +17,7 @@ struct LeastBaseImpl
     static inline Result apply(A a, B b)
     {
         /** gcc 4.9.2 successfully vectorizes a loop from this function. */
-        return bigint_cast<Result>(a) < bigint_cast<Result>(b) ? bigint_cast<Result>(a) : bigint_cast<Result>(b);
+        return static_cast<Result>(a) < static_cast<Result>(b) ? static_cast<Result>(a) : static_cast<Result>(b);
     }

 #if USE_EMBEDDED_COMPILER
@@ -19,7 +19,7 @@ struct MinusImpl
         using CastA = std::conditional_t<std::is_floating_point_v<B>, B, A>;
         using CastB = std::conditional_t<std::is_floating_point_v<A>, A, B>;

-        return bigint_cast<Result>(bigint_cast<CastA>(a)) - bigint_cast<Result>(bigint_cast<CastB>(b));
+        return static_cast<Result>(static_cast<CastA>(a)) - static_cast<Result>(static_cast<CastB>(b));
     }
     else
         return static_cast<Result>(a) - b;
@@ -20,7 +20,7 @@ struct MultiplyImpl
         using CastA = std::conditional_t<std::is_floating_point_v<B>, B, A>;
         using CastB = std::conditional_t<std::is_floating_point_v<A>, A, B>;

-        return bigint_cast<Result>(bigint_cast<CastA>(a)) * bigint_cast<Result>(bigint_cast<CastB>(b));
+        return static_cast<Result>(static_cast<CastA>(a)) * static_cast<Result>(static_cast<CastB>(b));
     }
     else
         return static_cast<Result>(a) * b;
@@ -17,8 +17,10 @@ namespace ErrorCodes
 namespace
 {

+template <bool keep_names>
 struct Impl
 {
+    static constexpr auto name = keep_names ? "normalizeQueryKeepNames" : "normalizeQuery";
     static void vector(const ColumnString::Chars & data,
         const ColumnString::Offsets & offsets,
         ColumnString::Chars & res_data,
@@ -32,7 +34,7 @@ struct Impl
         for (size_t i = 0; i < size; ++i)
         {
             ColumnString::Offset curr_src_offset = offsets[i];
-            normalizeQueryToPODArray(
+            normalizeQueryToPODArray<keep_names>(
                 reinterpret_cast<const char *>(&data[prev_src_offset]),
                 reinterpret_cast<const char *>(&data[curr_src_offset - 1]),
                 res_data);
@@ -47,16 +49,12 @@ struct Impl
     }
 };

-struct Name
-{
-    static constexpr auto name = "normalizeQuery";
-};
-
 }

 void registerFunctionNormalizeQuery(FunctionFactory & factory)
 {
-    factory.registerFunction<FunctionStringToString<Impl, Name>>();
+    factory.registerFunction<FunctionStringToString<Impl<true>, Impl<true>>>();
+    factory.registerFunction<FunctionStringToString<Impl<false>, Impl<false>>>();
 }

 }
@@ -24,6 +24,7 @@ namespace ErrorCodes
 namespace
 {

+template <bool keep_names>
 struct Impl
 {
     static void vector(
@@ -38,17 +39,18 @@ struct Impl
         for (size_t i = 0; i < size; ++i)
         {
             ColumnString::Offset curr_src_offset = offsets[i];
-            res_data[i] = normalizedQueryHash(
+            res_data[i] = normalizedQueryHash<keep_names>(
                 reinterpret_cast<const char *>(&data[prev_src_offset]), reinterpret_cast<const char *>(&data[curr_src_offset - 1]));
             prev_src_offset = offsets[i];
         }
     }
 };

+template <bool keep_names>
 class FunctionNormalizedQueryHash : public IFunction
 {
 public:
-    static constexpr auto name = "normalizedQueryHash";
+    static constexpr auto name = keep_names ? "normalizedQueryHashKeepNames" : "normalizedQueryHash";
     static FunctionPtr create(const Context &)
     {
         return std::make_shared<FunctionNormalizedQueryHash>();
@@ -82,7 +84,7 @@ public:
         auto col_res = ColumnUInt64::create();
         typename ColumnUInt64::Container & vec_res = col_res->getData();
         vec_res.resize(col->size());
-        Impl::vector(col->getChars(), col->getOffsets(), vec_res);
+        Impl<keep_names>::vector(col->getChars(), col->getOffsets(), vec_res);
         return col_res;
     }
     else
@@ -95,7 +97,8 @@ public:

 void registerFunctionNormalizedQueryHash(FunctionFactory & factory)
 {
-    factory.registerFunction<FunctionNormalizedQueryHash>();
+    factory.registerFunction<FunctionNormalizedQueryHash<true>>();
+    factory.registerFunction<FunctionNormalizedQueryHash<false>>();
 }

 }
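Both normalizeQuery and normalizedQueryHash now use a single implementation template parameterised by a bool, registered twice under two names. A toy sketch of that dispatch pattern (the normalization body here is a placeholder, not the real algorithm):

#include <iostream>
#include <string>

// One template, two exposed names: the boolean parameter switches both the
// behaviour (keep identifiers or not) and the registered function name.
template <bool keep_names>
struct NormalizeImpl
{
    static constexpr const char * name = keep_names ? "normalizeQueryKeepNames" : "normalizeQuery";

    static std::string apply(const std::string & query)
    {
        // Toy stand-in for the real normalization: only shows the dispatch.
        return keep_names ? query : "<normalized>";
    }
};

int main()
{
    std::cout << NormalizeImpl<true>::name << " -> " << NormalizeImpl<true>::apply("SELECT a") << '\n';
    std::cout << NormalizeImpl<false>::name << " -> " << NormalizeImpl<false>::apply("SELECT a") << '\n';
}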
@@ -21,7 +21,7 @@ struct PlusImpl
         using CastA = std::conditional_t<std::is_floating_point_v<B>, B, A>;
         using CastB = std::conditional_t<std::is_floating_point_v<A>, A, B>;

-        return bigint_cast<Result>(bigint_cast<CastA>(a)) + bigint_cast<Result>(bigint_cast<CastB>(b));
+        return static_cast<Result>(static_cast<CastA>(a)) + static_cast<Result>(static_cast<CastB>(b));
     }
     else
         return static_cast<Result>(a) + b;
@@ -33,6 +33,7 @@ void registerFunctionAsinh(FunctionFactory & factory);
 void registerFunctionAcosh(FunctionFactory & factory);
 void registerFunctionAtanh(FunctionFactory & factory);
 void registerFunctionPow(FunctionFactory & factory);
+void registerFunctionSign(FunctionFactory & factory);


 void registerFunctionsMath(FunctionFactory & factory)
@@ -68,6 +69,7 @@ void registerFunctionsMath(FunctionFactory & factory)
     registerFunctionAcosh(factory);
     registerFunctionAtanh(factory);
     registerFunctionPow(factory);
+    registerFunctionSign(factory);
 }

 }
src/Functions/sign.cpp (new file, 47 lines)
@@ -0,0 +1,47 @@
+#include <Functions/FunctionFactory.h>
+#include <Functions/FunctionUnaryArithmetic.h>
+#include <DataTypes/NumberTraits.h>
+#include <Common/FieldVisitors.h>
+
+namespace DB
+{
+template <typename A>
+struct SignImpl
+{
+    using ResultType = Int8;
+    static const constexpr bool allow_fixed_string = false;
+
+    static inline NO_SANITIZE_UNDEFINED ResultType apply(A a)
+    {
+        if constexpr (IsDecimalNumber<A> || std::is_floating_point_v<A>)
+            return a < A(0) ? -1 : a == A(0) ? 0 : 1;
+        else if constexpr (is_signed_v<A>)
+            return a < 0 ? -1 : a == 0 ? 0 : 1;
+        else if constexpr (is_unsigned_v<A>)
+            return a == 0 ? 0 : 1;
+    }
+
+#if USE_EMBEDDED_COMPILER
+    static constexpr bool compilable = false;
+#endif
+};
+
+struct NameSign
+{
+    static constexpr auto name = "sign";
+};
+using FunctionSign = FunctionUnaryArithmetic<SignImpl, NameSign, false>;
+
+template <>
+struct FunctionUnaryArithmeticMonotonicity<NameSign>
+{
+    static bool has() { return true; }
+    static IFunction::Monotonicity get(const Field &, const Field &) { return {true, true, false}; }
+};
+
+void registerFunctionSign(FunctionFactory & factory)
+{
+    factory.registerFunction<FunctionSign>(FunctionFactory::CaseInsensitive);
+}
+
+}
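The core of the new sign function is a three-way compile-time branch over the argument type. A self-contained sketch of the same logic outside ClickHouse (standard type traits only, hypothetical names):

#include <cstdint>
#include <iostream>
#include <type_traits>

// Floating point and signed integers map to -1/0/1; unsigned integers can
// only yield 0 or 1, mirroring the branches of SignImpl above.
template <typename A>
int8_t sign(A a)
{
    if constexpr (std::is_floating_point_v<A> || std::is_signed_v<A>)
        return a < A(0) ? -1 : a == A(0) ? 0 : 1;
    else
        return a == A(0) ? 0 : 1;
}

int main()
{
    std::cout << int(sign(-3)) << ' ' << int(sign(0u)) << ' ' << int(sign(2.5)) << '\n';  // -1 0 1
}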
@@ -428,6 +428,7 @@ SRCS(
     runningDifference.cpp
     runningDifferenceStartingWithFirstValue.cpp
     sigmoid.cpp
+    sign.cpp
     sin.cpp
     sinh.cpp
     sleep.cpp
@@ -17,7 +17,6 @@
 #include <Core/Types.h>
 #include <Core/DecimalFunctions.h>
 #include <Core/UUID.h>
-#include <Core/BigInt.h>

 #include <Common/Exception.h>
 #include <Common/StringUtils/StringUtils.h>
@@ -121,17 +120,6 @@ inline void readFloatBinary(T & x, ReadBuffer & buf)
     readPODBinary(x, buf);
 }

-template <typename T>
-void readBigIntBinary(T & x, ReadBuffer & buf)
-{
-    static const constexpr size_t bytesize = BigInt<T>::size;
-    char bytes[bytesize];
-
-    buf.readStrict(bytes, bytesize);
-
-    x = BigInt<T>::deserialize(bytes);
-}
-
 inline void readStringBinary(std::string & s, ReadBuffer & buf, size_t MAX_STRING_SIZE = DEFAULT_MAX_STRING_SIZE)
 {
     size_t size = 0;
@@ -849,11 +837,11 @@ inline void readBinary(DummyUInt256 & x, ReadBuffer & buf) { readPODBinary(x, bu
 inline void readBinary(Decimal32 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
 inline void readBinary(Decimal64 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
 inline void readBinary(Decimal128 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
-inline void readBinary(Decimal256 & x, ReadBuffer & buf) { readBigIntBinary(x.value, buf); }
+inline void readBinary(Decimal256 & x, ReadBuffer & buf) { readPODBinary(x.value, buf); }
 inline void readBinary(LocalDate & x, ReadBuffer & buf) { readPODBinary(x, buf); }

-inline void readBinary(UInt256 & x, ReadBuffer & buf) { readBigIntBinary(x, buf); }
-inline void readBinary(Int256 & x, ReadBuffer & buf) { readBigIntBinary(x, buf); }
+inline void readBinary(UInt256 & x, ReadBuffer & buf) { readPODBinary(x, buf); }
+inline void readBinary(Int256 & x, ReadBuffer & buf) { readPODBinary(x, buf); }

 template <typename T>
 inline std::enable_if_t<is_arithmetic_v<T> && (sizeof(T) <= 8), void>
@@ -16,7 +16,6 @@
 #include <Core/DecimalFunctions.h>
 #include <Core/Types.h>
 #include <Core/UUID.h>
-#include <Core/BigInt.h>

 #include <Common/Exception.h>
 #include <Common/StringUtils/StringUtils.h>
@@ -121,17 +120,6 @@ inline void writeStringBinary(const std::string_view & s, WriteBuffer & buf)
     writeStringBinary(StringRef{s}, buf);
 }

-template <typename T>
-void writeBigIntBinary(const T & x, WriteBuffer & buf)
-{
-    static const constexpr size_t bytesize = BigInt<T>::size;
-    char bytes[bytesize];
-
-    BigInt<T>::serialize(x, bytes);
-
-    buf.write(bytes, bytesize);
-}
-
 template <typename T>
 void writeVectorBinary(const std::vector<T> & v, WriteBuffer & buf)
 {
@@ -926,12 +914,12 @@ inline void writeBinary(const DummyUInt256 & x, WriteBuffer & buf) { writePODBin
 inline void writeBinary(const Decimal32 & x, WriteBuffer & buf) { writePODBinary(x, buf); }
 inline void writeBinary(const Decimal64 & x, WriteBuffer & buf) { writePODBinary(x, buf); }
 inline void writeBinary(const Decimal128 & x, WriteBuffer & buf) { writePODBinary(x, buf); }
-inline void writeBinary(const Decimal256 & x, WriteBuffer & buf) { writeBigIntBinary(x.value, buf); }
+inline void writeBinary(const Decimal256 & x, WriteBuffer & buf) { writePODBinary(x.value, buf); }
 inline void writeBinary(const LocalDate & x, WriteBuffer & buf) { writePODBinary(x, buf); }
 inline void writeBinary(const LocalDateTime & x, WriteBuffer & buf) { writePODBinary(x, buf); }

-inline void writeBinary(const UInt256 & x, WriteBuffer & buf) { writeBigIntBinary(x, buf); }
-inline void writeBinary(const Int256 & x, WriteBuffer & buf) { writeBigIntBinary(x, buf); }
+inline void writeBinary(const UInt256 & x, WriteBuffer & buf) { writePODBinary(x, buf); }
+inline void writeBinary(const Int256 & x, WriteBuffer & buf) { writePODBinary(x, buf); }

 /// Methods for outputting the value in text form for a tab-separated format.
 template <typename T>
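After these changes, 256-bit integers and Decimal256 are written and read as plain fixed-size memory, the same way readPODBinary and writePODBinary treat other arithmetic types, so the dedicated big-integer (de)serializers are gone. A minimal sketch of such POD-style serialization into a byte buffer (hypothetical helper names):

#include <cstring>
#include <iostream>
#include <vector>

// Fixed-size values are stored as their raw object representation.
template <typename T>
void write_pod(std::vector<char> & buf, const T & x)
{
    const char * p = reinterpret_cast<const char *>(&x);
    buf.insert(buf.end(), p, p + sizeof(T));
}

template <typename T>
T read_pod(const std::vector<char> & buf, size_t & pos)
{
    T x;
    std::memcpy(&x, buf.data() + pos, sizeof(T));
    pos += sizeof(T);
    return x;
}

int main()
{
    std::vector<char> buf;
    write_pod(buf, 1234567890123456789LL);
    size_t pos = 0;
    std::cout << read_pod<long long>(buf, pos) << '\n';
}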
@@ -12,12 +12,10 @@ void ApplyWithAliasVisitor::visit(ASTPtr & ast, const Data & data)
         std::optional<Data> new_data;
         if (auto with = node_select->with())
         {
-            for (auto & child : with->children)
-                visit(child, data);
-
             std::set<String> current_names;
             for (auto & child : with->children)
             {
+                visit(child, new_data ? *new_data : data);
                 if (auto * ast_with_alias = dynamic_cast<ASTWithAlias *>(child.get()))
                 {
                     if (!new_data)
@@ -3,51 +3,77 @@
 #include <Parsers/ASTSelectWithUnionQuery.h>
 #include <Parsers/ASTWithAlias.h>
-
-#include <map>

 namespace DB
 {

+void ApplyWithGlobalVisitor::visit(ASTSelectQuery & select, const std::map<String, ASTPtr> & exprs, const ASTPtr & with_expression_list)
+{
+    auto with = select.with();
+    if (with)
+    {
+        std::set<String> current_names;
+        for (const auto & child : with->children)
+        {
+            if (const auto * ast_with_alias = dynamic_cast<const ASTWithAlias *>(child.get()))
+                current_names.insert(ast_with_alias->alias);
+        }
+        for (const auto & with_alias : exprs)
+        {
+            if (!current_names.count(with_alias.first))
+                with->children.push_back(with_alias.second->clone());
+        }
+    }
+    else
+        select.setExpression(ASTSelectQuery::Expression::WITH, with_expression_list->clone());
+}
+
+void ApplyWithGlobalVisitor::visit(
+    ASTSelectWithUnionQuery & selects, const std::map<String, ASTPtr> & exprs, const ASTPtr & with_expression_list)
+{
+    for (auto & select : selects.list_of_selects->children)
+    {
+        if (ASTSelectWithUnionQuery * node_union = select->as<ASTSelectWithUnionQuery>())
+        {
+            visit(*node_union, exprs, with_expression_list);
+        }
+        else if (ASTSelectQuery * node_select = select->as<ASTSelectQuery>())
+        {
+            visit(*node_select, exprs, with_expression_list);
+        }
+    }
+}
+
 void ApplyWithGlobalVisitor::visit(ASTPtr & ast)
 {
     if (ASTSelectWithUnionQuery * node_union = ast->as<ASTSelectWithUnionQuery>())
     {
-        auto & first_select = node_union->list_of_selects->children[0]->as<ASTSelectQuery &>();
-        ASTPtr with_expression_list = first_select.with();
-
-        if (with_expression_list)
+        if (auto * first_select = node_union->list_of_selects->children[0]->as<ASTSelectQuery>())
         {
-            std::map<String, ASTPtr> exprs;
-            for (auto & child : with_expression_list->children)
-            {
-                if (auto * ast_with_alias = dynamic_cast<ASTWithAlias *>(child.get()))
-                    exprs[ast_with_alias->alias] = child;
-            }
-            for (auto it = node_union->list_of_selects->children.begin() + 1; it != node_union->list_of_selects->children.end(); ++it)
+            ASTPtr with_expression_list = first_select->with();
+            if (with_expression_list)
             {
-                auto & select = (*it)->as<ASTSelectQuery &>();
-                auto with = select.with();
-                if (with)
+                std::map<String, ASTPtr> exprs;
+                for (auto & child : with_expression_list->children)
                 {
-                    std::set<String> current_names;
-                    for (auto & child : with->children)
-                    {
-                        if (auto * ast_with_alias = dynamic_cast<ASTWithAlias *>(child.get()))
-                            current_names.insert(ast_with_alias->alias);
-                    }
-                    for (auto & with_alias : exprs)
-                    {
-                        if (!current_names.count(with_alias.first))
-                            with->children.push_back(with_alias.second->clone());
-                    }
+                    if (auto * ast_with_alias = dynamic_cast<ASTWithAlias *>(child.get()))
+                        exprs[ast_with_alias->alias] = child;
                 }
-                else
-                    select.setExpression(ASTSelectQuery::Expression::WITH, with_expression_list->clone());
+                for (auto it = node_union->list_of_selects->children.begin() + 1; it != node_union->list_of_selects->children.end(); ++it)
+                {
+                    if (auto * union_child = (*it)->as<ASTSelectWithUnionQuery>())
+                        visit(*union_child, exprs, with_expression_list);
+                    else if (auto * select_child = (*it)->as<ASTSelectQuery>())
+                        visit(*select_child, exprs, with_expression_list);
+                }
             }
         }
     }
-
-    for (auto & child : ast->children)
-        visit(child);
+    else
+    {
+        // Other non-SELECT queries that contains SELECT children, such as EXPLAIN or INSERT
+        for (auto & child : ast->children)
+            visit(child);
+    }
 }

 }
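The propagation rule implemented above, reduced to its essence: aliases defined in the first SELECT's WITH clause are pushed into the other SELECTs of the UNION unless a SELECT already defines the same alias. A toy sketch with std::map standing in for the AST (all names hypothetical):

#include <iostream>
#include <map>
#include <string>

// Copy globally visible WITH expressions into a later SELECT, skipping
// aliases the local WITH clause already defines.
static void propagate_with(const std::map<std::string, std::string> & global_exprs,
                           std::map<std::string, std::string> & local_with)
{
    for (const auto & [alias, expr] : global_exprs)
        if (!local_with.count(alias))
            local_with.emplace(alias, expr);
}

int main()
{
    std::map<std::string, std::string> global{{"x", "1 + 1"}, {"y", "2"}};
    std::map<std::string, std::string> second_select{{"y", "42"}};  // local definition wins
    propagate_with(global, second_select);
    for (const auto & [alias, expr] : second_select)
        std::cout << alias << " = " << expr << '\n';  // x = 1 + 1, y = 42
}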
@@ -1,14 +1,23 @@
 #pragma once

 #include <Parsers/IAST.h>
+#include <map>

 namespace DB
 {
+
+class ASTSelectWithUnionQuery;
+class ASTSelectQuery;
+
 /// Pull out the WITH statement from the first child of ASTSelectWithUnion query if any.
 class ApplyWithGlobalVisitor
 {
 public:
     static void visit(ASTPtr & ast);
+
+private:
+    static void visit(ASTSelectWithUnionQuery & selects, const std::map<String, ASTPtr> & exprs, const ASTPtr & with_expression_list);
+    static void visit(ASTSelectQuery & select, const std::map<String, ASTPtr> & exprs, const ASTPtr & with_expression_list);
 };

 }
@@ -4,6 +4,7 @@
 #include <Interpreters/misc.h>
 #include <Parsers/ASTFunction.h>
 #include <Parsers/ASTSelectQuery.h>
+#include <Parsers/ASTSubquery.h>
 #include <Parsers/ASTTablesInSelectQuery.h>
 #include <Parsers/ASTWithElement.h>

@@ -55,10 +56,13 @@ void ApplyWithSubqueryVisitor::visit(ASTTableExpression & table, const Data & da
         auto subquery_it = data.subqueries.find(table_id.table_name);
         if (subquery_it != data.subqueries.end())
         {
+            auto old_alias = table.database_and_table_name->tryGetAlias();
             table.children.clear();
             table.database_and_table_name.reset();
             table.subquery = subquery_it->second->clone();
-            dynamic_cast<ASTWithAlias &>(*table.subquery).alias = table_id.table_name;
+            table.subquery->as<ASTSubquery &>().cte_name = table_id.table_name;
+            if (!old_alias.empty())
+                table.subquery->setAlias(old_alias);
             table.children.emplace_back(table.subquery);
         }
     }
@@ -78,8 +82,11 @@ void ApplyWithSubqueryVisitor::visit(ASTFunction & func, const Data & data)
             auto subquery_it = data.subqueries.find(table_id.table_name);
             if (subquery_it != data.subqueries.end())
             {
+                auto old_alias = func.arguments->children[1]->tryGetAlias();
                 func.arguments->children[1] = subquery_it->second->clone();
-                dynamic_cast<ASTWithAlias &>(*func.arguments->children[1]).alias = table_id.table_name;
+                func.arguments->children[1]->as<ASTSubquery &>().cte_name = table_id.table_name;
+                if (!old_alias.empty())
+                    func.arguments->children[1]->setAlias(old_alias);
             }
         }
     }
@@ -44,7 +44,15 @@ DatabaseAndTableWithAlias::DatabaseAndTableWithAlias(const ASTTableExpression &
     else if (table_expression.table_function)
         alias = table_expression.table_function->tryGetAlias();
     else if (table_expression.subquery)
+    {
+        const auto & cte_name = table_expression.subquery->as<const ASTSubquery &>().cte_name;
+        if (!cte_name.empty())
+        {
+            database = current_database;
+            table = cte_name;
+        }
         alias = table_expression.subquery->tryGetAlias();
+    }
     else
         throw Exception("Logical error: no known elements in ASTTableExpression", ErrorCodes::LOGICAL_ERROR);
 }
@@ -55,6 +55,7 @@
 #include <IO/WriteBufferFromString.h>
 
 #include <Processors/Executors/PullingPipelineExecutor.h>
+#include <Parsers/formatAST.h>
 
 namespace DB
 {
@@ -31,6 +31,12 @@ public:
     {
         if (child->as<ASTSubquery>() || child->as<ASTSelectQuery>())
             return false;
+        if (auto * select = node->as<ASTSelectQuery>())
+        {
+            // We don't analysis WITH statement because it might contain useless aggregates
+            if (child == select->with())
+                return false;
+        }
         if (auto * func = node->as<ASTFunction>())
         {
             if (isAggregateFunction(*func))
@@ -294,9 +294,13 @@ InterpreterSelectQuery::InterpreterSelectQuery(
         source_header = input_pipe->getHeader();
     }
 
-    if (context->getSettingsRef().enable_global_with_statement)
-        ApplyWithAliasVisitor().visit(query_ptr);
-    ApplyWithSubqueryVisitor().visit(query_ptr);
+    // Only propagate WITH elements to subqueries if we're not a subquery
+    if (options.subquery_depth == 0)
+    {
+        if (context->getSettingsRef().enable_global_with_statement)
+            ApplyWithAliasVisitor().visit(query_ptr);
+        ApplyWithSubqueryVisitor().visit(query_ptr);
+    }
 
     JoinedTables joined_tables(getSubqueryContext(*context), getSelectQuery());
 
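The hunk above makes the WITH-propagation visitors run only for the top-level query (subquery_depth == 0), so nested interpreters no longer re-apply them. As a rough, illustrative SQL sketch of the behaviour the enable_global_with_statement setting controls (query text not taken from this commit's tests):

    SET enable_global_with_statement = 1;
    WITH 5 AS constant
    SELECT * FROM (SELECT constant);  -- the outer WITH alias is visible inside the subquery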
@@ -930,7 +930,7 @@ std::shared_ptr<Block> MergeJoin::loadRightBlock(size_t pos) const
 {
     auto load_func = [&]() -> std::shared_ptr<Block>
     {
-        TemporaryFileStream input(flushed_right_blocks[pos]->path(), right_sample_block);
+        TemporaryFileStream input(flushed_right_blocks[pos]->path(), materializeBlock(right_sample_block));
         return std::make_shared<Block>(input.block_in->read());
     };
 
@@ -60,12 +60,6 @@ static bool isUnlimitedQuery(const IAST * ast)
 }
 
 
-ProcessList::ProcessList(size_t max_size_)
-    : max_size(max_size_)
-{
-}
-
-
 ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * ast, Context & query_context)
 {
     EntryPtr res;
@@ -202,7 +196,7 @@ ProcessList::EntryPtr ProcessList::insert(const String & query_, const IAST * as
         thread_group->performance_counters.setParent(&user_process_list.user_performance_counters);
         thread_group->memory_tracker.setParent(&user_process_list.user_memory_tracker);
         thread_group->query = process_it->query;
-        thread_group->normalized_query_hash = normalizedQueryHash(process_it->query);
+        thread_group->normalized_query_hash = normalizedQueryHash<false>(process_it->query);
 
         /// Set query-level memory trackers
         thread_group->memory_tracker.setOrRaiseHardLimit(settings.max_memory_usage);
@@ -297,8 +297,6 @@ protected:
     QueryStatus * tryGetProcessListElement(const String & current_query_id, const String & current_user);
 
 public:
-    ProcessList(size_t max_size_ = 0);
-
     using EntryPtr = std::shared_ptr<ProcessListEntry>;
 
     /** Register running query. Returns refcounted object, that will remove element from list in destructor.
@@ -92,6 +92,8 @@ Block QueryLogElement::createBlock()
 
         {std::make_shared<DataTypeUInt32>(), "revision"},
 
+        {std::make_shared<DataTypeString>(), "log_comment"},
+
         {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>()), "thread_ids"},
         {std::make_shared<DataTypeArray>(std::make_shared<DataTypeString>()), "ProfileEvents.Names"},
         {std::make_shared<DataTypeArray>(std::make_shared<DataTypeUInt64>()), "ProfileEvents.Values"},
@@ -168,6 +170,8 @@ void QueryLogElement::appendToBlock(MutableColumns & columns) const
 
     columns[i++]->insert(ClickHouseRevision::getVersionRevision());
 
+    columns[i++]->insertData(log_comment.data(), log_comment.size());
+
     {
         Array threads_array;
         threads_array.reserve(thread_ids.size());
@@ -74,6 +74,8 @@ struct QueryLogElement
 
     ClientInfo client_info;
 
+    String log_comment;
+
     std::vector<UInt64> thread_ids;
     std::shared_ptr<ProfileEvents::Counters> profile_counters;
     std::shared_ptr<Settings> query_settings;
@@ -123,8 +123,12 @@ void RequiredSourceColumnsMatcher::visit(const ASTSelectQuery & select, const AS
 
     std::vector<ASTPtr *> out;
     for (const auto & node : select.children)
-        if (node != select.select())
+    {
+        // We should not go into WITH statement because all needed aliases are already expanded to
+        // the right place after normalization. And it might contain unused unknown columns.
+        if (node != select.select() && node != select.with())
             Visitor(data).visit(node);
+    }
 
     /// revisit select_expression_list (with children) when all the aliases are set
     Visitor(data).visit(select.select());
@@ -3,6 +3,7 @@
 #include <DataStreams/MergingSortedBlockInputStream.h>
 #include <DataStreams/OneBlockInputStream.h>
 #include <DataStreams/TemporaryFileStream.h>
+#include <DataStreams/materializeBlock.h>
 #include <Disks/StoragePolicy.h>
 
 namespace DB
@@ -198,7 +199,7 @@ SortedBlocksWriter::SortedFiles SortedBlocksWriter::finishMerge(std::function<vo
 
 BlockInputStreamPtr SortedBlocksWriter::streamFromFile(const TmpFilePtr & file) const
 {
-    return std::make_shared<TemporaryFileLazyInputStream>(file->path(), sample_block);
+    return std::make_shared<TemporaryFileLazyInputStream>(file->path(), materializeBlock(sample_block));
 }
 
 String SortedBlocksWriter::getPath() const
@@ -157,11 +157,21 @@ static void logQuery(const String & query, const Context & context, bool interna
     const auto & initial_query_id = client_info.initial_query_id;
     const auto & current_user = client_info.current_user;
 
-    LOG_DEBUG(&Poco::Logger::get("executeQuery"), "(from {}{}{}, using {} parser) {}",
+    String comment = context.getSettingsRef().log_comment;
+    size_t max_query_size = context.getSettingsRef().max_query_size;
+
+    if (comment.size() > max_query_size)
+        comment.resize(max_query_size);
+
+    if (!comment.empty())
+        comment = fmt::format(" (comment: {})", comment);
+
+    LOG_DEBUG(&Poco::Logger::get("executeQuery"), "(from {}{}{}, using {} parser){} {}",
         client_info.current_address.toString(),
         (current_user != "default" ? ", user: " + current_user : ""),
         (!initial_query_id.empty() && current_query_id != initial_query_id ? ", initial_query_id: " + initial_query_id : std::string()),
         (context.getSettingsRef().use_antlr_parser ? "experimental" : "production"),
+        comment,
         joinLines(query));
 
     if (client_info.client_trace_context.trace_id)
@@ -196,13 +206,17 @@ static void setExceptionStackTrace(QueryLogElement & elem)
 /// Log exception (with query info) into text log (not into system table).
 static void logException(Context & context, QueryLogElement & elem)
 {
+    String comment;
+    if (!elem.log_comment.empty())
+        comment = fmt::format(" (comment: {})", elem.log_comment);
+
     if (elem.stack_trace.empty())
-        LOG_ERROR(&Poco::Logger::get("executeQuery"), "{} (from {}) (in query: {})",
-            elem.exception, context.getClientInfo().current_address.toString(), joinLines(elem.query));
+        LOG_ERROR(&Poco::Logger::get("executeQuery"), "{} (from {}){} (in query: {})",
+            elem.exception, context.getClientInfo().current_address.toString(), comment, joinLines(elem.query));
     else
-        LOG_ERROR(&Poco::Logger::get("executeQuery"), "{} (from {}) (in query: {})"
+        LOG_ERROR(&Poco::Logger::get("executeQuery"), "{} (from {}){} (in query: {})"
             ", Stack trace (when copying this message, always include the lines below):\n\n{}",
-            elem.exception, context.getClientInfo().current_address.toString(), joinLines(elem.query), elem.stack_trace);
+            elem.exception, context.getClientInfo().current_address.toString(), comment, joinLines(elem.query), elem.stack_trace);
 }
 
 inline UInt64 time_in_microseconds(std::chrono::time_point<std::chrono::system_clock> timepoint)
@@ -239,7 +253,7 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c
 
     elem.current_database = context.getCurrentDatabase();
     elem.query = query_for_logging;
-    elem.normalized_query_hash = normalizedQueryHash(query_for_logging);
+    elem.normalized_query_hash = normalizedQueryHash<false>(query_for_logging);
 
     // We don't calculate query_kind, databases, tables and columns when the query isn't able to start
 
@@ -248,6 +262,10 @@ static void onExceptionBeforeStart(const String & query_for_logging, Context & c
 
     elem.client_info = context.getClientInfo();
 
+    elem.log_comment = settings.log_comment;
+    if (elem.log_comment.size() > settings.max_query_size)
+        elem.log_comment.resize(settings.max_query_size);
+
     if (settings.calculate_text_stack_trace)
         setExceptionStackTrace(elem);
     logException(context, elem);
@@ -604,7 +622,7 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
 
         elem.current_database = context.getCurrentDatabase();
         elem.query = query_for_logging;
-        elem.normalized_query_hash = normalizedQueryHash(query_for_logging);
+        elem.normalized_query_hash = normalizedQueryHash<false>(query_for_logging);
 
         elem.client_info = context.getClientInfo();
 
@@ -626,6 +644,10 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
         if (settings.log_query_settings)
             elem.query_settings = std::make_shared<Settings>(context.getSettingsRef());
 
+        elem.log_comment = settings.log_comment;
+        if (elem.log_comment.size() > settings.max_query_size)
+            elem.log_comment.resize(settings.max_query_size);
+
         if (elem.type >= settings.log_queries_min_type && !settings.log_queries_min_query_duration_ms.totalMilliseconds())
         {
             if (auto query_log = context.getQueryLog())
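The log_comment hunks above store the new setting in the query log entry, truncated to max_query_size. A hypothetical usage sketch (assumes the standard system.query_log table and SYSTEM FLUSH LOGS, which are not part of this diff):

    SELECT 1 SETTINGS log_comment = 'ad-hoc check';
    SYSTEM FLUSH LOGS;
    SELECT query, log_comment
    FROM system.query_log
    WHERE log_comment = 'ad-hoc check'
    ORDER BY event_time DESC
    LIMIT 1;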
@@ -73,8 +73,12 @@ ASTPtr extractTableExpression(const ASTSelectQuery & select, size_t table_number
         return nullptr;
 }
 
-static NamesAndTypesList getColumnsFromTableExpression(const ASTTableExpression & table_expression, const Context & context,
-    NamesAndTypesList & materialized, NamesAndTypesList & aliases, NamesAndTypesList & virtuals)
+static NamesAndTypesList getColumnsFromTableExpression(
+    const ASTTableExpression & table_expression,
+    const Context & context,
+    NamesAndTypesList & materialized,
+    NamesAndTypesList & aliases,
+    NamesAndTypesList & virtuals)
 {
     NamesAndTypesList names_and_type_list;
     if (table_expression.subquery)
@@ -1,6 +1,7 @@
 #include <Parsers/ASTSubquery.h>
 #include <IO/WriteHelpers.h>
 #include <IO/Operators.h>
+#include <Common/SipHash.h>
 
 namespace DB
 {
@@ -12,6 +13,10 @@ void ASTSubquery::appendColumnNameImpl(WriteBuffer & ostr) const
     {
         writeString(alias, ostr);
     }
+    else if (!cte_name.empty())
+    {
+        writeString(cte_name, ostr);
+    }
     else
     {
         Hash hash = getTreeHash();
@@ -24,6 +29,14 @@ void ASTSubquery::appendColumnNameImpl(WriteBuffer & ostr) const
 
 void ASTSubquery::formatImplWithoutAlias(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const
 {
+    if (!cte_name.empty())
+    {
+        settings.ostr << (settings.hilite ? hilite_identifier : "");
+        settings.writeIdentifier(cte_name);
+        settings.ostr << (settings.hilite ? hilite_none : "");
+        return;
+    }
+
     std::string indent_str = settings.one_line ? "" : std::string(4u * frame.indent, ' ');
     std::string nl_or_nothing = settings.one_line ? "" : "\n";
 
@@ -35,5 +48,12 @@ void ASTSubquery::formatImplWithoutAlias(const FormatSettings & settings, Format
     settings.ostr << nl_or_nothing << indent_str << ")";
 }
 
+void ASTSubquery::updateTreeHashImpl(SipHash & hash_state) const
+{
+    if (!cte_name.empty())
+        hash_state.update(cte_name);
+    IAST::updateTreeHashImpl(hash_state);
+}
+
 }
 
@@ -12,6 +12,10 @@ namespace DB
 class ASTSubquery : public ASTWithAlias
 {
 public:
+    // Stored the name when the subquery is defined in WITH clause. For example:
+    // WITH (SELECT 1) AS a SELECT * FROM a AS b; cte_name will be `a`.
+    std::string cte_name;
+
     /** Get the text that identifies this element. */
     String getID(char) const override { return "Subquery"; }
 
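The example from the comment above, written out as a runnable query (the CTE name `a` becomes cte_name on the subquery node):

    WITH (SELECT 1) AS a
    SELECT * FROM a AS b;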
@@ -28,6 +32,8 @@ public:
         return ptr;
     }
 
+    void updateTreeHashImpl(SipHash & hash_state) const override;
+
 protected:
     void formatImplWithoutAlias(const FormatSettings & settings, FormatState & state, FormatStateStacked frame) const override;
     void appendColumnNameImpl(WriteBuffer & ostr) const override;
@@ -1,4 +1,5 @@
 #include <Parsers/ASTWithElement.h>
+#include <Parsers/ASTWithAlias.h>
 #include <IO/Operators.h>
 
 namespace DB
@@ -8,7 +9,6 @@ ASTPtr ASTWithElement::clone() const
 {
     const auto res = std::make_shared<ASTWithElement>(*this);
     res->children.clear();
-    res->name = name;
     res->subquery = subquery->clone();
     res->children.emplace_back(res->subquery);
     return res;
@@ -18,6 +18,7 @@ void ASTWithElement::formatImpl(const FormatSettings & settings, FormatState & s
 {
     settings.writeIdentifier(name);
     settings.ostr << (settings.hilite ? hilite_keyword : "") << " AS " << (settings.hilite ? hilite_none : "");
-    subquery->formatImpl(settings, state, frame);
+    dynamic_cast<const ASTWithAlias *>(subquery.get())->formatImplWithoutAlias(settings, state, frame);
 }
 
 }
@@ -23,6 +23,7 @@ bool ParserWithElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
         auto with_element = std::make_shared<ASTWithElement>();
         tryGetIdentifierNameInto(name, with_element->name);
         with_element->subquery = subquery;
+        with_element->children.push_back(with_element->subquery);
         node = with_element;
     }
     else
@@ -10,6 +10,7 @@
 
 namespace DB
 {
+template <bool keep_names>
 inline UInt64 ALWAYS_INLINE normalizedQueryHash(const char * begin, const char * end)
 {
     SipHash hash;
@@ -61,31 +62,39 @@ inline UInt64 ALWAYS_INLINE normalizedQueryHash(const char * begin, const char *
             /// By the way, there is padding in columns and pointer dereference is Ok.
             || (token.type == TokenType::BareWord && *token.end != '('))
         {
-            /// Identifier is complex if it contains whitespace or more than two digits
-            /// or it's at least 36 bytes long (UUID for example).
-            size_t num_digits = 0;
-
-            const char * pos = token.begin;
-            if (token.size() < 36)
-            {
-                for (; pos != token.end; ++pos)
-                {
-                    if (isWhitespaceASCII(*pos))
-                        break;
-
-                    if (isNumericASCII(*pos))
-                    {
-                        ++num_digits;
-                        if (num_digits > 2)
-                            break;
-                    }
-                }
-            }
-
+            /// Explicitly ask to keep identifier names
+            if constexpr (keep_names)
+            {
+                hash.update(token.begin, token.size());
+            }
+            else
+            {
+                /// Identifier is complex if it contains whitespace or more than two digits
+                /// or it's at least 36 bytes long (UUID for example).
+                size_t num_digits = 0;
+
+                const char * pos = token.begin;
+                if (token.size() < 36)
+                {
+                    for (; pos != token.end; ++pos)
+                    {
+                        if (isWhitespaceASCII(*pos))
+                            break;
+
+                        if (isNumericASCII(*pos))
+                        {
+                            ++num_digits;
+                            if (num_digits > 2)
+                                break;
+                        }
+                    }
+                }
+
                 if (pos == token.end)
                     hash.update(token.begin, token.size());
                 else
                     hash.update("\x01", 1);
+            }
 
             continue;
         }
@@ -99,12 +108,14 @@ inline UInt64 ALWAYS_INLINE normalizedQueryHash(const char * begin, const char *
     return hash.get64();
 }
 
+template <bool keep_names>
 inline UInt64 ALWAYS_INLINE normalizedQueryHash(const String & query)
 {
-    return normalizedQueryHash(query.data(), query.data() + query.size());
+    return normalizedQueryHash<keep_names>(query.data(), query.data() + query.size());
 }
 
 
+template <bool keep_names>
 inline void ALWAYS_INLINE normalizeQueryToPODArray(const char * begin, const char * end, PaddedPODArray<UInt8> & res_data)
 {
     Lexer lexer(begin, end);
@@ -187,36 +198,44 @@ inline void ALWAYS_INLINE normalizeQueryToPODArray(const char * begin, const cha
             /// By the way, there is padding in columns and pointer dereference is Ok.
             || (token.type == TokenType::BareWord && *token.end != '('))
         {
-            /// Identifier is complex if it contains whitespace or more than two digits
-            /// or it's at least 36 bytes long (UUID for example).
-            size_t num_digits = 0;
-
-            const char * pos = token.begin;
-            if (token.size() < 36)
-            {
-                for (; pos != token.end; ++pos)
-                {
-                    if (isWhitespaceASCII(*pos))
-                        break;
-
-                    if (isNumericASCII(*pos))
-                    {
-                        ++num_digits;
-                        if (num_digits > 2)
-                            break;
-                    }
-                }
-            }
-
-            if (pos == token.end)
+            /// Explicitly ask to normalize with identifier names
+            if constexpr (keep_names)
             {
                 res_data.insert(token.begin, token.end);
             }
             else
             {
-                res_data.push_back('`');
-                res_data.push_back('?');
-                res_data.push_back('`');
+                /// Identifier is complex if it contains whitespace or more than two digits
+                /// or it's at least 36 bytes long (UUID for example).
+                size_t num_digits = 0;
+
+                const char * pos = token.begin;
+                if (token.size() < 36)
+                {
+                    for (; pos != token.end; ++pos)
+                    {
+                        if (isWhitespaceASCII(*pos))
+                            break;
+
+                        if (isNumericASCII(*pos))
+                        {
+                            ++num_digits;
+                            if (num_digits > 2)
+                                break;
+                        }
+                    }
+                }
+
+                if (pos == token.end)
+                {
+                    res_data.insert(token.begin, token.end);
+                }
+                else
+                {
+                    res_data.push_back('`');
+                    res_data.push_back('?');
+                    res_data.push_back('`');
+                }
             }
 
             continue;
@@ -216,7 +216,8 @@ StorageS3::StorageS3(
     storage_metadata.setConstraints(constraints_);
     setInMemoryMetadata(storage_metadata);
 
-    auto settings = context_.getStorageS3Settings().getSettings(uri.endpoint);
+    auto settings = context_.getStorageS3Settings().getSettings(uri.uri.toString());
+
     Aws::Auth::AWSCredentials credentials(access_key_id_, secret_access_key_);
     if (access_key_id_.empty())
         credentials = Aws::Auth::AWSCredentials(std::move(settings.access_key_id), std::move(settings.secret_access_key));
@@ -3,6 +3,8 @@
 #include <Poco/Util/AbstractConfiguration.h>
 #include <Common/Exception.h>
 
+#include <boost/algorithm/string/predicate.hpp>
+
 
 namespace DB
 {
@@ -23,39 +25,50 @@ void StorageS3Settings::loadFromConfig(const String & config_elem, const Poco::U
 
     for (const String & key : config_keys)
     {
-        auto endpoint = config.getString(config_elem + "." + key + ".endpoint");
-        auto access_key_id = config.getString(config_elem + "." + key + ".access_key_id", "");
-        auto secret_access_key = config.getString(config_elem + "." + key + ".secret_access_key", "");
-        std::optional<bool> use_environment_credentials;
-        if (config.has(config_elem + "." + key + ".use_environment_credentials"))
-        {
-            use_environment_credentials = config.getBool(config_elem + "." + key + ".use_environment_credentials");
-        }
-
-        HeaderCollection headers;
-        Poco::Util::AbstractConfiguration::Keys subconfig_keys;
-        config.keys(config_elem + "." + key, subconfig_keys);
-        for (const String & subkey : subconfig_keys)
-        {
-            if (subkey.starts_with("header"))
-            {
-                auto header_str = config.getString(config_elem + "." + key + "." + subkey);
-                auto delimiter = header_str.find(':');
-                if (delimiter == String::npos)
-                    throw Exception("Malformed s3 header value", ErrorCodes::INVALID_CONFIG_PARAMETER);
-                headers.emplace_back(HttpHeader{header_str.substr(0, delimiter), header_str.substr(delimiter + 1, String::npos)});
-            }
-        }
-
-        settings.emplace(endpoint, S3AuthSettings{std::move(access_key_id), std::move(secret_access_key), std::move(headers), use_environment_credentials});
+        if (config.has(config_elem + "." + key + ".endpoint"))
+        {
+            auto endpoint = config.getString(config_elem + "." + key + ".endpoint");
+            auto access_key_id = config.getString(config_elem + "." + key + ".access_key_id", "");
+            auto secret_access_key = config.getString(config_elem + "." + key + ".secret_access_key", "");
+            std::optional<bool> use_environment_credentials;
+            if (config.has(config_elem + "." + key + ".use_environment_credentials"))
+            {
+                use_environment_credentials = config.getBool(config_elem + "." + key + ".use_environment_credentials");
+            }
+
+            HeaderCollection headers;
+            Poco::Util::AbstractConfiguration::Keys subconfig_keys;
+            config.keys(config_elem + "." + key, subconfig_keys);
+            for (const String & subkey : subconfig_keys)
+            {
+                if (subkey.starts_with("header"))
+                {
+                    auto header_str = config.getString(config_elem + "." + key + "." + subkey);
+                    auto delimiter = header_str.find(':');
+                    if (delimiter == String::npos)
+                        throw Exception("Malformed s3 header value", ErrorCodes::INVALID_CONFIG_PARAMETER);
+                    headers.emplace_back(HttpHeader{header_str.substr(0, delimiter), header_str.substr(delimiter + 1, String::npos)});
+                }
+            }
+
+            settings.emplace(endpoint, S3AuthSettings{std::move(access_key_id), std::move(secret_access_key), std::move(headers), use_environment_credentials});
+        }
     }
 }
 
 S3AuthSettings StorageS3Settings::getSettings(const String & endpoint) const
 {
     std::lock_guard lock(mutex);
-    if (auto setting = settings.find(endpoint); setting != settings.end())
-        return setting->second;
+    auto next_prefix_setting = settings.upper_bound(endpoint);
+
+    /// Linear time algorithm may be replaced with logarithmic with prefix tree map.
+    for (auto possible_prefix_setting = next_prefix_setting; possible_prefix_setting != settings.begin();)
+    {
+        std::advance(possible_prefix_setting, -1);
+        if (boost::algorithm::starts_with(endpoint, possible_prefix_setting->first))
+            return possible_prefix_setting->second;
+    }
 
     return {};
 }
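getSettings now returns the configured entry whose endpoint is a prefix of the requested URL, so a more specific entry overrides a broader one. A rough SQL illustration built on the mock config added later in this diff; the concrete file name is an assumption taken from the integration test fixture:

    SELECT *
    FROM s3('http://resolver:8080/root-with-auth/restricteddirectory/test.csv',
            'CSV', 'column1 UInt32, column2 UInt32, column3 UInt32');
    -- matches the <s3_mock_restricted_directory> endpoint prefix, which has no
    -- Authorization header configured, so the mock responds with 403
    -- (this is what test_custom_auth_headers_exclusion asserts)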
@@ -38,7 +38,8 @@ ColumnsDescription TableFunctionView::getActualTableStructure(const Context & co
     return ColumnsDescription(sample.getNamesAndTypesList());
 }
 
-StoragePtr TableFunctionView::executeImpl(const ASTPtr & /*ast_function*/, const Context & context, const std::string & table_name, ColumnsDescription /*cached_columns*/) const
+StoragePtr TableFunctionView::executeImpl(
+    const ASTPtr & /*ast_function*/, const Context & context, const std::string & table_name, ColumnsDescription /*cached_columns*/) const
 {
     auto columns = getActualTableStructure(context);
     auto res = StorageView::create(StorageID(getDatabaseName(), table_name), create, columns);
@@ -151,7 +151,7 @@ def run_single_test(args, ext, server_logs_level, client_options, case_file, std
     pattern = '{test} > {stdout} 2> {stderr}'
 
     if ext == '.sql':
-        pattern = "{client} --send_logs_level={logs_level} --testmode --multiquery {options} < " + pattern
+        pattern = "{client} --send_logs_level={logs_level} --testmode --multiquery {options} --log_comment='{test}' < " + pattern
 
     command = pattern.format(**params)
 
@@ -777,7 +777,7 @@ def main(args):
         print(colored("\nFound hung queries in processlist:", args, "red", attrs=["bold"]))
         print(processlist)
     else:
-        print(colored("Seems like server hung and cannot respond to queries", "red", attrs=["bold"]))
+        print(colored("Seems like server hung and cannot respond to queries", args, "red", attrs=["bold"]))
 
     clickhouse_tcp_port = os.getenv("CLICKHOUSE_PORT_TCP", '9000')
     server_pid = get_server_pid(clickhouse_tcp_port)
@@ -61,7 +61,10 @@ class QueryTimeoutExceedException(Exception):
 
 
 class QueryRuntimeException(Exception):
-    pass
+    def __init__(self, message, returncode, stderr):
+        super(QueryRuntimeException, self).__init__(message)
+        self.returncode = returncode
+        self.stderr = stderr
 
 
 class CommandRequest:
@@ -106,7 +109,7 @@ class CommandRequest:
 
         if (self.process.returncode != 0 or stderr) and not self.ignore_error:
             raise QueryRuntimeException(
-                'Client failed! Return code: {}, stderr: {}'.format(self.process.returncode, stderr))
+                'Client failed! Return code: {}, stderr: {}'.format(self.process.returncode, stderr), self.process.returncode, stderr)
 
         return stdout
 
@@ -122,7 +125,7 @@ class CommandRequest:
             raise QueryTimeoutExceedException('Client timed out!')
 
         if (self.process.returncode == 0):
-            raise QueryRuntimeException('Client expected to be failed but succeeded! stdout: {}'.format(stdout))
+            raise QueryRuntimeException('Client expected to be failed but succeeded! stdout: {}'.format(stdout), self.process.returncode, stderr)
 
         return stderr
 
@@ -4,5 +4,8 @@
             <endpoint>http://resolver:8080</endpoint>
             <header>Authorization: Bearer TOKEN</header>
         </s3_mock>
+        <s3_mock_restricted_directory>
+            <endpoint>http://resolver:8080/root-with-auth/restricteddirectory/</endpoint>
+        </s3_mock_restricted_directory>
     </s3>
 </yandex>
@@ -1,14 +1,14 @@
 from bottle import abort, route, run, request, response
 
 
-@route('/redirected/<_path>')
+@route('/redirected/<_path:path>')
 def infinite_redirect(_path):
     response.set_header("Location", request.url)
     response.status = 307
     return 'Redirected'
 
 
-@route('/<_bucket>/<_path>')
+@route('/<_bucket>/<_path:path>')
 def server(_bucket, _path):
     for name in request.headers:
         if name == 'Authorization' and request.headers[name] == 'Bearer TOKEN':
@@ -410,6 +410,20 @@ def test_custom_auth_headers(cluster):
     assert result == '1\t2\t3\n'
 
 
+def test_custom_auth_headers_exclusion(cluster):
+    table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
+    filename = "test.csv"
+    get_query = f"SELECT * FROM s3('http://resolver:8080/{cluster.minio_restricted_bucket}/restricteddirectory/{filename}', 'CSV', '{table_format}')"
+
+    instance = cluster.instances["dummy"]  # type: ClickHouseInstance
+    with pytest.raises(helpers.client.QueryRuntimeException) as ei:
+        result = run_query(instance, get_query)
+        print(result)
+
+    assert ei.value.returncode == 243
+    assert '403 Forbidden' in ei.value.stderr
+
+
 def test_infinite_redirect(cluster):
     bucket = "redirected"
     table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
@@ -21,17 +21,3 @@
 4611686011984936962
 4611686011984936962
 2147483648
-256
-11
-64
-1
-2
-1
-1
-5120
-121
-256
-1
-4
-735
-64770
@@ -23,19 +23,19 @@ select lcm(255, 254);
 select lcm(2147483647, 2147483646);
 select lcm(4611686011984936962, 2147483647);
 select lcm(-2147483648, 1);
--- test gcd float will cast to int
-select gcd(1280.1, 1024.1);
-select gcd(11.1, 121.1);
-select gcd(-256.1, 64.1);
-select gcd(1.1, 1.1);
-select gcd(4.1, 2.1);
-select gcd(15.1, 49.1);
-select gcd(255.1, 254.1);
--- test lcm float cast to int
-select lcm(1280.1, 1024.1);
-select lcm(11.1, 121.1);
-select lcm(-256.1, 64.1);
-select lcm(1.1, 1.1);
-select lcm(4.1, 2.1);
-select lcm(15.1, 49.1);
-select lcm(255.1, 254.1);
+-- test gcd float
+select gcd(1280.1, 1024.1); -- { serverError 48 }
+select gcd(11.1, 121.1); -- { serverError 48 }
+select gcd(-256.1, 64.1); -- { serverError 48 }
+select gcd(1.1, 1.1); -- { serverError 48 }
+select gcd(4.1, 2.1); -- { serverError 48 }
+select gcd(15.1, 49.1); -- { serverError 48 }
+select gcd(255.1, 254.1); -- { serverError 48 }
+-- test lcm float
+select lcm(1280.1, 1024.1); -- { serverError 48 }
+select lcm(11.1, 121.1); -- { serverError 48 }
+select lcm(-256.1, 64.1); -- { serverError 48 }
+select lcm(1.1, 1.1); -- { serverError 48 }
+select lcm(4.1, 2.1); -- { serverError 48 }
+select lcm(15.1, 49.1); -- { serverError 48 }
+select lcm(255.1, 254.1); -- { serverError 48 }
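The test changes above stop gcd/lcm from casting floating-point arguments to integers and reject them instead. A short sketch of the resulting behaviour (error code 48 is taken from the test expectations; the integer calls are assumed to keep working as before):

    SELECT gcd(1280, 1024), lcm(11, 121);  -- integer arguments: unchanged
    SELECT gcd(1280.1, 1024.1);            -- now fails with error code 48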
@@ -5,4 +5,3 @@
 0
 0
 0
-0
@@ -6,5 +6,5 @@ SELECT lcm(-15, -10);
 -- Implementation specific result on overflow:
 SELECT ignore(lcm(256, 9223372036854775807));
 SELECT ignore(lcm(256, -9223372036854775807));
-SELECT ignore(lcm(-256, 9223372036854775807));
+SELECT ignore(lcm(-256, 9223372036854775807)); -- { serverError 407 }
 SELECT ignore(lcm(-256, -9223372036854775807));
Some files were not shown because too many files have changed in this diff.