Commit f6191b98e7: Merge remote-tracking branch 'upstream/master' into HEAD
(mirror of https://github.com/ClickHouse/ClickHouse.git)
@@ -152,6 +152,7 @@ if (CMAKE_GENERATOR STREQUAL "Ninja" AND NOT DISABLE_COLORED_BUILD)
    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-color=always")
endif ()

include (cmake/check_flags.cmake)
include (cmake/add_warning.cmake)

if (NOT MSVC)
@@ -166,7 +167,8 @@ if (COMPILER_CLANG)
        set(COMPILER_FLAGS "${COMPILER_FLAGS} -gdwarf-aranges")
    endif ()

    if (HAS_USE_CTOR_HOMING)
        # For more info see https://blog.llvm.org/posts/2021-04-05-constructor-homing-for-debug-info/
        if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO")
            set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xclang -fuse-ctor-homing")
            set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing")
@@ -629,9 +631,6 @@ include_directories(${ConfigIncludePath})
# Add as many warnings as possible for our own code.
include (cmake/warnings.cmake)

add_subdirectory (base)
add_subdirectory (src)
add_subdirectory (programs)
@@ -4,3 +4,4 @@ include (CheckCCompilerFlag)
check_cxx_compiler_flag("-Wsuggest-destructor-override" HAS_SUGGEST_DESTRUCTOR_OVERRIDE)
check_cxx_compiler_flag("-Wshadow" HAS_SHADOW)
check_cxx_compiler_flag("-Wsuggest-override" HAS_SUGGEST_OVERRIDE)
check_cxx_compiler_flag("-Xclang -fuse-ctor-homing" HAS_USE_CTOR_HOMING)
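`HAS_USE_CTOR_HOMING` is what the guarded block in CMakeLists.txt checks before appending `-Xclang -fuse-ctor-homing`. Constructor homing makes Clang emit the full debug description of a class only in the translation unit that emits its constructor, instead of in every unit that includes the header, which shrinks debug builds. A rough two-file illustration of the idea (hypothetical `Widget` class, not from this commit):

```cpp
// widget.h -- only a declaration of the constructor lives here.
#pragma once
#include <string>

struct Widget
{
    Widget();              // declared, not defined: other TUs need no full debug description
    std::string name;
    int size = 0;
};

// widget.cpp -- the constructor is defined (and emitted) here.
// With -Xclang -fuse-ctor-homing, the complete DWARF description of Widget
// is emitted only in this translation unit.
#include "widget.h"

Widget::Widget() : name("default") {}
```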
@@ -3,58 +3,58 @@ toc_priority: 66
toc_title: ClickHouse Keeper
---

# [pre-production] ClickHouse Keeper

The ClickHouse server uses the [ZooKeeper](https://zookeeper.apache.org/) coordination system for data [replication](../engines/table-engines/mergetree-family/replication.md) and [distributed DDL](../sql-reference/distributed-ddl.md) query execution. ClickHouse Keeper is an alternative coordination system compatible with ZooKeeper.

!!! warning "Warning"
    This feature is currently in the pre-production stage. We test it in our CI and on small internal installations.

## Implementation details

ZooKeeper is one of the first well-known open-source coordination systems. It's implemented in Java and has quite a simple and powerful data model. ZooKeeper's coordination algorithm, called ZAB (ZooKeeper Atomic Broadcast), doesn't provide linearizability guarantees for reads, because each ZooKeeper node serves reads locally. Unlike ZooKeeper, ClickHouse Keeper is written in C++ and uses the [RAFT algorithm](https://raft.github.io/) [implementation](https://github.com/eBay/NuRaft). This algorithm allows linearizability for reads and writes and has several open-source implementations in different languages.

By default, ClickHouse Keeper provides the same guarantees as ZooKeeper (linearizable writes, non-linearizable reads). It has a compatible client-server protocol, so any standard ZooKeeper client can be used to interact with ClickHouse Keeper. Snapshots and logs have a format incompatible with ZooKeeper, but the `clickhouse-keeper-converter` tool allows converting ZooKeeper data to a ClickHouse Keeper snapshot. The interserver protocol in ClickHouse Keeper is also incompatible with ZooKeeper, so a mixed ZooKeeper / ClickHouse Keeper cluster is impossible.
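Because the client-server protocol is compatible, an existing ZooKeeper client library can talk to ClickHouse Keeper without changes. A minimal sketch using the classic ZooKeeper C client is shown below; it assumes `libzookeeper` is installed and that Keeper listens on the default port `2181` (signatures quoted from memory, so treat it as illustrative, not authoritative):

```cpp
#include <zookeeper/zookeeper.h>
#include <cstdio>
#include <unistd.h>

int main()
{
    /// Connect to ClickHouse Keeper exactly as to a ZooKeeper ensemble.
    zhandle_t * zh = zookeeper_init("localhost:2181", /* watcher = */ nullptr,
                                    /* recv_timeout_ms = */ 30000,
                                    /* clientid = */ nullptr, /* context = */ nullptr, /* flags = */ 0);
    if (!zh)
    {
        std::perror("zookeeper_init");
        return 1;
    }

    sleep(1);  /// crude wait for the session to be established

    /// Create a plain persistent test node, the same call a ZooKeeper setup would use.
    char path_buffer[256];
    int rc = zoo_create(zh, "/clickhouse_keeper_smoke_test", "hello", 5,
                        &ZOO_OPEN_ACL_UNSAFE, 0, path_buffer, sizeof(path_buffer));
    std::printf("zoo_create returned %d (%s)\n", rc, rc == ZOK ? "ZOK" : "error");

    zookeeper_close(zh);
    return 0;
}
```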
## Configuration

ClickHouse Keeper can be used as a standalone replacement for ZooKeeper or as an internal part of the ClickHouse server, but in both cases the configuration is almost the same `.xml` file. The main ClickHouse Keeper configuration tag is `<keeper_server>`. Keeper configuration has the following parameters:

- `tcp_port` — Port for a client to connect (default for ZooKeeper is `2181`).
- `tcp_port_secure` — Secure port for a client to connect.
- `server_id` — Unique server id; each participant of the ClickHouse Keeper cluster must have a unique number (1, 2, 3, and so on).
- `log_storage_path` — Path to coordination logs; as with ZooKeeper, it is best to store the logs on a non-busy device.
- `snapshot_storage_path` — Path to coordination snapshots.

Other common parameters are inherited from the ClickHouse server config (`listen_host`, `logger`, and so on).

Internal coordination settings are located in the `<keeper_server>.<coordination_settings>` section:

- `operation_timeout_ms` — Timeout for a single client operation (ms) (default: 10000).
- `session_timeout_ms` — Timeout for a client session (ms) (default: 30000).
- `dead_session_check_period_ms` — How often ClickHouse Keeper checks for dead sessions and removes them (ms) (default: 500).
- `heart_beat_interval_ms` — How often a ClickHouse Keeper leader will send heartbeats to followers (ms) (default: 500).
- `election_timeout_lower_bound_ms` — If the follower does not receive heartbeats from the leader in this interval, it can initiate a leader election (default: 1000).
- `election_timeout_upper_bound_ms` — If the follower does not receive heartbeats from the leader in this interval, it must initiate a leader election (default: 2000).
- `rotate_log_storage_interval` — How many log records to store in a single file (default: 100000).
- `reserved_log_items` — How many coordination log records to store before compaction (default: 100000).
- `snapshot_distance` — How often ClickHouse Keeper will create new snapshots (in the number of records in the logs) (default: 100000).
- `snapshots_to_keep` — How many snapshots to keep (default: 3).
- `stale_log_gap` — Threshold at which the leader considers a follower stale and sends it a snapshot instead of logs (default: 10000).
- `fresh_log_gap` — Log gap below which a node is considered fresh (default: 200).
- `max_requests_batch_size` — Maximum size of a batch (in number of requests) before it is sent to RAFT (default: 100).
- `force_sync` — Call `fsync` on each write to the coordination log (default: true).
- `quorum_reads` — Execute read requests as writes through the whole RAFT consensus, with similar speed (default: false).
- `raft_logs_level` — Text logging level about coordination (trace, debug, and so on) (default: system default).
- `auto_forwarding` — Allow forwarding write requests from followers to the leader (default: true).
- `shutdown_timeout` — Wait to finish internal connections and shutdown (ms) (default: 5000).
- `startup_timeout` — If the server doesn't connect to other quorum participants within the specified timeout, it will terminate (ms) (default: 30000).

Quorum configuration is located in the `<keeper_server>.<raft_configuration>` section and contains a description of the servers. The only parameter for the whole quorum is `secure`, which enables an encrypted connection for communication between quorum participants. The main parameters for each `<server>` are:

- `id` — Server identifier in a quorum.
- `hostname` — Hostname where this server is placed.
- `port` — Port where this server listens for connections.

Examples of configuration for a quorum with three nodes can be found in [integration tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/integration) with the `test_keeper_` prefix. Example configuration for server #1:
@@ -94,7 +94,7 @@ Examples of configuration for quorum with three nodes can be found in [integration tests]

## How to run

ClickHouse Keeper is bundled into the ClickHouse server package; just add the `<keeper_server>` configuration and start the ClickHouse server as usual. If you want to run ClickHouse Keeper standalone, you can start it in a similar way with:

```bash
clickhouse-keeper --config /etc/your_path_to_config/config.xml --daemon
```
@@ -102,17 +102,18 @@ clickhouse-keeper --config /etc/your_path_to_config/config.xml --daemon

## [experimental] Migration from ZooKeeper

Seamless migration from ZooKeeper to ClickHouse Keeper is impossible: you have to stop your ZooKeeper cluster, convert the data, and start ClickHouse Keeper. The `clickhouse-keeper-converter` tool allows converting ZooKeeper logs and snapshots to a ClickHouse Keeper snapshot. It works only with ZooKeeper > 3.4. Steps for migration:

1. Stop all ZooKeeper nodes.

2. Optional, but recommended: find the ZooKeeper leader node, then start and stop it again. This forces ZooKeeper to create a consistent snapshot.

3. Run `clickhouse-keeper-converter` on a leader, for example:

```bash
clickhouse-keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/version-2 --output-dir /path/to/clickhouse/keeper/snapshots
```

4. Copy the snapshot to ClickHouse server nodes with a configured `keeper`, or start ClickHouse Keeper instead of ZooKeeper. The snapshot must be present on all nodes; otherwise empty nodes can be faster and one of them can become the leader.

[Original article](https://clickhouse.tech/docs/en/operations/clickhouse-keeper/) <!--hide-->
@@ -3566,3 +3566,91 @@ Possible values:
- Positive integer.

Default value: `1000`.

## max_hyperscan_regexp_length {#max-hyperscan-regexp-length}

Defines the maximum length for each regular expression in the [hyperscan multi-match functions](../../sql-reference/functions/string-search-functions.md#multimatchanyhaystack-pattern1-pattern2-patternn).

Possible values:

- Positive integer.
- 0 - The length is not limited.

Default value: `0`.

**Example**

Query:

```sql
SELECT multiMatchAny('abcd', ['ab','bcd','c','d']) SETTINGS max_hyperscan_regexp_length = 3;
```

Result:

```text
┌─multiMatchAny('abcd', ['ab', 'bcd', 'c', 'd'])─┐
│                                              1 │
└────────────────────────────────────────────────┘
```

Query:

```sql
SELECT multiMatchAny('abcd', ['ab','bcd','c','d']) SETTINGS max_hyperscan_regexp_length = 2;
```

Result:

```text
Exception: Regexp length too large.
```

**See Also**

- [max_hyperscan_regexp_total_length](#max-hyperscan-regexp-total-length)

## max_hyperscan_regexp_total_length {#max-hyperscan-regexp-total-length}

Sets the maximum total length of all regular expressions in each [hyperscan multi-match function](../../sql-reference/functions/string-search-functions.md#multimatchanyhaystack-pattern1-pattern2-patternn).

Possible values:

- Positive integer.
- 0 - The length is not limited.

Default value: `0`.

**Example**

Query:

```sql
SELECT multiMatchAny('abcd', ['a','b','c','d']) SETTINGS max_hyperscan_regexp_total_length = 5;
```

Result:

```text
┌─multiMatchAny('abcd', ['a', 'b', 'c', 'd'])─┐
│                                           1 │
└─────────────────────────────────────────────┘
```

Query:

```sql
SELECT multiMatchAny('abcd', ['ab','bc','c','d']) SETTINGS max_hyperscan_regexp_total_length = 5;
```

Result:

```text
Exception: Total regexp lengths too large.
```

**See Also**

- [max_hyperscan_regexp_length](#max-hyperscan-regexp-length)
@@ -29,7 +29,7 @@ Returns the round number with largest absolute value that has an absolute value

Rounds a value to a specified number of decimal places.

The function returns the nearest number of the specified order. If the given number is equidistant from the surrounding numbers, the function uses banker's rounding for float number types and rounds away from zero for the other number types (Decimal).

``` sql
round(expression [, decimal_places])
@@ -49,7 +49,7 @@ The rounded number of the same type as the input number.

### Examples {#examples}

**Example of use with Float**

``` sql
SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3
@@ -63,6 +63,20 @@ SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3
└─────┴──────────────────────────┘
```

**Example of use with Decimal**

``` sql
SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3
```

``` text
┌──────x─┬─round(CAST(divide(number, 2), 'Decimal(10, 4)'))─┐
│ 0.0000 │                                           0.0000 │
│ 0.5000 │                                           1.0000 │
│ 1.0000 │                                           1.0000 │
└────────┴──────────────────────────────────────────────────┘
```
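The two tie-breaking rules shown above (banker's rounding for Float, away from zero for Decimal) can be reproduced outside ClickHouse for comparison: `std::nearbyint` under the default `FE_TONEAREST` mode rounds halves to the nearest even value, while `std::round` rounds halves away from zero. A small standalone C++ sketch, illustrative only and not ClickHouse code:

```cpp
#include <cfenv>
#include <cmath>
#include <cstdio>

int main()
{
    std::fesetround(FE_TONEAREST);  // default mode: ties go to the nearest even value

    for (double x : {0.5, 1.5, 2.5, -2.5})
    {
        // nearbyint: banker's rounding (like round() on Float32/Float64)
        // round:     ties away from zero (like round() on Decimal)
        std::printf("x = %5.1f  nearbyint = %5.1f  round = %5.1f\n",
                    x, std::nearbyint(x), std::round(x));
    }
    return 0;
}
```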
**Examples of rounding**

Rounding to the nearest number.
docs/ru/operations/clickhouse-keeper.md (new file, 119 lines)
@@ -0,0 +1,119 @@
---
toc_priority: 66
toc_title: ClickHouse Keeper
---

# [pre-production] ClickHouse Keeper

The ClickHouse server uses the [ZooKeeper](https://zookeeper.apache.org/) coordination service for data [replication](../engines/table-engines/mergetree-family/replication.md) and execution of [distributed DDL queries](../sql-reference/distributed-ddl.md). ClickHouse Keeper is an alternative coordination service compatible with ZooKeeper.

!!! warning "Warning"
    ClickHouse Keeper is in the pre-production stage and is being tested in ClickHouse CI and on a few internal installations.

## Implementation details

ZooKeeper is one of the first well-known open-source coordination services. It is implemented in Java and has a fairly simple and powerful data model. ZooKeeper's coordination algorithm is called ZAB (ZooKeeper Atomic Broadcast). It does not guarantee linearizability of reads, because each ZooKeeper node serves reads locally. Unlike ZooKeeper, ClickHouse Keeper is implemented in C++ and uses the [RAFT](https://raft.github.io/) algorithm, [implementation](https://github.com/eBay/NuRaft). This algorithm allows linearizability of reads and writes and has several open-source implementations in different languages.

By default, ClickHouse Keeper provides the same guarantees as ZooKeeper (linearizable writes, sequentially consistent reads). It has a compatible client-server protocol, so any standard ZooKeeper client can be used to interact with ClickHouse Keeper. Snapshots and logs have a format incompatible with ZooKeeper, but ZooKeeper data can be converted into a ClickHouse Keeper snapshot with `clickhouse-keeper-converter`. The interserver protocol of ClickHouse Keeper is also incompatible with ZooKeeper, so a mixed ZooKeeper / ClickHouse Keeper cluster is impossible.

## Configuration

ClickHouse Keeper can be used as a drop-in replacement for ZooKeeper or as an internal part of the ClickHouse server, but in both cases the configuration is an `.xml` file. The main ClickHouse Keeper configuration tag is `<keeper_server>`. Configuration parameters:

- `tcp_port` — port for client connections (default for ZooKeeper: `2181`).
- `tcp_port_secure` — encrypted port for client connections.
- `server_id` — unique server identifier; each participant of the cluster must have a unique number (1, 2, 3, and so on).
- `log_storage_path` — path to coordination logs; it is better to store them on a non-busy device (this also applies to ZooKeeper).
- `snapshot_storage_path` — path to coordination snapshots.

Other common parameters are inherited from the ClickHouse server configuration (`listen_host`, `logger`, and so on).

Internal coordination settings are located in `<keeper_server>.<coordination_settings>`:

- `operation_timeout_ms` — maximum timeout for a single client operation, in milliseconds (default: 10000).
- `session_timeout_ms` — maximum timeout for a client session, in milliseconds (default: 30000).
- `dead_session_check_period_ms` — how often ClickHouse Keeper checks for dead sessions and removes them, in milliseconds (default: 500).
- `heart_beat_interval_ms` — how often the ClickHouse Keeper leader sends heartbeats to followers, in milliseconds (default: 500).
- `election_timeout_lower_bound_ms` — time after which a follower may initiate a leader election if it has not received a heartbeat from the leader (default: 1000).
- `election_timeout_upper_bound_ms` — time after which a follower must initiate a leader election if it has not received a heartbeat from the leader (default: 2000).
- `rotate_log_storage_interval` — how many coordination log records to store in a single file (default: 100000).
- `reserved_log_items` — minimum number of coordination log records to keep after taking a snapshot (default: 100000).
- `snapshot_distance` — how often ClickHouse Keeper creates new snapshots, in the number of records in the logs (default: 100000).
- `snapshots_to_keep` — how many snapshots to keep (default: 3).
- `stale_log_gap` — threshold at which the leader considers a follower stale and sends it a snapshot instead of logs (default: 10000).
- `fresh_log_gap` — maximum lag behind the leader, in log records, at which a follower is still considered fresh (default: 200).
- `max_requests_batch_size` — number of write requests grouped into one batch before being sent through RAFT (default: 100).
- `force_sync` — call `fsync` on each write to the coordination log (default: true).
- `quorum_reads` — execute read requests like write requests, through the whole RAFT consensus, with a negative impact on performance and log size (default: false).
- `raft_logs_level` — text logging level for coordination messages (trace, debug, and so on) (default: information).
- `auto_forwarding` — allow forwarding write requests from followers to the leader (default: true).
- `shutdown_timeout` — time to wait for internal connections to finish during shutdown, in milliseconds (default: 5000).
- `startup_timeout` — time after which the server shuts down if it cannot connect to other quorum participants, in milliseconds (default: 30000).

The quorum configuration is located in `<keeper_server>.<raft_configuration>` and contains a description of the servers. The only parameter for the whole quorum is `secure`, which enables an encrypted connection between quorum participants. Parameters for each `<server>`:

- `id` — server identifier in the quorum.
- `hostname` — name of the host where this server is placed.
- `port` — port on which this server accepts connections for internal communication.

Examples of a quorum configuration with three nodes can be found in the [integration tests](https://github.com/ClickHouse/ClickHouse/tree/master/tests/integration) with the `test_keeper_` prefix. Example configuration for server #1:

```xml
<keeper_server>
    <tcp_port>2181</tcp_port>
    <server_id>1</server_id>
    <log_storage_path>/var/lib/clickhouse/coordination/log</log_storage_path>
    <snapshot_storage_path>/var/lib/clickhouse/coordination/snapshots</snapshot_storage_path>

    <coordination_settings>
        <operation_timeout_ms>10000</operation_timeout_ms>
        <session_timeout_ms>30000</session_timeout_ms>
        <raft_logs_level>trace</raft_logs_level>
    </coordination_settings>

    <raft_configuration>
        <server>
            <id>1</id>
            <hostname>zoo1</hostname>
            <port>9444</port>
        </server>
        <server>
            <id>2</id>
            <hostname>zoo2</hostname>
            <port>9444</port>
        </server>
        <server>
            <id>3</id>
            <hostname>zoo3</hostname>
            <port>9444</port>
        </server>
    </raft_configuration>
</keeper_server>
```

## How to run

ClickHouse Keeper is included in the `clickhouse-server` package; just add the `<keeper_server>` configuration and start the ClickHouse server as usual. If you want to run ClickHouse Keeper standalone, start it in a similar way:

```bash
clickhouse-keeper --config /etc/your_path_to_config/config.xml --daemon
```

## [experimental] Migration from ZooKeeper

A seamless migration from ZooKeeper to ClickHouse Keeper is impossible: you need to stop the ZooKeeper cluster, convert the data, and start ClickHouse Keeper. The `clickhouse-keeper-converter` utility converts ZooKeeper logs and snapshots into a ClickHouse Keeper snapshot. It has been verified only for ZooKeeper versions above 3.4. Migration steps:

1. Stop all ZooKeeper nodes.

2. Optional, but recommended: find the ZooKeeper leader node, then start and stop it again. This forces ZooKeeper to create a consistent snapshot.

3. Run `clickhouse-keeper-converter` on the leader, for example:

```bash
clickhouse-keeper-converter --zookeeper-logs-dir /var/lib/zookeeper/version-2 --zookeeper-snapshots-dir /var/lib/zookeeper/version-2 --output-dir /path/to/clickhouse/keeper/snapshots
```

4. Copy the snapshot to the ClickHouse server nodes with a configured `keeper`, or start ClickHouse Keeper instead of ZooKeeper. The snapshot must be present on all nodes; otherwise empty nodes can grab leadership and the converted data may be discarded at startup.

[Original article](https://clickhouse.tech/docs/en/operations/clickhouse-keeper/) <!--hide-->
@@ -3375,3 +3375,91 @@ SETTINGS index_granularity = 8192 │
- Positive integer.

Default value: `1000`.

## max_hyperscan_regexp_length {#max-hyperscan-regexp-length}

Defines the maximum length for each regular expression in the [hyperscan multi-match functions](../../sql-reference/functions/string-search-functions.md#multimatchanyhaystack-pattern1-pattern2-patternn).

Possible values:

- Positive integer.
- 0 - The length is not limited.

Default value: `0`.

**Example**

Query:

```sql
SELECT multiMatchAny('abcd', ['ab','bcd','c','d']) SETTINGS max_hyperscan_regexp_length = 3;
```

Result:

```text
┌─multiMatchAny('abcd', ['ab', 'bcd', 'c', 'd'])─┐
│                                              1 │
└────────────────────────────────────────────────┘
```

Query:

```sql
SELECT multiMatchAny('abcd', ['ab','bcd','c','d']) SETTINGS max_hyperscan_regexp_length = 2;
```

Result:

```text
Exception: Regexp length too large.
```

**See Also**

- [max_hyperscan_regexp_total_length](#max-hyperscan-regexp-total-length)

## max_hyperscan_regexp_total_length {#max-hyperscan-regexp-total-length}

Sets the maximum total length of all regular expressions in each [hyperscan multi-match function](../../sql-reference/functions/string-search-functions.md#multimatchanyhaystack-pattern1-pattern2-patternn).

Possible values:

- Positive integer.
- 0 - The length is not limited.

Default value: `0`.

**Example**

Query:

```sql
SELECT multiMatchAny('abcd', ['a','b','c','d']) SETTINGS max_hyperscan_regexp_total_length = 5;
```

Result:

```text
┌─multiMatchAny('abcd', ['a', 'b', 'c', 'd'])─┐
│                                           1 │
└─────────────────────────────────────────────┘
```

Query:

```sql
SELECT multiMatchAny('abcd', ['ab','bc','c','d']) SETTINGS max_hyperscan_regexp_total_length = 5;
```

Result:

```text
Exception: Total regexp lengths too large.
```

**See Also**

- [max_hyperscan_regexp_length](#max-hyperscan-regexp-length)
@@ -27,7 +27,7 @@ N can be negative.

Rounds a value to the specified decimal place.

The function returns the nearest value of the specified order. When the given number is equidistant from the neighboring numbers of the required order, for floating-point types (Float32/64) the function returns the one with the nearest even digit (banker's rounding), while for fixed-point types (Decimal) it rounds away from zero (arithmetic rounding).

``` sql
round(expression [, decimal_places])
@@ -47,7 +47,7 @@ round(expression [, decimal_places])

### Examples {#primery}

**Example of use with Float**

``` sql
SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3
@@ -61,6 +61,21 @@ SELECT number / 2 AS x, round(x) FROM system.numbers LIMIT 3
└─────┴──────────────────────────┘
```

**Example of use with Decimal**

``` sql
SELECT cast(number / 2 AS Decimal(10,4)) AS x, round(x) FROM system.numbers LIMIT 3
```

``` text
┌──────x─┬─round(CAST(divide(number, 2), 'Decimal(10, 4)'))─┐
│ 0.0000 │                                           0.0000 │
│ 0.5000 │                                           1.0000 │
│ 1.0000 │                                           1.0000 │
└────────┴──────────────────────────────────────────────────┘
```

**Examples of rounding**

Rounding to the nearest number.
@@ -1,4 +1,5 @@
#include <string>

#include "Common/MemoryTracker.h"
#include "Columns/ColumnsNumber.h"
#include "ConnectionParameters.h"
@@ -1941,16 +1942,30 @@ private:
    {
        /// If INSERT data must be sent.
        auto * parsed_insert_query = parsed_query->as<ASTInsertQuery>();

        /// If query isn't parsed, no information can be got from it.
        if (!parsed_insert_query)
            return;

        /// If data is got from file (maybe compressed file)
        if (parsed_insert_query->infile)
        {
            /// Get name of this file (path to file)
            const auto & in_file_node = parsed_insert_query->infile->as<ASTLiteral &>();
            const auto in_file = in_file_node.value.safeGet<std::string>();

            std::string compression_method;
            /// Compression method can be specified in query
            if (parsed_insert_query->compression)
            {
                const auto & compression_method_node = parsed_insert_query->compression->as<ASTLiteral &>();
                compression_method = compression_method_node.value.safeGet<std::string>();
            }

            /// Otherwise, it will be detected from file name automatically (by chooseCompressionMethod)
            /// Buffer for reading from file is created and wrapped with appropriate compression method
            auto in_buffer = wrapReadBufferWithCompressionMethod(std::make_unique<ReadBufferFromFile>(in_file), chooseCompressionMethod(in_file, compression_method));

            /// Now data is ready to be sent on server.
            try
            {
                sendDataFrom(*in_buffer, sample, columns_description);
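For `INSERT ... FROM INFILE`, the wrapped read buffer above is what transparently decompresses the file before the data is sent to the server. The same effect can be sketched with plain zlib when the input is gzip-compressed; the snippet below only illustrates that idea and does not use ClickHouse's buffer classes (zlib `gzopen`/`gzread` API, quoted from memory):

```cpp
#include <zlib.h>
#include <cstdio>
#include <vector>

// Read a (possibly gzip-compressed) file and count its decompressed bytes.
// The gz* API transparently handles both plain and gzip input.
int main(int argc, char ** argv)
{
    const char * path = argc > 1 ? argv[1] : "data.csv.gz";

    gzFile file = gzopen(path, "rb");
    if (!file)
    {
        std::perror("gzopen");
        return 1;
    }

    std::vector<char> buffer(1 << 16);
    size_t total = 0;
    int read_bytes;
    while ((read_bytes = gzread(file, buffer.data(), static_cast<unsigned>(buffer.size()))) > 0)
        total += static_cast<size_t>(read_bytes);   // here each decompressed chunk would be parsed and sent

    gzclose(file);
    std::printf("decompressed %zu bytes from %s\n", total, path);
    return 0;
}
```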
@@ -1961,6 +1976,7 @@ private:
            }
        }
        /// If query already has data to sent
        else if (parsed_insert_query->data)
        {
            /// Send data contained in the query.
@@ -2349,9 +2365,16 @@ private:
            const auto & out_file_node = query_with_output->out_file->as<ASTLiteral &>();
            const auto & out_file = out_file_node.value.safeGet<std::string>();

            std::string compression_method;
            if (query_with_output->compression)
            {
                const auto & compression_method_node = query_with_output->compression->as<ASTLiteral &>();
                compression_method = compression_method_node.value.safeGet<std::string>();
            }

            out_file_buf = wrapWriteBufferWithCompressionMethod(
                std::make_unique<WriteBufferFromFile>(out_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_EXCL | O_CREAT),
                chooseCompressionMethod(out_file, compression_method),
                /* compression level = */ 3
            );
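Both call sites follow the same pattern: an explicit compression method taken from the query wins, otherwise `chooseCompressionMethod` falls back to guessing from the file extension. A minimal standalone sketch of that fallback logic, using a hypothetical helper rather than ClickHouse's actual implementation:

```cpp
#include <algorithm>
#include <cctype>
#include <string>

// Hypothetical stand-in for the "explicit hint or guess from extension" logic:
// an explicit method from the query (e.g. a COMPRESSION clause) takes precedence,
// otherwise the method is inferred from the file name.
std::string chooseCompression(const std::string & path, std::string hint)
{
    std::transform(hint.begin(), hint.end(), hint.begin(),
                   [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
    if (!hint.empty())
        return hint;

    auto ends_with = [&](const std::string & suffix)
    {
        return path.size() >= suffix.size()
            && path.compare(path.size() - suffix.size(), suffix.size(), suffix) == 0;
    };

    if (ends_with(".gz"))   return "gzip";
    if (ends_with(".zst"))  return "zstd";
    if (ends_with(".xz"))   return "xz";
    if (ends_with(".bz2"))  return "bzip2";
    return "none";
}
```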
@@ -466,6 +466,7 @@ class IColumn;
    M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \
    M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \
    M(Bool, insert_null_as_default, true, "Insert DEFAULT values instead of NULL in INSERT SELECT (UNION ALL)", 0) \
    M(Bool, describe_include_subcolumns, false, "If true, subcolumns of all table columns will be included into result of DESCRIBE query", 0) \
    \
    M(Bool, optimize_rewrite_sum_if_to_count_if, true, "Rewrite sumIf() and sum(if()) function countIf() function when logically equivalent", 0) \
    M(UInt64, insert_shard_id, 0, "If non zero, when insert into a distributed table, the data will be inserted into the shard `insert_shard_id` synchronously. Possible values range from 1 to `shards_number` of corresponding distributed table", 0) \
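The new `describe_include_subcolumns` setting is declared through the same `M(type, name, default, description, flags)` macro list as the existing ones; the list is later expanded into struct members, accessors and documentation. A stripped-down sketch of that X-macro technique (illustrative names only, not ClickHouse's actual macro set):

```cpp
#include <iostream>
#include <string>

// A tiny X-macro list in the spirit of the settings table above.
#define APPLY_FOR_MY_SETTINGS(M) \
    M(bool, describe_include_subcolumns, false, "Include subcolumns in DESCRIBE output") \
    M(int,  max_threads,                 8,     "Maximum number of worker threads")

struct MySettings
{
    // One expansion of the list declares the fields with their defaults...
#define DECLARE_FIELD(TYPE, NAME, DEFAULT, DESCRIPTION) TYPE NAME = DEFAULT;
    APPLY_FOR_MY_SETTINGS(DECLARE_FIELD)
#undef DECLARE_FIELD

    // ...and another expansion reuses the same list, e.g. to dump all settings.
    void dump() const
    {
#define PRINT_FIELD(TYPE, NAME, DEFAULT, DESCRIPTION) \
        std::cout << #NAME << " = " << NAME << "  // " << DESCRIPTION << '\n';
        APPLY_FOR_MY_SETTINGS(PRINT_FIELD)
#undef PRINT_FIELD
    }
};

int main()
{
    MySettings settings;
    settings.describe_include_subcolumns = true;
    settings.dump();
}
```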
@@ -87,23 +87,38 @@ ColumnPtr IDataType::getSubcolumn(const String & subcolumn_name, const IColumn &
    throw Exception(ErrorCodes::ILLEGAL_COLUMN, "There is no subcolumn {} in type {}", subcolumn_name, getName());
}

void IDataType::forEachSubcolumn(const SubcolumnCallback & callback) const
{
    NameSet set;
    getDefaultSerialization()->enumerateStreams([&, this](const ISerialization::SubstreamPath & substream_path)
    {
        ISerialization::SubstreamPath new_path;
        /// Iterate over path to try to get intermediate subcolumns for complex nested types.
        for (const auto & elem : substream_path)
        {
            new_path.push_back(elem);
            auto name = ISerialization::getSubcolumnNameForStream(new_path);
            auto type = tryGetSubcolumnType(name);

            /// Subcolumn names may repeat among several substream paths.
            if (!name.empty() && type && !set.count(name))
            {
                callback(name, type, substream_path);
                set.insert(name);
            }
        }
    });
}

Names IDataType::getSubcolumnNames() const
{
    Names res;
    forEachSubcolumn([&](const auto & name, const auto &, const auto &)
    {
        res.push_back(name);
    });

    return res;
}

void IDataType::insertDefaultInto(IColumn & column) const
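`forEachSubcolumn` turns the old name-collecting loop into a reusable enumeration primitive: callers receive the subcolumn name, its type and the substream path, and `getSubcolumnNames` is now just one such caller (the DESCRIBE change further down is another). A self-contained sketch of the same callback-with-deduplication pattern, using stand-in types instead of ClickHouse's `IDataType`/`ISerialization`:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

// Stand-ins for the real stream paths: each path element contributes one
// name segment, and several paths may produce the same subcolumn name.
using Path = std::vector<std::string>;
using SubcolumnCallback = std::function<void(const std::string & name, const Path & path)>;

void forEachSubcolumn(const std::vector<Path> & substream_paths, const SubcolumnCallback & callback)
{
    std::unordered_set<std::string> seen;
    for (const auto & path : substream_paths)
    {
        std::string name;
        for (const auto & elem : path)
        {
            name = name.empty() ? elem : name + "." + elem;   // intermediate subcolumns too
            if (!seen.count(name))                            // names may repeat between paths
            {
                callback(name, path);
                seen.insert(name);
            }
        }
    }
}

int main()
{
    std::vector<Path> paths = {{"null"}, {"size0"}, {"size0", "keys"}, {"size0", "values"}};

    // Equivalent of getSubcolumnNames(): collect every distinct name.
    std::vector<std::string> names;
    forEachSubcolumn(paths, [&](const std::string & name, const Path &) { names.push_back(name); });

    for (const auto & n : names)
        std::cout << n << '\n';
}
```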
|
@ -80,6 +80,9 @@ public:
|
|||||||
virtual DataTypePtr tryGetSubcolumnType(const String & subcolumn_name) const;
|
virtual DataTypePtr tryGetSubcolumnType(const String & subcolumn_name) const;
|
||||||
DataTypePtr getSubcolumnType(const String & subcolumn_name) const;
|
DataTypePtr getSubcolumnType(const String & subcolumn_name) const;
|
||||||
virtual ColumnPtr getSubcolumn(const String & subcolumn_name, const IColumn & column) const;
|
virtual ColumnPtr getSubcolumn(const String & subcolumn_name, const IColumn & column) const;
|
||||||
|
|
||||||
|
using SubcolumnCallback = std::function<void(const String &, const DataTypePtr &, const ISerialization::SubstreamPath &)>;
|
||||||
|
void forEachSubcolumn(const SubcolumnCallback & callback) const;
|
||||||
Names getSubcolumnNames() const;
|
Names getSubcolumnNames() const;
|
||||||
|
|
||||||
/// Returns default serialization of data type.
|
/// Returns default serialization of data type.
|
||||||
|
@ -12,9 +12,6 @@
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
class ASTInsertQuery;
|
|
||||||
struct BlockIO;
|
|
||||||
|
|
||||||
/// A queue, that stores data for insert queries and periodically flushes it to tables.
|
/// A queue, that stores data for insert queries and periodically flushes it to tables.
|
||||||
/// The data is grouped by table, format and settings of insert query.
|
/// The data is grouped by table, format and settings of insert query.
|
||||||
class AsynchronousInsertQueue : public WithContext
|
class AsynchronousInsertQueue : public WithContext
|
||||||
@ -141,7 +138,7 @@ private:
|
|||||||
static void processData(InsertQuery key, InsertDataPtr data, ContextPtr global_context);
|
static void processData(InsertQuery key, InsertDataPtr data, ContextPtr global_context);
|
||||||
|
|
||||||
template <typename E>
|
template <typename E>
|
||||||
static void finishWithException(const ASTPtr & query, const std::list<InsertData::EntryPtr> & entries, const E & e);
|
static void finishWithException(const ASTPtr & query, const std::list<InsertData::EntryPtr> & entries, const E & exception);
|
||||||
|
|
||||||
public:
|
public:
|
||||||
Queue getQueue() const
|
Queue getQueue() const
|
||||||
|
@ -3,7 +3,6 @@
|
|||||||
#include <Interpreters/RequiredSourceColumnsVisitor.h>
|
#include <Interpreters/RequiredSourceColumnsVisitor.h>
|
||||||
#include <Interpreters/addTypeConversionToAST.h>
|
#include <Interpreters/addTypeConversionToAST.h>
|
||||||
#include <Parsers/ASTTablesInSelectQuery.h>
|
#include <Parsers/ASTTablesInSelectQuery.h>
|
||||||
#include <Parsers/ASTSelectWithUnionQuery.h>
|
|
||||||
#include <Parsers/ASTSelectQuery.h>
|
#include <Parsers/ASTSelectQuery.h>
|
||||||
#include <Parsers/ASTSubquery.h>
|
#include <Parsers/ASTSubquery.h>
|
||||||
#include <Parsers/ASTAlterQuery.h>
|
#include <Parsers/ASTAlterQuery.h>
|
||||||
@ -14,8 +13,11 @@
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
bool ColumnAliasesMatcher::needChildVisit(const ASTPtr & node, const ASTPtr &)
|
bool ColumnAliasesMatcher::needChildVisit(const ASTPtr & node, const ASTPtr &, const Data & data)
|
||||||
{
|
{
|
||||||
|
if (data.excluded_nodes.contains(node.get()))
|
||||||
|
return false;
|
||||||
|
|
||||||
if (const auto * f = node->as<ASTFunction>())
|
if (const auto * f = node->as<ASTFunction>())
|
||||||
{
|
{
|
||||||
/// "lambda" visits children itself.
|
/// "lambda" visits children itself.
|
||||||
|
@ -46,7 +46,7 @@ using DataTypePtr = std::shared_ptr<const IDataType>;
|
|||||||
class ColumnAliasesMatcher
|
class ColumnAliasesMatcher
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
using Visitor = InDepthNodeVisitor<ColumnAliasesMatcher, false>;
|
using Visitor = InDepthNodeVisitor<ColumnAliasesMatcher, false, true>;
|
||||||
|
|
||||||
struct Data
|
struct Data
|
||||||
{
|
{
|
||||||
@ -57,14 +57,16 @@ public:
|
|||||||
NameSet array_join_source_columns;
|
NameSet array_join_source_columns;
|
||||||
ContextPtr context;
|
ContextPtr context;
|
||||||
|
|
||||||
|
const std::unordered_set<IAST *> & excluded_nodes;
|
||||||
|
|
||||||
/// private_aliases are from lambda, so these are local names.
|
/// private_aliases are from lambda, so these are local names.
|
||||||
NameSet private_aliases;
|
NameSet private_aliases;
|
||||||
|
|
||||||
/// Check if query is changed by this visitor.
|
/// Check if query is changed by this visitor.
|
||||||
bool changed = false;
|
bool changed = false;
|
||||||
|
|
||||||
Data(const ColumnsDescription & columns_, const NameToNameMap & array_join_result_columns_, ContextPtr context_)
|
Data(const ColumnsDescription & columns_, const NameToNameMap & array_join_result_columns_, ContextPtr context_, const std::unordered_set<IAST *> & excluded_nodes_)
|
||||||
: columns(columns_), context(context_)
|
: columns(columns_), context(context_), excluded_nodes(excluded_nodes_)
|
||||||
{
|
{
|
||||||
for (const auto & [result, source] : array_join_result_columns_)
|
for (const auto & [result, source] : array_join_result_columns_)
|
||||||
{
|
{
|
||||||
@ -75,7 +77,7 @@ public:
|
|||||||
};
|
};
|
||||||
|
|
||||||
static void visit(ASTPtr & ast, Data & data);
|
static void visit(ASTPtr & ast, Data & data);
|
||||||
static bool needChildVisit(const ASTPtr & node, const ASTPtr & child);
|
static bool needChildVisit(const ASTPtr & node, const ASTPtr & child, const Data & data);
|
||||||
|
|
||||||
private:
|
private:
|
||||||
static void visit(ASTIdentifier & node, ASTPtr & ast, Data & data);
|
static void visit(ASTIdentifier & node, ASTPtr & ast, Data & data);
|
||||||
|
@ -1095,7 +1095,7 @@ IColumn::Filter dictionaryJoinRightColumns(const TableJoin & table_join, AddedCo
|
|||||||
std::move(key_getter), nullptr, added_columns, null_map, flags);
|
std::move(key_getter), nullptr, added_columns, null_map, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
throw Exception("Logical error: wrong JOIN combination", ErrorCodes::LOGICAL_ERROR);
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong JOIN combination: {} {}", STRICTNESS, KIND);
|
||||||
}
|
}
|
||||||
|
|
||||||
} /// nameless
|
} /// nameless
|
||||||
@ -1414,13 +1414,13 @@ void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed)
|
|||||||
joinBlockImpl<Kind::Left, Strictness::Anti>(block, key_names_left, sample_block_with_columns_to_add, map);
|
joinBlockImpl<Kind::Left, Strictness::Anti>(block, key_names_left, sample_block_with_columns_to_add, map);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
throw Exception("Logical error: wrong JOIN combination", ErrorCodes::LOGICAL_ERROR);
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong JOIN combination: dictionary + {} {}", strictness, kind);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if (kind == Kind::Inner && strictness == Strictness::All)
|
else if (kind == Kind::Inner && strictness == Strictness::All)
|
||||||
joinBlockImpl<Kind::Left, Strictness::Semi>(block, key_names_left, sample_block_with_columns_to_add, map);
|
joinBlockImpl<Kind::Left, Strictness::Semi>(block, key_names_left, sample_block_with_columns_to_add, map);
|
||||||
else
|
else
|
||||||
throw Exception("Logical error: wrong JOIN combination", ErrorCodes::LOGICAL_ERROR);
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong JOIN combination: dictionary + {} {}", strictness, kind);
|
||||||
}
|
}
|
||||||
else if (joinDispatch(kind, strictness, data->maps, [&](auto kind_, auto strictness_, auto & map)
|
else if (joinDispatch(kind, strictness, data->maps, [&](auto kind_, auto strictness_, auto & map)
|
||||||
{
|
{
|
||||||
@ -1432,7 +1432,7 @@ void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed)
|
|||||||
else if (kind == ASTTableJoin::Kind::Cross)
|
else if (kind == ASTTableJoin::Kind::Cross)
|
||||||
joinBlockImplCross(block, not_processed);
|
joinBlockImplCross(block, not_processed);
|
||||||
else
|
else
|
||||||
throw Exception("Logical error: unknown combination of JOIN", ErrorCodes::LOGICAL_ERROR);
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong JOIN combination: {} {}", strictness, kind);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Mapped>
|
template <typename Mapped>
|
||||||
@ -1494,7 +1494,7 @@ public:
|
|||||||
};
|
};
|
||||||
|
|
||||||
if (!joinDispatch(parent.kind, parent.strictness, parent.data->maps, fill_callback))
|
if (!joinDispatch(parent.kind, parent.strictness, parent.data->maps, fill_callback))
|
||||||
throw Exception("Logical error: unknown JOIN strictness (must be on of: ANY, ALL, ASOF)", ErrorCodes::LOGICAL_ERROR);
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown JOIN strictness '{}' (must be on of: ANY, ALL, ASOF)", parent.strictness);
|
||||||
|
|
||||||
fillNullsFromBlocks(columns_right, rows_added);
|
fillNullsFromBlocks(columns_right, rows_added);
|
||||||
return rows_added;
|
return rows_added;
|
||||||
|
@ -10,7 +10,7 @@ namespace DB
|
|||||||
|
|
||||||
/// Visits AST tree in depth, call functions for nodes according to Matcher type data.
|
/// Visits AST tree in depth, call functions for nodes according to Matcher type data.
|
||||||
/// You need to define Data, visit() and needChildVisit() in Matcher class.
|
/// You need to define Data, visit() and needChildVisit() in Matcher class.
|
||||||
template <typename Matcher, bool _top_to_bottom, typename T = ASTPtr>
|
template <typename Matcher, bool _top_to_bottom, bool need_child_accept_data = false, typename T = ASTPtr>
|
||||||
class InDepthNodeVisitor
|
class InDepthNodeVisitor
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
@ -51,13 +51,21 @@ private:
|
|||||||
void visitChildren(T & ast)
|
void visitChildren(T & ast)
|
||||||
{
|
{
|
||||||
for (auto & child : ast->children)
|
for (auto & child : ast->children)
|
||||||
if (Matcher::needChildVisit(ast, child))
|
{
|
||||||
|
bool need_visit_child = false;
|
||||||
|
if constexpr (need_child_accept_data)
|
||||||
|
need_visit_child = Matcher::needChildVisit(ast, child, data);
|
||||||
|
else
|
||||||
|
need_visit_child = Matcher::needChildVisit(ast, child);
|
||||||
|
|
||||||
|
if (need_visit_child)
|
||||||
visit(child);
|
visit(child);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
template <typename Matcher, bool top_to_bottom>
|
template <typename Matcher, bool top_to_bottom, bool need_child_accept_data = false>
|
||||||
using ConstInDepthNodeVisitor = InDepthNodeVisitor<Matcher, top_to_bottom, const ASTPtr>;
|
using ConstInDepthNodeVisitor = InDepthNodeVisitor<Matcher, top_to_bottom, need_child_accept_data, const ASTPtr>;
|
||||||
|
|
||||||
struct NeedChild
|
struct NeedChild
|
||||||
{
|
{
|
||||||
|
@@ -1,5 +1,4 @@
 #include <Storages/IStorage.h>
-#include <DataStreams/OneBlockInputStream.h>
 #include <DataStreams/BlockIO.h>
 #include <DataTypes/DataTypeString.h>
 #include <Parsers/queryToString.h>
@@ -15,20 +14,14 @@
 #include <Parsers/ASTFunction.h>
 #include <Parsers/ASTTablesInSelectQuery.h>
 #include <Parsers/TablePropertiesQueriesASTs.h>
+#include <DataTypes/NestedUtils.h>
+#include <Processors/Sources/SourceFromSingleChunk.h>


 namespace DB
 {

-BlockIO InterpreterDescribeQuery::execute()
-{
-    BlockIO res;
-    res.in = executeImpl();
-    return res;
-}
-
-
-Block InterpreterDescribeQuery::getSampleBlock()
+Block InterpreterDescribeQuery::getSampleBlock(bool include_subcolumns)
 {
    Block block;

@@ -56,11 +49,19 @@ Block InterpreterDescribeQuery::getSampleBlock()
    col.name = "ttl_expression";
    block.insert(col);

+   if (include_subcolumns)
+   {
+       col.name = "is_subcolumn";
+       col.type = std::make_shared<DataTypeUInt8>();
+       col.column = col.type->createColumn();
+       block.insert(col);
+   }
+
    return block;
 }


-BlockInputStreamPtr InterpreterDescribeQuery::executeImpl()
+BlockIO InterpreterDescribeQuery::execute()
 {
    ColumnsDescription columns;

@@ -87,7 +88,8 @@ BlockInputStreamPtr InterpreterDescribeQuery::executeImpl()
        columns = metadata_snapshot->getColumns();
    }

-   Block sample_block = getSampleBlock();
+   bool include_subcolumns = getContext()->getSettingsRef().describe_include_subcolumns;
+   Block sample_block = getSampleBlock(include_subcolumns);
    MutableColumns res_columns = sample_block.cloneEmptyColumns();

    for (const auto & column : columns)
@@ -117,9 +119,47 @@ BlockInputStreamPtr InterpreterDescribeQuery::executeImpl()
            res_columns[6]->insert(queryToString(column.ttl));
        else
            res_columns[6]->insertDefault();
+
+       if (include_subcolumns)
+           res_columns[7]->insertDefault();
    }

-   return std::make_shared<OneBlockInputStream>(sample_block.cloneWithColumns(std::move(res_columns)));
+   if (include_subcolumns)
+   {
+       for (const auto & column : columns)
+       {
+           column.type->forEachSubcolumn([&](const auto & name, const auto & type, const auto & path)
+           {
+               res_columns[0]->insert(Nested::concatenateName(column.name, name));
+               res_columns[1]->insert(type->getName());
+
+               /// It's not trivial to calculate default expression for subcolumn.
+               /// So, leave it empty.
+               res_columns[2]->insertDefault();
+               res_columns[3]->insertDefault();
+               res_columns[4]->insert(column.comment);
+
+               if (column.codec && ISerialization::isSpecialCompressionAllowed(path))
+                   res_columns[5]->insert(queryToString(column.codec->as<ASTFunction>()->arguments));
+               else
+                   res_columns[5]->insertDefault();
+
+               if (column.ttl)
+                   res_columns[6]->insert(queryToString(column.ttl));
+               else
+                   res_columns[6]->insertDefault();
+
+               res_columns[7]->insert(1u);
+           });
+       }
+   }
+
+   BlockIO res;
+   size_t num_rows = res_columns[0]->size();
+   auto source = std::make_shared<SourceFromSingleChunk>(sample_block, Chunk(std::move(res_columns), num_rows));
+   res.pipeline.init(Pipe(std::move(source)));
+
+   return res;
 }

 }
@@ -16,12 +16,10 @@ public:

    BlockIO execute() override;

-   static Block getSampleBlock();
+   static Block getSampleBlock(bool include_subcolumns);

 private:
    ASTPtr query_ptr;

-   BlockInputStreamPtr executeImpl();
 };

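Note: the interpreter changes above make DESCRIBE list subcolumns (e.g. n.null, arr.size0) in an extra is_subcolumn column when the new describe_include_subcolumns setting is enabled. A minimal usage sketch; the table and column names here are illustrative only, the setting name comes from this patch:

    CREATE TABLE t_example (n Nullable(String), arr Array(UInt32)) ENGINE = MergeTree ORDER BY tuple();
    DESCRIBE TABLE t_example;                                           -- top-level columns only
    DESCRIBE TABLE t_example SETTINGS describe_include_subcolumns = 1;  -- also shows n.null and arr.size0
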
@@ -339,8 +339,16 @@ static std::optional<String> getDictKeyName(const String & dict_name , ContextPtr

 bool TableJoin::tryInitDictJoin(const Block & sample_block, ContextPtr context)
 {
+   using Strictness = ASTTableJoin::Strictness;
+
+   bool allowed_inner = isInner(kind()) && strictness() == Strictness::All;
+   bool allowed_left = isLeft(kind()) && (strictness() == Strictness::Any ||
+                                          strictness() == Strictness::All ||
+                                          strictness() == Strictness::Semi ||
+                                          strictness() == Strictness::Anti);
+
    /// Support ALL INNER, [ANY | ALL | SEMI | ANTI] LEFT
-   if (!isLeft(kind()) && !(isInner(kind()) && strictness() == ASTTableJoin::Strictness::All))
+   if (!allowed_inner && !allowed_left)
        return false;

    const Names & right_keys = keyNamesRight();
@@ -951,16 +951,9 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect(
        setJoinStrictness(
            *select_query, settings.join_default_strictness, settings.any_join_distinct_right_table_keys, result.analyzed_join->table_join);

-       if (const auto * join_ast = select_query->join(); join_ast && tables_with_columns.size() >= 2)
-       {
-           auto & table_join_ast = join_ast->table_join->as<ASTTableJoin &>();
-           if (table_join_ast.using_expression_list && result.metadata_snapshot)
-               replaceAliasColumnsInQuery(table_join_ast.using_expression_list, result.metadata_snapshot->getColumns(), result.array_join_result_to_source, getContext());
-           if (table_join_ast.on_expression && result.metadata_snapshot)
-               replaceAliasColumnsInQuery(table_join_ast.on_expression, result.metadata_snapshot->getColumns(), result.array_join_result_to_source, getContext());
-
-           collectJoinedColumns(*result.analyzed_join, table_join_ast, tables_with_columns, result.aliases);
-       }
+       auto * table_join_ast = select_query->join() ? select_query->join()->table_join->as<ASTTableJoin>() : nullptr;
+       if (table_join_ast && tables_with_columns.size() >= 2)
+           collectJoinedColumns(*result.analyzed_join, *table_join_ast, tables_with_columns, result.aliases);

        result.aggregates = getAggregates(query, *select_query);
        result.window_function_asts = getWindowFunctions(query, *select_query);
@@ -971,8 +964,19 @@ TreeRewriterResultPtr TreeRewriter::analyzeSelect(
        bool is_initiator = getContext()->getClientInfo().distributed_depth == 0;
        if (settings.optimize_respect_aliases && result.metadata_snapshot && is_initiator)
        {
+           std::unordered_set<IAST *> excluded_nodes;
+           {
+               /// Do not replace ALIASed columns in JOIN ON/USING sections
+               if (table_join_ast && table_join_ast->on_expression)
+                   excluded_nodes.insert(table_join_ast->on_expression.get());
+               if (table_join_ast && table_join_ast->using_expression_list)
+                   excluded_nodes.insert(table_join_ast->using_expression_list.get());
+           }
+
+           bool is_changed = replaceAliasColumnsInQuery(query, result.metadata_snapshot->getColumns(),
+                                                        result.array_join_result_to_source, getContext(), excluded_nodes);
            /// If query is changed, we need to redo some work to correct name resolution.
-           if (replaceAliasColumnsInQuery(query, result.metadata_snapshot->getColumns(), result.array_join_result_to_source, getContext()))
+           if (is_changed)
            {
                result.aggregates = getAggregates(query, *select_query);
                result.window_function_asts = getWindowFunctions(query, *select_query);
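Note: with excluded_nodes, ALIAS columns mentioned in JOIN ON/USING are no longer rewritten to their source expressions, so they can be used as join keys. A rough sketch of what this enables, assuming two hypothetical tables with an ALIAS column (the new test later in this diff exercises the same pattern):

    CREATE TABLE lhs (t DateTime, t_alias DateTime ALIAS t) ENGINE = MergeTree ORDER BY t;
    CREATE TABLE rhs (t DateTime, t_alias DateTime ALIAS t) ENGINE = MergeTree ORDER BY t;
    SELECT count() FROM lhs JOIN rhs ON lhs.t_alias = rhs.t;   -- ALIAS column in the ON section
    SELECT count() FROM lhs JOIN rhs USING (t_alias);          -- and in USING
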
@@ -51,6 +51,7 @@
 #include <Common/ProfileEvents.h>

 #include <Common/SensitiveDataMasker.h>
+#include "IO/CompressionMethod.h"

 #include <Processors/Transforms/LimitsCheckingTransform.h>
 #include <Processors/Transforms/MaterializingTransform.h>
@@ -1094,9 +1095,17 @@ void executeQuery(
                throw Exception("INTO OUTFILE is not allowed", ErrorCodes::INTO_OUTFILE_NOT_ALLOWED);

            const auto & out_file = ast_query_with_output->out_file->as<ASTLiteral &>().value.safeGet<std::string>();
+
+           std::string compression_method;
+           if (ast_query_with_output->compression)
+           {
+               const auto & compression_method_node = ast_query_with_output->compression->as<ASTLiteral &>();
+               compression_method = compression_method_node.value.safeGet<std::string>();
+           }
+
            compressed_buffer = wrapWriteBufferWithCompressionMethod(
                std::make_unique<WriteBufferFromFile>(out_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_EXCL | O_CREAT),
-               chooseCompressionMethod(out_file, ""),
+               chooseCompressionMethod(out_file, compression_method),
                /* compression level = */ 3
            );
        }
@@ -1142,9 +1151,17 @@ void executeQuery(
                throw Exception("INTO OUTFILE is not allowed", ErrorCodes::INTO_OUTFILE_NOT_ALLOWED);

            const auto & out_file = typeid_cast<const ASTLiteral &>(*ast_query_with_output->out_file).value.safeGet<std::string>();
+
+           std::string compression_method;
+           if (ast_query_with_output->compression)
+           {
+               const auto & compression_method_node = ast_query_with_output->compression->as<ASTLiteral &>();
+               compression_method = compression_method_node.value.safeGet<std::string>();
+           }
+
            compressed_buffer = wrapWriteBufferWithCompressionMethod(
                std::make_unique<WriteBufferFromFile>(out_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_EXCL | O_CREAT),
-               chooseCompressionMethod(out_file, ""),
+               chooseCompressionMethod(out_file, compression_method),
                /* compression level = */ 3
            );
        }
@@ -7,9 +7,13 @@ namespace DB
 {

 bool replaceAliasColumnsInQuery(
-   ASTPtr & ast, const ColumnsDescription & columns, const NameToNameMap & array_join_result_to_source, ContextPtr context)
+   ASTPtr & ast,
+   const ColumnsDescription & columns,
+   const NameToNameMap & array_join_result_to_source,
+   ContextPtr context,
+   const std::unordered_set<IAST *> & excluded_nodes)
 {
-   ColumnAliasesVisitor::Data aliases_column_data(columns, array_join_result_to_source, context);
+   ColumnAliasesVisitor::Data aliases_column_data(columns, array_join_result_to_source, context, excluded_nodes);
    ColumnAliasesVisitor aliases_column_visitor(aliases_column_data);
    aliases_column_visitor.visit(ast);
    return aliases_column_data.changed;
@@ -12,6 +12,10 @@ class ColumnsDescription;

 /// Replace storage alias columns in select query if possible. Return true if the query is changed.
 bool replaceAliasColumnsInQuery(
-   ASTPtr & ast, const ColumnsDescription & columns, const NameToNameMap & array_join_result_to_source, ContextPtr context);
+   ASTPtr & ast,
+   const ColumnsDescription & columns,
+   const NameToNameMap & array_join_result_to_source,
+   ContextPtr context,
+   const std::unordered_set<IAST *> & excluded_nodes = {});

 }
@@ -57,6 +57,8 @@ void ASTInsertQuery::formatImpl(const FormatSettings & settings, FormatState & s
    if (infile)
    {
        settings.ostr << (settings.hilite ? hilite_keyword : "") << " FROM INFILE " << (settings.hilite ? hilite_none : "") << infile->as<ASTLiteral &>().value.safeGet<std::string>();
+       if (compression)
+           settings.ostr << (settings.hilite ? hilite_keyword : "") << " COMPRESSION " << (settings.hilite ? hilite_none : "") << compression->as<ASTLiteral &>().value.safeGet<std::string>();
    }
    if (!format.empty())
    {
@@ -12,7 +12,6 @@ class ReadBuffer;
 class ASTInsertQuery : public IAST
 {
 public:
-   /// Part of hash
    StorageID table_id = StorageID::createEmpty();
    ASTPtr columns;
    String format;
@@ -23,6 +22,7 @@ public:
    ASTPtr select;
    ASTPtr watch;
    ASTPtr infile;
+   ASTPtr compression;

    /// Data inlined into query
    const char * data = nullptr;
@@ -2,6 +2,7 @@

 #include <Parsers/IAST.h>
 #include <IO/Operators.h>
+#include "Parsers/IAST_fwd.h"


 namespace DB
@@ -16,6 +17,7 @@ public:
    ASTPtr out_file;
    ASTPtr format;
    ASTPtr settings_ast;
+   ASTPtr compression;

    void formatImpl(const FormatSettings & s, FormatState & state, FormatStateStacked frame) const final;

@@ -1,6 +1,7 @@
 #pragma once

 #include <Parsers/IAST.h>
+#include <common/EnumReflection.h>


 namespace DB
@@ -25,8 +25,10 @@ namespace ErrorCodes

 bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
 {
+   /// Create parsers
    ParserKeyword s_insert_into("INSERT INTO");
    ParserKeyword s_from_infile("FROM INFILE");
+   ParserKeyword s_compression("COMPRESSION");
    ParserKeyword s_table("TABLE");
    ParserKeyword s_function("FUNCTION");
    ParserToken s_dot(TokenType::Dot);
@@ -45,6 +47,8 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
    ParserStringLiteral infile_name_p;
    ParserExpressionWithOptionalAlias exp_elem_p(false);

+   /// create ASTPtr variables (result of parsing will be put in them).
+   /// They will be used to initialize ASTInsertQuery's fields.
    ASTPtr database;
    ASTPtr table;
    ASTPtr infile;
@@ -55,20 +59,28 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
    ASTPtr table_function;
    ASTPtr settings_ast;
    ASTPtr partition_by_expr;
+   ASTPtr compression;

    /// Insertion data
    const char * data = nullptr;

+   /// Check for key words `INSERT INTO`. If it isn't found, the query can't be parsed as insert query.
    if (!s_insert_into.ignore(pos, expected))
        return false;

+   /// try to find 'TABLE'
    s_table.ignore(pos, expected);

+   /// Search for 'FUNCTION'. If this key word is in query, read fields for insertion into 'TABLE FUNCTION'.
+   /// Word table is optional for table functions. (for example, s3 table function)
+   /// Otherwise fill 'TABLE' fields.
    if (s_function.ignore(pos, expected))
    {
+       /// Read function name
        if (!table_function_p.parse(pos, table_function, expected))
            return false;

+       /// Support insertion values with partition by.
        if (s_partition_by.ignore(pos, expected))
        {
            if (!exp_elem_p.parse(pos, partition_by_expr, expected))
@@ -77,9 +89,12 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
    }
    else
    {
+       /// Read one word. It can be table or database name.
        if (!name_p.parse(pos, table, expected))
            return false;

+       /// If there is a dot, previous name was database name,
+       /// so read table name after dot.
        if (s_dot.ignore(pos, expected))
        {
            database = table;
@@ -98,26 +113,41 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
            return false;
    }

-   Pos before_values = pos;
+   /// Check if file is a source of data.

    if (s_from_infile.ignore(pos, expected))
    {
+       /// Read its name to process it later
        if (!infile_name_p.parse(pos, infile, expected))
            return false;
+
+       /// Check for 'COMPRESSION' parameter (optional)
+       if (s_compression.ignore(pos, expected))
+       {
+           /// Read compression name. Create parser for this purpose.
+           ParserStringLiteral compression_p;
+           if (!compression_p.parse(pos, compression, expected))
+               return false;
+       }
    }

+   Pos before_values = pos;
+
    /// VALUES or FROM INFILE or FORMAT or SELECT
    if (!infile && s_values.ignore(pos, expected))
    {
+       /// If VALUES is defined in query, everything except setting will be parsed as data
        data = pos->begin;
    }
    else if (s_format.ignore(pos, expected))
    {
+       /// If FORMAT is defined, read format name
        if (!name_p.parse(pos, format, expected))
            return false;
    }
    else if (s_select.ignore(pos, expected) || s_with.ignore(pos,expected))
    {
+       /// If SELECT is defined, return to position before select and parse
+       /// rest of query as SELECT query.
        pos = before_values;
        ParserSelectWithUnionQuery select_p;
        select_p.parse(pos, select, expected);
@@ -128,6 +158,8 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
    }
    else if (s_watch.ignore(pos, expected))
    {
+       /// If WATCH is defined, return to position before WATCH and parse
+       /// rest of query as WATCH query.
        pos = before_values;
        ParserWatchQuery watch_p;
        watch_p.parse(pos, watch, expected);
@@ -138,11 +170,14 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
    }
    else
    {
+       /// If all previous conditions were false, query is incorrect
        return false;
    }

+   /// Read SETTINGS if they are defined
    if (s_settings.ignore(pos, expected))
    {
+       /// Settings are written like SET query, so parse them with ParserSetQuery
        ParserSetQuery parser_settings(true);
        if (!parser_settings.parse(pos, settings_ast, expected))
            return false;
@@ -155,13 +190,14 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
        InsertQuerySettingsPushDownVisitor(visitor_data).visit(select);
    }

+   /// In case of defined format, data follows it.
    if (format && !infile)
    {
        Pos last_token = pos;
        --last_token;
        data = last_token->end;
+
+       /// If format name is followed by ';' (end of query symbol) there is no data to insert.
        if (data < end && *data == ';')
            throw Exception("You have excessive ';' symbol before data for INSERT.\n"
                "Example:\n\n"
@@ -184,11 +220,16 @@ bool ParserInsertQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
            ++data;
    }

+   /// Create query and fill its fields.
    auto query = std::make_shared<ASTInsertQuery>();
    node = query;

    if (infile)
+   {
        query->infile = infile;
+       if (compression)
+           query->compression = compression;
+   }

    if (table_function)
    {
@@ -23,6 +23,7 @@
 #include <Parsers/ParserTablePropertiesQuery.h>
 #include <Parsers/ParserWatchQuery.h>
 #include <Parsers/QueryWithOutputSettingsPushDownVisitor.h>
+#include "Common/Exception.h"


 namespace DB
@@ -86,6 +87,14 @@ bool ParserQueryWithOutput::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
        if (!out_file_p.parse(pos, query_with_output.out_file, expected))
            return false;

+       ParserKeyword s_compression_method("COMPRESSION");
+       if (s_compression_method.ignore(pos, expected))
+       {
+           ParserStringLiteral compression;
+           if (!compression.parse(pos, query_with_output.compression, expected))
+               return false;
+       }
+
        query_with_output.children.push_back(query_with_output.out_file);
    }

@@ -34,6 +34,7 @@ InputFormatPtr getInputFormatFromASTInsertQuery(
    ContextPtr context,
    const ASTPtr & input_function)
 {
+   /// get ast query
    const auto * ast_insert_query = ast->as<ASTInsertQuery>();

    if (!ast_insert_query)
@@ -51,7 +52,6 @@ InputFormatPtr getInputFormatFromASTInsertQuery(
    }

    /// Data could be in parsed (ast_insert_query.data) and in not parsed yet (input_buffer_tail_part) part of query.
-
    auto input_buffer_ast_part = std::make_unique<ReadBufferFromMemory>(
        ast_insert_query->data, ast_insert_query->data ? ast_insert_query->end - ast_insert_query->data : 0);

@@ -59,6 +59,7 @@ InputFormatPtr getInputFormatFromASTInsertQuery(
        ? getReadBufferFromASTInsertQuery(ast)
        : std::make_unique<EmptyReadBuffer>();

+   /// Create a source from input buffer using format from query
    auto source = FormatFactory::instance().getInput(format, *input_buffer, header, context, context->getSettings().max_insert_block_size);
    source->addBuffer(std::move(input_buffer));
    return source;
@@ -104,8 +105,17 @@ std::unique_ptr<ReadBuffer> getReadBufferFromASTInsertQuery(const ASTPtr & ast)
        const auto & in_file_node = insert_query->infile->as<ASTLiteral &>();
        const auto in_file = in_file_node.value.safeGet<std::string>();

-       return wrapReadBufferWithCompressionMethod(
-           std::make_unique<ReadBufferFromFile>(in_file), chooseCompressionMethod(in_file, ""));
+       /// It can be compressed and compression method maybe specified in query
+       std::string compression_method;
+       if (insert_query->compression)
+       {
+           const auto & compression_method_node = insert_query->compression->as<ASTLiteral &>();
+           compression_method = compression_method_node.value.safeGet<std::string>();
+       }
+
+       /// Otherwise, it will be detected from file name automatically (by chooseCompressionMethod)
+       /// Buffer for reading from file is created and wrapped with appropriate compression method
+       return wrapReadBufferWithCompressionMethod(std::make_unique<ReadBufferFromFile>(in_file), chooseCompressionMethod(in_file, compression_method));
    }

    std::vector<std::unique_ptr<ReadBuffer>> buffers;
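Note: together with the parser and AST changes above, this wires an explicit COMPRESSION clause into both directions of file I/O. A usage sketch; the file path and target table name are hypothetical, while the clause and the 'gz' method come from this patch and its test:

    SELECT 'Hello' INTO OUTFILE '/tmp/example_out' COMPRESSION 'gz' FORMAT TabSeparated;
    INSERT INTO TABLE example_table FROM INFILE '/tmp/example_out' COMPRESSION 'gz' FORMAT TabSeparated;
    -- Without COMPRESSION, the method is still inferred from the file extension (e.g. '.gz').
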
@@ -1299,34 +1299,48 @@ bool KeyCondition::tryParseAtomFromAST(const ASTPtr & node, ContextPtr context,
            }
        }

+       key_expr_type = recursiveRemoveLowCardinality(key_expr_type);
+       DataTypePtr key_expr_type_not_null;
+       bool key_expr_type_is_nullable = false;
+       if (const auto * nullable_type = typeid_cast<const DataTypeNullable *>(key_expr_type.get()))
+       {
+           key_expr_type_is_nullable = true;
+           key_expr_type_not_null = nullable_type->getNestedType();
+       }
+       else
+           key_expr_type_not_null = key_expr_type;
+
        bool cast_not_needed = is_set_const /// Set args are already casted inside Set::createFromAST
-           || ((isNativeNumber(key_expr_type) || isDateTime(key_expr_type))
+           || ((isNativeNumber(key_expr_type_not_null) || isDateTime(key_expr_type_not_null))
                && (isNativeNumber(const_type) || isDateTime(const_type))); /// Numbers and DateTime are accurately compared without cast.

-       if (!cast_not_needed && !key_expr_type->equals(*const_type))
+       if (!cast_not_needed && !key_expr_type_not_null->equals(*const_type))
        {
            if (const_value.getType() == Field::Types::String)
            {
-               const_value = convertFieldToType(const_value, *key_expr_type);
+               const_value = convertFieldToType(const_value, *key_expr_type_not_null);
                if (const_value.isNull())
                    return false;
                // No need to set is_constant_transformed because we're doing exact conversion
            }
            else
            {
-               DataTypePtr common_type = getLeastSupertype({key_expr_type, const_type});
+               DataTypePtr common_type = getLeastSupertype({key_expr_type_not_null, const_type});
                if (!const_type->equals(*common_type))
                {
                    castValueToType(common_type, const_value, const_type, node);

                    // Need to set is_constant_transformed unless we're doing exact conversion
-                   if (!key_expr_type->equals(*common_type))
+                   if (!key_expr_type_not_null->equals(*common_type))
                        is_constant_transformed = true;
                }
-               if (!key_expr_type->equals(*common_type))
+               if (!key_expr_type_not_null->equals(*common_type))
                {
+                   auto common_type_maybe_nullable
+                       = key_expr_type_is_nullable ? DataTypePtr(std::make_shared<DataTypeNullable>(common_type)) : common_type;
                    ColumnsWithTypeAndName arguments{
-                       {nullptr, key_expr_type, ""}, {DataTypeString().createColumnConst(1, common_type->getName()), common_type, ""}};
+                       {nullptr, key_expr_type, ""},
+                       {DataTypeString().createColumnConst(1, common_type_maybe_nullable->getName()), common_type_maybe_nullable, ""}};
                    FunctionOverloadResolverPtr func_builder_cast = CastInternalOverloadResolver<CastType::nonAccurate>::createImpl();
                    auto func_cast = func_builder_cast->build(arguments);
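Note: the KeyCondition change above compares the constant against the key column's non-Nullable (and non-LowCardinality) inner type, so a simple equality filter on a Nullable key column can be analyzed for index use instead of failing on the type mismatch. A sketch mirroring the new 02023 test added below (table name is illustrative):

    SET allow_suspicious_low_cardinality_types = 1;
    CREATE TABLE t_nullable_key (id LowCardinality(Nullable(Int64)))
    ENGINE = MergeTree ORDER BY id SETTINGS allow_nullable_key = 1, index_granularity = 1;
    INSERT INTO t_nullable_key VALUES (21585718595728998), (NULL);
    SELECT * FROM t_nullable_key WHERE id = 21585718595728998;
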
@@ -16,6 +16,12 @@ flat: any left
 2 2 2 2
 3 3 3 3
 4 0 0
+flat: any left + any_join_distinct_right_table_keys
+0 0 0 0
+1 1 1 1
+2 2 2 2
+3 3 3 3
+4 0 0
 flat: semi left
 0 0 0 0
 1 1 1 1
@@ -33,6 +33,8 @@ SELECT 'flat: left';
 SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 LEFT JOIN dict_flat d USING(key) ORDER BY key;
 SELECT 'flat: any left';
 SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 ANY LEFT JOIN dict_flat d USING(key) ORDER BY key;
+SELECT 'flat: any left + any_join_distinct_right_table_keys'; -- falls back to regular join
+SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 ANY LEFT JOIN dict_flat d USING(key) ORDER BY key SETTINGS any_join_distinct_right_table_keys = '1';
 SELECT 'flat: semi left';
 SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 SEMI JOIN dict_flat d USING(key) ORDER BY key;
 SELECT 'flat: anti left';
@@ -22,3 +22,13 @@ fact1t1_val1 fact1t2_val2
 fact2t1_val2 fact2t1_val2
 -
 2020-02-02 13:00:00 2020-02-05 13:00:00
+-
+1
+1
+1
+1
+-
+2020-01-01 12:00:00
+2020-01-01 12:00:00
+2020-01-01 12:00:00
+2020-01-01 12:00:00
@@ -2,17 +2,23 @@ DROP TABLE IF EXISTS t1;
 DROP TABLE IF EXISTS t2;

 CREATE TABLE t1 (
-   time DateTime, foo String, dimension_1 String,
+   time DateTime,
+   foo String,
+   dimension_1 String,
    dt Date MATERIALIZED toDate(time),
    dt1 Date MATERIALIZED toDayOfYear(time),
-   aliascol1 ALIAS foo || dimension_1
+   aliascol1 ALIAS foo || dimension_1,
+   time_alias DateTime ALIAS time
 ) ENGINE = MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY (dt, foo);

 CREATE TABLE t2 (
-   time DateTime, bar String, dimension_2 String,
+   time DateTime,
+   bar String,
+   dimension_2 String,
    dt Date MATERIALIZED toDate(time),
    dt2 Date MATERIALIZED toDayOfYear(time),
-   aliascol2 ALIAS bar || dimension_2
+   aliascol2 ALIAS bar || dimension_2,
+   time_alias DateTime ALIAS time
 ) ENGINE = MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY (dt, bar);

 INSERT INTO t1 VALUES ('2020-01-01 12:00:00', 'fact1', 't1_val1'), ('2020-02-02 13:00:00', 'fact2', 't1_val2'), ('2020-01-01 13:00:00', 'fact3', 't1_val3');
@@ -35,3 +41,15 @@ SELECT '-';
 SELECT t1.aliascol1, t2.aliascol2 FROM t1 JOIN t2 ON t1.foo = t2.bar ORDER BY t1.time, t2.time;
 SELECT '-';
 SELECT t1.time, t2.time FROM t1 JOIN t2 ON t1.aliascol1 = t2.aliascol2 ORDER BY t1.time, t2.time;
+SELECT '-';
+SELECT count() FROM t1 JOIN t2 ON t1.time_alias = t2.time;
+SELECT count() FROM t1 JOIN t2 ON t1.time = t2.time_alias;
+SELECT count() FROM t1 JOIN t2 ON t1.time_alias = t2.time_alias;
+SELECT count() FROM t1 JOIN t2 USING (time_alias);
+SELECT '-';
+SELECT t1.time as talias FROM t1 JOIN t2 ON talias = t2.time;
+SELECT t1.time as talias FROM t1 JOIN t2 ON talias = t2.time_alias;
+SELECT t2.time as talias FROM t1 JOIN t2 ON t1.time = talias;
+SELECT t2.time as talias FROM t1 JOIN t2 ON t1.time_alias = talias;
+SELECT time as talias FROM t1 JOIN t2 ON t1.time = talias; -- { serverError AMBIGUOUS_COLUMN_NAME }
+SELECT time as talias FROM t1 JOIN t2 ON talias = t2.time; -- { serverError AMBIGUOUS_COLUMN_NAME }
@@ -0,0 +1 @@
+21585718595728998

tests/queries/0_stateless/02023_nullable_int_uint_where.sql (new file, 10 lines)
@@ -0,0 +1,10 @@
+drop table if exists t1;
+
+set allow_suspicious_low_cardinality_types = 1;
+create table t1 (id LowCardinality(Nullable(Int64))) engine MergeTree order by id settings allow_nullable_key = 1, index_granularity = 1;
+
+insert into t1 values (21585718595728998), (null);
+
+select * from t1 where id = 21585718595728998;
+
+drop table t1;
@@ -0,0 +1,10 @@
+Hello, World! From client.
+Hello, World! From client.
+Hello, World! From client.
+Hello, World! From client.
+Hello, World! From client.
+Hello, World! From local.
+Hello, World! From local.
+Hello, World! From local.
+Hello, World! From local.
+Hello, World! From local.
tests/queries/0_stateless/02024_compression_in_query.sh (new executable file, 84 lines)
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+set -e
+
+#____________________CLIENT__________________
+# clear files from previous tests.
+[ -e "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output.gz ] && rm "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output.gz
+[ -e "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output ] && rm "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output
+[ -e "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_without_gz ] && rm "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_without_gz
+[ -e "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_without_gz.gz ] && rm "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_without_gz.gz
+[ -e "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_without_gz_to_decomp ] && rm "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_without_gz_to_decomp
+[ -e "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_to_decomp ] && rm "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_to_decomp
+
+# create files using compression method and without it to check that both queries work correct
+${CLICKHOUSE_CLIENT} --query "SELECT * FROM (SELECT 'Hello, World! From client.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz' FORMAT TabSeparated;"
+${CLICKHOUSE_CLIENT} --query "SELECT * FROM (SELECT 'Hello, World! From client.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz' COMPRESSION 'GZ' FORMAT TabSeparated;"
+
+# check content of files
+cp ${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_to_decomp.gz
+gunzip ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_to_decomp.gz
+cat ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_to_decomp
+
+cp ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz_to_decomp.gz
+gunzip ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz_to_decomp.gz
+cat ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz_to_decomp
+
+# create table to check inserts
+${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_compression_keyword;"
+${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_compression_keyword (text String) Engine=Memory;"
+
+# insert them
+${CLICKHOUSE_CLIENT} --query "INSERT INTO TABLE test_compression_keyword FROM INFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz' FORMAT TabSeparated;"
+${CLICKHOUSE_CLIENT} --query "INSERT INTO TABLE test_compression_keyword FROM INFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz' COMPRESSION 'gz' FORMAT TabSeparated;"
+${CLICKHOUSE_CLIENT} --query "INSERT INTO TABLE test_compression_keyword FROM INFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz' COMPRESSION 'gz' FORMAT TabSeparated;"
+
+# check result
+${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_compression_keyword;"
+
+# delete all created elements
+rm -f "${CLICKHOUSE_TMP}/test_comp_for_input_and_output_to_decomp"
+rm -f "${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz"
+rm -f "${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz_to_decomp"
+rm -f "${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz"
+${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_compression_keyword;"
+
+#____________________LOCAL__________________
+# clear files from previous tests.
+[ -e "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output.gz ] && rm "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output.gz
+[ -e "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output ] && rm "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output
+[ -e "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_without_gz ] && rm "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_without_gz
+[ -e "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_without_gz.gz ] && rm "${CLICKHOUSE_TMP}"/test_comp_for_input_and_output_without_gz.gz
+
+# create files using compression method and without it to check that both queries work correct
+${CLICKHOUSE_LOCAL} --query "SELECT * FROM (SELECT 'Hello, World! From local.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz' FORMAT TabSeparated;"
+${CLICKHOUSE_LOCAL} --query "SELECT * FROM (SELECT 'Hello, World! From local.') INTO OUTFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz' COMPRESSION 'GZ' FORMAT TabSeparated;"
+
+# check content of files
+cp ${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_to_decomp.gz
+gunzip ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_to_decomp.gz
+cat ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_to_decomp
+
+cp ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz_to_decomp.gz
+gunzip ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz_to_decomp.gz
+cat ${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz_to_decomp
+
+# create table to check inserts
+${CLICKHOUSE_LOCAL} --query "
+DROP TABLE IF EXISTS test_compression_keyword;
+CREATE TABLE test_compression_keyword (text String) Engine=Memory;
+INSERT INTO TABLE test_compression_keyword FROM INFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz' FORMAT TabSeparated;
+INSERT INTO TABLE test_compression_keyword FROM INFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz' COMPRESSION 'gz' FORMAT TabSeparated;
+INSERT INTO TABLE test_compression_keyword FROM INFILE '${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz' COMPRESSION 'gz' FORMAT TabSeparated;
+SELECT * FROM test_compression_keyword;
+"
+
+# delete all created elements
+rm -f "${CLICKHOUSE_TMP}/test_comp_for_input_and_output_to_decomp"
+rm -f "${CLICKHOUSE_TMP}/test_comp_for_input_and_output.gz"
+rm -f "${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz_to_decomp"
+rm -f "${CLICKHOUSE_TMP}/test_comp_for_input_and_output_without_gz"
@@ -0,0 +1,23 @@
+┌─name─┬─type────────────────────────────────────────────────┬─default_type─┬─default_expression─┬─comment─────────────────┬─codec_expression─┬─ttl_expression───────┐
+│ d    │ Date                                                │              │                    │                         │                  │                      │
+│ n    │ Nullable(String)                                    │              │                    │ It is a nullable column │                  │                      │
+│ arr1 │ Array(UInt32)                                       │              │                    │                         │ ZSTD(1)          │                      │
+│ arr2 │ Array(Array(String))                                │              │                    │                         │                  │ d + toIntervalDay(1) │
+│ t    │ Tuple(s String, a Array(Tuple(a UInt32, b UInt32))) │              │                    │                         │ ZSTD(1)          │                      │
+└──────┴─────────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────────────────────┴──────────────────┴──────────────────────┘
+┌─name───────┬─type────────────────────────────────────────────────┬─default_type─┬─default_expression─┬─comment─────────────────┬─codec_expression─┬─ttl_expression───────┬─is_subcolumn─┐
+│ d          │ Date                                                │              │                    │                         │                  │                      │            0 │
+│ n          │ Nullable(String)                                    │              │                    │ It is a nullable column │                  │                      │            0 │
+│ arr1       │ Array(UInt32)                                       │              │                    │                         │ ZSTD(1)          │                      │            0 │
+│ arr2       │ Array(Array(String))                                │              │                    │                         │                  │ d + toIntervalDay(1) │            0 │
+│ t          │ Tuple(s String, a Array(Tuple(a UInt32, b UInt32))) │              │                    │                         │ ZSTD(1)          │                      │            0 │
+│ n.null     │ UInt8                                               │              │                    │ It is a nullable column │                  │                      │            1 │
+│ arr1.size0 │ UInt64                                              │              │                    │                         │                  │                      │            1 │
+│ arr2.size0 │ UInt64                                              │              │                    │                         │                  │ d + toIntervalDay(1) │            1 │
+│ arr2.size1 │ Array(UInt64)                                       │              │                    │                         │                  │ d + toIntervalDay(1) │            1 │
+│ t.s        │ String                                              │              │                    │                         │ ZSTD(1)          │                      │            1 │
+│ t.a        │ Array(Tuple(a UInt32, b UInt32))                    │              │                    │                         │                  │                      │            1 │
+│ t.a.size0  │ UInt64                                              │              │                    │                         │                  │                      │            1 │
+│ t.a.a      │ Array(UInt32)                                       │              │                    │                         │ ZSTD(1)          │                      │            1 │
+│ t.a.b      │ Array(UInt32)                                       │              │                    │                         │ ZSTD(1)          │                      │            1 │
+└────────────┴─────────────────────────────────────────────────────┴──────────────┴────────────────────┴─────────────────────────┴──────────────────┴──────────────────────┴──────────────┘
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS t_desc_subcolumns;
+
+CREATE TABLE t_desc_subcolumns
+(
+    d Date,
+    n Nullable(String) COMMENT 'It is a nullable column',
+    arr1 Array(UInt32) CODEC(ZSTD),
+    arr2 Array(Array(String)) TTL d + INTERVAL 1 DAY,
+    t Tuple(s String, a Array(Tuple(a UInt32, b UInt32))) CODEC(ZSTD)
+)
+ENGINE = MergeTree ORDER BY d;
+
+DESCRIBE TABLE t_desc_subcolumns FORMAT PrettyCompactNoEscapes;
+
+DESCRIBE TABLE t_desc_subcolumns FORMAT PrettyCompactNoEscapes
+SETTINGS describe_include_subcolumns = 1;
+
+DROP TABLE t_desc_subcolumns;