Merge branch 'master' into bug/trivial_count_optimization_with_array_join

Denny Crane 2022-09-11 15:23:10 -03:00 committed by GitHub
commit 0e4e868f29
318 changed files with 275 additions and 7214 deletions

contrib/capnproto vendored

@ -1 +1 @@
Subproject commit c8189ec3c27dacbd4a3288e682473010e377f593
Subproject commit 2e88221d3dde22266bfccf40eaee6ff9b40d113d

View File

@ -8,6 +8,9 @@ services:
- type: bind
source: ${keeper_binary:-}
target: /usr/bin/clickhouse
- type: bind
source: ${keeper_binary:-}
target: /usr/bin/clickhouse-keeper
- type: bind
source: ${keeper_config_dir1:-}
target: /etc/clickhouse-keeper
@ -38,6 +41,9 @@ services:
- type: bind
source: ${keeper_binary:-}
target: /usr/bin/clickhouse
- type: bind
source: ${keeper_binary:-}
target: /usr/bin/clickhouse-keeper
- type: bind
source: ${keeper_config_dir2:-}
target: /etc/clickhouse-keeper
@ -68,6 +74,9 @@ services:
- type: bind
source: ${keeper_binary:-}
target: /usr/bin/clickhouse
- type: bind
source: ${keeper_binary:-}
target: /usr/bin/clickhouse-keeper
- type: bind
source: ${keeper_config_dir3:-}
target: /etc/clickhouse-keeper

View File

@ -12,8 +12,9 @@ ALTER TABLE [db.]table [ON CLUSTER cluster] DELETE WHERE filter_expr
Deletes data matching the specified filtering expression. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations).
:::note
The `ALTER TABLE` prefix makes this syntax different from most other systems supporting SQL. It is intended to signify that unlike similar queries in OLTP databases this is a heavy operation not designed for frequent use.
:::note
The `ALTER TABLE` prefix makes this syntax different from most other systems supporting SQL. It is intended to signify that unlike similar queries in OLTP databases this is a heavy operation not designed for frequent use. `ALTER TABLE` is considered a heavyweight operation that requires the underlying data to be merged before it is deleted. For MergeTree tables, consider using the [`DELETE FROM` query](../delete.md), which performs a lightweight delete and can be considerably faster.
:::
The `filter_expr` must be of type `UInt8`. The query deletes rows in the table for which this expression takes a non-zero value.

View File

@ -0,0 +1,37 @@
---
slug: /en/sql-reference/statements/delete
sidebar_position: 36
sidebar_label: DELETE
---
# DELETE Statement
``` sql
DELETE FROM [db.]table [WHERE expr]
```
`DELETE FROM` removes rows from the table `[db.]table` that match the expression `expr`. The deleted rows are marked as deleted immediately and will be automatically filtered out of all subsequent queries. Cleanup of the data happens asynchronously in the background. This feature is only available for the MergeTree table engine family.
For example, the following query deletes all rows from the `hits` table where the `Title` column contains the text `hello`:
```sql
DELETE FROM hits WHERE Title LIKE '%hello%';
```
Lightweight deletes are asynchronous by default. Set `mutations_sync` equal to 1 to wait for one replica to process the statement, and set `mutations_sync` to 2 to wait for all replicas.
:::note
This feature is experimental and requires you to set `allow_experimental_lightweight_delete` to true:
```sql
SET allow_experimental_lightweight_delete = true;
```
:::
An [alternative way to delete rows](./alter/delete.md) in ClickHouse is `ALTER TABLE ... DELETE`, which might be more efficient if you do bulk deletes only occasionally and don't need the operation to be applied instantly. In most use cases the new lightweight `DELETE FROM` behavior will be considerably faster.
:::warning
Even though deletes are becoming more lightweight in ClickHouse, they should still not be used as aggressively as on an OLTP system. Lightweight deletes are currently efficient for wide parts, but for compact parts they can be a heavyweight operation, and it may be better to use `ALTER TABLE ... DELETE` in some scenarios.
:::

View File

@ -10,7 +10,7 @@ Makes the server "forget" about the existence of a table, a materialized view, o
**Syntax**
``` sql
DETACH TABLE|VIEW|DICTIONARY [IF EXISTS] [db.]name [ON CLUSTER cluster] [PERMANENTLY]
DETACH TABLE|VIEW|DICTIONARY [IF EXISTS] [db.]name [ON CLUSTER cluster] [PERMANENTLY] [SYNC]
```
Detaching does not delete the data or metadata of a table, a materialized view or a dictionary. If an entity was not detached `PERMANENTLY`, on the next server launch the server will read the metadata and recall the table/view/dictionary again. If an entity was detached `PERMANENTLY`, there will be no automatic recall.
@ -24,6 +24,8 @@ Note that you can not detach permanently the table which is already detached (te
Also, you can not [DROP](../../sql-reference/statements/drop#drop-table) a detached table, [CREATE TABLE](../../sql-reference/statements/create/table.md) with the same name as a permanently detached table, or replace a permanently detached table with another one using the [RENAME TABLE](../../sql-reference/statements/rename.md) query.
The `SYNC` modifier executes the action without delay.
**Example**
Creating a table:

View File

@ -6,7 +6,7 @@ sidebar_label: DROP
# DROP Statements
Deletes existing entity. If the `IF EXISTS` clause is specified, these queries do not return an error if the entity does not exist.
Deletes existing entity. If the `IF EXISTS` clause is specified, these queries do not return an error if the entity does not exist. If the `SYNC` modifier is specified, the entity is dropped without delay.
## DROP DATABASE
@ -15,7 +15,7 @@ Deletes all tables inside the `db` database, then deletes the `db` database itse
Syntax:
``` sql
DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster]
DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster] [SYNC]
```
## DROP TABLE
@ -25,7 +25,7 @@ Deletes the table.
Syntax:
``` sql
DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster] [SYNC]
```
## DROP DICTIONARY
@ -35,7 +35,7 @@ Deletes the dictionary.
Syntax:
``` sql
DROP DICTIONARY [IF EXISTS] [db.]name
DROP DICTIONARY [IF EXISTS] [db.]name [SYNC]
```
## DROP USER
@ -95,7 +95,7 @@ Deletes a view. Views can be deleted by a `DROP TABLE` command as well but `DROP
Syntax:
``` sql
DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster]
DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster] [SYNC]
```
## DROP FUNCTION

View File

@ -1,5 +1,5 @@
---
slug: /en/development/tests
slug: /zh/development/tests
sidebar_position: 70
sidebar_label: Testing
title: ClickHouse Testing

View File

@ -1,264 +0,0 @@
---
slug: /zh/whats-new/changelog/2017
---
### ClickHouse Release 1.1.54327, 2017-12-21 {#clickhouse-release-1-1-54327-2017-12-21}
This release contains bug fixes for the previous release 1.1.54318:
- Fixed a bug with a possible race condition in replication that could lead to data loss. This issue affects versions 1.1.54310 and 1.1.54318. If you use either of these versions with Replicated tables, updating is strongly recommended. The issue shows up in the logs as warning messages such as `Part ... from own log does not exist.` The issue is relevant even if you do not see these messages in the logs.
### ClickHouse Release 1.1.54318, 2017-11-30 {#clickhouse-release-1-1-54318-2017-11-30}
This release contains bug fixes for the previous release 1.1.54310:
- Fixed incorrect row deletions during merges in the SummingMergeTree engine.
- Fixed a memory leak in non-replicated MergeTree engines.
- Fixed performance degradation caused by frequent inserts in MergeTree engines.
- Fixed an issue that caused the replication queue to stop running.
- Fixed rotation and archiving of server logs.
### ClickHouse Release 1.1.54310, 2017-11-01 {#clickhouse-release-1-1-54310-2017-11-01}
#### New Features: {#new-features}
- Custom partitioning key for the MergeTree family of table engines.
- [Kafka](https://clickhouse.com/docs/en/operations/table_engines/kafka/) table engine.
- Added support for loading [CatBoost](https://catboost.yandex/) models and applying them to data stored in ClickHouse.
- Added support for time zones with non-integer offsets from UTC.
- Added support for arithmetic operations with time intervals.
- The range of values for the Date and DateTime types is extended to the year 2105.
- Added the `CREATE MATERIALIZED VIEW x TO y` query (specifies an existing table for storing the data of a materialized view).
- Added the `ATTACH TABLE` query without arguments.
- The processing logic for nested columns in a SummingMergeTree table whose names end in -Map was extracted into the sumMap aggregate function. You can now specify such columns explicitly.
- The maximum size of the IP trie dictionary is increased to 128M entries.
- Added the getSizeOfEnumType function.
- Added the sumWithOverflow aggregate function.
- Added support for the Cap'n Proto input format.
- You can now customize the compression level when using the zstd algorithm.
#### Backward Incompatible Changes: {#backward-incompatible-changes}
- Creation of temporary tables with an engine other than Memory is not allowed.
- Explicit creation of tables with the View or MaterializedView engine is not allowed.
- During table creation, a new check verifies that the sampling key expression is included in the primary key.
#### Bug Fixes: {#bug-fixes}
- Fixed hangups when synchronously inserting into a Distributed table.
- Fixed non-atomic adding and removing of parts in Replicated tables.
- Data inserted into a materialized view is no longer subjected to unnecessary deduplication.
- Executing a query to a Distributed table for which the local replica is lagging and remote replicas are unavailable no longer results in an error.
- Users do not need access permissions to the `default` database to create temporary tables any more.
- Fixed crashing when specifying the Array type without arguments.
- Fixed hangups when the disk volume containing server logs is full.
- Fixed an overflow in the toRelativeWeekNum function for the first week of the Unix epoch.
#### Build Improvements: {#build-improvements}
- Several third-party libraries (notably Poco) were updated and converted to git submodules.
### ClickHouse Release 1.1.54304, 2017-10-19 {#clickhouse-release-1-1-54304-2017-10-19}
#### New Features: {#new-features-1}
- TLS support in the native protocol (to enable it, set `tcp_ssl_port` in `config.xml`).
#### Bug Fixes: {#bug-fixes-1}
- `ALTER` for replicated tables now tries to start running as soon as possible.
- Fixed crashing when reading data with the setting `preferred_block_size_bytes=0.`
- Fixed crashes of `clickhouse-client` when pressing `Page Down`.
- Correct interpretation of certain complex queries with `GLOBAL IN` and `UNION ALL`.
- `FREEZE PARTITION` always works atomically now.
- Empty POST requests now return a response with code 411.
- Fixed interpretation errors for expressions like `CAST(1 AS Nullable(UInt8)).`
- Fixed an error when reading `Array(Nullable(String))` columns from `MergeTree` tables.
- Fixed crashing when parsing queries like `SELECT dummy AS dummy, dummy AS b`.
- Users are updated correctly when `users.xml` is invalid.
- Correct handling when an executable dictionary returns a non-zero response code.
### ClickHouse Release 1.1.54292, 2017-09-20 {#clickhouse-release-1-1-54292-2017-09-20}
#### New Features: {#new-features-2}
- Added the `pointInPolygon` function for working with coordinates on a coordinate plane.
- Added the `sumMap` aggregate function for calculating the sum of arrays, similar to `SummingMergeTree`.
- Added the `trunc` function. Improved performance of the rounding functions (`round`, `floor`, `ceil`, `roundToExp2`) and corrected the logic of how they work. Changed the logic of the `roundToExp2` function for fractions and negative numbers.
- The ClickHouse executable is now less dependent on the libc version. The same ClickHouse executable can run on a wide variety of Linux systems. There is still a dependency when using compiled queries (with the setting `compile = 1`, which is not used by default).
- Reduced the time needed for dynamic compilation of queries.
#### Bug Fixes: {#bug-fixes-2}
- Fixed an error that sometimes produced `part ... intersects previous part` messages and weakened replica consistency.
- Fixed an error that caused the server to lock up if ZooKeeper was unavailable during shutdown.
- Removed excessive logging when restoring replicas.
- Fixed an error in the UNION ALL implementation.
- Fixed an error in the concat function that occurred if the first column in a block has the Array type.
- Progress is now displayed correctly in the system.merges table.
### ClickHouse Release 1.1.54289, 2017-09-13 {#clickhouse-release-1-1-54289-2017-09-13}
#### New Features: {#new-features-3}
- `SYSTEM` queries for server administration: `SYSTEM RELOAD DICTIONARY`, `SYSTEM RELOAD DICTIONARIES`, `SYSTEM DROP DNS CACHE`, `SYSTEM SHUTDOWN`, `SYSTEM KILL`.
- Added functions for working with arrays: `concat`, `arraySlice`, `arrayPushBack`, `arrayPushFront`, `arrayPopBack`, `arrayPopFront`.
- Added the `root` and `identity` parameters for the ZooKeeper configuration. This allows you to isolate individual users on the same ZooKeeper cluster.
- Added the aggregate functions `groupBitAnd`, `groupBitOr`, and `groupBitXor` (for compatibility, they are also available under the names `BIT_AND`, `BIT_OR`, and `BIT_XOR`).
- External dictionaries can be loaded from MySQL by specifying a socket in the filesystem.
- External dictionaries can be loaded from MySQL over SSL (the `ssl_cert`, `ssl_key`, and `ssl_ca` parameters).
- Added the `max_network_bandwidth_for_user` setting to restrict the overall bandwidth use for queries per user.
- Support for `DROP TABLE` for temporary tables.
- Support for reading `DateTime` values in Unix timestamp format from the `CSV` and `JSONEachRow` formats.
- Lagging replicas in distributed queries are now excluded by default (the default threshold is 5 minutes).
- FIFO locking is used during ALTER: an ALTER query is not blocked indefinitely by continuously running queries.
- Option to set `umask` in the config file.
- Improved performance for queries with `DISTINCT`.
#### Bug Fixes: {#bug-fixes-3}
- Improved the process for deleting old nodes in ZooKeeper. Previously, old nodes sometimes did not get deleted if there were very frequent inserts, which caused the server to be slow to shut down, among other things.
- Fixed randomization when choosing hosts for the connection to ZooKeeper.
- Fixed the exclusion of lagging replicas in distributed queries if the replica is localhost.
- Fixed an error where a data part in a `ReplicatedMergeTree` table could be broken after running `ALTER MODIFY` on an element in a `Nested` structure.
- Fixed an error that could cause SELECT queries to "hang".
- Improvements to distributed DDL queries.
- Fixed the query `CREATE TABLE ... AS <materialized view>`.
- Resolved a deadlock in the `ALTER ... CLEAR COLUMN IN PARTITION` query for Buffer tables.
- Fixed the invalid default value for `Enum`s (0 instead of the minimum) when using the `JSONEachRow` and `TSKV` formats.
- Resolved the appearance of zombie processes when using a dictionary with an `executable` source.
- Fixed a segfault for HEAD queries.
#### Improved Workflow for Developing and Assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse}
- You can use `pbuilder` to build ClickHouse.
- You can use `libc++` instead of `libstdc++` for builds on Linux.
- Added instructions for using static code analysis tools: `Coverage`, `clang-tidy`, `cppcheck`.
#### Please Note When Upgrading: {#please-note-when-upgrading}
- There is now a higher default value for the MergeTree setting `max_bytes_to_merge_at_max_space_in_pool` (the maximum total size of data parts to merge, in bytes): it has been increased from 100 GiB to 150 GiB. This might result in large merges running after the server upgrade, which could cause an increased load on the disk subsystem. If the free space available on the server is less than twice the total amount of the merges that are running, all other merges will stop running, including merges of small data parts. As a result, INSERT queries will fail with the message "Merges are processing significantly slower than inserts." Use the `SELECT * FROM system.merges` query to monitor the situation. You can also check the `DiskSpaceReservedForMerge` metric in the `system.metrics` table, or in Graphite. You do not need to do anything to fix this, since the issue will resolve itself once the large merges finish. If you find this unacceptable, you can restore the previous value for the `max_bytes_to_merge_at_max_space_in_pool` setting. To do this, go to the `<merge_tree>` section in config.xml, set ``` <merge_tree>``<max_bytes_to_merge_at_max_space_in_pool>107374182400</max_bytes_to_merge_at_max_space_in_pool> ``` and restart the server.
### ClickHouse Release 1.1.54284, 2017-08-29 {#clickhouse-release-1-1-54284-2017-08-29}
- This is a bug fix release for the previous 1.1.54282 release. It fixes leaks in the parts directory in ZooKeeper.
### ClickHouse Release 1.1.54282, 2017-08-23 {#clickhouse-release-1-1-54282-2017-08-23}
This release contains bug fixes for the previous release 1.1.54276:
- Fixed `DB::Exception: Assertion violation: !_path.empty()` when inserting into a Distributed table.
- Fixed parsing when inserting in RowBinary format if the input data begins with ';'.
- Errors during runtime compilation of certain aggregate functions (e.g. `groupArray()`).
### ClickHouse Release 1.1.54276, 2017-08-16 {#clickhouse-release-1-1-54276-2017-08-16}
#### New Features: {#new-features-4}
- Added an optional WITH section for a SELECT query. Example query: `WITH 1+1 AS a SELECT a, a*a`.
- INSERT can be performed synchronously in a Distributed table: OK is returned only after all the data is saved on all the shards. This is activated by the setting `insert_distributed_sync=1`.
- Added the UUID data type for working with 16-byte identifiers.
- Added aliases of CHAR, FLOAT, and other types for compatibility with Tableau.
- Added the toYYYYMM, toYYYYMMDD, and toYYYYMMDDhhmmss functions for converting time into numbers.
- You can use IP addresses (together with the hostname) to identify servers for clustered DDL queries.
- Added support for non-constant arguments and negative offsets in the function `substring(str, pos, len)`.
- Added the max_size parameter for the `groupArray(max_size)(column)` aggregate function and optimized its performance.
#### Main Changes: {#main-changes}
- Security improvements: all server files are created with 0640 permissions (can be changed via the `<umask>` config parameter).
- Improved error messages for queries with invalid syntax.
- Significantly reduced memory consumption and improved performance when merging large sections of MergeTree data.
- Significantly increased the performance of data merges for the ReplacingMergeTree engine.
- Improved performance for asynchronous inserts from a Distributed table by combining multiple source inserts. To enable this functionality, use the setting `distributed_directory_monitor_batch_inserts=1`.
#### Backward Incompatible Changes: {#backward-incompatible-changes-1}
- Changed the binary format of aggregate states of `groupArray(array_column)` functions for arrays.
#### Complete List of Changes: {#complete-list-of-changes}
- Added the `output_format_json_quote_denormals` setting, which enables outputting nan and inf values in JSON format.
- Optimized stream allocation when reading from a Distributed table.
- Settings can be configured in readonly mode if the value does not change.
- Added the ability to retrieve non-integer granules of the MergeTree engine in order to meet restrictions on the block size specified in the `preferred_block_size_bytes` setting. The purpose is to reduce the consumption of RAM and increase cache locality when processing queries from tables with large columns.
- Efficient use of indexes that contain expressions like `toStartOfHour(x)` for conditions like `toStartOfHour(x) op сonstexpr`.
- Added new settings for MergeTree engines (the merge_tree section in config.xml):
- `replicad_deduplication_window_seconds` sets the number of seconds allowed for deduplicating inserts in Replicated tables.
- `cleanup_delay_period` sets how often to start cleanup to remove outdated data.
- `Replicationd_can_become_leader` can prevent a replica from becoming the leader (and assigning merges).
- Accelerated cleanup to remove outdated data from ZooKeeper.
- Multiple improvements and fixes for clustered DDL queries. Of particular interest is the new setting `distributed_ddl_task_timeout`, which limits the time to wait for a response from the servers in the cluster. If a DDL request has not been performed on all hosts, the response will contain a timeout error and the request will be executed in asynchronous mode.
- Improved display of stack traces in the server logs.
- Added the "none" value for the compression method.
- You can use multiple dictionaries_config sections in config.xml.
- It is possible to connect to MySQL through a socket in the filesystem.
- The `system.parts` table has a new column with information about the size of marks, in bytes.
#### Bug Fixes: {#bug-fixes-4}
- Distributed tables using a Merge table now work correctly for a SELECT query with a condition on the `_table` field.
- Fixed a rare race condition in ReplicatedMergeTree when checking data parts.
- Fixed possible freezing of `leader election` when starting a server.
- The `max_replica_delay_for_distributed_queries` setting was ignored when using a local replica of the data source. This has been fixed.
- Fixed incorrect behavior of `ALTER TABLE CLEAR COLUMN IN PARTITION` when attempting to clean a non-existing column.
- Fixed an exception in the multiIf function when using empty arrays or strings.
- Fixed excessive memory allocations when deserializing Native format.
- Fixed incorrect auto-update of Trie dictionaries.
- Fixed an exception when running queries with a GROUP BY clause from a Merge table when using SAMPLE.
- Fixed a crash of GROUP BY when using `distributed_aggregation_memory_efficient=1`.
- Now you can specify `database.table` on the right side of IN and JOIN.
- Too many threads were used for parallel aggregation. This has been fixed.
- Fixed how the `if` function works with FixedString arguments.
- SELECT worked incorrectly from a Distributed table for shards with a weight of 0. This has been fixed.
- Running `CREATE VIEW IF EXISTS` no longer causes crashes.
- Fixed incorrect behavior when `input_format_skip_unknown_fields=1` is set and there are negative numbers.
- Fixed an infinite loop in the `dictGetHierarchy()` function if there is some invalid data in the dictionary.
- Fixed `Syntax error: unexpected (...)` errors when running distributed queries with subqueries in an IN or JOIN clause and Merge tables.
- Fixed the incorrect interpretation of a SELECT query from Dictionary tables.
- Fixed the "Cannot mremap" error when using arrays in IN and JOIN clauses with more than 2 billion elements.
- Fixed failover for dictionaries with MySQL as the source.
#### Improved Workflow for Developing and Assembling ClickHouse: {#improved-workflow-for-developing-and-assembling-clickhouse-1}
- Builds can be assembled in Arcadia.
- You can use gcc 7 to compile ClickHouse.
- Parallel builds using ccache+distcc are faster now.
### ClickHouse Release 1.1.54245, 2017-07-04 {#clickhouse-release-1-1-54245-2017-07-04}
#### New Features: {#new-features-5}
- Distributed DDL (for example, `CREATE TABLE ON CLUSTER`).
- The replicated query `ALTER TABLE CLEAR COLUMN IN PARTITION`.
- The Dictionary table engine (access to dictionary data in the form of a table).
- The Dictionary database engine (this type of database automatically has Dictionary tables available for all the connected external dictionaries).
- You can check for updates to the dictionary by sending a request to the source.
- Qualified column names.
- Quoting identifiers using double quotation marks.
- Sessions in the HTTP interface.
- The OPTIMIZE query for a Replicated table can run not only on the leader.
#### Backward Incompatible Changes: {#backward-incompatible-changes-2}
- Removed SET GLOBAL.
#### Minor Changes: {#minor-changes}
- Now, after an alert is triggered, the log prints the full stack trace.
- Relaxed the verification of the number of damaged/extra data parts at startup (there were too many false positives).
#### Bug Fixes: {#bug-fixes-5}
- Fixed a bad connection "sticking" when inserting into a Distributed table.
- GLOBAL IN now works for a query from a Merge table that looks at a Distributed table.
- The incorrect number of cores was detected on a Google Compute Engine virtual machine. This has been fixed.
- Changes in how an executable source of cached external dictionaries works.
- Fixed the comparison of strings containing null characters.
- Fixed the comparison of Float32 primary key fields with constants.
- Previously, an incorrect estimate of the size of a field could lead to overly large allocations.
- Fixed a crash when querying a Nullable column added to a table using ALTER.
- Fixed a crash when sorting by a Nullable column if the number of rows is less than LIMIT.
- Fixed an ORDER BY subquery consisting of only constant values.
- Previously, a Replicated table could remain in an invalid state after a failed DROP TABLE.
- Aliases for scalar subqueries with empty results are no longer lost.
- Now a query that used compilation does not fail with an error if the .so file gets damaged.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@ -133,11 +133,11 @@ func TestConfigFileFrameCopy(t *testing.T) {
require.Empty(t, errs)
i := 0
sizes := map[string]int64{
"users.xml": int64(2039),
"users.xml": int64(2017),
"default-password.xml": int64(188),
"config.xml": int64(61282),
"config.xml": int64(61260),
"server-include.xml": int64(168),
"user-include.xml": int64(582),
"user-include.xml": int64(559),
}
var checkedFiles []string
for {

View File

@ -1,4 +1,3 @@
<?xml version="1.0" ?>
<clickhouse>
<test_user>
<networks>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<!--
NOTE: User and query level settings are set up in "users.xml" file.
If you have accidentally specified user-level settings here, server won't start.

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<!-- See also the files in users.d directory where the settings can be overridden. -->
<!-- Profiles of settings. -->

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<!--
NOTE: User and query level settings are set up in "users.xml" file.
If you have accidentally specified user-level settings here, server won't start.

View File

@ -1,4 +1,3 @@
<?xml version="1.0" ?>
<clickhouse>
<listen_host>::</listen_host>
<listen_host>0.0.0.0</listen_host>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<!--
NOTE: User and query level settings are set up in "users.xml" file.
If you have accidentally specified user-level settings here, server won't start.

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<!-- Config that is used when server is run without config file. -->
<clickhouse>
<logger>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<profiles>
<default>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<users>
<default>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<!-- See also the files in users.d directory where the settings can be overridden. -->
@ -6,15 +5,6 @@
<profiles>
<!-- Default settings. -->
<default>
<!-- How to choose between replicas during distributed query processing.
random - choose random replica from set of replicas with minimum number of errors
nearest_hostname - from set of replicas with minimum number of errors, choose replica
with minimum number of different symbols between replica's hostname and local hostname
(Hamming distance).
in_order - first live replica is chosen in specified order.
first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
-->
<load_balancing>random</load_balancing>
</default>
<!-- Profile that allows only read queries. -->

View File

@ -519,7 +519,7 @@ private:
template <typename FieldType>
bool compareImpl(FieldType & x) const
{
auto val = get<FieldType>(rhs);
auto val = rhs.get<FieldType>();
if (val > x)
{
x = val;
@ -554,7 +554,7 @@ private:
template <typename FieldType>
bool compareImpl(FieldType & x) const
{
auto val = get<FieldType>(rhs);
auto val = rhs.get<FieldType>();
if (val < x)
{
x = val;

View File

@ -141,7 +141,7 @@ void ColumnArray::get(size_t n, Field & res) const
size, max_array_size_as_field);
res = Array();
Array & res_arr = DB::get<Array &>(res);
Array & res_arr = res.get<Array &>();
res_arr.reserve(size);
for (size_t i = 0; i < size; ++i)
@ -296,7 +296,7 @@ void ColumnArray::updateHashFast(SipHash & hash) const
void ColumnArray::insert(const Field & x)
{
const Array & array = DB::get<const Array &>(x);
const Array & array = x.get<const Array &>();
size_t size = array.size();
for (size_t i = 0; i < size; ++i)
getData().insert(array[i]);

View File

@ -63,7 +63,7 @@ public:
{
data.resize_fill(data.size() + length);
}
void insert(const Field & x) override { data.push_back(DB::get<T>(x)); }
void insert(const Field & x) override { data.push_back(x.get<T>()); }
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;
void popBack(size_t n) override

View File

@ -59,7 +59,7 @@ bool ColumnFixedString::isDefaultAt(size_t index) const
void ColumnFixedString::insert(const Field & x)
{
const String & s = DB::get<const String &>(x);
const String & s = x.get<const String &>();
if (s.size() > n)
throw Exception("Too large string '" + s + "' for FixedString column", ErrorCodes::TOO_LARGE_STRING_SIZE);

View File

@ -68,9 +68,9 @@ public:
UInt64 get64(size_t n) const override { return getDictionary().get64(getIndexes().getUInt(n)); }
UInt64 getUInt(size_t n) const override { return getDictionary().getUInt(getIndexes().getUInt(n)); }
Int64 getInt(size_t n) const override { return getDictionary().getInt(getIndexes().getUInt(n)); }
Float64 getFloat64(size_t n) const override { return getDictionary().getInt(getIndexes().getFloat64(n)); }
Float32 getFloat32(size_t n) const override { return getDictionary().getInt(getIndexes().getFloat32(n)); }
bool getBool(size_t n) const override { return getDictionary().getInt(getIndexes().getBool(n)); }
Float64 getFloat64(size_t n) const override { return getDictionary().getFloat64(getIndexes().getUInt(n)); }
Float32 getFloat32(size_t n) const override { return getDictionary().getFloat32(getIndexes().getUInt(n)); }
bool getBool(size_t n) const override { return getDictionary().getBool(getIndexes().getUInt(n)); }
bool isNullAt(size_t n) const override { return getDictionary().isNullAt(getIndexes().getUInt(n)); }
ColumnPtr cut(size_t start, size_t length) const override
{
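The corrected getters above all follow one shape: read the row's dictionary position through the index column with the unsigned getter, then apply the matching typed getter to the dictionary (the old code mixed the two up). A minimal stand-in sketch of that delegation, with simplified types that are not the real IColumn interface:

```cpp
#include <cstddef>
#include <vector>

// Simplified stand-in for a dictionary-encoded (low-cardinality) column:
// `indexes` holds one dictionary position per row, `dictionary` holds the
// distinct values. The row number is mapped through the indexes first, and
// only then is the typed read performed on the dictionary.
struct LowCardinalityFloat64
{
    std::vector<double> dictionary;
    std::vector<std::size_t> indexes;

    double getFloat64(std::size_t n) const { return dictionary[indexes[n]]; }
};
```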

View File

@ -74,7 +74,7 @@ void ColumnMap::get(size_t n, Field & res) const
size_t size = offsets[n] - offsets[n - 1];
res = Map();
auto & map = DB::get<Map &>(res);
auto & map = res.get<Map &>();
map.reserve(size);
for (size_t i = 0; i < size; ++i)
@ -98,7 +98,7 @@ void ColumnMap::insertData(const char *, size_t)
void ColumnMap::insert(const Field & x)
{
const auto & map = DB::get<const Map &>(x);
const auto & map = x.get<const Map &>();
nested->insert(Array(map.begin(), map.end()));
}

View File

@ -128,7 +128,7 @@ public:
void insert(const Field & x) override
{
const String & s = DB::get<const String &>(x);
const String & s = x.get<const String &>();
const size_t old_size = chars.size();
const size_t size_to_append = s.size() + 1;
const size_t new_size = old_size + size_to_append;

View File

@ -109,7 +109,7 @@ void ColumnTuple::get(size_t n, Field & res) const
const size_t tuple_size = columns.size();
res = Tuple();
Tuple & res_tuple = DB::get<Tuple &>(res);
Tuple & res_tuple = res.get<Tuple &>();
res_tuple.reserve(tuple_size);
for (size_t i = 0; i < tuple_size; ++i)
@ -137,7 +137,7 @@ void ColumnTuple::insertData(const char *, size_t)
void ColumnTuple::insert(const Field & x)
{
const auto & tuple = DB::get<const Tuple &>(x);
const auto & tuple = x.get<const Tuple &>();
const size_t tuple_size = columns.size();
if (tuple.size() != tuple_size)

View File

@ -90,7 +90,7 @@ void ColumnVector<T>::updateWeakHash32(WeakHash32 & hash) const
while (begin < end)
{
*hash_data = intHashCRC32(*begin, *hash_data);
*hash_data = hashCRC32(*begin, *hash_data);
++begin;
++hash_data;
}
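The updateWeakHash32 loop above now threads the previous per-row hash back in as the seed, which is what the `updated_value` parameter added to `hashCRC32` in the hunks below enables. A self-contained sketch of that rolling pattern (the combiner here is an illustrative stand-in, not the repo's `intHashCRC32`):

```cpp
#include <cstdint>
#include <vector>

// Illustrative combiner standing in for intHashCRC32(key, updated_value).
inline uint64_t combine(uint64_t key, uint64_t seed) { return seed * 1099511628211ULL ^ key; }

// Rolling hash over a column of values: each step folds the previous result
// back in as the seed instead of restarting from a fixed seed.
uint32_t weakHash(const std::vector<uint64_t> & data)
{
    uint32_t h = 0;
    for (uint64_t v : data)
        h = static_cast<uint32_t>(combine(v, h));
    return h;
}
```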
@ -918,7 +918,7 @@ ColumnPtr ColumnVector<T>::createWithOffsets(const IColumn::Offsets & offsets, c
auto res = this->create();
auto & res_data = res->getData();
T default_value = safeGet<T>(default_field);
T default_value = default_field.safeGet<T>();
res_data.resize_fill(total_rows, default_value);
for (size_t i = 0; i < offsets.size(); ++i)
res_data[offsets[i]] = data[i + shift];

View File

@ -301,7 +301,7 @@ public:
void insert(const Field & x) override
{
data.push_back(DB::get<T>(x));
data.push_back(x.get<T>());
}
void insertRangeFrom(const IColumn & src, size_t start, size_t length) override;

View File

@ -19,7 +19,7 @@ bool FieldVisitorSum::operator() (UInt64 & x) const
return x != 0;
}
bool FieldVisitorSum::operator() (Float64 & x) const { x += get<Float64>(rhs); return x != 0; }
bool FieldVisitorSum::operator() (Float64 & x) const { x += rhs.get<Float64>(); return x != 0; }
bool FieldVisitorSum::operator() (Null &) const { throw Exception("Cannot sum Nulls", ErrorCodes::LOGICAL_ERROR); }
bool FieldVisitorSum::operator() (String &) const { throw Exception("Cannot sum Strings", ErrorCodes::LOGICAL_ERROR); }
@ -37,4 +37,3 @@ bool FieldVisitorSum::operator() (AggregateFunctionStateData &) const
bool FieldVisitorSum::operator() (bool &) const { throw Exception("Cannot sum Bools", ErrorCodes::LOGICAL_ERROR); }
}

View File

@ -33,7 +33,7 @@ public:
template <typename T>
bool operator() (DecimalField<T> & x) const
{
x += get<DecimalField<T>>(rhs);
x += rhs.get<DecimalField<T>>();
return x.getValue() != T(0);
}
@ -47,4 +47,3 @@ public:
};
}

View File

@ -16,7 +16,7 @@ std::function<size_t(size_t index)> GetPriorityForLoadBalancing::getPriorityFunc
case LoadBalancing::NEAREST_HOSTNAME:
if (hostname_differences.empty())
throw Exception(ErrorCodes::LOGICAL_ERROR, "It's a bug: hostname_differences is not initialized");
get_priority = [&](size_t i) { return hostname_differences[i]; };
get_priority = [this](size_t i) { return hostname_differences[i]; };
break;
case LoadBalancing::IN_ORDER:
get_priority = [](size_t i) { return i; };
@ -36,7 +36,7 @@ std::function<size_t(size_t index)> GetPriorityForLoadBalancing::getPriorityFunc
* last_used = 3 -> get_priority: 4 3 0 1 2
* ...
* */
get_priority = [&](size_t i)
get_priority = [this, pool_size](size_t i)
{
++i;
return i < last_used ? pool_size - i : i - last_used;
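The capture-list change above is the whole fix: a lambda stored in a `std::function` that outlives the enclosing scope must not capture locals by reference. A small standalone sketch of the bug class, with illustrative names and logic not taken from the repo:

```cpp
#include <cstddef>
#include <functional>

// Returns a priority function. Capturing pool_size with [&] would leave a
// dangling reference once this function returns; capturing by value (or
// capturing `this` explicitly for member state, as the diff does) is safe.
std::function<std::size_t(std::size_t)> makePriority(std::size_t pool_size)
{
    return [pool_size](std::size_t i) { return (i + 1) % pool_size; };
}
```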

View File

@ -220,7 +220,7 @@ template <typename T> struct HashCRC32;
template <typename T>
requires (sizeof(T) <= sizeof(UInt64))
inline size_t hashCRC32(T key)
inline size_t hashCRC32(T key, DB::UInt64 updated_value = -1)
{
union
{
@ -229,14 +229,14 @@ inline size_t hashCRC32(T key)
} u;
u.out = 0;
u.in = key;
return intHashCRC32(u.out);
return intHashCRC32(u.out, updated_value);
}
template <typename T>
requires (sizeof(T) > sizeof(UInt64))
inline size_t hashCRC32(T key)
inline size_t hashCRC32(T key, DB::UInt64 updated_value = -1)
{
return intHashCRC32(key, -1);
return intHashCRC32(key, updated_value);
}
#define DEFINE_HASH(T) \

View File

@ -10,8 +10,7 @@ using namespace DB;
TEST(Common, getMultipleValuesFromConfig)
{
std::istringstream // STYLE_CHECK_ALLOW_STD_STRING_STREAM
xml_isteam(R"END(<?xml version="1.0"?>
<clickhouse>
xml_isteam(R"END(<clickhouse>
<first_level>
<second_level>0</second_level>
<second_level>1</second_level>

View File

@ -60,24 +60,6 @@ TEST(Common, SensitiveDataMasker)
"SELECT id FROM mysql('localhost:3308', 'database', 'table', 'root', '******') WHERE "
"ssn='000-00-0000' or email='hidden@hidden.test'");
#ifndef NDEBUG
// simple benchmark
auto start = std::chrono::high_resolution_clock::now();
static constexpr size_t iterations = 200000;
for (int i = 0; i < iterations; ++i)
{
std::string query2 = "SELECT id FROM mysql('localhost:3308', 'database', 'table', 'root', 'qwerty123') WHERE ssn='123-45-6789' or "
"email='JonhSmith@secret.domain.test'";
masker2.wipeSensitiveData(query2);
}
auto finish = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsed = finish - start;
std::cout << "Elapsed time: " << elapsed.count() << "s per " << iterations <<" calls (" << elapsed.count() * 1000000 / iterations << "µs per call)"
<< std::endl;
// I have: "Elapsed time: 3.44022s per 200000 calls (17.2011µs per call)"
masker2.printStats();
#endif
DB::SensitiveDataMasker maskerbad(*empty_xml_config , "");
// gtest has not good way to check exception content, so just do it manually (see https://github.com/google/googletest/issues/952 )
@ -101,9 +83,83 @@ TEST(Common, SensitiveDataMasker)
EXPECT_EQ(maskerbad.rulesCount(), 0);
EXPECT_EQ(maskerbad.wipeSensitiveData(x), 0);
try
{
std::istringstream // STYLE_CHECK_ALLOW_STD_STRING_STREAM
xml_isteam(R"END(<?xml version="1.0"?>
xml_isteam(R"END(<clickhouse>
<query_masking_rules>
<rule>
<name>test</name>
<regexp>abc</regexp>
</rule>
<rule>
<name>test</name>
<regexp>abc</regexp>
</rule>
</query_masking_rules>
</clickhouse>)END");
Poco::AutoPtr<Poco::Util::XMLConfiguration> xml_config = new Poco::Util::XMLConfiguration(xml_isteam);
DB::SensitiveDataMasker masker_xml_based_exception_check(*xml_config, "query_masking_rules");
ADD_FAILURE() << "XML should throw an error on bad XML" << std::endl;
}
catch (const DB::Exception & e)
{
EXPECT_EQ(
std::string(e.what()),
"query_masking_rules configuration contains more than one rule named 'test'.");
EXPECT_EQ(e.code(), DB::ErrorCodes::INVALID_CONFIG_PARAMETER);
}
try
{
std::istringstream // STYLE_CHECK_ALLOW_STD_STRING_STREAM
xml_isteam(R"END(<clickhouse>
<query_masking_rules>
<rule><name>test</name></rule>
</query_masking_rules>
</clickhouse>)END");
Poco::AutoPtr<Poco::Util::XMLConfiguration> xml_config = new Poco::Util::XMLConfiguration(xml_isteam);
DB::SensitiveDataMasker masker_xml_based_exception_check(*xml_config, "query_masking_rules");
ADD_FAILURE() << "XML should throw an error on bad XML" << std::endl;
}
catch (const DB::Exception & e)
{
EXPECT_EQ(
std::string(e.what()),
"query_masking_rules configuration, rule 'test' has no <regexp> node or <regexp> is empty.");
EXPECT_EQ(e.code(), DB::ErrorCodes::NO_ELEMENTS_IN_CONFIG);
}
try
{
std::istringstream // STYLE_CHECK_ALLOW_STD_STRING_STREAM
xml_isteam(R"END(<clickhouse>
<query_masking_rules>
<rule><name>test</name><regexp>())(</regexp></rule>
</query_masking_rules>
</clickhouse>)END");
Poco::AutoPtr<Poco::Util::XMLConfiguration> xml_config = new Poco::Util::XMLConfiguration(xml_isteam);
DB::SensitiveDataMasker masker_xml_based_exception_check(*xml_config, "query_masking_rules");
ADD_FAILURE() << "XML should throw an error on bad XML" << std::endl;
}
catch (const DB::Exception & e)
{
EXPECT_EQ(
std::string(e.message()),
"SensitiveDataMasker: cannot compile re2: ())(, error: unexpected ): ())(. Look at https://github.com/google/re2/wiki/Syntax for reference.: while adding query masking rule 'test'."
);
EXPECT_EQ(e.code(), DB::ErrorCodes::CANNOT_COMPILE_REGEXP);
}
{
std::istringstream // STYLE_CHECK_ALLOW_STD_STRING_STREAM
xml_isteam(R"END(
<clickhouse>
<query_masking_rules>
<rule>
@ -150,82 +206,4 @@ TEST(Common, SensitiveDataMasker)
masker_xml_based.printStats();
#endif
}
try
{
std::istringstream // STYLE_CHECK_ALLOW_STD_STRING_STREAM
xml_isteam_bad(R"END(<?xml version="1.0"?>
<clickhouse>
<query_masking_rules>
<rule>
<name>test</name>
<regexp>abc</regexp>
</rule>
<rule>
<name>test</name>
<regexp>abc</regexp>
</rule>
</query_masking_rules>
</clickhouse>)END");
Poco::AutoPtr<Poco::Util::XMLConfiguration> xml_config = new Poco::Util::XMLConfiguration(xml_isteam_bad);
DB::SensitiveDataMasker masker_xml_based_exception_check(*xml_config, "query_masking_rules");
ADD_FAILURE() << "XML should throw an error on bad XML" << std::endl;
}
catch (const DB::Exception & e)
{
EXPECT_EQ(
std::string(e.what()),
"query_masking_rules configuration contains more than one rule named 'test'.");
EXPECT_EQ(e.code(), DB::ErrorCodes::INVALID_CONFIG_PARAMETER);
}
try
{
std::istringstream // STYLE_CHECK_ALLOW_STD_STRING_STREAM
xml_isteam_bad(R"END(<?xml version="1.0"?>
<clickhouse>
<query_masking_rules>
<rule><name>test</name></rule>
</query_masking_rules>
</clickhouse>)END");
Poco::AutoPtr<Poco::Util::XMLConfiguration> xml_config = new Poco::Util::XMLConfiguration(xml_isteam_bad);
DB::SensitiveDataMasker masker_xml_based_exception_check(*xml_config, "query_masking_rules");
ADD_FAILURE() << "XML should throw an error on bad XML" << std::endl;
}
catch (const DB::Exception & e)
{
EXPECT_EQ(
std::string(e.what()),
"query_masking_rules configuration, rule 'test' has no <regexp> node or <regexp> is empty.");
EXPECT_EQ(e.code(), DB::ErrorCodes::NO_ELEMENTS_IN_CONFIG);
}
try
{
std::istringstream // STYLE_CHECK_ALLOW_STD_STRING_STREAM
xml_isteam_bad(R"END(<?xml version="1.0"?>
<clickhouse>
<query_masking_rules>
<rule><name>test</name><regexp>())(</regexp></rule>
</query_masking_rules>
</clickhouse>)END");
Poco::AutoPtr<Poco::Util::XMLConfiguration> xml_config = new Poco::Util::XMLConfiguration(xml_isteam_bad);
DB::SensitiveDataMasker masker_xml_based_exception_check(*xml_config, "query_masking_rules");
ADD_FAILURE() << "XML should throw an error on bad XML" << std::endl;
}
catch (const DB::Exception & e)
{
EXPECT_EQ(
std::string(e.message()),
"SensitiveDataMasker: cannot compile re2: ())(, error: unexpected ): ())(. Look at https://github.com/google/re2/wiki/Syntax for reference.: while adding query masking rule 'test'."
);
EXPECT_EQ(e.code(), DB::ErrorCodes::CANNOT_COMPILE_REGEXP);
}
}
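For contrast with the failure cases being reshuffled here, the positive path uses only calls already visible in this test file; a rough sketch (the rule content is illustrative, and the includes are the same ones this test already pulls in):

```cpp
// Build a masker from an XML string and wipe a query, mirroring the calls used
// elsewhere in this test: XMLConfiguration from an istringstream, then
// wipeSensitiveData on a mutable string.
std::istringstream xml_istream(R"END(<clickhouse>
<query_masking_rules>
    <rule><name>hide password</name><regexp>qwerty123</regexp><replace>******</replace></rule>
</query_masking_rules>
</clickhouse>)END");
Poco::AutoPtr<Poco::Util::XMLConfiguration> config = new Poco::Util::XMLConfiguration(xml_istream);
DB::SensitiveDataMasker masker(*config, "query_masking_rules");
std::string query = "CREATE USER u IDENTIFIED BY 'qwerty123'";
EXPECT_EQ(masker.wipeSensitiveData(query), 1u);   // one substring wiped in place
EXPECT_EQ(query, "CREATE USER u IDENTIFIED BY '******'");
```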

View File

@ -882,30 +882,6 @@ inline char & Field::reinterpret<char>()
return *reinterpret_cast<char *>(&storage);
}
template <typename T>
T get(const Field & field)
{
return field.template get<T>();
}
template <typename T>
T get(Field & field)
{
return field.template get<T>();
}
template <typename T>
T safeGet(const Field & field)
{
return field.template safeGet<T>();
}
template <typename T>
T safeGet(Field & field)
{
return field.template safeGet<T>();
}
template <typename T>
Field::Field(T && rhs, enable_if_not_field_or_bool_or_stringlike_t<T>) //-V730
{
@ -1036,4 +1012,3 @@ struct fmt::formatter<DB::Field>
return format_to(ctx.out(), "{}", toString(x));
}
};
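This hunk deletes the free-function wrappers, which is what drives the mechanical `get<T>(field)` to `field.get<T>()` rewrites throughout the rest of this diff. A minimal before/after sketch, assuming `<Core/Field.h>` and the usual ClickHouse type aliases are in scope:

```cpp
// Before this change, the removed free helpers simply forwarded to the members:
//   auto x = DB::get<UInt64>(field);
//   auto y = DB::safeGet<UInt64>(field);
// After it, call sites use the Field members directly.
DB::Field field = UInt64(42);
auto x = field.get<UInt64>();      // unchecked access to the stored value
auto y = field.safeGet<UInt64>();  // checked access, throws on a type mismatch
```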

View File

@ -37,7 +37,7 @@ int main(int argc, char ** argv)
std::cerr << applyVisitor(to_string, field) << std::endl;
}
get<Array &>(field).push_back(field);
field.get<Array &>().push_back(field);
std::cerr << applyVisitor(to_string, field) << std::endl;
std::cerr << (field < field2) << std::endl;
@ -71,7 +71,7 @@ int main(int argc, char ** argv)
size_t sum = 0;
for (size_t i = 0; i < n; ++i)
sum += safeGet<const String &>(array[i]).size();
sum += array[i].safeGet<const String &>().size();
watch.stop();
std::cerr << std::fixed << std::setprecision(2)

View File

@ -131,7 +131,7 @@ void JSONDataParser<ParserImpl>::traverseArrayElement(const Element & element, P
auto nested_key = getNameOfNested(paths[i], values[i]);
if (!nested_key.empty())
{
size_t array_size = get<const Array &>(values[i]).size();
size_t array_size = values[i].template get<const Array &>().size();
auto & current_nested_sizes = ctx.nested_sizes_by_key[nested_key];
if (current_nested_sizes.size() == ctx.current_size)
@ -154,7 +154,7 @@ void JSONDataParser<ParserImpl>::traverseArrayElement(const Element & element, P
auto nested_key = getNameOfNested(paths[i], values[i]);
if (!nested_key.empty())
{
size_t array_size = get<const Array &>(values[i]).size();
size_t array_size = values[i].template get<const Array &>().size();
auto & current_nested_sizes = ctx.nested_sizes_by_key[nested_key];
if (current_nested_sizes.empty())

View File

@ -19,14 +19,14 @@ namespace DB
void SerializationAggregateFunction::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
const AggregateFunctionStateData & state = get<const AggregateFunctionStateData &>(field);
const AggregateFunctionStateData & state = field.get<const AggregateFunctionStateData &>();
writeBinary(state.data, ostr);
}
void SerializationAggregateFunction::deserializeBinary(Field & field, ReadBuffer & istr) const
{
field = AggregateFunctionStateData();
AggregateFunctionStateData & s = get<AggregateFunctionStateData &>(field);
AggregateFunctionStateData & s = field.get<AggregateFunctionStateData &>();
readBinary(s.data, istr);
s.name = type_name;
}

View File

@ -24,7 +24,7 @@ namespace ErrorCodes
void SerializationArray::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
const Array & a = get<const Array &>(field);
const Array & a = field.get<const Array &>();
writeVarUInt(a.size(), ostr);
for (size_t i = 0; i < a.size(); ++i)
{
@ -38,7 +38,7 @@ void SerializationArray::deserializeBinary(Field & field, ReadBuffer & istr) con
size_t size;
readVarUInt(size, istr);
field = Array();
Array & arr = get<Array &>(field);
Array & arr = field.get<Array &>();
arr.reserve(size);
for (size_t i = 0; i < size; ++i)
nested->deserializeBinary(arr.emplace_back(), istr);
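The binary layout written above is simply a varint element count followed by each element's own serialization. A self-contained sketch of that layout using plain standard-library types (LEB128-style varint; fixed-width little-endian values stand in for whatever the nested serialization writes):

```cpp
#include <cstdint>
#include <vector>

// LEB128-style varint, as used for the element count.
void writeVarUInt(uint64_t x, std::vector<uint8_t> & out)
{
    while (x >= 0x80)
    {
        out.push_back(static_cast<uint8_t>(x & 0x7F) | 0x80);  // 7 payload bits + continuation bit
        x >>= 7;
    }
    out.push_back(static_cast<uint8_t>(x));
}

// Length prefix first, then the elements.
void serializeArray(const std::vector<uint64_t> & a, std::vector<uint8_t> & out)
{
    writeVarUInt(a.size(), out);
    for (uint64_t v : a)
        for (int i = 0; i < 8; ++i)
            out.push_back(static_cast<uint8_t>(v >> (8 * i)));  // little-endian payload
}
```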

View File

@ -14,7 +14,7 @@ namespace DB
template <typename T>
void SerializationDecimalBase<T>::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
FieldType x = get<DecimalField<T>>(field);
FieldType x = field.get<DecimalField<T>>();
writeBinary(x, ostr);
}

View File

@ -26,7 +26,7 @@ namespace ErrorCodes
void SerializationFixedString::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
const String & s = get<const String &>(field);
const String & s = field.get<const String &>();
ostr.write(s.data(), std::min(s.size(), n));
if (s.size() < n)
for (size_t i = s.size(); i < n; ++i)
@ -37,7 +37,7 @@ void SerializationFixedString::serializeBinary(const Field & field, WriteBuffer
void SerializationFixedString::deserializeBinary(Field & field, ReadBuffer & istr) const
{
field = String();
String & s = get<String &>(field);
String & s = field.get<String &>();
s.resize(n);
istr.readStrict(s.data(), n);
}

View File

@ -38,7 +38,7 @@ static IColumn & extractNestedColumn(IColumn & column)
void SerializationMap::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
const auto & map = get<const Map &>(field);
const auto & map = field.get<const Map &>();
writeVarUInt(map.size(), ostr);
for (const auto & elem : map)
{

View File

@ -105,7 +105,7 @@ template <typename T>
void SerializationNumber<T>::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
/// ColumnVector<T>::ValueType is a narrower type. For example, UInt8, when the Field type is UInt64
typename ColumnVector<T>::ValueType x = get<FieldType>(field);
typename ColumnVector<T>::ValueType x = field.get<FieldType>();
writeBinary(x, ostr);
}

View File

@ -22,7 +22,7 @@ namespace DB
void SerializationString::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
const String & s = get<const String &>(field);
const String & s = field.get<const String &>();
writeVarUInt(s.size(), ostr);
writeString(s, ostr);
}
@ -33,7 +33,7 @@ void SerializationString::deserializeBinary(Field & field, ReadBuffer & istr) co
UInt64 size;
readVarUInt(size, istr);
field = String();
String & s = get<String &>(field);
String & s = field.get<String &>();
s.resize(size);
istr.readStrict(s.data(), size);
}

View File

@ -31,7 +31,7 @@ static inline const IColumn & extractElementColumn(const IColumn & column, size_
void SerializationTuple::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
const auto & tuple = get<const Tuple &>(field);
const auto & tuple = field.get<const Tuple &>();
for (size_t element_index = 0; element_index < elems.size(); ++element_index)
{
const auto & serialization = elems[element_index];
@ -44,7 +44,7 @@ void SerializationTuple::deserializeBinary(Field & field, ReadBuffer & istr) con
const size_t size = elems.size();
field = Tuple();
Tuple & tuple = get<Tuple &>(field);
Tuple & tuple = field.get<Tuple &>();
tuple.reserve(size);
for (size_t i = 0; i < size; ++i)
elems[i]->deserializeBinary(tuple.emplace_back(), istr);

View File

@ -84,7 +84,7 @@ void SerializationUUID::deserializeTextCSV(IColumn & column, ReadBuffer & istr,
void SerializationUUID::serializeBinary(const Field & field, WriteBuffer & ostr) const
{
UUID x = get<UUID>(field);
UUID x = field.get<UUID>();
writeBinary(x, ostr);
}

View File

@ -811,7 +811,7 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
/// Also we have to commit metadata transaction, because it's not committed by default for inner tables of MVs.
/// Yep, I hate inner tables of materialized views.
auto mv_drop_inner_table_context = make_query_context();
table->dropInnerTableIfAny(sync, mv_drop_inner_table_context);
table->dropInnerTableIfAny(/* sync */ true, mv_drop_inner_table_context);
mv_drop_inner_table_context->getZooKeeperMetadataTransaction()->commit();
}

View File

@ -633,7 +633,7 @@ static void writeFieldsToColumn(
{
for (size_t index = 0; index < rows_data.size(); ++index)
{
const Tuple & row_data = DB::get<const Tuple &>(rows_data[index]);
const Tuple & row_data = rows_data[index].get<const Tuple &>();
const Field & value = row_data[column_index];
if (write_data_to_null_map(value, index))
@ -673,7 +673,7 @@ static void writeFieldsToColumn(
{
for (size_t index = 0; index < rows_data.size(); ++index)
{
const Tuple & row_data = DB::get<const Tuple &>(rows_data[index]);
const Tuple & row_data = rows_data[index].get<const Tuple &>();
const Field & value = row_data[column_index];
if (write_data_to_null_map(value, index))
@ -695,7 +695,7 @@ static void writeFieldsToColumn(
{
for (size_t index = 0; index < rows_data.size(); ++index)
{
const Tuple & row_data = DB::get<const Tuple &>(rows_data[index]);
const Tuple & row_data = rows_data[index].get<const Tuple &>();
const Field & value = row_data[column_index];
if (write_data_to_null_map(value, index))
@ -709,7 +709,7 @@ static void writeFieldsToColumn(
{
for (size_t index = 0; index < rows_data.size(); ++index)
{
const Tuple & row_data = DB::get<const Tuple &>(rows_data[index]);
const Tuple & row_data = rows_data[index].get<const Tuple &>();
const Field & value = row_data[column_index];
if (write_data_to_null_map(value, index))
@ -761,7 +761,7 @@ static inline size_t onUpdateData(const Row & rows_data, Block & buffer, size_t
{
writeable_rows_mask[index + 1] = true;
writeable_rows_mask[index] = differenceSortingKeys(
DB::get<const Tuple &>(rows_data[index]), DB::get<const Tuple &>(rows_data[index + 1]), sorting_columns_index);
rows_data[index].get<const Tuple &>(), rows_data[index + 1].get<const Tuple &>(), sorting_columns_index);
}
for (size_t column = 0; column < buffer.columns() - 2; ++column)

View File

@ -232,7 +232,7 @@ QueryPipeline MongoDBDictionarySource::loadKeys(const Columns & key_columns, con
}
case AttributeUnderlyingType::String:
{
String loaded_str(get<String>((*key_columns[attribute_index])[row_idx]));
String loaded_str((*key_columns[attribute_index])[row_idx].get<String>());
/// Convert string to ObjectID
if (key_attribute.is_object_id)
{
@ -259,7 +259,7 @@ QueryPipeline MongoDBDictionarySource::loadKeys(const Columns & key_columns, con
std::string MongoDBDictionarySource::toString() const
{
return "MongoDB: " + db + '.' + collection + ',' + (user.empty() ? " " : " " + user + '@') + host + ':' + DB::toString(port);
return fmt::format("MongoDB: {}.{},{}{}:{}", db, collection, (user.empty() ? " " : " " + user + '@'), host, port);
}
}
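The `toString()` change swaps chained `operator+` concatenation for a single `fmt::format` call over the same five fields. A standalone sketch of the same construction (illustrative free function, not the repo's method):

```cpp
#include <fmt/format.h>
#include <string>

// One format string, five fields, same shape as the new toString().
std::string describeMongoSource(const std::string & db, const std::string & collection,
                                const std::string & user, const std::string & host, unsigned port)
{
    return fmt::format("MongoDB: {}.{},{}{}:{}",
                       db, collection, user.empty() ? " " : " " + user + '@', host, port);
}
```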

View File

@ -220,7 +220,7 @@ namespace DB
if (isInteger(type))
key << DB::toString(key_columns[i]->get64(row));
else if (isString(type))
key << get<const String &>((*key_columns[i])[row]);
key << (*key_columns[i])[row].get<const String &>();
else
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected type of key in Redis dictionary");
}

View File

@ -476,10 +476,10 @@ ColumnPtr FunctionArrayElement::executeNumberConst(
auto col_res = ColumnVector<DataType>::create();
if (index.getType() == Field::Types::UInt64
|| (index.getType() == Field::Types::Int64 && get<Int64>(index) >= 0))
|| (index.getType() == Field::Types::Int64 && index.get<Int64>() >= 0))
{
ArrayElementNumImpl<DataType>::template vectorConst<false>(
col_nested->getData(), col_array->getOffsets(), get<UInt64>(index) - 1, col_res->getData(), builder);
col_nested->getData(), col_array->getOffsets(), index.get<UInt64>() - 1, col_res->getData(), builder);
}
else if (index.getType() == Field::Types::Int64)
{
@ -493,7 +493,7 @@ ColumnPtr FunctionArrayElement::executeNumberConst(
/// arr[-2] is the element at offset 1 from the last and so on.
ArrayElementNumImpl<DataType>::template vectorConst<true>(
col_nested->getData(), col_array->getOffsets(), -(static_cast<UInt64>(safeGet<Int64>(index)) + 1), col_res->getData(), builder);
col_nested->getData(), col_array->getOffsets(), -(static_cast<UInt64>(index.safeGet<Int64>()) + 1), col_res->getData(), builder);
}
else
throw Exception("Illegal type of array index", ErrorCodes::LOGICAL_ERROR);
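The comment above pins down the convention these branches implement: indexes are 1-based from the front, and a negative index `i` addresses the element at offset `-(i + 1)` from the last one. A tiny standalone check of that arithmetic on a plain `std::vector` (bounds checks omitted):

```cpp
#include <cstdint>
#include <vector>

// arr[1] is the first element, arr[-1] the last, arr[-2] the one before it.
int64_t elementAt(const std::vector<int64_t> & arr, int64_t index)
{
    if (index > 0)
        return arr[static_cast<std::size_t>(index - 1)];               // 1-based from the front
    std::size_t offset_from_last = static_cast<std::size_t>(-(index + 1));  // -1 -> 0, -2 -> 1, ...
    return arr[arr.size() - 1 - offset_from_last];
}
```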
@ -539,12 +539,12 @@ FunctionArrayElement::executeStringConst(const ColumnsWithTypeAndName & argument
auto col_res = ColumnString::create();
if (index.getType() == Field::Types::UInt64
|| (index.getType() == Field::Types::Int64 && get<Int64>(index) >= 0))
|| (index.getType() == Field::Types::Int64 && index.get<Int64>() >= 0))
ArrayElementStringImpl::vectorConst<false>(
col_nested->getChars(),
col_array->getOffsets(),
col_nested->getOffsets(),
get<UInt64>(index) - 1,
index.get<UInt64>() - 1,
col_res->getChars(),
col_res->getOffsets(),
builder);
@ -553,7 +553,7 @@ FunctionArrayElement::executeStringConst(const ColumnsWithTypeAndName & argument
col_nested->getChars(),
col_array->getOffsets(),
col_nested->getOffsets(),
-(UInt64(get<Int64>(index)) + 1),
-(UInt64(index.get<Int64>()) + 1),
col_res->getChars(),
col_res->getOffsets(),
builder);
@ -603,12 +603,12 @@ ColumnPtr FunctionArrayElement::executeGenericConst(
auto col_res = col_nested.cloneEmpty();
if (index.getType() == Field::Types::UInt64
|| (index.getType() == Field::Types::Int64 && get<Int64>(index) >= 0))
|| (index.getType() == Field::Types::Int64 && index.get<Int64>() >= 0))
ArrayElementGenericImpl::vectorConst<false>(
col_nested, col_array->getOffsets(), get<UInt64>(index) - 1, *col_res, builder);
col_nested, col_array->getOffsets(), index.get<UInt64>() - 1, *col_res, builder);
else if (index.getType() == Field::Types::Int64)
ArrayElementGenericImpl::vectorConst<true>(
col_nested, col_array->getOffsets(), -(static_cast<UInt64>(get<Int64>(index) + 1)), *col_res, builder);
col_nested, col_array->getOffsets(), -(static_cast<UInt64>(index.get<Int64>() + 1)), *col_res, builder);
else
throw Exception("Illegal type of array index", ErrorCodes::LOGICAL_ERROR);
@ -877,7 +877,7 @@ bool FunctionArrayElement::matchKeyToIndexStringConst(
using DataColumn = std::decay_t<decltype(data_column)>;
if (index.getType() != Field::Types::String)
return false;
MatcherStringConst<DataColumn> matcher{data_column, get<const String &>(index)};
MatcherStringConst<DataColumn> matcher{data_column, index.get<const String &>()};
executeMatchKeyToIndex(offsets, matched_idxs, matcher);
return true;
});

View File

@ -114,7 +114,7 @@ static Block createBlockFromCollection(const Collection & collection, const Data
throw Exception("Invalid type in set. Expected tuple, got "
+ String(value.getTypeName()), ErrorCodes::INCORRECT_ELEMENT_OF_SET);
const auto & tuple = DB::get<const Tuple &>(value);
const auto & tuple = value.template get<const Tuple &>();
size_t tuple_size = tuple.size();
if (tuple_size != columns_num)
@ -306,9 +306,9 @@ Block createBlockForSet(
{
auto type_index = right_arg_type->getTypeId();
if (type_index == TypeIndex::Tuple)
block = createBlockFromCollection(DB::get<const Tuple &>(right_arg_value), set_element_types, tranform_null_in);
block = createBlockFromCollection(right_arg_value.get<const Tuple &>(), set_element_types, tranform_null_in);
else if (type_index == TypeIndex::Array)
block = createBlockFromCollection(DB::get<const Array &>(right_arg_value), set_element_types, tranform_null_in);
block = createBlockFromCollection(right_arg_value.get<const Array &>(), set_element_types, tranform_null_in);
else
throw_unsupported_type(right_arg_type);
}

View File

@ -58,8 +58,6 @@ public:
/// Add limits from external query.
void addStorageLimits(const StorageLimitsList & limits);
ContextPtr getContext() const { return context; }
protected:
ASTPtr query_ptr;
ContextMutablePtr context;

View File

@ -316,7 +316,7 @@ QueryPipeline InterpreterExplainQuery::executeImpl()
interpreter.buildQueryPlan(plan);
if (settings.optimize)
plan.optimize(QueryPlanOptimizationSettings::fromContext(interpreter.getContext()));
plan.optimize(QueryPlanOptimizationSettings::fromContext(getContext()));
if (settings.json)
{
@ -326,7 +326,7 @@ QueryPipeline InterpreterExplainQuery::executeImpl()
auto plan_array = std::make_unique<JSONBuilder::JSONArray>();
plan_array->add(std::move(plan_map));
auto format_settings = getFormatSettings(interpreter.getContext());
auto format_settings = getFormatSettings(getContext());
format_settings.json.quote_64bit_integers = false;
JSONBuilder::FormatSettings json_format_settings{.settings = format_settings};

View File

@ -327,7 +327,7 @@ BlockIO InterpreterKillQueryQuery::execute()
for (size_t i = 0; i < moves_block.rows(); ++i)
{
table_id = StorageID{database_col.getDataAt(i).toString(), table_col.getDataAt(i).toString()};
auto task_uuid = get<UUID>(task_uuid_col[i]);
auto task_uuid = task_uuid_col[i].get<UUID>();
CancellationCode code = CancellationCode::Unknown;

View File

@ -111,11 +111,11 @@ void RewriteFunctionToSubcolumnData::visit(ASTFunction & function, ASTPtr & ast)
if (value_type == Field::Types::UInt64)
{
const auto & type_tuple = assert_cast<const DataTypeTuple &>(*column_type);
auto index = get<UInt64>(literal->value);
auto index = literal->value.get<UInt64>();
subcolumn_name = type_tuple.getNameByPosition(index);
}
else if (value_type == Field::Types::String)
subcolumn_name = get<const String &>(literal->value);
subcolumn_name = literal->value.get<const String &>();
else
return;

View File

@ -29,7 +29,7 @@ TEST(ComparisonGraph, Bounds)
const auto & [lower, strict] = *res;
ASSERT_EQ(get<UInt64>(lower), 3);
ASSERT_EQ(lower.get<UInt64>(), 3);
ASSERT_TRUE(strict);
}
@ -39,7 +39,7 @@ TEST(ComparisonGraph, Bounds)
const auto & [upper, strict] = *res;
ASSERT_EQ(get<UInt64>(upper), 7);
ASSERT_EQ(upper.get<UInt64>(), 7);
ASSERT_TRUE(strict);
}

View File

@ -177,12 +177,11 @@ bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
tryGetIdentifierNameInto(database, query->from);
if (like)
query->like = safeGet<const String &>(like->as<ASTLiteral &>().value);
query->like = like->as<ASTLiteral &>().value.safeGet<const String &>();
node = query;
return true;
}
}

View File

@ -63,7 +63,7 @@ inline const auto & getOptimizations()
{tryMergeExpressions, "mergeExpressions", &QueryPlanOptimizationSettings::optimize_plan},
{tryPushDownFilter, "pushDownFilter", &QueryPlanOptimizationSettings::filter_push_down},
{tryExecuteFunctionsAfterSorting, "liftUpFunctions", &QueryPlanOptimizationSettings::optimize_plan},
{tryReuseStorageOrderingForWindowFunctions, "reuseStorageOrderingForWindowFunctions", &QueryPlanOptimizationSettings::optimize_read_in_window_order}
{tryReuseStorageOrderingForWindowFunctions, "reuseStorageOrderingForWindowFunctions", &QueryPlanOptimizationSettings::optimize_plan}
}};
return optimizations;

View File

@ -11,7 +11,6 @@ QueryPlanOptimizationSettings QueryPlanOptimizationSettings::fromSettings(const
settings.optimize_plan = from.query_plan_enable_optimizations;
settings.max_optimizations_to_apply = from.query_plan_max_optimizations_to_apply;
settings.filter_push_down = from.query_plan_filter_push_down;
settings.optimize_read_in_window_order = from.optimize_read_in_window_order;
return settings;
}

View File

@ -21,9 +21,6 @@ struct QueryPlanOptimizationSettings
/// If filter push down optimization is enabled.
bool filter_push_down = true;
/// window functions read in order optimization
bool optimize_read_in_window_order = true;
static QueryPlanOptimizationSettings fromSettings(const Settings & from);
static QueryPlanOptimizationSettings fromContext(ContextPtr from);
};

View File

@ -61,7 +61,12 @@ size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node,
return 0;
}
const auto context = read_from_merge_tree->getContext();
auto context = read_from_merge_tree->getContext();
if (!context->getSettings().optimize_read_in_window_order)
{
return 0;
}
const auto & query_info = read_from_merge_tree->getQueryInfo();
const auto * select_query = query_info.query->as<ASTSelectQuery>();

View File

@ -40,7 +40,7 @@ static FillColumnDescription::StepFunction getStepFunction(
{
#define DECLARE_CASE(NAME) \
case IntervalKind::NAME: \
return [step, scale, &date_lut](Field & field) { field = Add##NAME##sImpl::execute(get<T>(field), step, date_lut, scale); };
return [step, scale, &date_lut](Field & field) { field = Add##NAME##sImpl::execute(static_cast<T>(field.get<T>()), step, date_lut, scale); };
FOR_EACH_INTERVAL_KIND(DECLARE_CASE)
#undef DECLARE_CASE
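The `static_cast<T>` added in this macro makes the narrowing explicit: `Field::get` hands the value back in its wide storage type (e.g. Int64/UInt64), while the column's value type `T` can be as narrow as UInt16 for Date, and the step function needs the narrowed value. A toy sketch of that narrowing, with illustrative names:

```cpp
#include <cstdint>

// Field stores dates as a wide integer; the column works in a narrow one.
// The explicit cast mirrors what the DECLARE_CASE change does before handing
// the value to the Add*Impl step function.
uint16_t applyDayStep(uint64_t field_value, int64_t step_days)
{
    auto day_num = static_cast<uint16_t>(field_value);                      // explicit narrowing
    return static_cast<uint16_t>(static_cast<int64_t>(day_num) + step_days);
}
```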
@ -106,21 +106,21 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr &
{
if (which.isDate() || which.isDate32())
{
Int64 avg_seconds = get<Int64>(descr.fill_step) * descr.step_kind->toAvgSeconds();
Int64 avg_seconds = descr.fill_step.get<Int64>() * descr.step_kind->toAvgSeconds();
if (std::abs(avg_seconds) < 86400)
throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION,
"Value of step is to low ({} seconds). Must be >= 1 day", std::abs(avg_seconds));
}
if (which.isDate())
descr.step_func = getStepFunction<UInt16>(*descr.step_kind, get<Int64>(descr.fill_step), DateLUT::instance());
descr.step_func = getStepFunction<UInt16>(*descr.step_kind, descr.fill_step.get<Int64>(), DateLUT::instance());
else if (which.isDate32())
descr.step_func = getStepFunction<Int32>(*descr.step_kind, get<Int64>(descr.fill_step), DateLUT::instance());
descr.step_func = getStepFunction<Int32>(*descr.step_kind, descr.fill_step.get<Int64>(), DateLUT::instance());
else if (const auto * date_time = checkAndGetDataType<DataTypeDateTime>(type.get()))
descr.step_func = getStepFunction<UInt32>(*descr.step_kind, get<Int64>(descr.fill_step), date_time->getTimeZone());
descr.step_func = getStepFunction<UInt32>(*descr.step_kind, descr.fill_step.get<Int64>(), date_time->getTimeZone());
else if (const auto * date_time64 = checkAndGetDataType<DataTypeDateTime64>(type.get()))
{
const auto & step_dec = get<const DecimalField<Decimal64> &>(descr.fill_step);
const auto & step_dec = descr.fill_step.get<const DecimalField<Decimal64> &>();
Int64 step = DecimalUtils::convertTo<Int64>(step_dec.getValue(), step_dec.getScale());
switch (*descr.step_kind)
@ -129,7 +129,7 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr &
case IntervalKind::NAME: \
descr.step_func = [step, &time_zone = date_time64->getTimeZone()](Field & field) \
{ \
auto field_decimal = get<DecimalField<DateTime64>>(field); \
auto field_decimal = field.get<DecimalField<DateTime64>>(); \
auto res = Add##NAME##sImpl::execute(field_decimal.getValue(), step, time_zone, field_decimal.getScale()); \
field = DecimalField(res, field_decimal.getScale()); \
}; \

View File

@ -582,7 +582,7 @@ Pipe StorageLiveView::watch(
if (query.limit_length)
{
has_limit = true;
limit = safeGet<UInt64>(typeid_cast<ASTLiteral &>(*query.limit_length).value);
limit = typeid_cast<ASTLiteral &>(*query.limit_length).value.safeGet<UInt64>();
}
if (query.is_watch_events)

View File

@ -613,7 +613,7 @@ bool MergeTreeIndexConditionBloomFilter::traverseASTEquals(
if (which.isTuple() && function->name == "tuple")
{
const Tuple & tuple = get<const Tuple &>(value_field);
const Tuple & tuple = value_field.get<const Tuple &>();
const auto * value_tuple_data_type = typeid_cast<const DataTypeTuple *>(value_type.get());
const ASTs & arguments = typeid_cast<const ASTExpressionList &>(*function->arguments).children;

View File

@ -333,7 +333,7 @@ static StoragePtr create(const StorageFactory::Arguments & args)
/// Get path and name from engine arguments
ast_zk_path = engine_args[arg_num]->as<ASTLiteral>();
if (ast_zk_path && ast_zk_path->value.getType() == Field::Types::String)
zookeeper_path = safeGet<String>(ast_zk_path->value);
zookeeper_path = ast_zk_path->value.safeGet<String>();
else
throw Exception(
"Path in ZooKeeper must be a string literal" + getMergeTreeVerboseHelp(is_extended_storage_def),
@ -342,7 +342,7 @@ static StoragePtr create(const StorageFactory::Arguments & args)
ast_replica_name = engine_args[arg_num]->as<ASTLiteral>();
if (ast_replica_name && ast_replica_name->value.getType() == Field::Types::String)
replica_name = safeGet<String>(ast_replica_name->value);
replica_name = ast_replica_name->value.safeGet<String>();
else
throw Exception(
"Replica name must be a string literal" + getMergeTreeVerboseHelp(is_extended_storage_def), ErrorCodes::BAD_ARGUMENTS);
@ -654,7 +654,7 @@ static StoragePtr create(const StorageFactory::Arguments & args)
const auto * ast = engine_args[arg_num]->as<ASTLiteral>();
if (ast && ast->value.getType() == Field::Types::UInt64)
storage_settings->index_granularity = safeGet<UInt64>(ast->value);
storage_settings->index_granularity = ast->value.safeGet<UInt64>();
else
throw Exception(
"Index granularity must be a positive integer" + getMergeTreeVerboseHelp(is_extended_storage_def),

View File

@ -1127,7 +1127,7 @@ Pipe StorageWindowView::watch(
if (query.limit_length)
{
has_limit = true;
limit = safeGet<UInt64>(typeid_cast<ASTLiteral &>(*query.limit_length).value);
limit = typeid_cast<ASTLiteral &>(*query.limit_length).value.safeGet<UInt64>();
}
auto reader = std::make_shared<WindowViewSource>(

View File

@ -196,8 +196,8 @@ ColumnsDescriptionByShardNum getExtendedObjectsOfRemoteTables(
size_t size = name_col.size();
for (size_t i = 0; i < size; ++i)
{
auto name = get<const String &>(name_col[i]);
auto type_name = get<const String &>(type_col[i]);
auto name = name_col[i].get<const String &>();
auto type_name = type_col[i].get<const String &>();
auto storage_column = storage_columns.tryGetPhysical(name);
if (storage_column && isObject(storage_column->type))

View File

@ -107,7 +107,7 @@ void TableFunctionRemote::parseArguments(const ASTPtr & ast_function, ContextPtr
if (lit->value.getType() != Field::Types::String)
return false;
res = safeGet<const String &>(lit->value);
res = lit->value.safeGet<const String &>();
return true;
};

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<synonyms_extensions>
<extension>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<!-- Config for test server -->
<clickhouse>
<query_masking_rules>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<profiles>
<!-- Profile that allows only read queries. -->

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<!-- User and profile to be used in session_log tests, to make sure that the list of the user's profiles is logged correctly -->
<clickhouse>
<profiles>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<dictionaries_config>/etc/clickhouse-server/dictionaries/*.xml</dictionaries_config>
</clickhouse>

View File

@ -792,7 +792,9 @@ class ClickHouseCluster:
binary_dir = os.path.dirname(self.server_bin_path)
# always prefer clickhouse-keeper standalone binary
if os.path.exists(os.path.join(binary_dir, "clickhouse-keeper")):
if os.path.exists(
os.path.join(binary_dir, "clickhouse-keeper")
) and not os.path.islink(os.path.join(binary_dir, "clickhouse-keeper")):
binary_path = os.path.join(binary_dir, "clickhouse-keeper")
keeper_cmd_prefix = "clickhouse-keeper"
else:

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<users>
<default>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<profiles>
<default>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<max_concurrent_queries>10000</max_concurrent_queries>
</clickhouse>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<storage_configuration>
<disks>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<storage_configuration>
<disks>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<backups>
<on_cluster_first_sync_timeout>1000</on_cluster_first_sync_timeout>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<remote_servers>
<cluster0>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<remote_servers>
<source_trivial_cluster>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<profiles>
<default>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<remote_servers>
<events>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<distributed_ddl>
<path>/clickhouse/task_queue/ddl</path>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<logger>
<level>information</level>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<profiles>
<default>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<remote_servers>
<source>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<distributed_ddl>
<path>/clickhouse/task_queue/ddl</path>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<logger>
<level>information</level>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<profiles>
<default>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<!-- How many simultaneous workers are possible -->
<max_workers>3</max_workers>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<remote_servers>
<source>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<!-- How many simultaneous workers are possible -->
<max_workers>4</max_workers>

View File

@ -1,4 +1,3 @@
<?xml version="1.0"?>
<clickhouse>
<tcp_port_secure>9440</tcp_port_secure>
<remote_servers>

Some files were not shown because too many files have changed in this diff.