Merge remote-tracking branch 'blessed/master' into clang18

Raúl Marín 2023-08-24 17:35:26 +02:00
commit 6490213207
236 changed files with 4102 additions and 1242 deletions

contrib/arrow vendored

@@ -1 +1 @@
Subproject commit 1f1b3d35fb6eb73e6492d3afd8a85cde848d174f
Subproject commit 1d93838f69a802639ca144ea5704a98e2481810d

contrib/boost vendored

@@ -1 +1 @@
Subproject commit 063a9372b4ae304e869a5c5724971d0501552731
Subproject commit a01ddc144c130777d7c6727a3fc5d5cdbae016d6

View File

@@ -20,6 +20,7 @@ echo '/boost/context/*' >> $FILES_TO_CHECKOUT
echo '/boost/convert/*' >> $FILES_TO_CHECKOUT
echo '/boost/coroutine/*' >> $FILES_TO_CHECKOUT
echo '/boost/core/*' >> $FILES_TO_CHECKOUT
echo '/boost/describe/*' >> $FILES_TO_CHECKOUT
echo '/boost/detail/*' >> $FILES_TO_CHECKOUT
echo '/boost/dynamic_bitset/*' >> $FILES_TO_CHECKOUT
echo '/boost/exception/*' >> $FILES_TO_CHECKOUT

View File

@@ -90,34 +90,117 @@ Process 1 stopped
## Visual Studio Code integration
- [CodeLLDB extension](https://github.com/vadimcn/vscode-lldb) is required for visual debugging, and the [Command Variable](https://github.com/rioj7/command-variable) extension can help with dynamic launches if using [cmake variants](https://github.com/microsoft/vscode-cmake-tools/blob/main/docs/variants.md).
- Make sure to set the backend to your llvm installation, e.g. `"lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"`
- Launcher:
- [CodeLLDB](https://github.com/vadimcn/vscode-lldb) extension is required for visual debugging.
- [Command Variable](https://github.com/rioj7/command-variable) extension can help with dynamic launches if using [CMake Variants](https://github.com/microsoft/vscode-cmake-tools/blob/main/docs/variants.md).
- Make sure to set the backend to your LLVM installation, e.g. `"lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"`
- Make sure to run the clickhouse executable in debug mode prior to launch. (It is also possible to create a `preLaunchTask` that automates this.)
### Example configurations
#### cmake-variants.yaml
```yaml
buildType:
  default: relwithdebinfo
  choices:
    debug:
      short: Debug
      long: Emit debug information
      buildType: Debug
    release:
      short: Release
      long: Optimize generated code
      buildType: Release
    relwithdebinfo:
      short: RelWithDebInfo
      long: Release with Debug Info
      buildType: RelWithDebInfo
    minsizerel:
      short: MinSizeRel
      long: Minimum Size Release
      buildType: MinSizeRel
toolchain:
  default: default
  description: Select toolchain
  choices:
    default:
      short: x86_64
      long: x86_64
    s390x:
      short: s390x
      long: s390x
      settings:
        CMAKE_TOOLCHAIN_FILE: cmake/linux/toolchain-s390x.cmake
```
#### launch.json
```json
{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Debug",
            "type": "lldb",
            "request": "custom",
            "targetCreateCommands": ["target create ${command:cmake.launchTargetDirectory}/clickhouse"],
            "processCreateCommands": ["settings set target.source-map ${input:targetdir} ${workspaceFolder}", "gdb-remote 31338"],
            "sourceMap": { "${input:targetdir}": "${workspaceFolder}" }
        },
        {
            "name": "(lldb) Launch s390x with qemu",
            "type": "lldb",
            "request": "custom",
            "targetCreateCommands": ["target create ${command:cmake.launchTargetPath}"],
            "processCreateCommands": ["gdb-remote 2159"],
            "preLaunchTask": "Run ClickHouse"
        }
    ],
    "inputs": [
        {
            "id": "targetdir",
            "type": "command",
            "command": "extension.commandvariable.transform",
            "args": {
                "text": "${command:cmake.launchTargetDirectory}",
                "find": ".*/([^/]+)/[^/]+$",
                "replace": "$1"
            }
        }
    ]
}
```
#### settings.json
This would also put different builds under different subfolders of the `build` folder.
```json
{
    "cmake.buildDirectory": "${workspaceFolder}/build/${buildKitVendor}-${buildKitVersion}-${variant:toolchain}-${variant:buildType}",
    "lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"
}
```
#### run-debug.sh
```sh
#!/bin/sh
echo 'Starting debugger session'
cd "$1"
qemu-s390x-static -g 2159 -L /usr/s390x-linux-gnu "$2" "$3" "$4"
```
#### tasks.json
Defines a task to run the compiled executable in `server` mode under a `tmp` folder next to the binaries, using the configuration from `programs/server/config.xml`.
```json
{
    "version": "2.0.0",
    "tasks": [
        {
            "label": "Run ClickHouse",
            "type": "shell",
            "isBackground": true,
            "command": "${workspaceFolder}/.vscode/run-debug.sh",
            "args": [
                "${command:cmake.launchTargetDirectory}/tmp",
                "${command:cmake.launchTargetPath}",
                "server",
                "--config-file=${workspaceFolder}/programs/server/config.xml"
            ],
            "problemMatcher": [
                {
                    "pattern": [
                        {
                            "regexp": ".",
                            "file": 1,
                            "location": 2,
                            "message": 3
                        }
                    ],
                    "background": {
                        "activeOnStart": true,
                        "beginsPattern": "^Starting debugger session",
                        "endsPattern": ".*"
                    }
                }
            ]
        }
    ]
}
```

View File

@@ -219,6 +219,10 @@ LIMIT N
SETTINGS annoy_index_search_k_nodes=100;
```
:::note
The Annoy index currently does not work with per-table, non-default `index_granularity` settings (see
[here](https://github.com/ClickHouse/ClickHouse/pull/51325#issuecomment-1605920475)). If necessary, the value must be changed in `config.xml`.
:::
## USearch {#usearch}
This type of ANN index is based on [the USearch library](https://github.com/unum-cloud/usearch), which implements the [HNSW

View File

@@ -217,6 +217,14 @@ Type: UInt32
Default: 1024
## index_mark_cache_policy
Index mark cache policy name.
Type: String
Default: SLRU
## index_mark_cache_size
Size of cache for index marks. Zero means disabled.
@@ -229,6 +237,21 @@ Type: UInt64
Default: 0
## index_mark_cache_size_ratio
The size of the protected queue in the index mark cache relative to the cache's total size.
Type: Double
Default: 0.5
## index_uncompressed_cache_policy
Index uncompressed cache policy name.
Type: String
Default: SLRU
## index_uncompressed_cache_size
@@ -242,6 +265,13 @@ Type: UInt64
Default: 0
## index_uncompressed_cache_size_ratio
The size of the protected queue in the index uncompressed cache relative to the cache's total size.
Type: Double
Default: 0.5
## io_thread_pool_queue_size
@@ -271,6 +301,14 @@ Type: UInt64
Default: 5368709120
## mark_cache_size_ratio
The size of the protected queue in the mark cache relative to the cache's total size.
Type: Double
Default: 0.5
## max_backup_bandwidth_for_server
The maximum read speed in bytes per second for all backups on the server. Zero means unlimited.
@@ -629,6 +667,14 @@ Type: UInt64
Default: 0
## uncompressed_cache_size_ratio
The size of the protected queue in the uncompressed cache relative to the cache's total size.
Type: Double
Default: 0.5
## builtin_dictionaries_reload_interval {#builtin-dictionaries-reload-interval}
The interval in seconds before reloading built-in dictionaries.

View File

@@ -2383,6 +2383,17 @@ See also:
- [optimize_functions_to_subcolumns](#optimize-functions-to-subcolumns)
## optimize_count_from_files {#optimize_count_from_files}
Enables or disables the optimization of counting the number of rows from files in different input formats. It applies to the table functions/engines `file`/`s3`/`url`/`hdfs`/`azureBlobStorage`.
Possible values:
- 0 — Optimization disabled.
- 1 — Optimization enabled.
Default value: `1`.
## distributed_replica_error_half_life {#settings-distributed_replica_error_half_life}
- Type: seconds

View File

@@ -815,16 +815,16 @@ Aliases: `dateDiff`, `DATE_DIFF`, `timestampDiff`, `timestamp_diff`, `TIMESTAMP_
- `unit` — The type of interval for the result. [String](../../sql-reference/data-types/string.md).
Possible values:
- `microsecond` (possible abbreviations: `microseconds`, `us`, `u`)
- `millisecond` (possible abbreviations: `milliseconds`, `ms`)
- `second` (possible abbreviations: `seconds`, `ss`, `s`)
- `minute` (possible abbreviations: `minutes`, `mi`, `n`)
- `hour` (possible abbreviations: `hours`, `hh`, `h`)
- `day` (possible abbreviations: `days`, `dd`, `d`)
- `week` (possible abbreviations: `weeks`, `wk`, `ww`)
- `month` (possible abbreviations: `months`, `mm`, `m`)
- `quarter` (possible abbreviations: `quarters`, `qq`, `q`)
- `year` (possible abbreviations: `years`, `yyyy`, `yy`)
- `microsecond` (possible abbreviations: `us`, `u`)
- `millisecond` (possible abbreviations: `ms`)
- `second` (possible abbreviations: `ss`, `s`)
- `minute` (possible abbreviations: `mi`, `n`)
- `hour` (possible abbreviations: `hh`, `h`)
- `day` (possible abbreviations: `dd`, `d`)
- `week` (possible abbreviations: `wk`, `ww`)
- `month` (possible abbreviations: `mm`, `m`)
- `quarter` (possible abbreviations: `qq`, `q`)
- `year` (possible abbreviations: `yyyy`, `yy`)
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

View File

@@ -1171,10 +1171,267 @@ The trace log in the ClickHouse server log file confirms that ClickHouse is working on the index
The bigger the cardinality difference between the key columns of the primary key is, the more the order of those columns in the key matters. We will demonstrate this in the next section.
# Ordering key columns efficiently
## Ordering key columns efficiently
TODO
<a name="test"></a>
# Identifying single rows efficiently
TODO
In a compound primary key, the order of the key columns significantly affects both:
- how efficiently queries can filter on secondary key columns, and
- the compression ratio of the table's data files.
To demonstrate this, we will use a version of our [web traffic sample data set](#数据集)
where each row contains three columns indicating whether the access by an internet user (`UserID` column) to a URL (`URL` column) was marked as bot traffic (`IsRobot` column).
We will use a compound primary key containing all three of the aforementioned columns, which can be used to speed up typical web analytics queries that compute:
- how much (what percentage of) traffic to a specific URL came from bots, or
- how confident we are that a specific user is (or is not) a bot (what percentage of the traffic from that user is (or is not) assumed to be bot traffic).
We use this query to compute the cardinalities of the three columns that we want to use as key columns in a compound primary key (note that we use the [URL table function](/docs/en/sql-reference/table-functions/url.md) to query TSV data ad hoc without having to create a local table). Run this query in `clickhouse client`:
```sql
SELECT
formatReadableQuantity(uniq(URL)) AS cardinality_URL,
formatReadableQuantity(uniq(UserID)) AS cardinality_UserID,
formatReadableQuantity(uniq(IsRobot)) AS cardinality_IsRobot
FROM
(
SELECT
c11::UInt64 AS UserID,
c15::String AS URL,
c20::UInt8 AS IsRobot
FROM url('https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz')
WHERE URL != ''
)
```
The response is:
```response
┌─cardinality_URL─┬─cardinality_UserID─┬─cardinality_IsRobot─┐
│ 2.39 million │ 119.08 thousand │ 4.00 │
└─────────────────┴────────────────────┴─────────────────────┘
1 row in set. Elapsed: 118.334 sec. Processed 8.87 million rows, 15.88 GB (74.99 thousand rows/s., 134.21 MB/s.)
```
We can see that there is a big difference between the cardinalities, especially between the `URL` and `IsRobot` columns, and therefore the order of these columns in a compound primary key matters both for efficiently speeding up queries that filter on those columns and for achieving optimal compression ratios for the table's column data files.
To demonstrate this, we create two versions of the table for our bot traffic analysis data:
- a table `hits_URL_UserID_IsRobot` with the compound primary key `(URL, UserID, IsRobot)`, where the key columns are ordered by cardinality in descending order
- a table `hits_IsRobot_UserID_URL` with the compound primary key `(IsRobot, UserID, URL)`, where the key columns are ordered by cardinality in ascending order
Create the table `hits_URL_UserID_IsRobot` with the compound primary key `(URL, UserID, IsRobot)`:
```sql
CREATE TABLE hits_URL_UserID_IsRobot
(
`UserID` UInt32,
`URL` String,
`IsRobot` UInt8
)
ENGINE = MergeTree
// highlight-next-line
PRIMARY KEY (URL, UserID, IsRobot);
```
And populate it with 8.87 million rows:
```sql
INSERT INTO hits_URL_UserID_IsRobot SELECT
intHash32(c11::UInt64) AS UserID,
c15 AS URL,
c20 AS IsRobot
FROM url('https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz')
WHERE URL != '';
```
The response is:
```response
0 rows in set. Elapsed: 104.729 sec. Processed 8.87 million rows, 15.88 GB (84.73 thousand rows/s., 151.64 MB/s.)
```
Next, create the table `hits_IsRobot_UserID_URL` with the compound primary key `(IsRobot, UserID, URL)`:
```sql
CREATE TABLE hits_IsRobot_UserID_URL
(
`UserID` UInt32,
`URL` String,
`IsRobot` UInt8
)
ENGINE = MergeTree
// highlight-next-line
PRIMARY KEY (IsRobot, UserID, URL);
```
And populate it with the same 8.87 million rows that we used for the previous table:
```sql
INSERT INTO hits_IsRobot_UserID_URL SELECT
intHash32(c11::UInt64) AS UserID,
c15 AS URL,
c20 AS IsRobot
FROM url('https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz')
WHERE URL != '';
```
The response is:
```response
0 rows in set. Elapsed: 95.959 sec. Processed 8.87 million rows, 15.88 GB (92.48 thousand rows/s., 165.50 MB/s.)
```
### Efficient filtering on secondary key columns
When a query filters on at least one column that is part of a compound key and is the first key column, [then ClickHouse runs the binary search algorithm over the key column's index marks](#主索引被用来选择颗粒).
When a query filters (only) on a column that is part of a compound key but is not the first key column, [then ClickHouse uses the generic exclusion search algorithm over the key column's index marks](#查询使用第二位主键的性能问题).
For the second case, the ordering of the key columns in the compound primary key is significant for the effectiveness of the [generic exclusion search algorithm](https://github.com/ClickHouse/ClickHouse/blob/22.3/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp#L1444).
This is a query that filters on the `UserID` column of the table where we ordered the key columns `(URL, UserID, IsRobot)` by cardinality in descending order:
```sql
SELECT count(*)
FROM hits_URL_UserID_IsRobot
WHERE UserID = 112304
```
The response is:
```response
┌─count()─┐
│ 73 │
└─────────┘
1 row in set. Elapsed: 0.026 sec.
// highlight-next-line
Processed 7.92 million rows,
31.67 MB (306.90 million rows/s., 1.23 GB/s.)
```
This is the same query on the table where we ordered the key columns `(IsRobot, UserID, URL)` by cardinality in ascending order:
```sql
SELECT count(*)
FROM hits_IsRobot_UserID_URL
WHERE UserID = 112304
```
The response is:
```response
┌─count()─┐
│ 73 │
└─────────┘
1 row in set. Elapsed: 0.003 sec.
// highlight-next-line
Processed 20.32 thousand rows,
81.28 KB (6.61 million rows/s., 26.44 MB/s.)
```
We can see that the query execution is significantly more effective and faster on the table where the key columns are ordered by cardinality in ascending order.
The reason for that is that the [generic exclusion search algorithm](https://github.com/ClickHouse/ClickHouse/blob/22.3/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp#L1444) works most effectively when [granules](#主索引被用来选择颗粒) are selected via a secondary key column whose predecessor key column has a lower cardinality. We illustrated that in detail in the [previous section](#generic-exclusion-search-algorithm) of this guide.
### Optimal compression ratio of data files
This query compares the compression ratios of the `UserID` column between the two tables that we created above:
```sql
SELECT
table AS Table,
name AS Column,
formatReadableSize(data_uncompressed_bytes) AS Uncompressed,
formatReadableSize(data_compressed_bytes) AS Compressed,
round(data_uncompressed_bytes / data_compressed_bytes, 0) AS Ratio
FROM system.columns
WHERE (table = 'hits_URL_UserID_IsRobot' OR table = 'hits_IsRobot_UserID_URL') AND (name = 'UserID')
ORDER BY Ratio ASC
```
This is the response:
```response
┌─Table───────────────────┬─Column─┬─Uncompressed─┬─Compressed─┬─Ratio─┐
│ hits_URL_UserID_IsRobot │ UserID │ 33.83 MiB │ 11.24 MiB │ 3 │
│ hits_IsRobot_UserID_URL │ UserID │ 33.83 MiB │ 877.47 KiB │ 39 │
└─────────────────────────┴────────┴──────────────┴────────────┴───────┘
2 rows in set. Elapsed: 0.006 sec.
```
We can see that the compression ratio for the `UserID` column is significantly higher for the table where we ordered the key columns `(IsRobot, UserID, URL)` by cardinality in ascending order.
Although both tables store exactly the same data (we inserted the same 8.87 million rows into both tables), the order of the key columns in the compound primary key has a significant influence on how much disk space the <a href="https://clickhouse.com/docs/en/introduction/distinctive-features/#data-compression" target="_blank">compressed</a> data in the table's [column data files](#数据按照主键排序存储在磁盘上) requires:
- in the table `hits_URL_UserID_IsRobot` with the compound primary key `(URL, UserID, IsRobot)`, where we order the key columns by cardinality in descending order, the `UserID.bin` data file takes **11.24 MiB** of disk space
- in the table `hits_IsRobot_UserID_URL` with the compound primary key `(IsRobot, UserID, URL)`, where we order the key columns by cardinality in ascending order, the `UserID.bin` data file takes only **877.47 KiB** of disk space
Having a good compression ratio for the data of a table's column on disk not only saves space on disk, it also makes queries (especially analytical ones) that need to read data from that column faster, as less i/o is required for moving the column's data from disk to main memory (the operating system's file cache).
In the following, we illustrate why it is beneficial for the compression ratio of a table's columns to order the primary key columns by cardinality in ascending order.
The diagram below sketches the on-disk order of rows for a primary key where the key columns are ordered by cardinality in ascending order:
<img src={require('../../../en/guides/best-practices/images/sparse-primary-indexes-14a.png').default} class="image"/>
We discussed that [the table's row data is stored on disk ordered by primary key columns](#数据按照主键排序存储在磁盘上).
In the diagram above, the table's rows (their column values on disk) are first ordered by their `cl` value, and rows with the same `cl` value are ordered by their `ch` value. Because the first key column `cl` has a low cardinality, it is likely that there are rows with the same `cl` value, and because of that it is also likely that the `ch` values are ordered (locally, for rows with the same `cl` value).
If, in a column, similar data is placed close to each other, for example via sorting, that data will compress better.
In general, a compression algorithm benefits from the run length of data (the more data it sees, the better the compression) and from locality (the more similar the data is, the better the compression ratio).
In contrast to the diagram above, the diagram below sketches the on-disk order of rows for a primary key where the key columns are ordered by cardinality in descending order:
<img src={require('../../../en/guides/best-practices/images/sparse-primary-indexes-14b.png').default} class="image"/>
Now the table's rows are first ordered by their `ch` value, and rows with the same `ch` value are ordered by their `cl` value.
But because the first key column `ch` has a high cardinality, it is unlikely that there are rows with the same `ch` value, and because of that it is also unlikely that the `cl` values are ordered (locally, for rows with the same `ch` value).
Therefore the `cl` values are most likely in random order, resulting in bad locality and a bad compression ratio.
### Summary
For both the efficient filtering on secondary key columns in queries and the compression ratio of a table's column data files, it is beneficial to order the columns in a primary key by their cardinality in ascending order.
### Related content
- Blog: [Super charging your ClickHouse queries](https://clickhouse.com/blog/clickhouse-faster-queries-with-projections-and-primary-indexes)
## Identifying single rows efficiently
Although it is generally [not](/knowledgebase/key-value) the best use case for ClickHouse,
applications built on top of ClickHouse sometimes need to identify single rows of a ClickHouse table.
An intuitive solution for that might be to use a [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier) column with a unique value per row, and to use that column as a primary key column for fast retrieval of rows.
For the fastest retrieval, the UUID column [would need to be the first key column](#主索引被用来选择颗粒).
We discussed that because [a ClickHouse table's row data is stored on disk ordered by primary key column(s)](#数据按照主键排序存储在磁盘上), having a very high cardinality column (like a UUID column) in a primary key or in a compound primary key before columns with lower cardinality [is detrimental to the compression ratio of other table columns](#数据文件的最佳压缩率).
A compromise between fastest retrieval and optimal data compression is to use a compound primary key where the UUID is the last key column, after low(er) cardinality key columns that are used to ensure a good compression ratio for some of the table's columns.
### A concrete example
One concrete example is the text paste service https://pastila.nl that Alexey Milovidov developed and [blogged about](https://clickhouse.com/blog/building-a-paste-service-with-clickhouse/).
On every change to the text area, the data is saved automatically into a ClickHouse table row (one row per change).
One way to identify and retrieve (a specific version of) the pasted content is to use a hash of the content as the UUID for the table row that contains the content.
The following diagram shows
- the insert order of rows when the content changes (for example because of keystrokes typing text into the text area), and
- the on-disk order of the data from the inserted rows when `PRIMARY KEY (hash)` is used:
<img src={require('../../../en/guides/best-practices/images/sparse-primary-indexes-15a.png').default} class="image"/>
Because the `hash` column is used as the primary key column,
- specific rows can be retrieved [very quickly](#主索引被用来选择颗粒), but
- the table's rows (their column data) are stored on disk ordered ascending by the (unique and random) hash values. Therefore the values of the content column are also stored in random order with no data locality, resulting in a **suboptimal compression ratio for the content column data file**.
In order to significantly improve the compression ratio for the content column while still achieving fast retrieval of specific rows, pastila.nl is using two hashes (and a compound primary key) to identify a specific row:
- a hash of the content that, as discussed above, is distinct for distinct data, and
- a [locality-sensitive hash (fingerprint)](https://en.wikipedia.org/wiki/Locality-sensitive_hashing) that does **not** change on small changes of data.
The following diagram shows
- the insert order of rows when the content changes (for example because of keystrokes typing text into the text area), and
- the on-disk order of the data from the inserted rows when the compound `PRIMARY KEY (fingerprint, hash)` is used:
<img src={require('../../../en/guides/best-practices/images/sparse-primary-indexes-15b.png').default} class="image"/>
Now the rows on disk are first ordered by `fingerprint`, and for rows with the same fingerprint value, their `hash` value determines the final order.
Because data that differs only in small changes gets the same fingerprint value, similar data is now stored on disk close to each other in the content column. This is very good for the compression ratio of the content column, as a compression algorithm in general benefits from data locality (the more similar the data is, the better the compression ratio).
The compromise is that two fields (`fingerprint` and `hash`) are required for retrieving a specific row in order to optimally utilize the primary index that results from the compound `PRIMARY KEY (fingerprint, hash)`.

View File

@@ -133,8 +133,6 @@ if (BUILD_STANDALONE_KEEPER)
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/S3Capabilities.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/diskSettings.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/ProxyListConfiguration.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/ObjectStorages/S3/ProxyResolverConfiguration.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/createReadBufferFromFileBase.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Disks/IO/ReadBufferFromRemoteFSGather.cpp

View File

@@ -657,21 +657,23 @@ void LocalServer::processConfig()
/// There is no need for concurrent queries, override max_concurrent_queries.
global_context->getProcessList().setMaxSize(0);
const size_t memory_amount = getMemoryAmount();
const size_t physical_server_memory = getMemoryAmount();
const double cache_size_to_ram_max_ratio = config().getDouble("cache_size_to_ram_max_ratio", 0.5);
const size_t max_cache_size = static_cast<size_t>(memory_amount * cache_size_to_ram_max_ratio);
const size_t max_cache_size = static_cast<size_t>(physical_server_memory * cache_size_to_ram_max_ratio);
String uncompressed_cache_policy = config().getString("uncompressed_cache_policy", DEFAULT_UNCOMPRESSED_CACHE_POLICY);
size_t uncompressed_cache_size = config().getUInt64("uncompressed_cache_size", DEFAULT_UNCOMPRESSED_CACHE_MAX_SIZE);
double uncompressed_cache_size_ratio = config().getDouble("uncompressed_cache_size_ratio", DEFAULT_UNCOMPRESSED_CACHE_SIZE_RATIO);
if (uncompressed_cache_size > max_cache_size)
{
uncompressed_cache_size = max_cache_size;
LOG_INFO(log, "Lowered uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
}
global_context->setUncompressedCache(uncompressed_cache_policy, uncompressed_cache_size);
global_context->setUncompressedCache(uncompressed_cache_policy, uncompressed_cache_size, uncompressed_cache_size_ratio);
String mark_cache_policy = config().getString("mark_cache_policy", DEFAULT_MARK_CACHE_POLICY);
size_t mark_cache_size = config().getUInt64("mark_cache_size", DEFAULT_MARK_CACHE_MAX_SIZE);
double mark_cache_size_ratio = config().getDouble("mark_cache_size_ratio", DEFAULT_MARK_CACHE_SIZE_RATIO);
if (!mark_cache_size)
LOG_ERROR(log, "Too low mark cache size will lead to severe performance degradation.");
if (mark_cache_size > max_cache_size)
@@ -679,23 +681,27 @@ void LocalServer::processConfig()
mark_cache_size = max_cache_size;
LOG_INFO(log, "Lowered mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mark_cache_size));
}
global_context->setMarkCache(mark_cache_policy, mark_cache_size);
global_context->setMarkCache(mark_cache_policy, mark_cache_size, mark_cache_size_ratio);
String index_uncompressed_cache_policy = config().getString("index_uncompressed_cache_policy", DEFAULT_INDEX_UNCOMPRESSED_CACHE_POLICY);
size_t index_uncompressed_cache_size = config().getUInt64("index_uncompressed_cache_size", DEFAULT_INDEX_UNCOMPRESSED_CACHE_MAX_SIZE);
double index_uncompressed_cache_size_ratio = config().getDouble("index_uncompressed_cache_size_ratio", DEFAULT_INDEX_UNCOMPRESSED_CACHE_SIZE_RATIO);
if (index_uncompressed_cache_size > max_cache_size)
{
index_uncompressed_cache_size = max_cache_size;
LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
}
global_context->setIndexUncompressedCache(index_uncompressed_cache_size);
global_context->setIndexUncompressedCache(index_uncompressed_cache_policy, index_uncompressed_cache_size, index_uncompressed_cache_size_ratio);
String index_mark_cache_policy = config().getString("index_mark_cache_policy", DEFAULT_INDEX_MARK_CACHE_POLICY);
size_t index_mark_cache_size = config().getUInt64("index_mark_cache_size", DEFAULT_INDEX_MARK_CACHE_MAX_SIZE);
double index_mark_cache_size_ratio = config().getDouble("index_mark_cache_size_ratio", DEFAULT_INDEX_MARK_CACHE_SIZE_RATIO);
if (index_mark_cache_size > max_cache_size)
{
index_mark_cache_size = max_cache_size;
LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
}
global_context->setIndexMarkCache(index_mark_cache_size);
global_context->setIndexMarkCache(index_mark_cache_policy, index_mark_cache_size, index_mark_cache_size_ratio);
size_t mmap_cache_size = config().getUInt64("mmap_cache_size", DEFAULT_MMAP_CACHE_MAX_SIZE);
if (mmap_cache_size > max_cache_size)

View File

@@ -1111,37 +1111,43 @@ try
String uncompressed_cache_policy = server_settings.uncompressed_cache_policy;
size_t uncompressed_cache_size = server_settings.uncompressed_cache_size;
double uncompressed_cache_size_ratio = server_settings.uncompressed_cache_size_ratio;
if (uncompressed_cache_size > max_cache_size)
{
uncompressed_cache_size = max_cache_size;
LOG_INFO(log, "Lowered uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
}
global_context->setUncompressedCache(uncompressed_cache_policy, uncompressed_cache_size);
global_context->setUncompressedCache(uncompressed_cache_policy, uncompressed_cache_size, uncompressed_cache_size_ratio);
String mark_cache_policy = server_settings.mark_cache_policy;
size_t mark_cache_size = server_settings.mark_cache_size;
double mark_cache_size_ratio = server_settings.mark_cache_size_ratio;
if (mark_cache_size > max_cache_size)
{
mark_cache_size = max_cache_size;
LOG_INFO(log, "Lowered mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(mark_cache_size));
}
global_context->setMarkCache(mark_cache_policy, mark_cache_size);
global_context->setMarkCache(mark_cache_policy, mark_cache_size, mark_cache_size_ratio);
String index_uncompressed_cache_policy = server_settings.index_uncompressed_cache_policy;
size_t index_uncompressed_cache_size = server_settings.index_uncompressed_cache_size;
double index_uncompressed_cache_size_ratio = server_settings.index_uncompressed_cache_size_ratio;
if (index_uncompressed_cache_size > max_cache_size)
{
index_uncompressed_cache_size = max_cache_size;
LOG_INFO(log, "Lowered index uncompressed cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
}
global_context->setIndexUncompressedCache(index_uncompressed_cache_size);
global_context->setIndexUncompressedCache(index_uncompressed_cache_policy, index_uncompressed_cache_size, index_uncompressed_cache_size_ratio);
String index_mark_cache_policy = server_settings.index_mark_cache_policy;
size_t index_mark_cache_size = server_settings.index_mark_cache_size;
double index_mark_cache_size_ratio = server_settings.index_mark_cache_size_ratio;
if (index_mark_cache_size > max_cache_size)
{
index_mark_cache_size = max_cache_size;
LOG_INFO(log, "Lowered index mark cache size to {} because the system has limited RAM", formatReadableSizeWithBinarySuffix(uncompressed_cache_size));
}
global_context->setIndexMarkCache(index_mark_cache_size);
global_context->setIndexMarkCache(index_mark_cache_policy, index_mark_cache_size, index_mark_cache_size_ratio);
size_t mmap_cache_size = server_settings.mmap_cache_size;
if (mmap_cache_size > max_cache_size)

View File

@@ -153,6 +153,7 @@ enum class AccessType
M(SYSTEM_DROP_QUERY_CACHE, "SYSTEM DROP QUERY, DROP QUERY CACHE, DROP QUERY", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_COMPILED_EXPRESSION_CACHE, "SYSTEM DROP COMPILED EXPRESSION, DROP COMPILED EXPRESSION CACHE, DROP COMPILED EXPRESSIONS", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_FILESYSTEM_CACHE, "SYSTEM DROP FILESYSTEM CACHE, DROP FILESYSTEM CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_SYNC_FILESYSTEM_CACHE, "SYSTEM REPAIR FILESYSTEM CACHE, REPAIR FILESYSTEM CACHE, SYNC FILESYSTEM CACHE", GLOBAL, SYSTEM) \
M(SYSTEM_DROP_SCHEMA_CACHE, "SYSTEM DROP SCHEMA CACHE, DROP SCHEMA CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_S3_CLIENT_CACHE, "SYSTEM DROP S3 CLIENT, DROP S3 CLIENT CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_CACHE, "DROP CACHE", GROUP, SYSTEM) \

View File

@@ -50,7 +50,8 @@ namespace
context->getRemoteHostFilter(),
static_cast<unsigned>(context->getGlobalContext()->getSettingsRef().s3_max_redirects),
context->getGlobalContext()->getSettingsRef().enable_s3_requests_logging,
/* for_disk_s3 = */ false, settings.request_settings.get_request_throttler, settings.request_settings.put_request_throttler);
/* for_disk_s3 = */ false, settings.request_settings.get_request_throttler, settings.request_settings.put_request_throttler,
s3_uri.uri.getScheme());
client_configuration.endpointOverride = s3_uri.endpoint;
client_configuration.maxConnections = static_cast<unsigned>(context->getSettingsRef().s3_max_connections);

View File

@@ -40,14 +40,17 @@ public:
using MappedPtr = typename CachePolicy::MappedPtr;
using KeyMapped = typename CachePolicy::KeyMapped;
/// Use this ctor if you don't care about the internal cache policy.
explicit CacheBase(size_t max_size_in_bytes, size_t max_count = 0, double size_ratio = 0.5)
static constexpr auto NO_MAX_COUNT = 0uz;
static constexpr auto DEFAULT_SIZE_RATIO = 0.5l;
/// Use this ctor if you only care about the cache size but not internals like the cache policy.
explicit CacheBase(size_t max_size_in_bytes, size_t max_count = NO_MAX_COUNT, double size_ratio = DEFAULT_SIZE_RATIO)
: CacheBase("SLRU", max_size_in_bytes, max_count, size_ratio)
{
}
/// Use this ctor if you want the user to configure the cache policy via some setting. Supports only general-purpose policies LRU and SLRU.
explicit CacheBase(std::string_view cache_policy_name, size_t max_size_in_bytes, size_t max_count = 0, double size_ratio = 0.5)
/// Use this ctor if the user should be able to configure the cache policy and cache sizes via settings. Supports only general-purpose policies LRU and SLRU.
explicit CacheBase(std::string_view cache_policy_name, size_t max_size_in_bytes, size_t max_count, double size_ratio)
{
auto on_weight_loss_function = [&](size_t weight_loss) { onRemoveOverflowWeightLoss(weight_loss); };
@@ -79,7 +82,7 @@ public:
MappedPtr get(const Key & key)
{
std::lock_guard lock(mutex);
auto res = cache_policy->get(key, lock);
auto res = cache_policy->get(key);
if (res)
++hits;
else
@@ -90,7 +93,7 @@ public:
std::optional<KeyMapped> getWithKey(const Key & key)
{
std::lock_guard lock(mutex);
auto res = cache_policy->getWithKey(key, lock);
auto res = cache_policy->getWithKey(key);
if (res.has_value())
++hits;
else
@@ -101,7 +104,7 @@ public:
void set(const Key & key, const MappedPtr & mapped)
{
std::lock_guard lock(mutex);
cache_policy->set(key, mapped, lock);
cache_policy->set(key, mapped);
}
/// If the value for the key is in the cache, returns it. If it is not, calls load_func() to
@@ -118,7 +121,7 @@ public:
InsertTokenHolder token_holder;
{
std::lock_guard cache_lock(mutex);
auto val = cache_policy->get(key, cache_lock);
auto val = cache_policy->get(key);
if (val)
{
++hits;
@@ -156,7 +159,7 @@ public:
auto token_it = insert_tokens.find(key);
if (token_it != insert_tokens.end() && token_it->second.get() == token)
{
cache_policy->set(key, token->value, cache_lock);
cache_policy->set(key, token->value);
result = true;
}
@@ -185,49 +188,49 @@ public:
insert_tokens.clear();
hits = 0;
misses = 0;
cache_policy->clear(lock);
cache_policy->clear();
}
void remove(const Key & key)
{
std::lock_guard lock(mutex);
cache_policy->remove(key, lock);
cache_policy->remove(key);
}
size_t weight() const
size_t sizeInBytes() const
{
std::lock_guard lock(mutex);
return cache_policy->weight(lock);
return cache_policy->sizeInBytes();
}
size_t count() const
{
std::lock_guard lock(mutex);
return cache_policy->count(lock);
return cache_policy->count();
}
size_t maxSize() const
size_t maxSizeInBytes() const
{
std::lock_guard lock(mutex);
return cache_policy->maxSize(lock);
return cache_policy->maxSizeInBytes();
}
void setMaxCount(size_t max_count)
{
std::lock_guard lock(mutex);
cache_policy->setMaxCount(max_count, lock);
cache_policy->setMaxCount(max_count);
}
void setMaxSize(size_t max_size_in_bytes)
void setMaxSizeInBytes(size_t max_size_in_bytes)
{
std::lock_guard lock(mutex);
cache_policy->setMaxSize(max_size_in_bytes, lock);
cache_policy->setMaxSizeInBytes(max_size_in_bytes);
}
void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries)
{
std::lock_guard lock(mutex);
cache_policy->setQuotaForUser(user_name, max_size_in_bytes, max_entries, lock);
cache_policy->setQuotaForUser(user_name, max_size_in_bytes, max_entries);
}
virtual ~CacheBase() = default;
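The diff above removes the external `cache_lock` parameters and renames the size accessors. As a quick orientation, a minimal usage sketch (a toy program against the repo's `Common/CacheBase.h`, with made-up sizes) of the updated constructor and the renamed `sizeInBytes()`/`maxSizeInBytes()`:
```cpp
#include <Common/CacheBase.h>
#include <memory>

int main()
{
    /// Policy name, byte budget, entry budget and SLRU size ratio are all explicit
    /// in the four-argument constructor.
    DB::CacheBase<int, int> cache("SLRU", /*max_size_in_bytes*/ 1024, /*max_count*/ 16, /*size_ratio*/ 0.5);

    cache.set(1, std::make_shared<int>(42)); /// locking now happens inside CacheBase
    auto value = cache.get(1);               /// nullptr on miss, counted in hits/misses

    /// weight()/maxSize() were renamed to make the byte-based semantics explicit.
    return (value && cache.sizeInBytes() <= cache.maxSizeInBytes()) ? 0 : 1;
}
```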

View File

@@ -0,0 +1,74 @@
#include "EnvironmentProxyConfigurationResolver.h"
#include <Common/logger_useful.h>
#include <Poco/URI.h>
namespace DB
{
/*
* Usually environment variables are upper-case, but it seems like proxy related variables are an exception.
* See https://unix.stackexchange.com/questions/212894/whats-the-right-format-for-the-http-proxy-environment-variable-caps-or-no-ca/212972#212972
* */
static constexpr auto PROXY_HTTP_ENVIRONMENT_VARIABLE = "http_proxy";
static constexpr auto PROXY_HTTPS_ENVIRONMENT_VARIABLE = "https_proxy";
EnvironmentProxyConfigurationResolver::EnvironmentProxyConfigurationResolver(Protocol protocol_)
: protocol(protocol_)
{}
namespace
{
const char * getProxyHost(DB::ProxyConfiguration::Protocol protocol)
{
/*
* getenv is safe to use here because ClickHouse code does not make any call to `setenv` or `putenv`
* aside from tests and a very early call during startup: https://github.com/ClickHouse/ClickHouse/blob/master/src/Daemon/BaseDaemon.cpp#L791
* */
if (protocol == DB::ProxyConfiguration::Protocol::HTTP)
{
return std::getenv(PROXY_HTTP_ENVIRONMENT_VARIABLE); // NOLINT(concurrency-mt-unsafe)
}
else if (protocol == DB::ProxyConfiguration::Protocol::HTTPS)
{
return std::getenv(PROXY_HTTPS_ENVIRONMENT_VARIABLE); // NOLINT(concurrency-mt-unsafe)
}
else
{
if (const char * http_proxy_host = std::getenv(PROXY_HTTP_ENVIRONMENT_VARIABLE)) // NOLINT(concurrency-mt-unsafe)
{
return http_proxy_host;
}
else
{
return std::getenv(PROXY_HTTPS_ENVIRONMENT_VARIABLE); // NOLINT(concurrency-mt-unsafe)
}
}
}
}
ProxyConfiguration EnvironmentProxyConfigurationResolver::resolve()
{
const auto * proxy_host = getProxyHost(protocol);
if (!proxy_host)
{
return {};
}
auto uri = Poco::URI(proxy_host);
auto host = uri.getHost();
auto scheme = uri.getScheme();
auto port = uri.getPort();
LOG_TRACE(&Poco::Logger::get("EnvironmentProxyConfigurationResolver"), "Use proxy from environment: {}://{}:{}", scheme, host, port);
return ProxyConfiguration {
host,
ProxyConfiguration::protocolFromString(scheme),
port
};
}
}

View File

@@ -0,0 +1,23 @@
#pragma once
#include <Common/ProxyConfigurationResolver.h>
namespace DB
{
/*
* Grabs proxy configuration from environment variables (http_proxy and https_proxy).
* */
class EnvironmentProxyConfigurationResolver : public ProxyConfigurationResolver
{
public:
explicit EnvironmentProxyConfigurationResolver(Protocol protocol_);
ProxyConfiguration resolve() override;
void errorReport(const ProxyConfiguration &) override {}
private:
Protocol protocol;
};
}
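Taken together with the `.cpp` above, usage is a one-liner per protocol. A minimal sketch (the proxy host is a hypothetical value, set only for the demonstration) of resolving a proxy from the environment:
```cpp
#include <Common/EnvironmentProxyConfigurationResolver.h>
#include <cstdlib>

int main()
{
    /// Hypothetical value; in production the variable comes from the environment.
    setenv("http_proxy", "http://proxy.example.com:3128", 1); // NOLINT(concurrency-mt-unsafe)

    DB::EnvironmentProxyConfigurationResolver resolver(DB::ProxyConfiguration::Protocol::HTTP);
    auto config = resolver.resolve();

    /// config.host == "proxy.example.com", config.port == 3128,
    /// config.protocol == Protocol::HTTP; an empty config if the variable is unset.
    return config.host.empty() ? 1 : 0;
}
```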

View File

@@ -37,25 +37,25 @@ public:
explicit ICachePolicy(CachePolicyUserQuotaPtr user_quotas_) : user_quotas(std::move(user_quotas_)) {}
virtual ~ICachePolicy() = default;
virtual size_t weight(std::lock_guard<std::mutex> & /*cache_lock*/) const = 0;
virtual size_t count(std::lock_guard<std::mutex> & /*cache_lock*/) const = 0;
virtual size_t maxSize(std::lock_guard<std::mutex>& /*cache_lock*/) const = 0;
virtual size_t sizeInBytes() const = 0;
virtual size_t count() const = 0;
virtual size_t maxSizeInBytes() const = 0;
virtual void setMaxCount(size_t /*max_count*/, std::lock_guard<std::mutex> & /* cache_lock */) = 0;
virtual void setMaxSize(size_t /*max_size_in_bytes*/, std::lock_guard<std::mutex> & /* cache_lock */) = 0;
virtual void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries, std::lock_guard<std::mutex> & /*cache_lock*/) { user_quotas->setQuotaForUser(user_name, max_size_in_bytes, max_entries); }
virtual void setMaxCount(size_t /*max_count*/) = 0;
virtual void setMaxSizeInBytes(size_t /*max_size_in_bytes*/) = 0;
virtual void setQuotaForUser(const String & user_name, size_t max_size_in_bytes, size_t max_entries) { user_quotas->setQuotaForUser(user_name, max_size_in_bytes, max_entries); }
/// HashFunction usually hashes the entire key and the found key will be equal the provided key. In such cases, use get(). It is also
/// possible to store other, non-hashed data in the key. In that case, the found key is potentially different from the provided key.
/// Then use getWithKey() to also return the found key including its non-hashed data.
virtual MappedPtr get(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) = 0;
virtual std::optional<KeyMapped> getWithKey(const Key &, std::lock_guard<std::mutex> & /*cache_lock*/) = 0;
virtual MappedPtr get(const Key & key) = 0;
virtual std::optional<KeyMapped> getWithKey(const Key &) = 0;
virtual void set(const Key & key, const MappedPtr & mapped, std::lock_guard<std::mutex> & /*cache_lock*/) = 0;
virtual void set(const Key & key, const MappedPtr & mapped) = 0;
virtual void remove(const Key & key, std::lock_guard<std::mutex> & /*cache_lock*/) = 0;
virtual void remove(const Key & key) = 0;
virtual void clear(std::lock_guard<std::mutex> & /*cache_lock*/) = 0;
virtual void clear() = 0;
virtual std::vector<KeyMapped> dump() const = 0;
protected:

View File

@@ -34,41 +34,41 @@ public:
{
}
size_t weight(std::lock_guard<std::mutex> & /* cache_lock */) const override
size_t sizeInBytes() const override
{
return current_size_in_bytes;
}
size_t count(std::lock_guard<std::mutex> & /* cache_lock */) const override
size_t count() const override
{
return cells.size();
}
size_t maxSize(std::lock_guard<std::mutex> & /* cache_lock */) const override
size_t maxSizeInBytes() const override
{
return max_size_in_bytes;
}
void setMaxCount(size_t max_count_, std::lock_guard<std::mutex> & /* cache_lock */) override
void setMaxCount(size_t max_count_) override
{
max_count = max_count_;
removeOverflow();
}
void setMaxSize(size_t max_size_in_bytes_, std::lock_guard<std::mutex> & /* cache_lock */) override
void setMaxSizeInBytes(size_t max_size_in_bytes_) override
{
max_size_in_bytes = max_size_in_bytes_;
removeOverflow();
}
void clear(std::lock_guard<std::mutex> & /* cache_lock */) override
void clear() override
{
queue.clear();
cells.clear();
current_size_in_bytes = 0;
}
void remove(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) override
void remove(const Key & key) override
{
auto it = cells.find(key);
if (it == cells.end())
@@ -79,7 +79,7 @@ public:
cells.erase(it);
}
MappedPtr get(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) override
MappedPtr get(const Key & key) override
{
auto it = cells.find(key);
if (it == cells.end())
@@ -93,7 +93,7 @@ public:
return cell.value;
}
std::optional<KeyMapped> getWithKey(const Key & key, std::lock_guard<std::mutex> & /*cache_lock*/) override
std::optional<KeyMapped> getWithKey(const Key & key) override
{
auto it = cells.find(key);
if (it == cells.end())
@@ -107,7 +107,7 @@ public:
return std::make_optional<KeyMapped>({it->first, cell.value});
}
void set(const Key & key, const MappedPtr & mapped, std::lock_guard<std::mutex> & /* cache_lock */) override
void set(const Key & key, const MappedPtr & mapped) override
{
auto [it, inserted] = cells.emplace(std::piecewise_construct,
std::forward_as_tuple(key),

View File

@@ -366,6 +366,8 @@ The server successfully detected this situation and will download merged part fr
M(DiskS3PutObject, "Number of DiskS3 API PutObject calls.") \
M(DiskS3GetObject, "Number of DiskS3 API GetObject calls.") \
\
M(EngineFileLikeReadFiles, "Number of files read in table engines working with files (like File/S3/URL/HDFS).") \
\
M(ReadBufferFromS3Microseconds, "Time spent on reading from S3.") \
M(ReadBufferFromS3InitMicroseconds, "Time spent initializing connection to S3.") \
M(ReadBufferFromS3Bytes, "Bytes read from S3.") \

View File

@@ -0,0 +1,51 @@
#pragma once
#include <string>
namespace DB
{
struct ProxyConfiguration
{
enum class Protocol
{
HTTP,
HTTPS,
ANY
};
static auto protocolFromString(const std::string & str)
{
if (str == "http")
{
return Protocol::HTTP;
}
else if (str == "https")
{
return Protocol::HTTPS;
}
else
{
return Protocol::ANY;
}
}
static auto protocolToString(Protocol protocol)
{
switch (protocol)
{
case Protocol::HTTP:
return "http";
case Protocol::HTTPS:
return "https";
case Protocol::ANY:
return "any";
}
}
std::string host;
Protocol protocol;
uint16_t port;
};
}
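The two string/enum helpers above are intentionally forgiving: unknown strings fall back to `Protocol::ANY`. A small sketch of the round trip:
```cpp
#include <Common/ProxyConfiguration.h>
#include <cassert>
#include <string>

int main()
{
    using DB::ProxyConfiguration;

    assert(ProxyConfiguration::protocolFromString("https") == ProxyConfiguration::Protocol::HTTPS);
    assert(ProxyConfiguration::protocolFromString("socks5") == ProxyConfiguration::Protocol::ANY); /// unknown -> ANY
    assert(std::string(ProxyConfiguration::protocolToString(ProxyConfiguration::Protocol::HTTP)) == "http");
}
```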

View File

@@ -0,0 +1,17 @@
#pragma once
#include <Common/ProxyConfiguration.h>
namespace DB
{
struct ProxyConfigurationResolver
{
using Protocol = ProxyConfiguration::Protocol;
virtual ~ProxyConfigurationResolver() = default;
virtual ProxyConfiguration resolve() = 0;
virtual void errorReport(const ProxyConfiguration & config) = 0;
};
}

View File

@@ -0,0 +1,208 @@
#include <Common/ProxyConfigurationResolverProvider.h>
#include <Common/EnvironmentProxyConfigurationResolver.h>
#include <Common/Exception.h>
#include <Common/ProxyListConfigurationResolver.h>
#include <Common/RemoteProxyConfigurationResolver.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/logger_useful.h>
#include <Interpreters/Context.h>
namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}
namespace
{
std::shared_ptr<ProxyConfigurationResolver> getRemoteResolver(
const String & config_prefix, const Poco::Util::AbstractConfiguration & configuration)
{
auto endpoint = Poco::URI(configuration.getString(config_prefix + ".endpoint"));
auto proxy_scheme = configuration.getString(config_prefix + ".proxy_scheme");
if (proxy_scheme != "http" && proxy_scheme != "https")
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Only HTTP/HTTPS schemas allowed in proxy resolver config: {}", proxy_scheme);
auto proxy_port = configuration.getUInt(config_prefix + ".proxy_port");
auto cache_ttl = configuration.getUInt(config_prefix + ".proxy_cache_time", 10);
LOG_DEBUG(&Poco::Logger::get("ProxyConfigurationResolverProvider"), "Configured remote proxy resolver: {}, Scheme: {}, Port: {}",
endpoint.toString(), proxy_scheme, proxy_port);
return std::make_shared<RemoteProxyConfigurationResolver>(endpoint, proxy_scheme, proxy_port, cache_ttl);
}
std::shared_ptr<ProxyConfigurationResolver> getRemoteResolver(
ProxyConfiguration::Protocol protocol, const String & config_prefix, const Poco::Util::AbstractConfiguration & configuration)
{
std::vector<String> keys;
configuration.keys(config_prefix, keys);
std::vector<Poco::URI> uris;
for (const auto & key : keys)
{
if (startsWith(key, "resolver"))
{
auto prefix_with_key = config_prefix + "." + key;
auto proxy_scheme_config_string = prefix_with_key + ".proxy_scheme";
auto config_protocol = configuration.getString(proxy_scheme_config_string);
if (ProxyConfiguration::Protocol::ANY == protocol || config_protocol == ProxyConfiguration::protocolToString(protocol))
{
return getRemoteResolver(prefix_with_key, configuration);
}
}
}
return nullptr;
}
auto extractURIList(const String & config_prefix, const Poco::Util::AbstractConfiguration & configuration)
{
std::vector<String> keys;
configuration.keys(config_prefix, keys);
std::vector<Poco::URI> uris;
for (const auto & key : keys)
{
if (startsWith(key, "uri"))
{
Poco::URI proxy_uri(configuration.getString(config_prefix + "." + key));
if (proxy_uri.getScheme() != "http" && proxy_uri.getScheme() != "https")
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Only HTTP/HTTPS schemas allowed in proxy uri: {}", proxy_uri.toString());
if (proxy_uri.getHost().empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Empty host in proxy uri: {}", proxy_uri.toString());
uris.push_back(proxy_uri);
LOG_DEBUG(&Poco::Logger::get("ProxyConfigurationResolverProvider"), "Configured proxy: {}", proxy_uri.toString());
}
}
return uris;
}
std::shared_ptr<ProxyConfigurationResolver> getListResolverNewSyntax(
ProxyConfiguration::Protocol protocol,
const String & config_prefix,
const Poco::Util::AbstractConfiguration & configuration
)
{
std::vector<Poco::URI> uris;
bool include_http_uris = ProxyConfiguration::Protocol::ANY == protocol || ProxyConfiguration::Protocol::HTTP == protocol;
if (include_http_uris && configuration.has(config_prefix + ".http"))
{
auto http_uris = extractURIList(config_prefix + ".http", configuration);
uris.insert(uris.end(), http_uris.begin(), http_uris.end());
}
bool include_https_uris = ProxyConfiguration::Protocol::ANY == protocol || ProxyConfiguration::Protocol::HTTPS == protocol;
if (include_https_uris && configuration.has(config_prefix + ".https"))
{
auto https_uris = extractURIList(config_prefix + ".https", configuration);
uris.insert(uris.end(), https_uris.begin(), https_uris.end());
}
return uris.empty() ? nullptr : std::make_shared<ProxyListConfigurationResolver>(uris);
}
std::shared_ptr<ProxyConfigurationResolver> getListResolverOldSyntax(
const String & config_prefix,
const Poco::Util::AbstractConfiguration & configuration
)
{
auto uris = extractURIList(config_prefix, configuration);
return uris.empty() ? nullptr : std::make_shared<ProxyListConfigurationResolver>(uris);
}
std::shared_ptr<ProxyConfigurationResolver> getListResolver(
ProxyConfiguration::Protocol protocol, const String & config_prefix, const Poco::Util::AbstractConfiguration & configuration
)
{
std::vector<String> keys;
configuration.keys(config_prefix, keys);
bool new_setting_syntax = std::find_if(
keys.begin(),
keys.end(),
[](const String & key)
{
return startsWith(key, "http") || startsWith(key, "https");
}) != keys.end();
return new_setting_syntax ? getListResolverNewSyntax(protocol, config_prefix, configuration)
: getListResolverOldSyntax(config_prefix, configuration);
}
}
std::shared_ptr<ProxyConfigurationResolver> ProxyConfigurationResolverProvider::get(Protocol protocol)
{
auto context = Context::getGlobalContextInstance();
chassert(context);
if (auto resolver = getFromSettings(protocol, "", context->getConfigRef()))
{
return resolver;
}
return std::make_shared<EnvironmentProxyConfigurationResolver>(protocol);
}
std::shared_ptr<ProxyConfigurationResolver> ProxyConfigurationResolverProvider::getFromSettings(
Protocol protocol,
const String & config_prefix,
const Poco::Util::AbstractConfiguration & configuration
)
{
auto proxy_prefix = config_prefix.empty() ? "proxy" : config_prefix + ".proxy";
if (configuration.has(proxy_prefix))
{
std::vector<String> config_keys;
configuration.keys(proxy_prefix, config_keys);
if (auto remote_resolver = getRemoteResolver(protocol, proxy_prefix, configuration))
{
return remote_resolver;
}
if (auto list_resolver = getListResolver(protocol, proxy_prefix, configuration))
{
return list_resolver;
}
}
return nullptr;
}
std::shared_ptr<ProxyConfigurationResolver> ProxyConfigurationResolverProvider::getFromOldSettingsFormat(
const String & config_prefix,
const Poco::Util::AbstractConfiguration & configuration
)
{
/*
* First try to get it from settings only using the combination of config_prefix and configuration.
* This logic exists for backward compatibility with old S3 storage specific proxy configuration.
* */
if (auto resolver = ProxyConfigurationResolverProvider::getFromSettings(Protocol::ANY, config_prefix, configuration))
{
return resolver;
}
/*
* In case the combination of config_prefix and configuration does not provide a resolver, try to get it from general / new settings.
* Falls back to Environment resolver if no configuration is found.
* */
return ProxyConfigurationResolverProvider::get(Protocol::ANY);
}
}

View File

@@ -0,0 +1,40 @@
#pragma once
#include <base/types.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Common/ProxyConfigurationResolver.h>
namespace DB
{
class ProxyConfigurationResolverProvider
{
public:
using Protocol = ProxyConfiguration::Protocol;
/*
* Returns appropriate ProxyConfigurationResolver based on current CH settings (Remote resolver or List resolver).
* If no configuration is found, returns Environment Resolver.
* */
static std::shared_ptr<ProxyConfigurationResolver> get(Protocol protocol);
/*
* This API exists exclusively for backward compatibility with old S3 storage specific proxy configuration.
* If no configuration is found, returns nullptr.
* */
static std::shared_ptr<ProxyConfigurationResolver> getFromOldSettingsFormat(
const String & config_prefix,
const Poco::Util::AbstractConfiguration & configuration
);
private:
static std::shared_ptr<ProxyConfigurationResolver> getFromSettings(
Protocol protocol,
const String & config_prefix,
const Poco::Util::AbstractConfiguration & configuration
);
};
}
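For callers, the provider hides which concrete resolver (environment, list, or remote) was configured. A minimal sketch, assuming the global Context has already been initialized, of how storage code might obtain a proxy for an HTTP request (the helper name is hypothetical):
```cpp
#include <Common/ProxyConfigurationResolverProvider.h>

DB::ProxyConfiguration proxyForHTTP() /// hypothetical helper name
{
    /// Picks a remote or list resolver from the server configuration,
    /// falling back to the environment variables otherwise.
    auto resolver = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP);
    return resolver->resolve(); /// an empty host means "no proxy"
}
```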

View File

@@ -0,0 +1,31 @@
#include <Common/ProxyListConfigurationResolver.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/logger_useful.h>
#include <Poco/URI.h>
namespace DB
{
ProxyListConfigurationResolver::ProxyListConfigurationResolver(std::vector<Poco::URI> proxies_)
: proxies(std::move(proxies_))
{
}
ProxyConfiguration ProxyListConfigurationResolver::resolve()
{
if (proxies.empty())
{
return {};
}
/// Avoid atomic increment if number of proxies is 1.
size_t index = proxies.size() > 1 ? (access_counter++) % proxies.size() : 0;
auto & proxy = proxies[index];
LOG_DEBUG(&Poco::Logger::get("ProxyListConfigurationResolver"), "Use proxy: {}", proxies[index].toString());
return ProxyConfiguration {proxy.getHost(), ProxyConfiguration::protocolFromString(proxy.getScheme()), proxy.getPort()};
}
}
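Since `access_counter` is atomic, concurrent callers rotate through the list without additional locking. A sketch of the round-robin behaviour with two hypothetical proxies:
```cpp
#include <Common/ProxyListConfigurationResolver.h>
#include <Poco/URI.h>

int main()
{
    DB::ProxyListConfigurationResolver resolver({
        Poco::URI("http://proxy1.example.com:3128"), /// hypothetical hosts
        Poco::URI("http://proxy2.example.com:3128"),
    });

    auto first = resolver.resolve();  /// proxy1 (index 0)
    auto second = resolver.resolve(); /// proxy2 (counter incremented, modulo list size)
    return first.host != second.host ? 0 : 1;
}
```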

View File

@@ -0,0 +1,31 @@
#pragma once
#include <base/types.h>
#include <Common/ProxyConfigurationResolver.h>
#include <Poco/URI.h>
namespace DB
{
/*
* Round-robin proxy list resolver.
* */
class ProxyListConfigurationResolver : public ProxyConfigurationResolver
{
public:
explicit ProxyListConfigurationResolver(std::vector<Poco::URI> proxies_);
ProxyConfiguration resolve() override;
void errorReport(const ProxyConfiguration &) override {}
private:
std::vector<Poco::URI> proxies;
/// Access counter to get proxy using round-robin strategy.
std::atomic<size_t> access_counter;
};
}

View File

@@ -1,32 +1,36 @@
#include "ProxyResolverConfiguration.h"
#if USE_AWS_S3
#include <Common/RemoteProxyConfigurationResolver.h>
#include <utility>
#include <IO/HTTPCommon.h>
#include "Poco/StreamCopier.h"
#include <Poco/StreamCopier.h>
#include <Poco/Net/HTTPRequest.h>
#include <Poco/Net/HTTPResponse.h>
#include <Common/logger_useful.h>
#include <Common/DNSResolver.h>
namespace DB::ErrorCodes
namespace DB
{
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}
namespace DB::S3
{
ProxyResolverConfiguration::ProxyResolverConfiguration(const Poco::URI & endpoint_, String proxy_scheme_
, unsigned proxy_port_, unsigned cache_ttl_)
: endpoint(endpoint_), proxy_scheme(std::move(proxy_scheme_)), proxy_port(proxy_port_), cache_ttl(cache_ttl_)
RemoteProxyConfigurationResolver::RemoteProxyConfigurationResolver(
const Poco::URI & endpoint_,
String proxy_protocol_,
unsigned proxy_port_,
unsigned cache_ttl_
)
: endpoint(endpoint_), proxy_protocol(std::move(proxy_protocol_)), proxy_port(proxy_port_), cache_ttl(cache_ttl_)
{
}
ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const Aws::Http::HttpRequest &)
ProxyConfiguration RemoteProxyConfigurationResolver::resolve()
{
LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Obtain proxy using resolver: {}", endpoint.toString());
auto * logger = &Poco::Logger::get("RemoteProxyConfigurationResolver");
LOG_DEBUG(logger, "Obtain proxy using resolver: {}", endpoint.toString());
std::lock_guard lock(cache_mutex);
@@ -34,7 +38,12 @@ ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const
if (cache_ttl.count() && cache_valid && now <= cache_timestamp + cache_ttl && now >= cache_timestamp)
{
LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Use cached proxy: {}://{}:{}", Aws::Http::SchemeMapper::ToString(cached_config.proxy_scheme), cached_config.proxy_host, cached_config.proxy_port);
LOG_DEBUG(logger,
"Use cached proxy: {}://{}:{}",
cached_config.protocol,
cached_config.host,
cached_config.port
);
return cached_config;
}
@@ -84,11 +93,11 @@ ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const
/// Read proxy host as string from response body.
Poco::StreamCopier::copyToString(response_body_stream, proxy_host);
LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Use proxy: {}://{}:{}", proxy_scheme, proxy_host, proxy_port);
LOG_DEBUG(logger, "Use proxy: {}://{}:{}", proxy_protocol, proxy_host, proxy_port);
cached_config.proxy_scheme = Aws::Http::SchemeMapper::FromString(proxy_scheme.c_str());
cached_config.proxy_host = proxy_host;
cached_config.proxy_port = proxy_port;
cached_config.protocol = ProxyConfiguration::protocolFromString(proxy_protocol);
cached_config.host = proxy_host;
cached_config.port = proxy_port;
cache_timestamp = std::chrono::system_clock::now();
cache_valid = true;
@@ -96,16 +105,14 @@ ClientConfigurationPerRequest ProxyResolverConfiguration::getConfiguration(const
}
catch (...)
{
tryLogCurrentException("AWSClient", "Failed to obtain proxy");
/// Don't use proxy if it can't be obtained.
ClientConfigurationPerRequest cfg;
return cfg;
tryLogCurrentException("RemoteProxyConfigurationResolver", "Failed to obtain proxy");
return {};
}
}
void ProxyResolverConfiguration::errorReport(const ClientConfigurationPerRequest & config)
void RemoteProxyConfigurationResolver::errorReport(const ProxyConfiguration & config)
{
if (config.proxy_host.empty())
if (config.host.empty())
return;
std::lock_guard lock(cache_mutex);
@@ -113,8 +120,8 @@ void ProxyResolverConfiguration::errorReport(const ClientConfigurationPerRequest
if (!cache_ttl.count() || !cache_valid)
return;
if (std::tie(cached_config.proxy_scheme, cached_config.proxy_host, cached_config.proxy_port)
!= std::tie(config.proxy_scheme, config.proxy_host, config.proxy_port))
if (std::tie(cached_config.protocol, cached_config.host, cached_config.port)
!= std::tie(config.protocol, config.host, config.port))
return;
/// Invalidate cached proxy when got error with this proxy
@@ -122,5 +129,3 @@ void ProxyResolverConfiguration::errorReport(const ClientConfigurationPerRequest
}
}
#endif

View File

@@ -0,0 +1,46 @@
#pragma once
#include <base/types.h>
#include <mutex>
#include <Common/ProxyConfigurationResolver.h>
#include <Poco/URI.h>
namespace DB
{
/*
* Makes an HTTP GET request to the specified endpoint to obtain a proxy host.
* */
class RemoteProxyConfigurationResolver : public ProxyConfigurationResolver
{
public:
RemoteProxyConfigurationResolver(
const Poco::URI & endpoint_,
String proxy_protocol_,
unsigned proxy_port_,
unsigned cache_ttl_
);
ProxyConfiguration resolve() override;
void errorReport(const ProxyConfiguration & config) override;
private:
/// Endpoint to obtain a proxy host.
const Poco::URI endpoint;
/// Scheme for obtained proxy.
const String proxy_protocol;
/// Port for obtained proxy.
const unsigned proxy_port;
std::mutex cache_mutex;
bool cache_valid = false;
std::chrono::time_point<std::chrono::system_clock> cache_timestamp;
const std::chrono::seconds cache_ttl{0};
ProxyConfiguration cached_config;
};
}
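A minimal sketch of constructing and querying the remote resolver, with a hypothetical resolver endpoint (the endpoint is expected to return the proxy host in its response body, as in the `.cpp` above):
```cpp
#include <Common/RemoteProxyConfigurationResolver.h>
#include <Poco/URI.h>

int main()
{
    DB::RemoteProxyConfigurationResolver resolver(
        Poco::URI("http://resolver.internal:8080/hostname"), /// hypothetical endpoint
        /*proxy_protocol_*/ "http", /// scheme of the proxies it hands out
        /*proxy_port_*/ 3128,       /// port of the proxies it hands out
        /*cache_ttl_*/ 10);         /// seconds to cache a successful resolution

    auto config = resolver.resolve(); /// empty configuration if the endpoint cannot be reached
    return config.host.empty() ? 1 : 0;
}
```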

View File

@@ -31,45 +31,45 @@ public:
/// TODO: construct from special struct with cache policy parameters (also with max_protected_size).
SLRUCachePolicy(size_t max_size_in_bytes_, size_t max_count_, double size_ratio_, OnWeightLossFunction on_weight_loss_function_)
: Base(std::make_unique<NoCachePolicyUserQuota>())
, size_ratio(size_ratio_)
, max_protected_size(static_cast<size_t>(max_size_in_bytes_ * std::min(1.0, size_ratio)))
, max_size_in_bytes(max_size_in_bytes_)
, max_protected_size(calculateMaxProtectedSize(max_size_in_bytes_, size_ratio_))
, max_count(max_count_)
, size_ratio(size_ratio_)
, on_weight_loss_function(on_weight_loss_function_)
{
}
size_t weight(std::lock_guard<std::mutex> & /* cache_lock */) const override
size_t sizeInBytes() const override
{
return current_size_in_bytes;
}
size_t count(std::lock_guard<std::mutex> & /* cache_lock */) const override
size_t count() const override
{
return cells.size();
}
size_t maxSize(std::lock_guard<std::mutex> & /* cache_lock */) const override
size_t maxSizeInBytes() const override
{
return max_size_in_bytes;
}
void setMaxCount(size_t max_count_, std::lock_guard<std::mutex> & /* cache_lock */) override
void setMaxCount(size_t max_count_) override
{
max_count = max_count_;
removeOverflow(protected_queue, max_protected_size, current_protected_size, /*is_protected=*/true);
removeOverflow(probationary_queue, max_size_in_bytes, current_size_in_bytes, /*is_protected=*/false);
}
void setMaxSize(size_t max_size_in_bytes_, std::lock_guard<std::mutex> & /* cache_lock */) override
void setMaxSizeInBytes(size_t max_size_in_bytes_) override
{
max_protected_size = static_cast<size_t>(max_size_in_bytes_ * std::min(1.0, size_ratio));
max_protected_size = calculateMaxProtectedSize(max_size_in_bytes_, size_ratio);
max_size_in_bytes = max_size_in_bytes_;
removeOverflow(protected_queue, max_protected_size, current_protected_size, /*is_protected=*/true);
removeOverflow(probationary_queue, max_size_in_bytes, current_size_in_bytes, /*is_protected=*/false);
}
void clear(std::lock_guard<std::mutex> & /* cache_lock */) override
void clear() override
{
cells.clear();
probationary_queue.clear();
@@ -78,7 +78,7 @@ public:
current_protected_size = 0;
}
void remove(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) override
void remove(const Key & key) override
{
auto it = cells.find(key);
if (it == cells.end())
@@ -95,7 +95,7 @@ public:
cells.erase(it);
}
MappedPtr get(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) override
MappedPtr get(const Key & key) override
{
auto it = cells.find(key);
if (it == cells.end())
@@ -116,7 +116,7 @@ public:
return cell.value;
}
std::optional<KeyMapped> getWithKey(const Key & key, std::lock_guard<std::mutex> & /*cache_lock*/) override
std::optional<KeyMapped> getWithKey(const Key & key) override
{
auto it = cells.find(key);
if (it == cells.end())
@@ -137,7 +137,7 @@ public:
return std::make_optional<KeyMapped>({it->first, cell.value});
}
void set(const Key & key, const MappedPtr & mapped, std::lock_guard<std::mutex> & /* cache_lock */) override
void set(const Key & key, const MappedPtr & mapped) override
{
auto [it, inserted] = cells.emplace(std::piecewise_construct,
std::forward_as_tuple(key),
@@ -208,16 +208,21 @@ private:
Cells cells;
size_t max_size_in_bytes;
size_t max_protected_size;
size_t max_count;
const double size_ratio;
size_t current_protected_size = 0;
size_t current_size_in_bytes = 0;
size_t max_protected_size;
size_t max_size_in_bytes;
size_t max_count;
WeightFunction weight_function;
OnWeightLossFunction on_weight_loss_function;
static size_t calculateMaxProtectedSize(size_t max_size_in_bytes, double size_ratio)
{
return static_cast<size_t>(max_size_in_bytes * std::max(0.0, std::min(1.0, size_ratio)));
}
void removeOverflow(SLRUQueue & queue, size_t max_weight_size, size_t & current_weight_size, bool is_protected)
{
size_t current_weight_lost = 0;
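The clamping in `calculateMaxProtectedSize` keeps the protected SLRU queue within the total budget for any configured ratio. A few worked values, as a sketch:
```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>

/// Same arithmetic as SLRUCachePolicy::calculateMaxProtectedSize above.
static size_t calculateMaxProtectedSize(size_t max_size_in_bytes, double size_ratio)
{
    return static_cast<size_t>(max_size_in_bytes * std::max(0.0, std::min(1.0, size_ratio)));
}

int main()
{
    assert(calculateMaxProtectedSize(1000, 0.5) == 500);  /// normal case
    assert(calculateMaxProtectedSize(1000, 1.7) == 1000); /// ratio clamped to 1.0
    assert(calculateMaxProtectedSize(1000, -0.2) == 0);   /// ratio clamped to 0.0
}
```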

View File

@@ -94,39 +94,39 @@ public:
{
}
size_t weight(std::lock_guard<std::mutex> & /* cache_lock */) const override
size_t sizeInBytes() const override
{
return size_in_bytes;
}
size_t count(std::lock_guard<std::mutex> & /* cache_lock */) const override
size_t count() const override
{
return cache.size();
}
size_t maxSize(std::lock_guard<std::mutex> & /* cache_lock */) const override
size_t maxSizeInBytes() const override
{
return max_size_in_bytes;
}
void setMaxCount(size_t max_count_, std::lock_guard<std::mutex> & /* cache_lock */) override
void setMaxCount(size_t max_count_) override
{
/// lazy behavior: the cache only shrinks upon the next insert
max_count = max_count_;
}
void setMaxSize(size_t max_size_in_bytes_, std::lock_guard<std::mutex> & /* cache_lock */) override
void setMaxSizeInBytes(size_t max_size_in_bytes_) override
{
/// lazy behavior: the cache only shrinks upon the next insert
max_size_in_bytes = max_size_in_bytes_;
}
void clear(std::lock_guard<std::mutex> & /* cache_lock */) override
void clear() override
{
cache.clear();
}
void remove(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) override
void remove(const Key & key) override
{
auto it = cache.find(key);
if (it == cache.end())
@ -137,7 +137,7 @@ public:
size_in_bytes -= sz;
}
MappedPtr get(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) override
MappedPtr get(const Key & key) override
{
auto it = cache.find(key);
if (it == cache.end())
@ -145,7 +145,7 @@ public:
return it->second;
}
std::optional<KeyMapped> getWithKey(const Key & key, std::lock_guard<std::mutex> & /* cache_lock */) override
std::optional<KeyMapped> getWithKey(const Key & key) override
{
auto it = cache.find(key);
if (it == cache.end())
@ -154,7 +154,7 @@ public:
}
/// Evicts on a best-effort basis. If there are too many non-stale entries, the new entry may not be cached at all!
void set(const Key & key, const MappedPtr & mapped, std::lock_guard<std::mutex> & /* cache_lock */) override
void set(const Key & key, const MappedPtr & mapped) override
{
chassert(mapped.get());

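To make the two "lazy behavior" comments above concrete: lowering a limit only records the new value, and the overflow is resolved as part of the next insert. A toy sketch of that eviction timing (not the ClickHouse cache, just the policy):

```cpp
#include <cassert>
#include <cstddef>
#include <list>

struct ToyCache
{
    std::list<int> items; /// front = oldest
    size_t max_count = 0;

    void setMaxCount(size_t n) { max_count = n; } /// lazy: no eviction here

    void set(int v)
    {
        items.push_back(v);
        while (items.size() > max_count) /// the cache only shrinks upon insert
            items.pop_front();
    }
};

int main()
{
    ToyCache cache;
    cache.setMaxCount(3);
    cache.set(1); cache.set(2); cache.set(3);
    cache.setMaxCount(1);
    assert(cache.items.size() == 3); /// still 3: the shrink is deferred
    cache.set(4);
    assert(cache.items.size() == 1); /// eviction happened on the insert
}
```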
View File

@ -73,3 +73,25 @@ inline std::string xmlNodeAsString(Poco::XML::Node *pNode)
result += ("</"+ node_name + ">\n");
return Poco::XML::fromXMLString(result);
}
struct EnvironmentProxySetter
{
EnvironmentProxySetter(const Poco::URI & http_proxy, const Poco::URI & https_proxy)
{
if (!http_proxy.empty())
{
setenv("http_proxy", http_proxy.toString().c_str(), 1); // NOLINT(concurrency-mt-unsafe)
}
if (!https_proxy.empty())
{
setenv("https_proxy", https_proxy.toString().c_str(), 1); // NOLINT(concurrency-mt-unsafe)
}
}
~EnvironmentProxySetter()
{
unsetenv("http_proxy"); // NOLINT(concurrency-mt-unsafe)
unsetenv("https_proxy"); // NOLINT(concurrency-mt-unsafe)
}
};

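The struct above is a small RAII guard over the proxy environment variables used by the resolver tests that follow. A usage sketch, assuming it is compiled together with the test helper header that defines it:

```cpp
#include <cassert>
#include <cstdlib>
#include <Poco/URI.h>

int main()
{
    {
        EnvironmentProxySetter setter(Poco::URI("http://proxy:3128"), /* https_proxy */ {});
        assert(std::getenv("http_proxy") != nullptr); /// visible inside the scope
    }
    assert(std::getenv("http_proxy") == nullptr); /// unset by the destructor
}
```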
View File

@ -5,11 +5,11 @@
TEST(LRUCache, set)
{
using SimpleCacheBase = DB::CacheBase<int, int>;
auto lru_cache = SimpleCacheBase("LRU", /*max_size_in_bytes*/ 10, /*max_count*/ 10);
auto lru_cache = SimpleCacheBase("LRU", /*max_size_in_bytes*/ 10, /*max_count*/ 10, /*size_ratio*/ 0.5);
lru_cache.set(1, std::make_shared<int>(2));
lru_cache.set(2, std::make_shared<int>(3));
auto w = lru_cache.weight();
auto w = lru_cache.sizeInBytes();
auto n = lru_cache.count();
ASSERT_EQ(w, 2);
ASSERT_EQ(n, 2);
@ -18,7 +18,7 @@ TEST(LRUCache, set)
TEST(LRUCache, update)
{
using SimpleCacheBase = DB::CacheBase<int, int>;
auto lru_cache = SimpleCacheBase("LRU", /*max_size_in_bytes*/ 10, /*max_count*/ 10);
auto lru_cache = SimpleCacheBase("LRU", /*max_size_in_bytes*/ 10, /*max_count*/ 10, /*size_ratio*/ 0.5);
lru_cache.set(1, std::make_shared<int>(2));
lru_cache.set(1, std::make_shared<int>(3));
auto val = lru_cache.get(1);
@ -29,7 +29,7 @@ TEST(LRUCache, update)
TEST(LRUCache, get)
{
using SimpleCacheBase = DB::CacheBase<int, int>;
auto lru_cache = SimpleCacheBase("LRU", /*max_size_in_bytes*/ 10, /*max_count*/ 10);
auto lru_cache = SimpleCacheBase("LRU", /*max_size_in_bytes*/ 10, /*max_count*/ 10, /*size_ratio*/ 0.5);
lru_cache.set(1, std::make_shared<int>(2));
lru_cache.set(2, std::make_shared<int>(3));
SimpleCacheBase::MappedPtr value = lru_cache.get(1);
@ -49,7 +49,7 @@ struct ValueWeight
TEST(LRUCache, evictOnSize)
{
using SimpleCacheBase = DB::CacheBase<int, size_t>;
auto lru_cache = SimpleCacheBase("LRU", /*max_size_in_bytes*/ 20, /*max_count*/ 3);
auto lru_cache = SimpleCacheBase("LRU", /*max_size_in_bytes*/ 20, /*max_count*/ 3, /*size_ratio*/ 0.5);
lru_cache.set(1, std::make_shared<size_t>(2));
lru_cache.set(2, std::make_shared<size_t>(3));
lru_cache.set(3, std::make_shared<size_t>(4));
@ -65,7 +65,7 @@ TEST(LRUCache, evictOnSize)
TEST(LRUCache, evictOnWeight)
{
using SimpleCacheBase = DB::CacheBase<int, size_t, std::hash<int>, ValueWeight>;
auto lru_cache = SimpleCacheBase("LRU", /*max_size_in_bytes*/ 10, /*max_count*/ 10);
auto lru_cache = SimpleCacheBase("LRU", /*max_size_in_bytes*/ 10, /*max_count*/ 10, /*size_ratio*/ 0.5);
lru_cache.set(1, std::make_shared<size_t>(2));
lru_cache.set(2, std::make_shared<size_t>(3));
lru_cache.set(3, std::make_shared<size_t>(4));
@ -74,7 +74,7 @@ TEST(LRUCache, evictOnWeight)
auto n = lru_cache.count();
ASSERT_EQ(n, 2);
auto w = lru_cache.weight();
auto w = lru_cache.sizeInBytes();
ASSERT_EQ(w, 9);
auto value = lru_cache.get(1);
@ -86,7 +86,7 @@ TEST(LRUCache, evictOnWeight)
TEST(LRUCache, getOrSet)
{
using SimpleCacheBase = DB::CacheBase<int, size_t, std::hash<int>, ValueWeight>;
auto lru_cache = SimpleCacheBase("LRU", /*max_size_in_bytes*/ 10, /*max_count*/ 10);
auto lru_cache = SimpleCacheBase("LRU", /*max_size_in_bytes*/ 10, /*max_count*/ 10, /*size_ratio*/ 0.5);
size_t x = 10;
auto load_func = [&] { return std::make_shared<size_t>(x); };
auto [value, loaded] = lru_cache.getOrSet(1, load_func);

View File

@ -0,0 +1,122 @@
#include <gtest/gtest.h>
#include <Common/ProxyConfigurationResolverProvider.h>
#include <Common/tests/gtest_global_context.h>
#include <Common/tests/gtest_helper_functions.h>
using ConfigurationPtr = Poco::AutoPtr<Poco::Util::AbstractConfiguration>;
class ProxyConfigurationResolverProviderTests : public ::testing::Test
{
protected:
static void SetUpTestSuite() {
context = getContext().context;
}
static void TearDownTestSuite() {
context->setConfig(Poco::AutoPtr(new Poco::Util::MapConfiguration()));
}
static DB::ContextMutablePtr context;
};
DB::ContextMutablePtr ProxyConfigurationResolverProviderTests::context;
Poco::URI http_env_proxy_server = Poco::URI("http://http_environment_proxy:3128");
Poco::URI https_env_proxy_server = Poco::URI("http://https_environment_proxy:3128");
Poco::URI http_list_proxy_server = Poco::URI("http://http_list_proxy:3128");
Poco::URI https_list_proxy_server = Poco::URI("http://https_list_proxy:3128");
TEST_F(ProxyConfigurationResolverProviderTests, EnvironmentResolverShouldBeUsedIfNoSettings)
{
EnvironmentProxySetter setter(http_env_proxy_server, https_env_proxy_server);
auto http_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP)->resolve();
auto https_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTPS)->resolve();
ASSERT_EQ(http_configuration.host, http_env_proxy_server.getHost());
ASSERT_EQ(http_configuration.port, http_env_proxy_server.getPort());
ASSERT_EQ(http_configuration.protocol, DB::ProxyConfiguration::protocolFromString(http_env_proxy_server.getScheme()));
ASSERT_EQ(https_configuration.host, https_env_proxy_server.getHost());
ASSERT_EQ(https_configuration.port, https_env_proxy_server.getPort());
ASSERT_EQ(https_configuration.protocol, DB::ProxyConfiguration::protocolFromString(https_env_proxy_server.getScheme()));
}
TEST_F(ProxyConfigurationResolverProviderTests, ListHTTPOnly)
{
ConfigurationPtr config = Poco::AutoPtr(new Poco::Util::MapConfiguration());
config->setString("proxy", "");
config->setString("proxy.http", "");
config->setString("proxy.http.uri", http_list_proxy_server.toString());
context->setConfig(config);
auto http_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP)->resolve();
ASSERT_EQ(http_proxy_configuration.host, http_list_proxy_server.getHost());
ASSERT_EQ(http_proxy_configuration.port, http_list_proxy_server.getPort());
ASSERT_EQ(http_proxy_configuration.protocol, DB::ProxyConfiguration::protocolFromString(http_list_proxy_server.getScheme()));
auto https_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTPS)->resolve();
// No https configuration since it's not set
ASSERT_EQ(https_proxy_configuration.host, "");
ASSERT_EQ(https_proxy_configuration.port, 0);
}
TEST_F(ProxyConfigurationResolverProviderTests, ListHTTPSOnly)
{
ConfigurationPtr config = Poco::AutoPtr(new Poco::Util::MapConfiguration());
config->setString("proxy", "");
config->setString("proxy.https", "");
config->setString("proxy.https.uri", https_list_proxy_server.toString());
context->setConfig(config);
auto http_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP)->resolve();
ASSERT_EQ(http_proxy_configuration.host, "");
ASSERT_EQ(http_proxy_configuration.port, 0);
auto https_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTPS)->resolve();
ASSERT_EQ(https_proxy_configuration.host, https_list_proxy_server.getHost());
// the protocol is still HTTP because the proxy URI scheme is http
ASSERT_EQ(https_proxy_configuration.protocol, DB::ProxyConfiguration::protocolFromString(https_list_proxy_server.getScheme()));
ASSERT_EQ(https_proxy_configuration.port, https_list_proxy_server.getPort());
}
TEST_F(ProxyConfigurationResolverProviderTests, ListBoth)
{
ConfigurationPtr config = Poco::AutoPtr(new Poco::Util::MapConfiguration());
config->setString("proxy", "");
config->setString("proxy.http", "");
config->setString("proxy.http.uri", http_list_proxy_server.toString());
config->setString("proxy", "");
config->setString("proxy.https", "");
config->setString("proxy.https.uri", https_list_proxy_server.toString());
context->setConfig(config);
auto http_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP)->resolve();
ASSERT_EQ(http_proxy_configuration.host, http_list_proxy_server.getHost());
ASSERT_EQ(http_proxy_configuration.protocol, DB::ProxyConfiguration::protocolFromString(http_list_proxy_server.getScheme()));
ASSERT_EQ(http_proxy_configuration.port, http_list_proxy_server.getPort());
auto https_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTPS)->resolve();
ASSERT_EQ(https_proxy_configuration.host, https_list_proxy_server.getHost());
// the protocol is still HTTP because the proxy URI scheme is http
ASSERT_EQ(https_proxy_configuration.protocol, DB::ProxyConfiguration::protocolFromString(https_list_proxy_server.getScheme()));
ASSERT_EQ(https_proxy_configuration.port, https_list_proxy_server.getPort());
}
// The remote resolver is tricky to test in unit tests.

View File

@ -0,0 +1,96 @@
#include <gtest/gtest.h>
#include <Common/EnvironmentProxyConfigurationResolver.h>
#include <Common/tests/gtest_helper_functions.h>
#include <Poco/URI.h>
namespace
{
auto http_proxy_server = Poco::URI("http://proxy_server:3128");
auto https_proxy_server = Poco::URI("https://proxy_server:3128");
}
TEST(EnvironmentProxyConfigurationResolver, TestHTTP)
{
EnvironmentProxySetter setter(http_proxy_server, {});
DB::EnvironmentProxyConfigurationResolver resolver(DB::ProxyConfiguration::Protocol::HTTP);
auto configuration = resolver.resolve();
ASSERT_EQ(configuration.host, http_proxy_server.getHost());
ASSERT_EQ(configuration.port, http_proxy_server.getPort());
ASSERT_EQ(configuration.protocol, DB::ProxyConfiguration::protocolFromString(http_proxy_server.getScheme()));
}
TEST(EnvironmentProxyConfigurationResolver, TestHTTPNoEnv)
{
DB::EnvironmentProxyConfigurationResolver resolver(DB::ProxyConfiguration::Protocol::HTTP);
auto configuration = resolver.resolve();
ASSERT_EQ(configuration.host, "");
ASSERT_EQ(configuration.protocol, DB::ProxyConfiguration::Protocol::HTTP);
ASSERT_EQ(configuration.port, 0u);
}
TEST(EnvironmentProxyConfigurationResolver, TestHTTPs)
{
EnvironmentProxySetter setter({}, https_proxy_server);
DB::EnvironmentProxyConfigurationResolver resolver(DB::ProxyConfiguration::Protocol::HTTPS);
auto configuration = resolver.resolve();
ASSERT_EQ(configuration.host, https_proxy_server.getHost());
ASSERT_EQ(configuration.port, https_proxy_server.getPort());
ASSERT_EQ(configuration.protocol, DB::ProxyConfiguration::protocolFromString(https_proxy_server.getScheme()));
}
TEST(EnvironmentProxyConfigurationResolver, TestHTTPsNoEnv)
{
DB::EnvironmentProxyConfigurationResolver resolver(DB::ProxyConfiguration::Protocol::HTTPS);
auto configuration = resolver.resolve();
ASSERT_EQ(configuration.host, "");
ASSERT_EQ(configuration.protocol, DB::ProxyConfiguration::Protocol::HTTP);
ASSERT_EQ(configuration.port, 0u);
}
TEST(EnvironmentProxyConfigurationResolver, TestANYHTTP)
{
EnvironmentProxySetter setter(http_proxy_server, {});
DB::EnvironmentProxyConfigurationResolver resolver(DB::ProxyConfiguration::Protocol::ANY);
auto configuration = resolver.resolve();
ASSERT_EQ(configuration.host, http_proxy_server.getHost());
ASSERT_EQ(configuration.port, http_proxy_server.getPort());
ASSERT_EQ(configuration.protocol, DB::ProxyConfiguration::protocolFromString(http_proxy_server.getScheme()));
}
TEST(EnvironmentProxyConfigurationResolver, TestANYHTTPS)
{
EnvironmentProxySetter setter({}, https_proxy_server);
DB::EnvironmentProxyConfigurationResolver resolver(DB::ProxyConfiguration::Protocol::ANY);
auto configuration = resolver.resolve();
ASSERT_EQ(configuration.host, https_proxy_server.getHost());
ASSERT_EQ(configuration.port, https_proxy_server.getPort());
ASSERT_EQ(configuration.protocol, DB::ProxyConfiguration::protocolFromString(https_proxy_server.getScheme()));
}
TEST(EnvironmentProxyConfigurationResolver, TestANYNoEnv)
{
DB::EnvironmentProxyConfigurationResolver resolver(DB::ProxyConfiguration::Protocol::ANY);
auto configuration = resolver.resolve();
ASSERT_EQ(configuration.host, "");
ASSERT_EQ(configuration.protocol, DB::ProxyConfiguration::Protocol::HTTP);
ASSERT_EQ(configuration.port, 0u);
}

View File

@ -0,0 +1,26 @@
#include <gtest/gtest.h>
#include <Common/ProxyListConfigurationResolver.h>
#include <Poco/URI.h>
namespace
{
auto proxy_server1 = Poco::URI("http://proxy_server1:3128");
auto proxy_server2 = Poco::URI("http://proxy_server2:3128");
}
TEST(ProxyListConfigurationResolver, SimpleTest)
{
DB::ProxyListConfigurationResolver resolver({proxy_server1, proxy_server2});
auto configuration1 = resolver.resolve();
auto configuration2 = resolver.resolve();
ASSERT_EQ(configuration1.host, proxy_server1.getHost());
ASSERT_EQ(configuration1.port, proxy_server1.getPort());
ASSERT_EQ(configuration1.protocol, DB::ProxyConfiguration::protocolFromString(proxy_server1.getScheme()));
ASSERT_EQ(configuration2.host, proxy_server2.getHost());
ASSERT_EQ(configuration2.port, proxy_server2.getPort());
ASSERT_EQ(configuration2.protocol, DB::ProxyConfiguration::protocolFromString(proxy_server2.getScheme()));
}

View File

@ -9,7 +9,7 @@ TEST(SLRUCache, set)
slru_cache.set(1, std::make_shared<int>(2));
slru_cache.set(2, std::make_shared<int>(3));
auto w = slru_cache.weight();
auto w = slru_cache.sizeInBytes();
auto n = slru_cache.count();
ASSERT_EQ(w, 2);
ASSERT_EQ(n, 2);
@ -125,7 +125,7 @@ TEST(SLRUCache, evictOnElements)
auto n = slru_cache.count();
ASSERT_EQ(n, 1);
auto w = slru_cache.weight();
auto w = slru_cache.sizeInBytes();
ASSERT_EQ(w, 3);
auto value = slru_cache.get(1);
@ -148,7 +148,7 @@ TEST(SLRUCache, evictOnWeight)
auto n = slru_cache.count();
ASSERT_EQ(n, 2);
auto w = slru_cache.weight();
auto w = slru_cache.sizeInBytes();
ASSERT_EQ(w, 9);
auto value = slru_cache.get(1);

View File

@ -23,7 +23,7 @@ int main(int argc, char ** argv)
try
{
UncompressedCache cache(1024);
UncompressedCache cache("SLRU", 1024, 0.5);
std::string path = argv[1];
std::cerr << std::fixed << std::setprecision(3);

View File

@ -92,7 +92,8 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo
auth_settings.region,
RemoteHostFilter(), s3_max_redirects,
enable_s3_requests_logging,
/* for_disk_s3 = */ false, /* get_request_throttler = */ {}, /* put_request_throttler = */ {});
/* for_disk_s3 = */ false, /* get_request_throttler = */ {}, /* put_request_throttler = */ {},
new_uri.uri.getScheme());
client_configuration.endpointOverride = new_uri.endpoint;

View File

@ -66,12 +66,18 @@
#define DBMS_HIERARCHICAL_DICTIONARY_MAX_DEPTH 1000
/// Default maximum (total and entry) sizes and policies of various caches
static constexpr auto DEFAULT_UNCOMPRESSED_CACHE_MAX_SIZE = 0_MiB;
static constexpr auto DEFAULT_UNCOMPRESSED_CACHE_POLICY = "SLRU";
static constexpr auto DEFAULT_MARK_CACHE_MAX_SIZE = 5368_MiB;
static constexpr auto DEFAULT_UNCOMPRESSED_CACHE_MAX_SIZE = 0_MiB;
static constexpr auto DEFAULT_UNCOMPRESSED_CACHE_SIZE_RATIO = 0.5l;
static constexpr auto DEFAULT_MARK_CACHE_POLICY = "SLRU";
static constexpr auto DEFAULT_MARK_CACHE_MAX_SIZE = 5368_MiB;
static constexpr auto DEFAULT_MARK_CACHE_SIZE_RATIO = 0.5l;
static constexpr auto DEFAULT_INDEX_UNCOMPRESSED_CACHE_POLICY = "SLRU";
static constexpr auto DEFAULT_INDEX_UNCOMPRESSED_CACHE_MAX_SIZE = 0_MiB;
static constexpr auto DEFAULT_INDEX_UNCOMPRESSED_CACHE_SIZE_RATIO = 0.5l;
static constexpr auto DEFAULT_INDEX_MARK_CACHE_POLICY = "SLRU";
static constexpr auto DEFAULT_INDEX_MARK_CACHE_MAX_SIZE = 0_MiB;
static constexpr auto DEFAULT_INDEX_MARK_CACHE_SIZE_RATIO = 0.5l;
static constexpr auto DEFAULT_MMAP_CACHE_MAX_SIZE = 1_KiB; /// chosen by rolling dice
static constexpr auto DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_SIZE = 128_MiB;
static constexpr auto DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_ENTRIES = 10'000;

View File

@ -60,10 +60,16 @@ namespace DB
M(Double, cache_size_to_ram_max_ratio, 0.5, "Set cache size to RAM max ratio. Allows lowering the cache size on low-memory systems.", 0) \
M(String, uncompressed_cache_policy, DEFAULT_UNCOMPRESSED_CACHE_POLICY, "Uncompressed cache policy name.", 0) \
M(UInt64, uncompressed_cache_size, DEFAULT_UNCOMPRESSED_CACHE_MAX_SIZE, "Size of cache for uncompressed blocks. Zero means disabled.", 0) \
M(UInt64, mark_cache_size, DEFAULT_MARK_CACHE_MAX_SIZE, "Size of cache for marks (index of MergeTree family of tables).", 0) \
M(Double, uncompressed_cache_size_ratio, DEFAULT_UNCOMPRESSED_CACHE_SIZE_RATIO, "The size of the protected queue in the uncompressed cache relative to the cache's total size.", 0) \
M(String, mark_cache_policy, DEFAULT_MARK_CACHE_POLICY, "Mark cache policy name.", 0) \
M(UInt64, mark_cache_size, DEFAULT_MARK_CACHE_MAX_SIZE, "Size of cache for marks (index of MergeTree family of tables).", 0) \
M(Double, mark_cache_size_ratio, DEFAULT_MARK_CACHE_SIZE_RATIO, "The size of the protected queue in the mark cache relative to the cache's total size.", 0) \
M(String, index_uncompressed_cache_policy, DEFAULT_INDEX_UNCOMPRESSED_CACHE_POLICY, "Index uncompressed cache policy name.", 0) \
M(UInt64, index_uncompressed_cache_size, DEFAULT_INDEX_UNCOMPRESSED_CACHE_MAX_SIZE, "Size of cache for uncompressed blocks of MergeTree indices. Zero means disabled.", 0) \
M(Double, index_uncompressed_cache_size_ratio, DEFAULT_INDEX_UNCOMPRESSED_CACHE_SIZE_RATIO, "The size of the protected queue in the index uncompressed cache relative to the cache's total size.", 0) \
M(String, index_mark_cache_policy, DEFAULT_INDEX_MARK_CACHE_POLICY, "Index mark cache policy name.", 0) \
M(UInt64, index_mark_cache_size, DEFAULT_INDEX_MARK_CACHE_MAX_SIZE, "Size of cache for index marks. Zero means disabled.", 0) \
M(Double, index_mark_cache_size_ratio, DEFAULT_INDEX_MARK_CACHE_SIZE_RATIO, "The size of the protected queue in the index mark cache relative to the cache's total size.", 0) \
M(UInt64, mmap_cache_size, DEFAULT_MMAP_CACHE_MAX_SIZE, "A cache for mmapped files.", 0) \
\
M(Bool, disable_internal_dns_cache, false, "Disable internal DNS caching at all.", 0) \

View File

@ -535,6 +535,7 @@ class IColumn;
M(Bool, database_atomic_wait_for_drop_and_detach_synchronously, false, "When executing DROP or DETACH TABLE in Atomic database, wait for table data to be finally dropped or detached.", 0) \
M(Bool, enable_scalar_subquery_optimization, true, "If it is set to true, prevent scalar subqueries from (de)serializing large scalar values and possibly avoid running the same subquery more than once.", 0) \
M(Bool, optimize_trivial_count_query, true, "Process trivial 'SELECT count() FROM table' query from metadata.", 0) \
M(Bool, optimize_count_from_files, true, "Optimize counting rows from files in supported input formats.", 0) \
M(Bool, optimize_respect_aliases, true, "If it is set to true, it will respect aliases in WHERE/GROUP BY/ORDER BY, that will help with partition pruning/secondary indexes/optimize_aggregation_in_order/optimize_read_in_order/optimize_trivial_count", 0) \
M(UInt64, mutations_sync, 0, "Wait for synchronous execution of ALTER TABLE UPDATE/DELETE queries (mutations). 0 - execute asynchronously. 1 - wait current server. 2 - wait all replicas if they exist.", 0) \
M(Bool, enable_lightweight_delete, true, "Enable lightweight DELETE mutations for mergetree tables.", 0) ALIAS(allow_experimental_lightweight_delete) \

View File

@ -1,25 +0,0 @@
#pragma once
#include "config.h"
#if USE_AWS_S3
#include <utility>
#include <base/types.h>
#include <IO/S3/PocoHTTPClient.h>
#include <Poco/URI.h>
namespace DB::S3
{
class ProxyConfiguration
{
public:
virtual ~ProxyConfiguration() = default;
/// Returns proxy configuration on each HTTP request.
virtual ClientConfigurationPerRequest getConfiguration(const Aws::Http::HttpRequest & request) = 0;
virtual void errorReport(const ClientConfigurationPerRequest & config) = 0;
};
}
#endif

View File

@ -1,32 +0,0 @@
#include "ProxyListConfiguration.h"
#if USE_AWS_S3
#include <utility>
#include <Common/logger_useful.h>
namespace DB::S3
{
ProxyListConfiguration::ProxyListConfiguration(std::vector<Poco::URI> proxies_) : proxies(std::move(proxies_)), access_counter(0)
{
}
ClientConfigurationPerRequest ProxyListConfiguration::getConfiguration(const Aws::Http::HttpRequest &)
{
/// Avoid atomic increment if number of proxies is 1.
size_t index = proxies.size() > 1 ? (access_counter++) % proxies.size() : 0;
ClientConfigurationPerRequest cfg;
cfg.proxy_scheme = Aws::Http::SchemeMapper::FromString(proxies[index].getScheme().c_str());
cfg.proxy_host = proxies[index].getHost();
cfg.proxy_port = proxies[index].getPort();
LOG_DEBUG(&Poco::Logger::get("AWSClient"), "Use proxy: {}", proxies[index].toString());
return cfg;
}
}
#endif

View File

@ -1,32 +0,0 @@
#pragma once
#include "config.h"
#if USE_AWS_S3
#include <atomic> // for std::atomic<size_t>
#include "ProxyConfiguration.h"
namespace DB::S3
{
/**
* For each request to S3 it chooses a proxy from the specified list using a round-robin strategy.
*/
class ProxyListConfiguration : public ProxyConfiguration
{
public:
explicit ProxyListConfiguration(std::vector<Poco::URI> proxies_);
ClientConfigurationPerRequest getConfiguration(const Aws::Http::HttpRequest & request) override;
void errorReport(const ClientConfigurationPerRequest &) override {}
private:
/// List of configured proxies.
const std::vector<Poco::URI> proxies;
/// Access counter to get proxy using round-robin strategy.
std::atomic<size_t> access_counter;
};
}
#endif

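The round-robin strategy documented above (and preserved by the new `ProxyListConfigurationResolver` exercised in the earlier tests) reduces to an atomic counter taken modulo the list size. A standalone sketch with plain strings in place of the Poco/AWS types:

```cpp
#include <atomic>
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

struct RoundRobin
{
    const std::vector<std::string> proxies;
    std::atomic<size_t> access_counter{0};

    const std::string & next()
    {
        /// Avoid the atomic increment when there is only one proxy.
        size_t index = proxies.size() > 1 ? (access_counter++) % proxies.size() : 0;
        return proxies[index];
    }
};

int main()
{
    RoundRobin rr{{"http://p1:3128", "http://p2:3128"}};
    assert(rr.next() == "http://p1:3128");
    assert(rr.next() == "http://p2:3128");
    assert(rr.next() == "http://p1:3128"); /// wraps around
}
```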
View File

@ -1,42 +0,0 @@
#pragma once
#include "config.h"
#if USE_AWS_S3
#include "ProxyConfiguration.h"
#include <mutex>
namespace DB::S3
{
/**
* Proxy configuration where the proxy host is obtained from the specified endpoint before each request.
* For each request to S3 it makes a GET request to the specified endpoint URL and reads the proxy host from the response body.
* The specified scheme and port are appended to the obtained proxy host to form the complete proxy URL.
*/
class ProxyResolverConfiguration : public ProxyConfiguration
{
public:
ProxyResolverConfiguration(const Poco::URI & endpoint_, String proxy_scheme_, unsigned proxy_port_, unsigned cache_ttl_);
ClientConfigurationPerRequest getConfiguration(const Aws::Http::HttpRequest & request) override;
void errorReport(const ClientConfigurationPerRequest & config) override;
private:
/// Endpoint to obtain a proxy host.
const Poco::URI endpoint;
/// Scheme for obtained proxy.
const String proxy_scheme;
/// Port for obtained proxy.
const unsigned proxy_port;
std::mutex cache_mutex;
bool cache_valid = false;
std::chrono::time_point<std::chrono::system_clock> cache_timestamp;
const std::chrono::seconds cache_ttl{0};
ClientConfigurationPerRequest cached_config;
};
}
#endif

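The removed resolver's members outline a simple TTL cache over the resolved proxy: reuse the last answer while it is younger than `cache_ttl`, otherwise re-resolve. A hedged standalone sketch of that check (`fetchFromEndpoint` is a stub standing in for the HTTP GET, not a real API):

```cpp
#include <chrono>
#include <string>

struct CachedResolver
{
    std::chrono::seconds cache_ttl{10};
    bool cache_valid = false;
    std::chrono::time_point<std::chrono::system_clock> cache_timestamp;
    std::string cached_proxy;

    std::string resolve()
    {
        auto now = std::chrono::system_clock::now();
        if (cache_valid && now - cache_timestamp < cache_ttl)
            return cached_proxy;            /// fresh enough: skip the network round trip
        cached_proxy = fetchFromEndpoint(); /// otherwise ask the endpoint again
        cache_timestamp = now;
        cache_valid = true;
        return cached_proxy;
    }

    /// Stub for the GET request the real class performs against `endpoint`.
    static std::string fetchFromEndpoint() { return "proxy_host"; }
};

int main()
{
    CachedResolver resolver;
    auto first = resolver.resolve();  /// performs the (stubbed) fetch
    auto second = resolver.resolve(); /// served from the TTL cache
    return first == second ? 0 : 1;
}
```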
View File

@ -5,6 +5,7 @@
#include <Common/StringUtils/StringUtils.h>
#include <Common/logger_useful.h>
#include <Common/Throttler.h>
#include <Common/ProxyConfigurationResolverProvider.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Interpreters/Context.h>
@ -17,9 +18,6 @@
#include <Storages/StorageS3Settings.h>
#include <Disks/ObjectStorages/S3/S3ObjectStorage.h>
#include <Disks/ObjectStorages/S3/ProxyConfiguration.h>
#include <Disks/ObjectStorages/S3/ProxyListConfiguration.h>
#include <Disks/ObjectStorages/S3/ProxyResolverConfiguration.h>
#include <Disks/ObjectStorages/DiskObjectStorageCommon.h>
#include <Disks/DiskLocal.h>
#include <Common/Macros.h>
@ -44,76 +42,15 @@ std::unique_ptr<S3ObjectStorageSettings> getSettings(const Poco::Util::AbstractC
config.getInt(config_prefix + ".objects_chunk_size_to_delete", 1000));
}
std::shared_ptr<S3::ProxyResolverConfiguration> getProxyResolverConfiguration(
const String & prefix, const Poco::Util::AbstractConfiguration & proxy_resolver_config)
{
auto endpoint = Poco::URI(proxy_resolver_config.getString(prefix + ".endpoint"));
auto proxy_scheme = proxy_resolver_config.getString(prefix + ".proxy_scheme");
if (proxy_scheme != "http" && proxy_scheme != "https")
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Only HTTP/HTTPS schemes allowed in proxy resolver config: {}", proxy_scheme);
auto proxy_port = proxy_resolver_config.getUInt(prefix + ".proxy_port");
auto cache_ttl = proxy_resolver_config.getUInt(prefix + ".proxy_cache_time", 10);
LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Configured proxy resolver: {}, Scheme: {}, Port: {}",
endpoint.toString(), proxy_scheme, proxy_port);
return std::make_shared<S3::ProxyResolverConfiguration>(endpoint, proxy_scheme, proxy_port, cache_ttl);
}
std::shared_ptr<S3::ProxyListConfiguration> getProxyListConfiguration(
const String & prefix, const Poco::Util::AbstractConfiguration & proxy_config)
{
std::vector<String> keys;
proxy_config.keys(prefix, keys);
std::vector<Poco::URI> proxies;
for (const auto & key : keys)
if (startsWith(key, "uri"))
{
Poco::URI proxy_uri(proxy_config.getString(prefix + "." + key));
if (proxy_uri.getScheme() != "http" && proxy_uri.getScheme() != "https")
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Only HTTP/HTTPS schemes allowed in proxy uri: {}", proxy_uri.toString());
if (proxy_uri.getHost().empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Empty host in proxy uri: {}", proxy_uri.toString());
proxies.push_back(proxy_uri);
LOG_DEBUG(&Poco::Logger::get("DiskS3"), "Configured proxy: {}", proxy_uri.toString());
}
if (!proxies.empty())
return std::make_shared<S3::ProxyListConfiguration>(proxies);
return nullptr;
}
std::shared_ptr<S3::ProxyConfiguration> getProxyConfiguration(const String & prefix, const Poco::Util::AbstractConfiguration & config)
{
if (!config.has(prefix + ".proxy"))
return nullptr;
std::vector<String> config_keys;
config.keys(prefix + ".proxy", config_keys);
if (auto resolver_configs = std::count(config_keys.begin(), config_keys.end(), "resolver"))
{
if (resolver_configs > 1)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple proxy resolver configurations aren't allowed");
return getProxyResolverConfiguration(prefix + ".proxy.resolver", config);
}
return getProxyListConfiguration(prefix + ".proxy", config);
}
std::unique_ptr<S3::Client> getClient(
const Poco::Util::AbstractConfiguration & config,
const String & config_prefix,
ContextPtr context,
const S3ObjectStorageSettings & settings)
{
String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
S3::URI uri(endpoint);
S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration(
config.getString(config_prefix + ".region", ""),
context->getRemoteHostFilter(),
@ -121,10 +58,9 @@ std::unique_ptr<S3::Client> getClient(
context->getGlobalContext()->getSettingsRef().enable_s3_requests_logging,
/* for_disk_s3 = */ true,
settings.request_settings.get_request_throttler,
settings.request_settings.put_request_throttler);
settings.request_settings.put_request_throttler,
uri.uri.getScheme());
String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
S3::URI uri(endpoint);
if (uri.key.back() != '/')
throw Exception(ErrorCodes::BAD_ARGUMENTS, "S3 path must end with '/', but '{}' doesn't.", uri.key);
@ -136,11 +72,14 @@ std::unique_ptr<S3::Client> getClient(
client_configuration.http_connection_pool_size = config.getUInt(config_prefix + ".http_connection_pool_size", 1000);
client_configuration.wait_on_pool_size_limit = false;
auto proxy_config = getProxyConfiguration(config_prefix, config);
/*
 * Override proxy configuration for backwards compatibility with the old configuration format.
 */
auto proxy_config = DB::ProxyConfigurationResolverProvider::getFromOldSettingsFormat(config_prefix, config);
if (proxy_config)
{
client_configuration.per_request_configuration
= [proxy_config](const auto & request) { return proxy_config->getConfiguration(request); };
= [proxy_config]() { return proxy_config->resolve(); };
client_configuration.error_report
= [proxy_config](const auto & request_config) { proxy_config->errorReport(request_config); };
}

View File

@ -126,6 +126,86 @@ namespace JSONUtils
return fileSegmentationEngineJSONEachRowImpl<'[', ']'>(in, memory, min_bytes, min_rows, max_rows);
}
template <const char opening_bracket, const char closing_bracket>
void skipRowForJSONEachRowImpl(ReadBuffer & in)
{
size_t balance = 0;
bool quotes = false;
while (!in.eof())
{
if (quotes)
{
auto * pos = find_first_symbols<'\\', '"'>(in.position(), in.buffer().end());
in.position() = pos;
if (in.position() > in.buffer().end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Position in buffer is out of bounds. There must be a bug.");
else if (in.position() == in.buffer().end())
continue;
if (*in.position() == '\\')
{
++in.position();
if (!in.eof())
++in.position();
}
else if (*in.position() == '"')
{
++in.position();
quotes = false;
}
}
else
{
auto * pos = find_first_symbols<opening_bracket, closing_bracket, '\\', '"'>(in.position(), in.buffer().end());
in.position() = pos;
if (in.position() > in.buffer().end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Position in buffer is out of bounds. There must be a bug.");
else if (in.position() == in.buffer().end())
continue;
else if (*in.position() == opening_bracket)
{
++balance;
++in.position();
}
else if (*in.position() == closing_bracket)
{
--balance;
++in.position();
}
else if (*in.position() == '\\')
{
++in.position();
if (!in.eof())
++in.position();
}
else if (*in.position() == '"')
{
quotes = true;
++in.position();
}
if (balance == 0)
return;
}
}
throw Exception(ErrorCodes::INCORRECT_DATA, "Unexpected eof");
}
void skipRowForJSONEachRow(ReadBuffer & in)
{
return skipRowForJSONEachRowImpl<'{', '}'>(in);
}
void skipRowForJSONCompactEachRow(ReadBuffer & in)
{
return skipRowForJSONEachRowImpl<'[', ']'>(in);
}
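Both helpers above walk the buffer keeping a bracket balance plus an in-quotes flag, so brackets inside string literals and escaped quotes cannot end the row early. A simplified standalone version of the same bookkeeping for the `{...}` variant, working over a `std::string_view` instead of a `ReadBuffer` (an illustration, not the production code):

```cpp
#include <cassert>
#include <cstddef>
#include <stdexcept>
#include <string_view>

/// Returns the position just past one balanced {...} object starting at `pos`.
size_t skipJSONObject(std::string_view s, size_t pos = 0)
{
    size_t balance = 0;
    bool quotes = false;
    for (; pos < s.size(); ++pos)
    {
        char c = s[pos];
        if (quotes)
        {
            if (c == '\\') ++pos;            /// skip the escaped character
            else if (c == '"') quotes = false;
        }
        else
        {
            if (c == '{') ++balance;
            else if (c == '}') --balance;
            else if (c == '\\') ++pos;       /// escapes are skipped outside quotes too
            else if (c == '"') quotes = true;
            if (balance == 0 && c == '}')
                return pos + 1;
        }
    }
    throw std::runtime_error("Unexpected eof");
}

int main()
{
    std::string_view row = R"({"a": {"b": "}"}, "c": 1} trailing)";
    assert(skipJSONObject(row) == row.size() - 9); /// stops right after the outer '}'
}
```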
NamesAndTypesList readRowAndGetNamesAndDataTypesForJSONEachRow(ReadBuffer & in, const FormatSettings & settings, JSONInferenceInfo * inference_info)
{
skipWhitespaceIfAny(in);
@ -612,8 +692,11 @@ namespace JSONUtils
auto names_and_types = JSONUtils::readMetadata(in);
for (const auto & [name, type] : names_and_types)
{
if (!header.has(name))
continue;
auto header_type = header.getByName(name).type;
if (header.has(name) && !type->equals(*header_type))
if (!type->equals(*header_type))
throw Exception(
ErrorCodes::INCORRECT_DATA,
"Type {} of column '{}' from metadata is not the same as type in header {}",

View File

@ -20,6 +20,9 @@ namespace JSONUtils
std::pair<bool, size_t> fileSegmentationEngineJSONEachRow(ReadBuffer & in, DB::Memory<> & memory, size_t min_bytes, size_t max_rows);
std::pair<bool, size_t> fileSegmentationEngineJSONCompactEachRow(ReadBuffer & in, DB::Memory<> & memory, size_t min_bytes, size_t min_rows, size_t max_rows);
void skipRowForJSONEachRow(ReadBuffer & in);
void skipRowForJSONCompactEachRow(ReadBuffer & in);
/// Read row in JSONEachRow format and try to determine type for each field.
/// Return list of names and types.
/// If cannot determine the type of some field, return nullptr for it.

View File

@ -381,25 +381,25 @@ public:
const auto & timezone_x = extractTimeZoneFromFunctionArguments(arguments, 3, 1);
const auto & timezone_y = extractTimeZoneFromFunctionArguments(arguments, 3, 2);
if (unit == "year" || unit == "years" || unit == "yy" || unit == "yyyy")
if (unit == "year" || unit == "yy" || unit == "yyyy")
impl.template dispatchForColumns<ToRelativeYearNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "quarter" || unit == "quarters" || unit == "qq" || unit == "q")
else if (unit == "quarter" || unit == "qq" || unit == "q")
impl.template dispatchForColumns<ToRelativeQuarterNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "month" || unit == "months" || unit == "mm" || unit == "m")
else if (unit == "month" || unit == "mm" || unit == "m")
impl.template dispatchForColumns<ToRelativeMonthNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "week" || unit == "weeks" || unit == "wk" || unit == "ww")
else if (unit == "week" || unit == "wk" || unit == "ww")
impl.template dispatchForColumns<ToRelativeWeekNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "day" || unit == "days" || unit == "dd" || unit == "d")
else if (unit == "day" || unit == "dd" || unit == "d")
impl.template dispatchForColumns<ToRelativeDayNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "hour" || unit == "hours" || unit == "hh" || unit == "h")
else if (unit == "hour" || unit == "hh" || unit == "h")
impl.template dispatchForColumns<ToRelativeHourNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "minute" || unit == "minutes" || unit == "mi" || unit == "n")
else if (unit == "minute" || unit == "mi" || unit == "n")
impl.template dispatchForColumns<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "second" || unit == "seconds" || unit == "ss" || unit == "s")
else if (unit == "second" || unit == "ss" || unit == "s")
impl.template dispatchForColumns<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "millisecond" || unit == "milliseconds" || unit == "ms")
else if (unit == "millisecond" || unit == "ms")
impl.template dispatchForColumns<ToRelativeSubsecondNumImpl<millisecond_multiplier>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "microsecond" || unit == "microseconds" || unit == "us" || unit == "u")
else if (unit == "microsecond" || unit == "us" || unit == "u")
impl.template dispatchForColumns<ToRelativeSubsecondNumImpl<microsecond_multiplier>>(x, y, timezone_x, timezone_y, res->getData());
else
throw Exception(ErrorCodes::BAD_ARGUMENTS,

View File

@ -485,15 +485,16 @@ namespace
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
FunctionArgumentDescriptors args{
FunctionArgumentDescriptors mandatory_args{
{"time", &isString<IDataType>, nullptr, "String"},
{"format", &isString<IDataType>, nullptr, "String"},
{"format", &isString<IDataType>, nullptr, "String"}
};
if (arguments.size() == 3)
args.emplace_back(FunctionArgumentDescriptor{"timezone", &isString<IDataType>, nullptr, "String"});
FunctionArgumentDescriptors optional_args{
{"timezone", &isString<IDataType>, &isColumnConst, "const String"}
};
validateFunctionArgumentTypes(*this, arguments, args);
validateFunctionArgumentTypes(*this, arguments, mandatory_args, optional_args);
String time_zone_name = getTimeZone(arguments).getTimeZone();
DataTypePtr date_type = std::make_shared<DataTypeDateTime>(time_zone_name);

View File

@ -137,7 +137,12 @@ namespace
throw Exception(ErrorCodes::UNSUPPORTED_URI_SCHEME, "Unsupported scheme in URI '{}'", uri.toString());
}
HTTPSessionPtr makeHTTPSessionImpl(const std::string & host, UInt16 port, bool https, bool keep_alive)
HTTPSessionPtr makeHTTPSessionImpl(
const std::string & host,
UInt16 port,
bool https,
bool keep_alive,
Poco::Net::HTTPClientSession::ProxyConfig proxy_config = {})
{
HTTPSessionPtr session;
@ -158,6 +163,9 @@ namespace
/// doesn't work properly without patch
session->setKeepAlive(keep_alive);
session->setProxyConfig(proxy_config);
return session;
}
@ -333,13 +341,17 @@ void setResponseDefaultHeaders(HTTPServerResponse & response, size_t keep_alive_
response.set("Keep-Alive", "timeout=" + std::to_string(timeout.totalSeconds()));
}
HTTPSessionPtr makeHTTPSession(const Poco::URI & uri, const ConnectionTimeouts & timeouts)
HTTPSessionPtr makeHTTPSession(
const Poco::URI & uri,
const ConnectionTimeouts & timeouts,
Poco::Net::HTTPClientSession::ProxyConfig proxy_config
)
{
const std::string & host = uri.getHost();
UInt16 port = uri.getPort();
bool https = isHTTPS(uri);
auto session = makeHTTPSessionImpl(host, port, https, false);
auto session = makeHTTPSessionImpl(host, port, https, false, proxy_config);
setTimeouts(*session, timeouts);
return session;
}

View File

@ -69,7 +69,11 @@ void markSessionForReuse(PooledHTTPSessionPtr session);
void setResponseDefaultHeaders(HTTPServerResponse & response, size_t keep_alive_timeout);
/// Create session object to perform requests and set required parameters.
HTTPSessionPtr makeHTTPSession(const Poco::URI & uri, const ConnectionTimeouts & timeouts);
HTTPSessionPtr makeHTTPSession(
const Poco::URI & uri,
const ConnectionTimeouts & timeouts,
Poco::Net::HTTPClientSession::ProxyConfig proxy_config = {}
);
/// Like the previous method, creates a session, but takes it from a pool; with and without a proxy URI.
PooledHTTPSessionPtr makePooledHTTPSession(

View File

@ -250,7 +250,8 @@ ReadWriteBufferFromHTTPBase<UpdatableSessionPtr>::ReadWriteBufferFromHTTPBase(
bool delay_initialization,
bool use_external_buffer_,
bool http_skip_not_found_url_,
std::optional<HTTPFileInfo> file_info_)
std::optional<HTTPFileInfo> file_info_,
Poco::Net::HTTPClientSession::ProxyConfig proxy_config_)
: SeekableReadBuffer(nullptr, 0)
, uri {uri_}
, method {!method_.empty() ? method_ : out_stream_callback_ ? Poco::Net::HTTPRequest::HTTP_POST : Poco::Net::HTTPRequest::HTTP_GET}
@ -265,6 +266,7 @@ ReadWriteBufferFromHTTPBase<UpdatableSessionPtr>::ReadWriteBufferFromHTTPBase(
, http_skip_not_found_url(http_skip_not_found_url_)
, settings {settings_}
, log(&Poco::Logger::get("ReadWriteBufferFromHTTP"))
, proxy_config(proxy_config_)
{
if (settings.http_max_tries <= 0 || settings.http_retry_initial_backoff_ms <= 0
|| settings.http_retry_initial_backoff_ms >= settings.http_retry_max_backoff_ms)
@ -848,12 +850,12 @@ HTTPFileInfo ReadWriteBufferFromHTTPBase<UpdatableSessionPtr>::parseFileInfo(con
}
SessionFactory::SessionFactory(const ConnectionTimeouts & timeouts_)
: timeouts(timeouts_) {}
SessionFactory::SessionFactory(const ConnectionTimeouts & timeouts_, Poco::Net::HTTPClientSession::ProxyConfig proxy_config_)
: timeouts(timeouts_), proxy_config(proxy_config_) {}
SessionFactory::SessionType SessionFactory::buildNewSession(const Poco::URI & uri)
{
return makeHTTPSession(uri, timeouts);
return makeHTTPSession(uri, timeouts, proxy_config);
}
ReadWriteBufferFromHTTP::ReadWriteBufferFromHTTP(
@ -870,9 +872,10 @@ ReadWriteBufferFromHTTP::ReadWriteBufferFromHTTP(
bool delay_initialization_,
bool use_external_buffer_,
bool skip_not_found_url_,
std::optional<HTTPFileInfo> file_info_)
std::optional<HTTPFileInfo> file_info_,
Poco::Net::HTTPClientSession::ProxyConfig proxy_config_)
: Parent(
std::make_shared<SessionType>(uri_, max_redirects, std::make_shared<SessionFactory>(timeouts)),
std::make_shared<SessionType>(uri_, max_redirects, std::make_shared<SessionFactory>(timeouts, proxy_config_)),
uri_,
credentials_,
method_,
@ -884,7 +887,8 @@ ReadWriteBufferFromHTTP::ReadWriteBufferFromHTTP(
delay_initialization_,
use_external_buffer_,
skip_not_found_url_,
file_info_) {}
file_info_,
proxy_config_) {}
PooledSessionFactory::PooledSessionFactory(

View File

@ -111,6 +111,8 @@ namespace detail
ReadSettings settings;
Poco::Logger * log;
Poco::Net::HTTPClientSession::ProxyConfig proxy_config;
bool withPartialContent(const HTTPRange & range) const;
size_t getOffset() const;
@ -161,7 +163,8 @@ namespace detail
bool delay_initialization = false,
bool use_external_buffer_ = false,
bool http_skip_not_found_url_ = false,
std::optional<HTTPFileInfo> file_info_ = std::nullopt);
std::optional<HTTPFileInfo> file_info_ = std::nullopt,
Poco::Net::HTTPClientSession::ProxyConfig proxy_config_ = {});
void callWithRedirects(Poco::Net::HTTPResponse & response, const String & method_, bool throw_on_all_errors = false, bool for_object_info = false);
@ -212,13 +215,14 @@ namespace detail
class SessionFactory
{
public:
explicit SessionFactory(const ConnectionTimeouts & timeouts_);
explicit SessionFactory(const ConnectionTimeouts & timeouts_, Poco::Net::HTTPClientSession::ProxyConfig proxy_config_ = {});
using SessionType = HTTPSessionPtr;
SessionType buildNewSession(const Poco::URI & uri);
private:
ConnectionTimeouts timeouts;
Poco::Net::HTTPClientSession::ProxyConfig proxy_config;
};
class ReadWriteBufferFromHTTP : public detail::ReadWriteBufferFromHTTPBase<std::shared_ptr<UpdatableSession<SessionFactory>>>
@ -241,7 +245,8 @@ public:
bool delay_initialization_ = true,
bool use_external_buffer_ = false,
bool skip_not_found_url_ = false,
std::optional<HTTPFileInfo> file_info_ = std::nullopt);
std::optional<HTTPFileInfo> file_info_ = std::nullopt,
Poco::Net::HTTPClientSession::ProxyConfig proxy_config_ = {});
};
class PooledSessionFactory

View File

@ -24,6 +24,8 @@
#include <Common/assert_cast.h>
#include <Common/logger_useful.h>
#include <Common/ProxyConfigurationResolverProvider.h>
namespace ProfileEvents
{
@ -861,16 +863,28 @@ PocoHTTPClientConfiguration ClientFactory::createClientConfiguration( // NOLINT
bool enable_s3_requests_logging,
bool for_disk_s3,
const ThrottlerPtr & get_request_throttler,
const ThrottlerPtr & put_request_throttler)
const ThrottlerPtr & put_request_throttler,
const String & protocol)
{
return PocoHTTPClientConfiguration(
auto proxy_configuration_resolver = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::protocolFromString(protocol));
auto per_request_configuration = [=] () { return proxy_configuration_resolver->resolve(); };
auto error_report = [=] (const DB::ProxyConfiguration & req) { proxy_configuration_resolver->errorReport(req); };
auto config = PocoHTTPClientConfiguration(
per_request_configuration,
force_region,
remote_host_filter,
s3_max_redirects,
enable_s3_requests_logging,
for_disk_s3,
get_request_throttler,
put_request_throttler);
put_request_throttler,
error_report);
config.scheme = Aws::Http::SchemeMapper::FromString(protocol.c_str());
return config;
}
}

View File

@ -314,7 +314,8 @@ public:
bool enable_s3_requests_logging,
bool for_disk_s3,
const ThrottlerPtr & get_request_throttler,
const ThrottlerPtr & put_request_throttler);
const ThrottlerPtr & put_request_throttler,
const String & protocol = "https");
private:
ClientFactory();

View File

@ -532,13 +532,13 @@ S3CredentialsProviderChain::S3CredentialsProviderChain(
configuration.enable_s3_requests_logging,
configuration.for_disk_s3,
configuration.get_request_throttler,
configuration.put_request_throttler);
configuration.put_request_throttler,
Aws::Http::SchemeMapper::ToString(Aws::Http::Scheme::HTTP));
/// See MakeDefaultHttpResourceClientConfiguration().
/// This is part of EC2 metadata client, but unfortunately it can't be accessed from outside
/// of contrib/aws/aws-cpp-sdk-core/source/internal/AWSHttpResourceClient.cpp
aws_client_configuration.maxConnections = 2;
aws_client_configuration.scheme = Aws::Http::Scheme::HTTP;
/// Explicitly set the proxy settings to empty/zero to avoid relying on defaults that could potentially change
/// in the future.

View File

@ -85,20 +85,24 @@ namespace DB::S3
{
PocoHTTPClientConfiguration::PocoHTTPClientConfiguration(
std::function<DB::ProxyConfiguration()> per_request_configuration_,
const String & force_region_,
const RemoteHostFilter & remote_host_filter_,
unsigned int s3_max_redirects_,
bool enable_s3_requests_logging_,
bool for_disk_s3_,
const ThrottlerPtr & get_request_throttler_,
const ThrottlerPtr & put_request_throttler_)
: force_region(force_region_)
const ThrottlerPtr & put_request_throttler_,
std::function<void(const DB::ProxyConfiguration &)> error_report_)
: per_request_configuration(per_request_configuration_)
, force_region(force_region_)
, remote_host_filter(remote_host_filter_)
, s3_max_redirects(s3_max_redirects_)
, enable_s3_requests_logging(enable_s3_requests_logging_)
, for_disk_s3(for_disk_s3_)
, get_request_throttler(get_request_throttler_)
, put_request_throttler(put_request_throttler_)
, error_report(error_report_)
{
}
@ -262,8 +266,8 @@ void PocoHTTPClient::makeRequestInternal(
Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const
{
/// Most sessions in the pool are already connected, and it is not possible to set a proxy host/port on an already connected session.
const auto request_configuration = per_request_configuration(request);
if (http_connection_pool_size && request_configuration.proxy_host.empty())
const auto request_configuration = per_request_configuration();
if (http_connection_pool_size && request_configuration.host.empty())
makeRequestInternalImpl<true>(request, request_configuration, response, readLimiter, writeLimiter);
else
makeRequestInternalImpl<false>(request, request_configuration, response, readLimiter, writeLimiter);
@ -272,7 +276,7 @@ void PocoHTTPClient::makeRequestInternal(
template <bool pooled>
void PocoHTTPClient::makeRequestInternalImpl(
Aws::Http::HttpRequest & request,
const ClientConfigurationPerRequest & request_configuration,
const DB::ProxyConfiguration & request_configuration,
std::shared_ptr<PocoHTTPResponse> & response,
Aws::Utils::RateLimits::RateLimiterInterface *,
Aws::Utils::RateLimits::RateLimiterInterface *) const
@ -327,7 +331,7 @@ void PocoHTTPClient::makeRequestInternalImpl(
Poco::URI target_uri(uri);
SessionPtr session;
if (!request_configuration.proxy_host.empty())
if (!request_configuration.host.empty())
{
if (enable_s3_requests_logging)
LOG_TEST(log, "Due to reverse proxy host name ({}) won't be resolved on ClickHouse side", uri);
@ -339,12 +343,12 @@ void PocoHTTPClient::makeRequestInternalImpl(
target_uri, timeouts, http_connection_pool_size, wait_on_pool_size_limit);
else
session = makeHTTPSession(target_uri, timeouts);
bool use_tunnel = request_configuration.proxy_scheme == Aws::Http::Scheme::HTTP && target_uri.getScheme() == "https";
bool use_tunnel = request_configuration.protocol == DB::ProxyConfiguration::Protocol::HTTP && target_uri.getScheme() == "https";
session->setProxy(
request_configuration.proxy_host,
request_configuration.proxy_port,
Aws::Http::SchemeMapper::ToString(request_configuration.proxy_scheme),
request_configuration.host,
request_configuration.port,
DB::ProxyConfiguration::protocolToString(request_configuration.protocol),
use_tunnel
);
}

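One behavioral detail of the rewritten proxy block above: a CONNECT tunnel is requested only when the proxy itself speaks plain HTTP while the target is https. A tiny truth-table sketch of that predicate (the tunnel semantics of Poco's `setProxy` flag are assumed here):

```cpp
#include <cassert>
#include <string>

bool useTunnel(bool proxy_is_plain_http, const std::string & target_scheme)
{
    return proxy_is_plain_http && target_scheme == "https";
}

int main()
{
    assert(useTunnel(true, "https"));   /// plain proxy, TLS target: tunnel via CONNECT
    assert(!useTunnel(true, "http"));   /// plain proxy, plain target: no tunnel
    assert(!useTunnel(false, "https")); /// HTTPS proxy: no tunnel needed here
}
```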
View File

@ -9,6 +9,7 @@
#include <Common/RemoteHostFilter.h>
#include <Common/Throttler_fwd.h>
#include <Common/ProxyConfiguration.h>
#include <IO/ConnectionTimeouts.h>
#include <IO/HTTPCommon.h>
#include <IO/HTTPHeaderEntries.h>
@ -34,16 +35,9 @@ namespace DB::S3
{
class ClientFactory;
struct ClientConfigurationPerRequest
{
Aws::Http::Scheme proxy_scheme = Aws::Http::Scheme::HTTPS;
String proxy_host;
unsigned proxy_port = 0;
};
struct PocoHTTPClientConfiguration : public Aws::Client::ClientConfiguration
{
std::function<ClientConfigurationPerRequest(const Aws::Http::HttpRequest &)> per_request_configuration = [] (const Aws::Http::HttpRequest &) { return ClientConfigurationPerRequest(); };
std::function<DB::ProxyConfiguration()> per_request_configuration;
String force_region;
const RemoteHostFilter & remote_host_filter;
unsigned int s3_max_redirects;
@ -62,17 +56,19 @@ struct PocoHTTPClientConfiguration : public Aws::Client::ClientConfiguration
void updateSchemeAndRegion();
std::function<void(const ClientConfigurationPerRequest &)> error_report;
std::function<void(const DB::ProxyConfiguration &)> error_report;
private:
PocoHTTPClientConfiguration(
std::function<DB::ProxyConfiguration()> per_request_configuration_,
const String & force_region_,
const RemoteHostFilter & remote_host_filter_,
unsigned int s3_max_redirects_,
bool enable_s3_requests_logging_,
bool for_disk_s3_,
const ThrottlerPtr & get_request_throttler_,
const ThrottlerPtr & put_request_throttler_
const ThrottlerPtr & put_request_throttler_,
std::function<void(const DB::ProxyConfiguration &)> error_report_
);
/// Constructor of Aws::Client::ClientConfiguration must be called after AWS SDK initialization.
@ -165,7 +161,7 @@ private:
template <bool pooled>
void makeRequestInternalImpl(
Aws::Http::HttpRequest & request,
const ClientConfigurationPerRequest & per_request_configuration,
const DB::ProxyConfiguration & per_request_configuration,
std::shared_ptr<PocoHTTPResponse> & response,
Aws::Utils::RateLimits::RateLimiterInterface * readLimiter,
Aws::Utils::RateLimits::RateLimiterInterface * writeLimiter) const;
@ -174,8 +170,8 @@ protected:
static S3MetricKind getMetricKind(const Aws::Http::HttpRequest & request);
void addMetric(const Aws::Http::HttpRequest & request, S3MetricType type, ProfileEvents::Count amount = 1) const;
std::function<ClientConfigurationPerRequest(const Aws::Http::HttpRequest &)> per_request_configuration;
std::function<void(const ClientConfigurationPerRequest &)> error_report;
std::function<DB::ProxyConfiguration()> per_request_configuration;
std::function<void(const DB::ProxyConfiguration &)> error_report;
ConnectionTimeouts timeouts;
const RemoteHostFilter & remote_host_filter;
unsigned int s3_max_redirects;

View File

@ -26,9 +26,19 @@
#include <IO/S3/Client.h>
#include <IO/HTTPHeaderEntries.h>
#include <Storages/StorageS3Settings.h>
#include <Poco/Util/ServerApplication.h>
#include "TestPocoHTTPServer.h"
/*
 * When all tests are executed together, `Context::getGlobalContextInstance()` is not null. The global context is used by
 * ProxyResolvers to get the proxy configuration (used by S3 clients). If the global context does not have a valid ConfigRef, it relies on
 * Poco::Util::Application::instance() to grab the config. However, at that point the application is not yet initialized, and
 * `Poco::Util::Application::instance()` returns nullptr, causing the test to fail. To fix this, we create a dummy application that
 * takes care of initialization.
 */
[[maybe_unused]] static Poco::Util::ServerApplication app;
class NoRetryStrategy : public Aws::Client::StandardRetryStrategy
{
@ -125,7 +135,8 @@ void testServerSideEncryption(
enable_s3_requests_logging,
/* for_disk_s3 = */ false,
/* get_request_throttler = */ {},
/* put_request_throttler = */ {}
/* put_request_throttler = */ {},
uri.uri.getScheme()
);
client_configuration.endpointOverride = uri.endpoint;

View File

@ -42,11 +42,8 @@ private:
using Base = CacheBase<UInt128, UncompressedCacheCell, UInt128TrivialHash, UncompressedSizeWeightFunction>;
public:
explicit UncompressedCache(size_t max_size_in_bytes)
: Base(max_size_in_bytes) {}
UncompressedCache(const String & uncompressed_cache_policy, size_t max_size_in_bytes)
: Base(uncompressed_cache_policy, max_size_in_bytes) {}
UncompressedCache(const String & cache_policy, size_t max_size_in_bytes, double size_ratio)
: Base(cache_policy, max_size_in_bytes, 0, size_ratio) {}
/// Calculate key from path to file and offset.
static UInt128 hash(const String & path_to_file, size_t offset)

View File

@ -13,9 +13,10 @@ WriteBufferFromHTTP::WriteBufferFromHTTP(
const std::string & content_encoding,
const HTTPHeaderEntries & additional_headers,
const ConnectionTimeouts & timeouts,
size_t buffer_size_)
size_t buffer_size_,
Poco::Net::HTTPClientSession::ProxyConfig proxy_configuration)
: WriteBufferFromOStream(buffer_size_)
, session{makeHTTPSession(uri, timeouts)}
, session{makeHTTPSession(uri, timeouts, proxy_configuration)}
, request{method, uri.getPathAndQuery(), Poco::Net::HTTPRequest::HTTP_1_1}
{
request.setHost(uri.getHost());

View File

@ -25,7 +25,8 @@ public:
const std::string & content_encoding = "",
const HTTPHeaderEntries & additional_headers = {},
const ConnectionTimeouts & timeouts = {},
size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE);
size_t buffer_size_ = DBMS_DEFAULT_BUFFER_SIZE,
Poco::Net::HTTPClientSession::ProxyConfig proxy_configuration = {});
private:
/// Receives response from the server after sending all data.

View File

@ -175,7 +175,7 @@ public:
private:
CachePtr getHashTableStatsCache(const Params & params, const std::lock_guard<std::mutex> &)
{
if (!hash_table_stats || hash_table_stats->maxSize() != params.max_entries_for_hash_table_stats)
if (!hash_table_stats || hash_table_stats->maxSizeInBytes() != params.max_entries_for_hash_table_stats)
hash_table_stats = std::make_shared<Cache>(params.max_entries_for_hash_table_stats);
return hash_table_stats;
}

View File

@ -42,6 +42,7 @@ size_t roundUpToMultiple(size_t num, size_t multiple)
{
return roundDownToMultiple(num + multiple - 1, multiple);
}
}
namespace DB
@ -174,31 +175,6 @@ FileSegments FileCache::getImpl(const LockedKey & locked_key, const FileSegment:
"Cannot have zero size downloaded file segments. {}",
file_segment->getInfoForLog());
}
#ifndef NDEBUG
/**
* Check that in-memory state of the cache is consistent with the state on disk.
* Check only in debug build, because such checks can be done often and can be quite
* expensive compared to overall query execution time.
*/
fs::path path = file_segment->getPathInLocalCache();
if (!fs::exists(path))
{
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"File path does not exist, but file has DOWNLOADED state. {}",
file_segment->getInfoForLog());
}
if (fs::file_size(path) == 0)
{
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Cannot have zero size downloaded file segments. {}",
file_segment->getInfoForLog());
}
#endif
}
}
else
@ -1037,7 +1013,7 @@ void FileCache::deactivateBackgroundOperations()
cleanup_thread->join();
}
FileSegmentsHolderPtr FileCache::getSnapshot()
FileSegments FileCache::getSnapshot()
{
assertInitialized();
#ifndef NDEBUG
@ -1050,19 +1026,19 @@ FileSegmentsHolderPtr FileCache::getSnapshot()
for (const auto & [_, file_segment_metadata] : locked_key)
file_segments.push_back(FileSegment::getSnapshot(file_segment_metadata->file_segment));
});
return std::make_unique<FileSegmentsHolder>(std::move(file_segments), /* complete_on_dtor */false);
return file_segments;
}
FileSegmentsHolderPtr FileCache::getSnapshot(const Key & key)
FileSegments FileCache::getSnapshot(const Key & key)
{
FileSegments file_segments;
auto locked_key = metadata.lockKeyMetadata(key, CacheMetadata::KeyNotFoundPolicy::THROW_LOGICAL);
for (const auto & [_, file_segment_metadata] : *locked_key->getKeyMetadata())
file_segments.push_back(FileSegment::getSnapshot(file_segment_metadata->file_segment));
return std::make_unique<FileSegmentsHolder>(std::move(file_segments));
return file_segments;
}
FileSegmentsHolderPtr FileCache::dumpQueue()
FileSegments FileCache::dumpQueue()
{
assertInitialized();
@ -1073,7 +1049,7 @@ FileSegmentsHolderPtr FileCache::dumpQueue()
return PriorityIterationResult::CONTINUE;
}, lockCache());
return std::make_unique<FileSegmentsHolder>(std::move(file_segments));
return file_segments;
}
std::vector<String> FileCache::tryGetCachePaths(const Key & key)
@ -1148,4 +1124,15 @@ FileCache::QueryContextHolderPtr FileCache::getQueryContextHolder(
return std::make_unique<QueryContextHolder>(query_id, this, std::move(context));
}
FileSegments FileCache::sync()
{
FileSegments file_segments;
metadata.iterate([&](LockedKey & locked_key)
{
auto broken = locked_key.sync();
file_segments.insert(file_segments.end(), broken.begin(), broken.end());
});
return file_segments;
}
}

View File

@ -124,11 +124,11 @@ public:
bool tryReserve(FileSegment & file_segment, size_t size, FileCacheReserveStat & stat);
FileSegmentsHolderPtr getSnapshot();
FileSegments getSnapshot();
FileSegmentsHolderPtr getSnapshot(const Key & key);
FileSegments getSnapshot(const Key & key);
FileSegmentsHolderPtr dumpQueue();
FileSegments dumpQueue();
void deactivateBackgroundOperations();
@ -150,6 +150,8 @@ public:
CacheGuard::Lock lockCache() const;
FileSegments sync();
private:
using KeyAndOffset = FileCacheKeyAndOffset;

View File

@ -882,8 +882,15 @@ void FileSegment::setDetachedState(const FileSegmentGuard::Lock & lock)
key_metadata.reset();
cache = nullptr;
queue_iterator = nullptr;
cache_writer.reset();
remote_file_reader.reset();
try
{
cache_writer.reset();
remote_file_reader.reset();
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}
void FileSegment::detach(const FileSegmentGuard::Lock & lock, const LockedKey &)


@ -128,7 +128,7 @@ bool KeyMetadata::createBaseDirectory()
return true;
}
std::string KeyMetadata::getFileSegmentPath(const FileSegment & file_segment)
std::string KeyMetadata::getFileSegmentPath(const FileSegment & file_segment) const
{
return fs::path(key_path)
/ CacheMetadata::getFileNameForFileSegment(file_segment.offset(), file_segment.getKind());
@ -704,26 +704,26 @@ bool LockedKey::removeAllFileSegments(bool if_releasable)
return removed_all;
}
KeyMetadata::iterator LockedKey::removeFileSegment(size_t offset)
KeyMetadata::iterator LockedKey::removeFileSegment(size_t offset, bool can_be_broken)
{
auto it = key_metadata->find(offset);
if (it == key_metadata->end())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no offset {}", offset);
auto file_segment = it->second->file_segment;
return removeFileSegmentImpl(it, file_segment->lock());
return removeFileSegmentImpl(it, file_segment->lock(), can_be_broken);
}
KeyMetadata::iterator LockedKey::removeFileSegment(size_t offset, const FileSegmentGuard::Lock & segment_lock)
KeyMetadata::iterator LockedKey::removeFileSegment(size_t offset, const FileSegmentGuard::Lock & segment_lock, bool can_be_broken)
{
auto it = key_metadata->find(offset);
if (it == key_metadata->end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no offset {}", offset);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no offset {} in key {}", offset, getKey());
return removeFileSegmentImpl(it, segment_lock);
return removeFileSegmentImpl(it, segment_lock, can_be_broken);
}
KeyMetadata::iterator LockedKey::removeFileSegmentImpl(KeyMetadata::iterator it, const FileSegmentGuard::Lock & segment_lock)
KeyMetadata::iterator LockedKey::removeFileSegmentImpl(KeyMetadata::iterator it, const FileSegmentGuard::Lock & segment_lock, bool can_be_broken)
{
auto file_segment = it->second->file_segment;
@ -731,30 +731,45 @@ KeyMetadata::iterator LockedKey::removeFileSegmentImpl(KeyMetadata::iterator it,
key_metadata->log, "Remove from cache. Key: {}, offset: {}, size: {}",
getKey(), file_segment->offset(), file_segment->reserved_size);
chassert(file_segment->assertCorrectnessUnlocked(segment_lock));
chassert(can_be_broken || file_segment->assertCorrectnessUnlocked(segment_lock));
if (file_segment->queue_iterator)
file_segment->queue_iterator->invalidate();
file_segment->detach(segment_lock, *this);
const auto path = key_metadata->getFileSegmentPath(*file_segment);
bool exists = fs::exists(path);
if (exists)
try
{
fs::remove(path);
const auto path = key_metadata->getFileSegmentPath(*file_segment);
bool exists = fs::exists(path);
if (exists)
{
fs::remove(path);
/// Clear OpenedFileCache to avoid reading from incorrect file descriptor.
int flags = file_segment->getFlagsForLocalRead();
/// Files are created with flags from file_segment->getFlagsForLocalRead()
/// plus optionally O_DIRECT is added, depends on query setting, so remove both.
OpenedFileCache::instance().remove(path, flags);
OpenedFileCache::instance().remove(path, flags | O_DIRECT);
/// Clear OpenedFileCache to avoid reading from incorrect file descriptor.
int flags = file_segment->getFlagsForLocalRead();
/// Files are created with flags from file_segment->getFlagsForLocalRead()
/// plus optionally O_DIRECT is added, depends on query setting, so remove both.
OpenedFileCache::instance().remove(path, flags);
OpenedFileCache::instance().remove(path, flags | O_DIRECT);
LOG_TEST(key_metadata->log, "Removed file segment at path: {}", path);
LOG_TEST(key_metadata->log, "Removed file segment at path: {}", path);
}
else if (file_segment->downloaded_size && !can_be_broken)
{
#ifdef ABORT_ON_LOGICAL_ERROR
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected path {} to exist", path);
#else
LOG_WARNING(key_metadata->log, "Expected path {} to exist, while removing {}:{}",
path, getKey(), file_segment->offset());
#endif
}
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
chassert(false);
}
else if (file_segment->downloaded_size)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected path {} to exist", path);
return key_metadata->erase(it);
}
@ -870,4 +885,56 @@ std::string LockedKey::toString() const
return result;
}
FileSegments LockedKey::sync()
{
FileSegments broken;
for (auto it = key_metadata->begin(); it != key_metadata->end();)
{
auto file_segment = it->second->file_segment;
if (file_segment->isDetached())
{
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"File segment has unexpected state: DETACHED ({})", file_segment->getInfoForLog());
}
if (file_segment->getDownloadedSize(false) == 0)
{
++it;
continue;
}
const auto & path = key_metadata->getFileSegmentPath(*file_segment);
if (!fs::exists(path))
{
LOG_WARNING(
key_metadata->log,
"File segment has DOWNLOADED state, but file does not exist ({})",
file_segment->getInfoForLog());
broken.push_back(FileSegment::getSnapshot(file_segment));
it = removeFileSegment(file_segment->offset(), file_segment->lock(), /* can_be_broken */true);
continue;
}
const size_t actual_size = fs::file_size(path);
const size_t expected_size = file_segment->getDownloadedSize(false);
if (actual_size == expected_size)
{
++it;
continue;
}
LOG_WARNING(
key_metadata->log,
"File segment has unexpected size. Having {}, expected {} ({})",
actual_size, expected_size, file_segment->getInfoForLog());
broken.push_back(FileSegment::getSnapshot(file_segment));
it = removeFileSegment(file_segment->offset(), file_segment->lock(), /* can_be_broken */false);
}
return broken;
}
}
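`LockedKey::sync()` applies one consistency rule per segment. A standalone sketch of that rule (illustrative only, using `std::filesystem` directly instead of the cache's own path helpers):

```cpp
#include <filesystem>
#include <cstddef>

namespace fs = std::filesystem;

/// Minimal restatement of the check sync() performs for each segment:
/// a segment that has downloaded data is broken if its backing file is
/// missing or its on-disk size differs from the size recorded in memory.
bool isBrokenSegment(const fs::path & path, size_t downloaded_size)
{
    if (downloaded_size == 0)
        return false;                                  /// nothing downloaded, nothing to verify
    if (!fs::exists(path))
        return true;                                   /// DOWNLOADED state but no file on disk
    return fs::file_size(path) != downloaded_size;     /// size mismatch
}
```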


@ -73,7 +73,7 @@ struct KeyMetadata : public std::map<size_t, FileSegmentMetadataPtr>,
bool createBaseDirectory();
std::string getFileSegmentPath(const FileSegment & file_segment);
std::string getFileSegmentPath(const FileSegment & file_segment) const;
private:
KeyState key_state = KeyState::ACTIVE;
@ -192,8 +192,8 @@ struct LockedKey : private boost::noncopyable
bool removeAllFileSegments(bool if_releasable = true);
KeyMetadata::iterator removeFileSegment(size_t offset, const FileSegmentGuard::Lock &);
KeyMetadata::iterator removeFileSegment(size_t offset);
KeyMetadata::iterator removeFileSegment(size_t offset, const FileSegmentGuard::Lock &, bool can_be_broken = false);
KeyMetadata::iterator removeFileSegment(size_t offset, bool can_be_broken = false);
void shrinkFileSegmentToDownloadedSize(size_t offset, const FileSegmentGuard::Lock &);
@ -207,10 +207,12 @@ struct LockedKey : private boost::noncopyable
void markAsRemoved();
FileSegments sync();
std::string toString() const;
private:
KeyMetadata::iterator removeFileSegmentImpl(KeyMetadata::iterator it, const FileSegmentGuard::Lock &);
KeyMetadata::iterator removeFileSegmentImpl(KeyMetadata::iterator it, const FileSegmentGuard::Lock &, bool can_be_broken = false);
const std::shared_ptr<KeyMetadata> key_metadata;
KeyGuard::Lock lock; /// `lock` must be destructed before `key_metadata`.


@ -480,7 +480,7 @@ QueryCache::QueryCache(size_t max_size_in_bytes, size_t max_entries, size_t max_
void QueryCache::updateConfiguration(size_t max_size_in_bytes, size_t max_entries, size_t max_entry_size_in_bytes_, size_t max_entry_size_in_rows_)
{
std::lock_guard lock(mutex);
cache.setMaxSize(max_size_in_bytes);
cache.setMaxSizeInBytes(max_size_in_bytes);
cache.setMaxCount(max_entries);
max_entry_size_in_bytes = max_entry_size_in_bytes_;
max_entry_size_in_rows = max_entry_size_in_rows_;
@ -510,9 +510,9 @@ void QueryCache::clear()
times_executed.clear();
}
size_t QueryCache::weight() const
size_t QueryCache::sizeInBytes() const
{
return cache.weight();
return cache.sizeInBytes();
}
size_t QueryCache::count() const


@ -182,7 +182,7 @@ public:
void clear();
size_t weight() const;
size_t sizeInBytes() const;
size_t count() const;
/// Record new execution of query represented by key. Returns number of executions so far.


@ -479,6 +479,9 @@ struct ContextSharedPart : boost::noncopyable
return;
shutdown_called = true;
/// Need to flush the async insert queue before shutting down the database catalog
async_insert_queue.reset();
/// Stop periodic reloading of the configuration files.
/// This must be done first because otherwise the reloading may pass a changed config
/// to some destroyed parts of ContextSharedPart.
@ -2268,14 +2271,14 @@ QueryStatusPtr Context::getProcessListElement() const
}
void Context::setUncompressedCache(const String & uncompressed_cache_policy, size_t max_size_in_bytes)
void Context::setUncompressedCache(const String & cache_policy, size_t max_size_in_bytes, double size_ratio)
{
auto lock = getLock();
if (shared->uncompressed_cache)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Uncompressed cache has been already created.");
shared->uncompressed_cache = std::make_shared<UncompressedCache>(uncompressed_cache_policy, max_size_in_bytes);
shared->uncompressed_cache = std::make_shared<UncompressedCache>(cache_policy, max_size_in_bytes, size_ratio);
}
void Context::updateUncompressedCacheConfiguration(const Poco::Util::AbstractConfiguration & config)
@ -2286,7 +2289,7 @@ void Context::updateUncompressedCacheConfiguration(const Poco::Util::AbstractCon
throw Exception(ErrorCodes::LOGICAL_ERROR, "Uncompressed cache was not created yet.");
size_t max_size_in_bytes = config.getUInt64("uncompressed_cache_size", DEFAULT_UNCOMPRESSED_CACHE_MAX_SIZE);
shared->uncompressed_cache->setMaxSize(max_size_in_bytes);
shared->uncompressed_cache->setMaxSizeInBytes(max_size_in_bytes);
}
UncompressedCachePtr Context::getUncompressedCache() const
@ -2303,14 +2306,14 @@ void Context::clearUncompressedCache() const
shared->uncompressed_cache->clear();
}
void Context::setMarkCache(const String & mark_cache_policy, size_t cache_size_in_bytes)
void Context::setMarkCache(const String & cache_policy, size_t max_cache_size_in_bytes, double size_ratio)
{
auto lock = getLock();
if (shared->mark_cache)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Mark cache has been already created.");
shared->mark_cache = std::make_shared<MarkCache>(mark_cache_policy, cache_size_in_bytes);
shared->mark_cache = std::make_shared<MarkCache>(cache_policy, max_cache_size_in_bytes, size_ratio);
}
void Context::updateMarkCacheConfiguration(const Poco::Util::AbstractConfiguration & config)
@ -2321,7 +2324,7 @@ void Context::updateMarkCacheConfiguration(const Poco::Util::AbstractConfigurati
throw Exception(ErrorCodes::LOGICAL_ERROR, "Mark cache was not created yet.");
size_t max_size_in_bytes = config.getUInt64("mark_cache_size", DEFAULT_MARK_CACHE_MAX_SIZE);
shared->mark_cache->setMaxSize(max_size_in_bytes);
shared->mark_cache->setMaxSizeInBytes(max_size_in_bytes);
}
MarkCachePtr Context::getMarkCache() const
@ -2353,14 +2356,14 @@ ThreadPool & Context::getLoadMarksThreadpool() const
return *shared->load_marks_threadpool;
}
void Context::setIndexUncompressedCache(size_t max_size_in_bytes)
void Context::setIndexUncompressedCache(const String & cache_policy, size_t max_size_in_bytes, double size_ratio)
{
auto lock = getLock();
if (shared->index_uncompressed_cache)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Index uncompressed cache has been already created.");
shared->index_uncompressed_cache = std::make_shared<UncompressedCache>(max_size_in_bytes);
shared->index_uncompressed_cache = std::make_shared<UncompressedCache>(cache_policy, max_size_in_bytes, size_ratio);
}
void Context::updateIndexUncompressedCacheConfiguration(const Poco::Util::AbstractConfiguration & config)
@ -2371,7 +2374,7 @@ void Context::updateIndexUncompressedCacheConfiguration(const Poco::Util::Abstra
throw Exception(ErrorCodes::LOGICAL_ERROR, "Index uncompressed cache was not created yet.");
size_t max_size_in_bytes = config.getUInt64("index_uncompressed_cache_size", DEFAULT_INDEX_UNCOMPRESSED_CACHE_MAX_SIZE);
shared->index_uncompressed_cache->setMaxSize(max_size_in_bytes);
shared->index_uncompressed_cache->setMaxSizeInBytes(max_size_in_bytes);
}
UncompressedCachePtr Context::getIndexUncompressedCache() const
@ -2388,14 +2391,14 @@ void Context::clearIndexUncompressedCache() const
shared->index_uncompressed_cache->clear();
}
void Context::setIndexMarkCache(size_t cache_size_in_bytes)
void Context::setIndexMarkCache(const String & cache_policy, size_t max_cache_size_in_bytes, double size_ratio)
{
auto lock = getLock();
if (shared->index_mark_cache)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Index mark cache has been already created.");
shared->index_mark_cache = std::make_shared<MarkCache>(cache_size_in_bytes);
shared->index_mark_cache = std::make_shared<MarkCache>(cache_policy, max_cache_size_in_bytes, size_ratio);
}
void Context::updateIndexMarkCacheConfiguration(const Poco::Util::AbstractConfiguration & config)
@ -2406,7 +2409,7 @@ void Context::updateIndexMarkCacheConfiguration(const Poco::Util::AbstractConfig
throw Exception(ErrorCodes::LOGICAL_ERROR, "Index mark cache was not created yet.");
size_t max_size_in_bytes = config.getUInt64("index_mark_cache_size", DEFAULT_INDEX_MARK_CACHE_MAX_SIZE);
shared->index_mark_cache->setMaxSize(max_size_in_bytes);
shared->index_mark_cache->setMaxSizeInBytes(max_size_in_bytes);
}
MarkCachePtr Context::getIndexMarkCache() const
@ -2423,14 +2426,14 @@ void Context::clearIndexMarkCache() const
shared->index_mark_cache->clear();
}
void Context::setMMappedFileCache(size_t cache_size_in_num_entries)
void Context::setMMappedFileCache(size_t max_cache_size_in_num_entries)
{
auto lock = getLock();
if (shared->mmap_cache)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Mapped file cache has been already created.");
shared->mmap_cache = std::make_shared<MMappedFileCache>(cache_size_in_num_entries);
shared->mmap_cache = std::make_shared<MMappedFileCache>(max_cache_size_in_num_entries);
}
void Context::updateMMappedFileCacheConfiguration(const Poco::Util::AbstractConfiguration & config)
@ -2441,7 +2444,7 @@ void Context::updateMMappedFileCacheConfiguration(const Poco::Util::AbstractConf
throw Exception(ErrorCodes::LOGICAL_ERROR, "Mapped file cache was not created yet.");
size_t max_size_in_bytes = config.getUInt64("mmap_cache_size", DEFAULT_MMAP_CACHE_MAX_SIZE);
shared->mmap_cache->setMaxSize(max_size_in_bytes);
shared->mmap_cache->setMaxSizeInBytes(max_size_in_bytes);
}
MMappedFileCachePtr Context::getMMappedFileCache() const
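All of the cache setters now take a policy name and a size ratio alongside the byte limit; presumably the ratio parametrizes the protected fraction of an SLRU queue. A hedged wiring sketch (the config key names and the 0.5 default are assumptions, not taken from this commit):

```cpp
/// Hypothetical server-startup wiring; config keys and defaults are assumed.
void setupCaches(ContextMutablePtr context, const Poco::Util::AbstractConfiguration & config)
{
    String policy = config.getString("uncompressed_cache_policy", "SLRU");
    size_t max_size_in_bytes = config.getUInt64("uncompressed_cache_size", 0);
    double size_ratio = config.getDouble("uncompressed_cache_size_ratio", 0.5);
    context->setUncompressedCache(policy, max_size_in_bytes, size_ratio);

    context->setMarkCache(
        config.getString("mark_cache_policy", "SLRU"),
        config.getUInt64("mark_cache_size", 0),
        config.getDouble("mark_cache_size_ratio", 0.5));
}
```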


@ -922,28 +922,28 @@ public:
/// --- Caches ------------------------------------------------------------------------------------------
void setUncompressedCache(const String & uncompressed_cache_policy, size_t max_size_in_bytes);
void setUncompressedCache(const String & cache_policy, size_t max_size_in_bytes, double size_ratio);
void updateUncompressedCacheConfiguration(const Poco::Util::AbstractConfiguration & config);
std::shared_ptr<UncompressedCache> getUncompressedCache() const;
void clearUncompressedCache() const;
void setMarkCache(const String & mark_cache_policy, size_t cache_size_in_bytes);
void setMarkCache(const String & cache_policy, size_t max_cache_size_in_bytes, double size_ratio);
void updateMarkCacheConfiguration(const Poco::Util::AbstractConfiguration & config);
std::shared_ptr<MarkCache> getMarkCache() const;
void clearMarkCache() const;
ThreadPool & getLoadMarksThreadpool() const;
void setIndexUncompressedCache(size_t max_size_in_bytes);
void setIndexUncompressedCache(const String & cache_policy, size_t max_size_in_bytes, double size_ratio);
void updateIndexUncompressedCacheConfiguration(const Poco::Util::AbstractConfiguration & config);
std::shared_ptr<UncompressedCache> getIndexUncompressedCache() const;
void clearIndexUncompressedCache() const;
void setIndexMarkCache(size_t cache_size_in_bytes);
void setIndexMarkCache(const String & cache_policy, size_t max_cache_size_in_bytes, double size_ratio);
void updateIndexMarkCacheConfiguration(const Poco::Util::AbstractConfiguration & config);
std::shared_ptr<MarkCache> getIndexMarkCache() const;
void clearIndexMarkCache() const;
void setMMappedFileCache(size_t cache_size_in_num_entries);
void setMMappedFileCache(size_t max_cache_size_in_num_entries);
void updateMMappedFileCacheConfiguration(const Poco::Util::AbstractConfiguration & config);
std::shared_ptr<MMappedFileCache> getMMappedFileCache() const;
void clearMMappedFileCache() const;


@ -2266,6 +2266,10 @@ std::optional<UInt64> InterpreterSelectQuery::getTrivialCount(UInt64 max_paralle
auto & query = getSelectQuery();
if (!query.prewhere() && !query.where() && !context->getCurrentTransaction())
{
/// Some storages can optimize trivial count in read() method instead of totalRows(), because counting
/// can still require reading some data (but much faster than reading columns).
/// Set a special flag in query info so the storage will see it and optimize count in read() method.
query_info.optimize_trivial_count = optimize_trivial_count;
return storage->totalRows(settings);
}
else
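The new comment describes a two-level contract: `totalRows()` answers from metadata when it can, and the `optimize_trivial_count` flag lets the storage fall back to a cheap count inside `read()`. A standalone sketch of that contract (illustrative types, not the real ClickHouse interfaces):

```cpp
#include <cstdint>
#include <optional>

struct QueryInfoLike { bool optimize_trivial_count = false; };

struct StorageLike
{
    std::optional<uint64_t> rows_from_metadata;   /// known exactly for some storages/formats

    /// Fast path: answer count() without running read() at all.
    std::optional<uint64_t> totalRows() const { return rows_from_metadata; }

    /// Fallback: read() still runs, but the flag lets it count rows cheaply
    /// (e.g. from per-chunk metadata) instead of decompressing full columns.
    uint64_t read(const QueryInfoLike & info) const
    {
        if (info.optimize_trivial_count)
            return countWithoutReadingColumns();
        return readColumnsAndCount();
    }

    uint64_t countWithoutReadingColumns() const { return 0; }  /// placeholder
    uint64_t readColumnsAndCount() const { return 0; }         /// placeholder
};
```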


@ -53,6 +53,7 @@
#include <Storages/StorageS3.h>
#include <Storages/StorageURL.h>
#include <Storages/HDFS/StorageHDFS.h>
#include <Storages/System/StorageSystemFilesystemCache.h>
#include <Parsers/ASTSystemQuery.h>
#include <Parsers/ASTDropQuery.h>
#include <Parsers/ASTCreateQuery.h>
@ -383,6 +384,54 @@ BlockIO InterpreterSystemQuery::execute()
}
break;
}
case Type::SYNC_FILESYSTEM_CACHE:
{
getContext()->checkAccess(AccessType::SYSTEM_SYNC_FILESYSTEM_CACHE);
ColumnsDescription columns{NamesAndTypesList{
{"cache_name", std::make_shared<DataTypeString>()},
{"path", std::make_shared<DataTypeString>()},
{"size", std::make_shared<DataTypeUInt64>()},
}};
Block sample_block;
for (const auto & column : columns)
sample_block.insert({column.type->createColumn(), column.type, column.name});
MutableColumns res_columns = sample_block.cloneEmptyColumns();
auto fill_data = [&](const std::string & cache_name, const FileCachePtr & cache, const FileSegments & file_segments)
{
for (const auto & file_segment : file_segments)
{
size_t i = 0;
const auto path = cache->getPathInLocalCache(file_segment->key(), file_segment->offset(), file_segment->getKind());
res_columns[i++]->insert(cache_name);
res_columns[i++]->insert(path);
res_columns[i++]->insert(file_segment->getDownloadedSize(false));
}
};
if (query.filesystem_cache_name.empty())
{
auto caches = FileCacheFactory::instance().getAll();
for (const auto & [cache_name, cache_data] : caches)
{
auto file_segments = cache_data->cache->sync();
fill_data(cache_name, cache_data->cache, file_segments);
}
}
else
{
auto cache = FileCacheFactory::instance().getByName(query.filesystem_cache_name).cache;
auto file_segments = cache->sync();
fill_data(query.filesystem_cache_name, cache, file_segments);
}
size_t num_rows = res_columns[0]->size();
auto source = std::make_shared<SourceFromSingleChunk>(sample_block, Chunk(std::move(res_columns), num_rows));
result.pipeline = QueryPipeline(std::move(source));
break;
}
case Type::DROP_SCHEMA_CACHE:
{
getContext()->checkAccess(AccessType::SYSTEM_DROP_SCHEMA_CACHE);
@ -1020,6 +1069,7 @@ AccessRightsElements InterpreterSystemQuery::getRequiredAccessForDDLOnCluster()
case Type::DROP_INDEX_MARK_CACHE:
case Type::DROP_INDEX_UNCOMPRESSED_CACHE:
case Type::DROP_FILESYSTEM_CACHE:
case Type::SYNC_FILESYSTEM_CACHE:
case Type::DROP_SCHEMA_CACHE:
#if USE_AWS_S3
case Type::DROP_S3_CLIENT_CACHE:
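The `SYNC_FILESYSTEM_CACHE` branch uses the standard pattern for returning a small ad-hoc result set: fill `MutableColumns` row by row, wrap them in a single `Chunk`, and serve it through `SourceFromSingleChunk`. A reduced sketch of that pattern, with made-up values:

```cpp
/// Reduced sketch of the one-chunk result pattern above; the inserted values
/// are hypothetical.
QueryPipeline makeOneChunkResult(const Block & sample_block)
{
    MutableColumns res_columns = sample_block.cloneEmptyColumns();
    res_columns[0]->insert(String("cache"));                /// cache_name
    res_columns[1]->insert(String("/caches/cache/key/0"));  /// path (hypothetical)
    res_columns[2]->insert(UInt64(123));                    /// size

    size_t num_rows = res_columns[0]->size();
    auto source = std::make_shared<SourceFromSingleChunk>(sample_block, Chunk(std::move(res_columns), num_rows));
    return QueryPipeline(std::move(source));
}
```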


@ -68,13 +68,13 @@ void ServerAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values
{
if (auto mark_cache = getContext()->getMarkCache())
{
new_values["MarkCacheBytes"] = { mark_cache->weight(), "Total size of mark cache in bytes" };
new_values["MarkCacheBytes"] = { mark_cache->sizeInBytes(), "Total size of mark cache in bytes" };
new_values["MarkCacheFiles"] = { mark_cache->count(), "Total number of mark files cached in the mark cache" };
}
if (auto uncompressed_cache = getContext()->getUncompressedCache())
{
new_values["UncompressedCacheBytes"] = { uncompressed_cache->weight(),
new_values["UncompressedCacheBytes"] = { uncompressed_cache->sizeInBytes(),
"Total size of uncompressed cache in bytes. Uncompressed cache does not usually improve the performance and should be mostly avoided." };
new_values["UncompressedCacheCells"] = { uncompressed_cache->count(),
"Total number of entries in the uncompressed cache. Each entry represents a decompressed block of data. Uncompressed cache does not usually improve performance and should be mostly avoided." };
@ -82,13 +82,13 @@ void ServerAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values
if (auto index_mark_cache = getContext()->getIndexMarkCache())
{
new_values["IndexMarkCacheBytes"] = { index_mark_cache->weight(), "Total size of mark cache for secondary indices in bytes." };
new_values["IndexMarkCacheBytes"] = { index_mark_cache->sizeInBytes(), "Total size of mark cache for secondary indices in bytes." };
new_values["IndexMarkCacheFiles"] = { index_mark_cache->count(), "Total number of mark files cached in the mark cache for secondary indices." };
}
if (auto index_uncompressed_cache = getContext()->getIndexUncompressedCache())
{
new_values["IndexUncompressedCacheBytes"] = { index_uncompressed_cache->weight(),
new_values["IndexUncompressedCacheBytes"] = { index_uncompressed_cache->sizeInBytes(),
"Total size of uncompressed cache in bytes for secondary indices. Uncompressed cache does not usually improve the performance and should be mostly avoided." };
new_values["IndexUncompressedCacheCells"] = { index_uncompressed_cache->count(),
"Total number of entries in the uncompressed cache for secondary indices. Each entry represents a decompressed block of data. Uncompressed cache does not usually improve performance and should be mostly avoided." };
@ -104,7 +104,7 @@ void ServerAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values
if (auto query_cache = getContext()->getQueryCache())
{
new_values["QueryCacheBytes"] = { query_cache->weight(), "Total size of the query cache in bytes." };
new_values["QueryCacheBytes"] = { query_cache->sizeInBytes(), "Total size of the query cache in bytes." };
new_values["QueryCacheEntries"] = { query_cache->count(), "Total number of entries in the query cache." };
}
@ -136,7 +136,7 @@ void ServerAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values
#if USE_EMBEDDED_COMPILER
if (auto * compiled_expression_cache = CompiledExpressionCacheFactory::instance().tryGetCache())
{
new_values["CompiledExpressionCacheBytes"] = { compiled_expression_cache->weight(),
new_values["CompiledExpressionCacheBytes"] = { compiled_expression_cache->sizeInBytes(),
"Total bytes used for the cache of JIT-compiled code." };
new_values["CompiledExpressionCacheCount"] = { compiled_expression_cache->count(),
"Total entries in the cache of JIT-compiled code." };


@ -69,13 +69,16 @@ fs::path caches_dir = fs::current_path() / "lru_cache_test";
std::string cache_base_path = caches_dir / "cache1" / "";
void assertEqual(const HolderPtr & holder, const Ranges & expected_ranges, const States & expected_states = {})
void assertEqual(FileSegments::const_iterator segments_begin, FileSegments::const_iterator segments_end, size_t segments_size, const Ranges & expected_ranges, const States & expected_states = {})
{
std::cerr << "Holder: " << holder->toString() << "\n";
ASSERT_EQ(holder->size(), expected_ranges.size());
std::cerr << "File segments: ";
for (auto it = segments_begin; it != segments_end; ++it)
std::cerr << (*it)->range().toString() << ", ";
ASSERT_EQ(segments_size, expected_ranges.size());
if (!expected_states.empty())
ASSERT_EQ(holder->size(), expected_states.size());
ASSERT_EQ(segments_size, expected_states.size());
auto get_expected_state = [&](size_t i)
{
@ -86,14 +89,25 @@ void assertEqual(const HolderPtr & holder, const Ranges & expected_ranges, const
};
size_t i = 0;
for (const auto & file_segment : *holder)
for (auto it = segments_begin; it != segments_end; ++it)
{
const auto & file_segment = *it;
ASSERT_EQ(file_segment->range(), expected_ranges[i]);
ASSERT_EQ(file_segment->state(), get_expected_state(i));
++i;
}
}
void assertEqual(const FileSegments & file_segments, const Ranges & expected_ranges, const States & expected_states = {})
{
assertEqual(file_segments.begin(), file_segments.end(), file_segments.size(), expected_ranges, expected_states);
}
void assertEqual(const FileSegmentsHolderPtr & file_segments, const Ranges & expected_ranges, const States & expected_states = {})
{
assertEqual(file_segments->begin(), file_segments->end(), file_segments->size(), expected_ranges, expected_states);
}
FileSegment & get(const HolderPtr & holder, int i)
{
auto it = std::next(holder->begin(), i);


@ -80,6 +80,7 @@ public:
UNFREEZE,
ENABLE_FAILPOINT,
DISABLE_FAILPOINT,
SYNC_FILESYSTEM_CACHE,
STOP_PULLING_REPLICATION_LOG,
START_PULLING_REPLICATION_LOG,
END


@ -420,6 +420,16 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected &
return false;
break;
}
case Type::SYNC_FILESYSTEM_CACHE:
{
ParserLiteral path_parser;
ASTPtr ast;
if (path_parser.parse(pos, ast, expected))
res->filesystem_cache_name = ast->as<ASTLiteral>()->value.safeGet<String>();
if (!parseQueryWithOnCluster(res, pos, expected))
return false;
break;
}
case Type::DROP_SCHEMA_CACHE:
{
if (ParserKeyword{"FOR"}.ignore(pos, expected))


@ -186,6 +186,7 @@ NameAndTypePair chooseSmallestColumnToReadFromStorage(const StoragePtr & storage
bool applyTrivialCountIfPossible(
QueryPlan & query_plan,
SelectQueryInfo & select_query_info,
const TableNode & table_node,
const QueryTreeNodePtr & query_tree,
ContextMutablePtr & query_context,
@ -243,6 +244,11 @@ bool applyTrivialCountIfPossible(
if (!count_func)
return false;
/// Some storages can optimize trivial count in read() method instead of totalRows(), because counting
/// can still require reading some data (but much faster than reading columns).
/// Set a special flag in query info so the storage will see it and optimize count in read() method.
select_query_info.optimize_trivial_count = true;
/// Get number of rows
std::optional<UInt64> num_rows = storage->totalRows(settings);
if (!num_rows)
@ -505,7 +511,7 @@ FilterDAGInfo buildAdditionalFiltersIfNeeded(const StoragePtr & storage,
}
JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expression,
const SelectQueryInfo & select_query_info,
SelectQueryInfo & select_query_info,
const SelectQueryOptions & select_query_options,
PlannerContextPtr & planner_context,
bool is_single_table_expression,
@ -651,7 +657,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
is_single_table_expression &&
table_node &&
select_query_info.has_aggregates &&
applyTrivialCountIfPossible(query_plan, *table_node, select_query_info.query_tree, planner_context->getMutableQueryContext(), table_expression_data.getColumnNames());
applyTrivialCountIfPossible(query_plan, select_query_info, *table_node, select_query_info.query_tree, planner_context->getMutableQueryContext(), table_expression_data.getColumnNames());
if (is_trivial_count_applied)
{
@ -1389,7 +1395,7 @@ JoinTreeQueryPlan buildQueryPlanForArrayJoinNode(const QueryTreeNodePtr & array_
}
JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node,
const SelectQueryInfo & select_query_info,
SelectQueryInfo & select_query_info,
SelectQueryOptions & select_query_options,
const ColumnIdentifierSet & outer_scope_columns,
PlannerContextPtr & planner_context)


@ -19,7 +19,7 @@ struct JoinTreeQueryPlan
/// Build JOIN TREE query plan for query node
JoinTreeQueryPlan buildJoinTreeQueryPlan(const QueryTreeNodePtr & query_node,
const SelectQueryInfo & select_query_info,
SelectQueryInfo & select_query_info,
SelectQueryOptions & select_query_options,
const ColumnIdentifierSet & outer_scope_columns,
PlannerContextPtr & planner_context);


@ -2,6 +2,7 @@
#include <IO/WriteHelpers.h>
#include <IO/Operators.h>
#include <Columns/ColumnSparse.h>
#include <Columns/ColumnConst.h>
#include <DataTypes/DataTypeLowCardinality.h>
namespace DB
@ -221,4 +222,16 @@ void convertToFullIfSparse(Chunk & chunk)
chunk.setColumns(std::move(columns), num_rows);
}
Chunk cloneConstWithDefault(const Chunk & chunk, size_t num_rows)
{
auto columns = chunk.cloneEmptyColumns();
for (auto & column : columns)
{
column->insertDefault();
column = ColumnConst::create(std::move(column), num_rows);
}
return Chunk(std::move(columns), num_rows);
}
}


@ -156,4 +156,7 @@ private:
void convertToFullIfConst(Chunk & chunk);
void convertToFullIfSparse(Chunk & chunk);
/// Creates a chunk with the same columns, but makes them const with the default value and the specified number of rows.
Chunk cloneConstWithDefault(const Chunk & chunk, size_t num_rows);
}


@ -28,4 +28,10 @@ void IInputFormat::setReadBuffer(ReadBuffer & in_)
in = &in_;
}
Chunk IInputFormat::getChunkForCount(size_t rows)
{
const auto & header = getPort().getHeader();
return cloneConstWithDefault(Chunk{header.getColumns(), 0}, rows);
}
}
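`getChunkForCount()` leans on `cloneConstWithDefault()`: when only the row count matters, the chunk's columns can be constants holding each type's default value. A standalone illustration of why that is enough (toy types, not the real `Chunk`/`IColumn` classes):

```cpp
#include <cstddef>
#include <string>
#include <vector>

/// Toy illustration: a count-only chunk stores each column's default value
/// once plus a logical row count; the only consumer is an aggregate such as
/// count(), which reads num_rows and ignores the values entirely.
struct ConstDefaultColumn
{
    std::string default_value;   /// the type's default, stored a single time
    std::size_t rows = 0;        /// logically replicated this many times
};

struct CountOnlyChunk
{
    std::vector<ConstDefaultColumn> columns;
    std::size_t num_rows = 0;
};

CountOnlyChunk chunkForCount(std::size_t num_columns, std::size_t rows)
{
    return {std::vector<ConstDefaultColumn>(num_columns, {"", rows}), rows};
}
```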


@ -61,11 +61,17 @@ public:
virtual size_t getApproxBytesReadForChunk() const { return 0; }
void needOnlyCount() { need_only_count = true; }
protected:
virtual Chunk getChunkForCount(size_t rows);
ColumnMappingPtr column_mapping{};
InputFormatErrorsLoggerPtr errors_logger;
bool need_only_count = false;
private:
/// Number of currently parsed chunk (if parallel parsing is enabled)
size_t current_unit_number = 0;


@ -113,6 +113,19 @@ Chunk IRowInputFormat::generate()
size_t chunk_start_offset = getDataOffsetMaybeCompressed(getReadBuffer());
try
{
if (need_only_count && supportsCountRows())
{
num_rows = countRows(params.max_block_size);
if (num_rows == 0)
{
readSuffix();
return {};
}
total_rows += num_rows;
approx_bytes_read_for_chunk = getDataOffsetMaybeCompressed(getReadBuffer()) - chunk_start_offset;
return getChunkForCount(num_rows);
}
RowReadExtension info;
bool continue_reading = true;
for (size_t rows = 0; rows < params.max_block_size && continue_reading; ++rows)
@ -262,7 +275,7 @@ Chunk IRowInputFormat::generate()
void IRowInputFormat::syncAfterError()
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method syncAfterError is not implemented for input format");
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method syncAfterError is not implemented for input format {}", getName());
}
void IRowInputFormat::resetParser()
@ -273,5 +286,10 @@ void IRowInputFormat::resetParser()
block_missing_values.clear();
}
size_t IRowInputFormat::countRows(size_t)
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method countRows is not implemented for input format {}", getName());
}
}


@ -52,6 +52,13 @@ protected:
*/
virtual bool readRow(MutableColumns & columns, RowReadExtension & extra) = 0;
/// Count some rows. Called in a loop until it returns 0, and the return values are added up.
/// `max_block_size` is the recommended number of rows after which to stop, if the implementation
/// involves scanning the data. If the implementation just takes the count from metadata,
/// `max_block_size` can be ignored.
virtual size_t countRows(size_t max_block_size);
virtual bool supportsCountRows() const { return false; }
virtual void readPrefix() {} /// delimiter before begin of result
virtual void readSuffix() {} /// delimiter after end of result
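Together, `supportsCountRows()` and `countRows()` define the opt-in protocol behind `need_only_count`: `generate()` asks the format for batches of row counts until it returns 0. A hypothetical format (not part of this commit) whose rows are newline-terminated could implement the hooks like this:

```cpp
/// Hypothetical newline-delimited format, shown only to illustrate the hooks:
/// counting rows never parses fields, it just scans for row terminators.
class LineCountingFormat : public IRowInputFormat
{
public:
    LineCountingFormat(const Block & header_, ReadBuffer & in_, Params params_)
        : IRowInputFormat(header_, in_, std::move(params_)) {}

    String getName() const override { return "LineCountingFormat"; }

private:
    bool readRow(MutableColumns &, RowReadExtension &) override { return false; } /// stub for the sketch

    bool supportsCountRows() const override { return true; }

    size_t countRows(size_t max_block_size) override
    {
        size_t num_rows = 0;
        while (!in->eof() && num_rows < max_block_size)
        {
            if (*in->position() == '\n')
                ++num_rows;
            ++in->position();
        }
        return num_rows;
    }
};
```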


@ -45,6 +45,9 @@ Chunk ArrowBlockInputFormat::generate()
batch_result = stream_reader->Next();
if (batch_result.ok() && !(*batch_result))
return res;
if (need_only_count && batch_result.ok())
return getChunkForCount((*batch_result)->num_rows());
}
else
{
@ -57,6 +60,15 @@ Chunk ArrowBlockInputFormat::generate()
if (record_batch_current >= record_batch_total)
return res;
if (need_only_count)
{
auto rows = file_reader->RecordBatchCountRows(record_batch_current++);
if (!rows.ok())
throw ParsingException(
ErrorCodes::CANNOT_READ_ALL_DATA, "Error while reading batch of Arrow data: {}", rows.status().ToString());
return getChunkForCount(*rows);
}
batch_result = file_reader->ReadRecordBatch(record_batch_current);
}


@ -912,6 +912,19 @@ bool AvroRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &ext
return false;
}
size_t AvroRowInputFormat::countRows(size_t max_block_size)
{
size_t num_rows = 0;
while (file_reader_ptr->hasMore() && num_rows < max_block_size)
{
file_reader_ptr->decr();
file_reader_ptr->decoder().drain();
++num_rows;
}
return num_rows;
}
class AvroConfluentRowInputFormat::SchemaRegistry
{
public:


@ -160,6 +160,9 @@ private:
bool readRow(MutableColumns & columns, RowReadExtension & ext) override;
void readPrefix() override;
bool supportsCountRows() const override { return true; }
size_t countRows(size_t max_block_size) override;
std::unique_ptr<avro::DataFileReaderBase> file_reader_ptr;
std::unique_ptr<AvroDeserializer> deserializer_ptr;
FormatSettings format_settings;


@ -372,6 +372,9 @@ void BSONEachRowRowInputFormat::readArray(IColumn & column, const DataTypePtr &
size_t document_start = in->count();
BSONSizeT document_size;
readBinary(document_size, *in);
if (document_size < sizeof(BSONSizeT) + sizeof(BSON_DOCUMENT_END))
throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid document size: {}", document_size);
while (in->count() - document_start + sizeof(BSON_DOCUMENT_END) != document_size)
{
auto nested_bson_type = getBSONType(readBSONType(*in));
@ -399,6 +402,9 @@ void BSONEachRowRowInputFormat::readTuple(IColumn & column, const DataTypePtr &
size_t document_start = in->count();
BSONSizeT document_size;
readBinary(document_size, *in);
if (document_size < sizeof(BSONSizeT) + sizeof(BSON_DOCUMENT_END))
throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid document size: {}", document_size);
while (in->count() - document_start + sizeof(BSON_DOCUMENT_END) != document_size)
{
auto nested_bson_type = getBSONType(readBSONType(*in));
@ -457,6 +463,9 @@ void BSONEachRowRowInputFormat::readMap(IColumn & column, const DataTypePtr & da
size_t document_start = in->count();
BSONSizeT document_size;
readBinary(document_size, *in);
if (document_size < sizeof(BSONSizeT) + sizeof(BSON_DOCUMENT_END))
throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid document size: {}", document_size);
while (in->count() - document_start + sizeof(BSON_DOCUMENT_END) != document_size)
{
auto nested_bson_type = getBSONType(readBSONType(*in));
@ -696,6 +705,8 @@ static void skipBSONField(ReadBuffer & in, BSONType type)
{
BSONSizeT size;
readBinary(size, in);
if (size < sizeof(BSONSizeT) + sizeof(BSON_DOCUMENT_END))
throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid document size: {}", size);
in.ignore(size - sizeof(size));
break;
}
@ -735,6 +746,8 @@ static void skipBSONField(ReadBuffer & in, BSONType type)
{
BSONSizeT size;
readBinary(size, in);
if (size < sizeof(BSONSizeT))
throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid java code_w_scope size: {}", size);
in.ignore(size - sizeof(size));
break;
}
@ -775,6 +788,9 @@ bool BSONEachRowRowInputFormat::readRow(MutableColumns & columns, RowReadExtensi
current_document_start = in->count();
readBinary(current_document_size, *in);
if (current_document_size < sizeof(BSONSizeT) + sizeof(BSON_DOCUMENT_END))
throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid document size: {}", current_document_size);
while (in->count() - current_document_start + sizeof(BSON_DOCUMENT_END) != current_document_size)
{
auto type = getBSONType(readBSONType(*in));
@ -822,6 +838,22 @@ void BSONEachRowRowInputFormat::resetParser()
prev_positions.clear();
}
size_t BSONEachRowRowInputFormat::countRows(size_t max_block_size)
{
size_t num_rows = 0;
BSONSizeT document_size;
while (!in->eof() && num_rows < max_block_size)
{
readBinary(document_size, *in);
if (document_size < sizeof(BSONSizeT) + sizeof(BSON_DOCUMENT_END))
throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid document size: {}", document_size);
in->ignore(document_size - sizeof(BSONSizeT));
++num_rows;
}
return num_rows;
}
BSONEachRowSchemaReader::BSONEachRowSchemaReader(ReadBuffer & in_, const FormatSettings & settings_)
: IRowWithNamesSchemaReader(in_, settings_)
{


@ -64,6 +64,9 @@ private:
bool allowSyncAfterError() const override { return true; }
void syncAfterError() override;
bool supportsCountRows() const override { return true; }
size_t countRows(size_t max_block_size) override;
size_t columnIndex(const StringRef & name, size_t key_index);
using ColumnReader = std::function<void(StringRef name, BSONType type)>;
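The repeated `sizeof(BSONSizeT) + sizeof(BSON_DOCUMENT_END)` checks follow from BSON's framing: every document begins with a 4-byte little-endian size that counts the whole document, including the size field itself and the trailing zero terminator. That is what lets `countRows()` hop over documents without parsing them. A self-contained restatement (the type aliases are assumed to match the usual definitions):

```cpp
#include <cstddef>
#include <cstdint>

using BSONSizeT = uint32_t;                    /// assumed: 4-byte size prefix
constexpr uint8_t BSON_DOCUMENT_END = 0x00;    /// assumed: trailing terminator byte

/// The size prefix covers the entire document, itself and terminator included,
/// so any valid document is at least 4 + 1 = 5 bytes long.
constexpr bool isPlausibleDocumentSize(BSONSizeT size)
{
    return size >= sizeof(BSONSizeT) + sizeof(BSON_DOCUMENT_END);
}

/// After reading the prefix, skipping the document means ignoring the rest.
constexpr size_t bytesToSkipAfterPrefix(BSONSizeT size)
{
    return size - sizeof(BSONSizeT);
}
```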


@ -114,6 +114,66 @@ void CSVRowInputFormat::resetParser()
buf->reset();
}
void CSVFormatReader::skipRow()
{
bool quotes = false;
ReadBuffer & istr = *buf;
while (!istr.eof())
{
if (quotes)
{
auto * pos = find_first_symbols<'"'>(istr.position(), istr.buffer().end());
istr.position() = pos;
if (pos > istr.buffer().end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Position in buffer is out of bounds. There must be a bug.");
else if (pos == istr.buffer().end())
continue;
else if (*pos == '"')
{
++istr.position();
if (!istr.eof() && *istr.position() == '"')
++istr.position();
else
quotes = false;
}
}
else
{
auto * pos = find_first_symbols<'"', '\r', '\n'>(istr.position(), istr.buffer().end());
istr.position() = pos;
if (pos > istr.buffer().end())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Position in buffer is out of bounds. There must be a bug.");
else if (pos == istr.buffer().end())
continue;
if (*pos == '"')
{
quotes = true;
++istr.position();
continue;
}
if (*pos == '\n')
{
++istr.position();
if (!istr.eof() && *istr.position() == '\r')
++istr.position();
return;
}
else if (*pos == '\r')
{
++istr.position();
if (!istr.eof() && *istr.position() == '\n')
++istr.position();
return;
}
}
}
}
static void skipEndOfLine(ReadBuffer & in)
{
/// \n (Unix) or \r\n (DOS/Windows) or \n\r (Mac OS Classic)

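`skipRow()` has to track quote state because RFC-4180-style quoting allows the row delimiter and doubled quotes inside a quoted field; simply scanning for the next newline would split such rows. Sample input the scanner above must treat as two logical rows (illustrative data only):

```cpp
/// Illustrative data: the middle block of physical lines is one logical row,
/// because its embedded newline, comma and doubled quotes are all inside a
/// quoted field. Both '\n' and '\r\n' are accepted as row terminators.
const char * tricky_csv =
    "id,comment\n"
    "1,\"multi\nline, with \"\"quotes\"\" and commas\"\n"
    "2,plain\r\n";
```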

@ -40,6 +40,8 @@ private:
bool allowSyncAfterError() const override { return true; }
void syncAfterError() override;
bool supportsCountRows() const override { return true; }
protected:
std::shared_ptr<PeekableReadBuffer> buf;
};
@ -59,6 +61,8 @@ public:
bool readField(IColumn & column, const DataTypePtr & type, const SerializationPtr & serialization, bool is_last_file_column, const String & column_name) override;
void skipRow() override;
void skipField(size_t /*file_column*/) override { skipField(); }
void skipField();


@ -27,7 +27,7 @@ CapnProtoRowInputFormat::CapnProtoRowInputFormat(ReadBuffer & in_, Block header_
serializer = std::make_unique<CapnProtoSerializer>(header.getDataTypes(), header.getNames(), schema, format_settings.capn_proto);
}
kj::Array<capnp::word> CapnProtoRowInputFormat::readMessage()
std::pair<kj::Array<capnp::word>, size_t> CapnProtoRowInputFormat::readMessagePrefix()
{
uint32_t segment_count;
in->readStrict(reinterpret_cast<char*>(&segment_count), sizeof(uint32_t));
@ -48,6 +48,14 @@ kj::Array<capnp::word> CapnProtoRowInputFormat::readMessage()
for (size_t i = 0; i <= segment_count; ++i)
in->readStrict(prefix_chars.begin() + ((i + 1) * sizeof(uint32_t)), sizeof(uint32_t));
return {std::move(prefix), prefix_size};
}
kj::Array<capnp::word> CapnProtoRowInputFormat::readMessage()
{
auto [prefix, prefix_size] = readMessagePrefix();
auto prefix_chars = prefix.asChars();
// calculate size of message
const auto expected_words = capnp::expectedSizeInWordsFromPrefix(prefix);
const auto expected_bytes = expected_words * sizeof(capnp::word);
@ -62,6 +70,18 @@ kj::Array<capnp::word> CapnProtoRowInputFormat::readMessage()
return msg;
}
void CapnProtoRowInputFormat::skipMessage()
{
auto [prefix, prefix_size] = readMessagePrefix();
// calculate size of message
const auto expected_bytes = capnp::expectedSizeInWordsFromPrefix(prefix) * sizeof(capnp::word);
const auto data_size = expected_bytes - prefix_size;
// skip full message
in->ignore(data_size);
}
bool CapnProtoRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &)
{
if (in->eof())
@ -82,6 +102,18 @@ bool CapnProtoRowInputFormat::readRow(MutableColumns & columns, RowReadExtension
return true;
}
size_t CapnProtoRowInputFormat::countRows(size_t max_block_size)
{
size_t num_rows = 0;
while (!in->eof() && num_rows < max_block_size)
{
skipMessage();
++num_rows;
}
return num_rows;
}
CapnProtoSchemaReader::CapnProtoSchemaReader(const FormatSettings & format_settings_) : format_settings(format_settings_)
{
}
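`readMessagePrefix()` relies on Cap'n Proto's stream framing: a 4-byte value holding the segment count minus one, followed by one 4-byte segment size per segment, padded to an 8-byte word boundary. Once the prefix is read, `expectedSizeInWordsFromPrefix()` yields the full message size, so `skipMessage()` can `ignore()` the remainder without parsing it. A sketch of the prefix-size arithmetic (my reading of the framing; the wire format is defined by Cap'n Proto, not this commit):

```cpp
#include <cstddef>
#include <cstdint>

/// stored_segment_count is the on-wire value, i.e. (number of segments - 1).
constexpr size_t prefixSizeInBytes(uint32_t stored_segment_count)
{
    size_t num_size_fields = stored_segment_count + 1;  /// one 4-byte size per segment
    size_t prefix_u32s = 1 + num_size_fields;           /// count field + sizes
    if (prefix_u32s % 2 != 0)
        ++prefix_u32s;                                  /// pad to an 8-byte word boundary
    return prefix_u32s * sizeof(uint32_t);
}
```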


@ -31,7 +31,12 @@ public:
private:
bool readRow(MutableColumns & columns, RowReadExtension &) override;
bool supportsCountRows() const override { return true; }
size_t countRows(size_t max_block_size) override;
std::pair<kj::Array<capnp::word>, size_t> readMessagePrefix();
kj::Array<capnp::word> readMessage();
void skipMessage();
std::shared_ptr<CapnProtoSchemaParser> parser;
capnp::StructSchema schema;


@ -221,19 +221,34 @@ std::vector<String> CustomSeparatedFormatReader::readRowImpl()
return values;
}
void CustomSeparatedFormatReader::skipHeaderRow()
void CustomSeparatedFormatReader::skipRow()
{
skipRowStartDelimiter();
bool first = true;
do
{
if (!first)
skipFieldDelimiter();
first = false;
skipField();
/// If the number of columns in the row is unknown,
/// we should check for the end of the row after each field.
if (columns == 0 || allowVariableNumberOfColumns())
{
bool first = true;
do
{
if (!first)
skipFieldDelimiter();
first = false;
skipField();
}
while (!checkForEndOfRow());
}
else
{
for (size_t i = 0; i != columns; ++i)
{
if (i != 0)
skipFieldDelimiter();
skipField();
}
}
while (!checkForEndOfRow());
skipRowEndDelimiter();
}


@ -33,6 +33,8 @@ private:
void syncAfterError() override;
void readPrefix() override;
bool supportsCountRows() const override { return true; }
std::unique_ptr<PeekableReadBuffer> buf;
bool ignore_spaces;
};
@ -48,9 +50,9 @@ public:
void skipField(size_t /*file_column*/) override { skipField(); }
void skipField();
void skipNames() override { skipHeaderRow(); }
void skipTypes() override { skipHeaderRow(); }
void skipHeaderRow();
void skipNames() override { skipRow(); }
void skipTypes() override { skipRow(); }
void skipRow() override;
void skipPrefixBeforeHeader() override;
void skipRowStartDelimiter() override;


@ -15,11 +15,11 @@ namespace ErrorCodes
extern const int ILLEGAL_COLUMN;
}
JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_)
: JSONAsRowInputFormat(header_, std::make_unique<PeekableReadBuffer>(in_), params_) {}
JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_)
: JSONAsRowInputFormat(header_, std::make_unique<PeekableReadBuffer>(in_), params_, format_settings_) {}
JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, std::unique_ptr<PeekableReadBuffer> buf_, Params params_) :
IRowInputFormat(header_, *buf_, std::move(params_)), buf(std::move(buf_))
JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, std::unique_ptr<PeekableReadBuffer> buf_, Params params_, const FormatSettings & format_settings_) :
JSONEachRowRowInputFormat(*buf_, header_, std::move(params_), format_settings_, false), buf(std::move(buf_))
{
if (header_.columns() > 1)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
@ -29,39 +29,10 @@ JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, std::unique_pt
void JSONAsRowInputFormat::resetParser()
{
IRowInputFormat::resetParser();
JSONEachRowRowInputFormat::resetParser();
buf->reset();
}
void JSONAsRowInputFormat::readPrefix()
{
/// In this format, BOM at beginning of stream cannot be confused with value, so it is safe to skip it.
skipBOMIfExists(*buf);
skipWhitespaceIfAny(*buf);
if (!buf->eof() && *buf->position() == '[')
{
++buf->position();
data_in_square_brackets = true;
}
}
void JSONAsRowInputFormat::readSuffix()
{
skipWhitespaceIfAny(*buf);
if (data_in_square_brackets)
{
assertChar(']', *buf);
skipWhitespaceIfAny(*buf);
data_in_square_brackets = false;
}
if (!buf->eof() && *buf->position() == ';')
{
++buf->position();
skipWhitespaceIfAny(*buf);
}
assertEOF(*buf);
}
bool JSONAsRowInputFormat::readRow(MutableColumns & columns, RowReadExtension &)
{
@ -104,8 +75,8 @@ void JSONAsRowInputFormat::setReadBuffer(ReadBuffer & in_)
JSONAsStringRowInputFormat::JSONAsStringRowInputFormat(
const Block & header_, ReadBuffer & in_, Params params_)
: JSONAsRowInputFormat(header_, in_, params_)
const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_)
: JSONAsRowInputFormat(header_, in_, params_, format_settings_)
{
if (!isString(removeNullable(removeLowCardinality(header_.getByPosition(0).type))))
throw Exception(ErrorCodes::BAD_ARGUMENTS,
@ -193,8 +164,7 @@ void JSONAsStringRowInputFormat::readJSONObject(IColumn & column)
JSONAsObjectRowInputFormat::JSONAsObjectRowInputFormat(
const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings_)
: JSONAsRowInputFormat(header_, in_, params_)
, format_settings(format_settings_)
: JSONAsRowInputFormat(header_, in_, params_, format_settings_)
{
if (!isObject(header_.getByPosition(0).type))
throw Exception(ErrorCodes::BAD_ARGUMENTS,
@ -207,6 +177,13 @@ void JSONAsObjectRowInputFormat::readJSONObject(IColumn & column)
serializations[0]->deserializeTextJSON(column, *buf, format_settings);
}
Chunk JSONAsObjectRowInputFormat::getChunkForCount(size_t rows)
{
auto object_type = getPort().getHeader().getDataTypes()[0];
ColumnPtr column = object_type->createColumnConst(rows, Field(Object()));
return Chunk({std::move(column)}, rows);
}
JSONAsObjectExternalSchemaReader::JSONAsObjectExternalSchemaReader(const FormatSettings & settings)
{
if (!settings.json.allow_object_type)
@ -222,9 +199,9 @@ void registerInputFormatJSONAsString(FormatFactory & factory)
ReadBuffer & buf,
const Block & sample,
const RowInputFormatParams & params,
const FormatSettings &)
const FormatSettings & format_settings)
{
return std::make_shared<JSONAsStringRowInputFormat>(sample, buf, params);
return std::make_shared<JSONAsStringRowInputFormat>(sample, buf, params, format_settings);
});
}


@ -1,6 +1,6 @@
#pragma once
#include <Processors/Formats/IRowInputFormat.h>
#include <Processors/Formats/Impl/JSONEachRowRowInputFormat.h>
#include <Processors/Formats/ISchemaReader.h>
#include <Formats/FormatFactory.h>
#include <IO/PeekableReadBuffer.h>
@ -13,30 +13,22 @@ namespace DB
class ReadBuffer;
/// This format parses a sequence of JSON objects separated by newlines, spaces and/or comma.
class JSONAsRowInputFormat : public IRowInputFormat
class JSONAsRowInputFormat : public JSONEachRowRowInputFormat
{
public:
JSONAsRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_);
JSONAsRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings);
void resetParser() override;
void setReadBuffer(ReadBuffer & in_) override;
private:
JSONAsRowInputFormat(const Block & header_, std::unique_ptr<PeekableReadBuffer> buf_, Params params_);
JSONAsRowInputFormat(const Block & header_, std::unique_ptr<PeekableReadBuffer> buf_, Params params_, const FormatSettings & format_settings);
bool readRow(MutableColumns & columns, RowReadExtension & ext) override;
void readPrefix() override;
void readSuffix() override;
protected:
virtual void readJSONObject(IColumn & column) = 0;
std::unique_ptr<PeekableReadBuffer> buf;
private:
/// This flag is needed to know if data is in square brackets.
bool data_in_square_brackets = false;
bool allow_new_rows = true;
};
/// Each JSON object is parsed as a whole to string.
@ -44,7 +36,7 @@ private:
class JSONAsStringRowInputFormat final : public JSONAsRowInputFormat
{
public:
JSONAsStringRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_);
JSONAsStringRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_, const FormatSettings & format_settings);
String getName() const override { return "JSONAsStringRowInputFormat"; }
private:
@ -61,8 +53,8 @@ public:
String getName() const override { return "JSONAsObjectRowInputFormat"; }
private:
Chunk getChunkForCount(size_t rows) override;
void readJSONObject(IColumn & column) override;
const FormatSettings format_settings;
};
class JSONAsStringExternalSchemaReader : public IExternalSchemaReader
