Merge branch 'master' into issues_39469

Commit 38fc0cc783 by Alexey Milovidov, 2022-08-11 04:45:06 +03:00, committed by GitHub
320 changed files with 4998 additions and 3370 deletions


@@ -104,6 +104,7 @@ if [ -n "$MAKE_DEB" ]; then
fi
mv ./programs/clickhouse* /output
[ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output
mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds
find . -name '*.so' -print -exec mv '{}' /output \;
find . -name '*.so.*' -print -exec mv '{}' /output \;


@@ -69,6 +69,8 @@ function download
wget_with_retry "$BINARY_URL_TO_DOWNLOAD"
chmod +x clickhouse
# clickhouse may be compressed - run once to decompress
./clickhouse ||:
ln -s ./clickhouse ./clickhouse-server
ln -s ./clickhouse ./clickhouse-client


@@ -41,24 +41,9 @@ color_good = "#b0d050"
header_template = """
<!DOCTYPE html>
<html lang="en">
-<link rel="preload" as="font" href="https://yastatic.net/adv-www/_/sUYVCPUAQE7ExrvMS7FoISoO83s.woff2" type="font/woff2" crossorigin="anonymous"/>
<style>
-@font-face {{
-font-family:'Yandex Sans Display Web';
-src:url(https://yastatic.net/adv-www/_/H63jN0veW07XQUIA2317lr9UIm8.eot);
-src:url(https://yastatic.net/adv-www/_/H63jN0veW07XQUIA2317lr9UIm8.eot?#iefix) format('embedded-opentype'),
-url(https://yastatic.net/adv-www/_/sUYVCPUAQE7ExrvMS7FoISoO83s.woff2) format('woff2'),
-url(https://yastatic.net/adv-www/_/v2Sve_obH3rKm6rKrtSQpf-eB7U.woff) format('woff'),
-url(https://yastatic.net/adv-www/_/PzD8hWLMunow5i3RfJ6WQJAL7aI.ttf) format('truetype'),
-url(https://yastatic.net/adv-www/_/lF_KG5g4tpQNlYIgA0e77fBSZ5s.svg#YandexSansDisplayWeb-Regular) format('svg');
-font-weight:400;
-font-style:normal;
-font-stretch:normal;
-font-display: swap;
-}}
body {{
-font-family: "Yandex Sans Display Web", Arial, sans-serif;
+font-family: "DejaVu Sans", "Noto Sans", Arial, sans-serif;
background: #EEE;
}}


@@ -1,29 +0,0 @@
---
toc_priority:
toc_title:
---
# data_type_name {#data_type-name}
Description.
**Parameters** (Optional)
- `x` — Description. [Type name](relative/path/to/type/dscr.md#type).
- `y` — Description. [Type name](relative/path/to/type/dscr.md#type).
**Examples**
```sql
```
## Additional Info {#additional-info} (Optional)
The name of an additional section can be anything, for example, **Usage**.
**See Also** (Optional)
- [link](#)
[Original article](https://clickhouse.com/docs/en/data-types/<data-type-name>/) <!--hide-->


@@ -1,63 +0,0 @@
# EngineName {#enginename}
- What the Database/Table engine does.
- Relations with other engines if they exist.
## Creating a Database {#creating-a-database}
``` sql
CREATE DATABASE ...
```
or
## Creating a Table {#creating-a-table}
``` sql
CREATE TABLE ...
```
**Engine Parameters**
**Query Clauses** (for Table engines only)
## Virtual columns {#virtual-columns} (for Table engines only)
List the virtual columns with descriptions, if they exist.
## Data Types Support {#data_types-support} (for Database engines only)
| EngineName | ClickHouse |
|-----------------------|------------------------------------|
| NativeDataTypeName | [ClickHouseDataTypeName](link#) |
## Specifics and recommendations {#specifics-and-recommendations}
Algorithms
Specifics of read and write processes
Examples of tasks
Recommendations for usage
Specifics of data storage
## Usage Example {#usage-example}
The example must show usage and use cases. The following text contains the recommended parts of this section.
Input table:
``` text
```
Query:
``` sql
```
Result:
``` text
```
Follow up with any text to clarify the example.
**See Also**
- [link](#)


@@ -1,51 +0,0 @@
## functionName {#functionname-in-lower-case}
Short description.
**Syntax** (without SELECT)
``` sql
<function syntax>
```
Alias: `<alias name>`. (Optional)
More text (Optional).
**Arguments** (Optional)
- `x` — Description. Optional (only for optional arguments). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
- `y` — Description. Optional (only for optional arguments). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
**Parameters** (Optional, only for parametric aggregate functions)
- `z` — Description. Optional (only for optional parameters). Possible values: <values list>. Default value: <value>. [Type name](relative/path/to/type/dscr.md#type).
**Returned value(s)**
- Returned values list.
Type: [Type name](relative/path/to/type/dscr.md#type).
**Example**
The example must show usage and/or use cases. The following text contains recommended parts of an example.
Input table (Optional):
``` text
```
Query:
``` sql
```
Result:
``` text
```
**See Also** (Optional)
- [link](#)


@@ -1,33 +0,0 @@
## server_setting_name {#server_setting_name}
Description.
Describe what is configured in this section of settings.
Possible value: ...
Default value: ...
**Settings** (Optional)
If the section contains several settings, list them here. Specify possible values and default values:
- setting_1 — Description.
- setting_2 — Description.
**Example**
```xml
<server_setting_name>
<setting_1> ... </setting_1>
<setting_2> ... </setting_2>
</server_setting_name>
```
**Additional Info** (Optional)
The name of an additional section can be anything, for example, **Usage**.
**See Also** (Optional)
- [link](#)


@@ -1,27 +0,0 @@
## setting_name {#setting_name}
Description.
For a switch setting, use the typical phrase: “Enables or disables something …”.
Possible values:
*For a switch setting:*
- 0 — Disabled.
- 1 — Enabled.
*For another setting (typical phrases):*
- Positive integer.
- 0 — Disabled or unlimited or something else.
Default value: `value`.
**Additional Info** (Optional)
The name of an additional section can be anything, for example, **Usage**.
**See Also** (Optional)
- [link](#)


@@ -1,24 +0,0 @@
# Statement name (for example, SHOW USER) {#statement-name-in-lower-case}
Brief description of what the statement does.
**Syntax**
```sql
Syntax of the statement.
```
## Other necessary sections of the description (Optional) {#anchor}
Examples of descriptions with a complicated structure:
- https://clickhouse.com/docs/en/sql-reference/statements/grant/
- https://clickhouse.com/docs/en/sql-reference/statements/revoke/
- https://clickhouse.com/docs/en/sql-reference/statements/select/join/
**See Also** (Optional)
Links to related topics as a list.
- [link](#)


@@ -1,25 +0,0 @@
# system.table_name {#system-tables_table-name}
Description.
Columns:
- `column_name` ([data_type_name](path/to/data_type.md)) — Description.
**Example**
Query:
``` sql
SELECT * FROM system.table_name
```
Result:
``` text
Some output. It shouldn't be too long.
```
**See Also**
- [Article name](path/to/article_name.md) — Some words about referenced information.


@@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.3.1.1262-prestable FIXME as compared to v22.2.1.2139-prestable
+### ClickHouse release v22.3.1.1262-prestable (92ab33f560e) FIXME as compared to v22.2.1.2139-prestable (75366fc95e5)
#### Backward Incompatible Change
* Fixed overflow in the `toDatetime` function: when the date string is very large, it is now converted to 1970. [#32898](https://github.com/ClickHouse/ClickHouse/pull/32898) ([HaiBo Li](https://github.com/marising)).


@@ -0,0 +1,30 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.10.22-lts (25886f517d4) FIXME as compared to v22.3.9.19-lts (7976930b82e)
#### Bug Fix
* Backported in [#39761](https://github.com/ClickHouse/ClickHouse/issues/39761): Fix seeking while reading from encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39206](https://github.com/ClickHouse/ClickHouse/issues/39206): Fix reading of sparse columns from `MergeTree` tables that store their data in S3. [#37978](https://github.com/ClickHouse/ClickHouse/pull/37978) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#39381](https://github.com/ClickHouse/ClickHouse/issues/39381): Fixed error `Not found column Type in block` in selects with `PREWHERE` and read-in-order optimizations. [#39157](https://github.com/ClickHouse/ClickHouse/pull/39157) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#39588](https://github.com/ClickHouse/ClickHouse/issues/39588): Fix data race and possible heap-buffer-overflow in Avro format. Closes [#39094](https://github.com/ClickHouse/ClickHouse/issues/39094) Closes [#33652](https://github.com/ClickHouse/ClickHouse/issues/33652). [#39498](https://github.com/ClickHouse/ClickHouse/pull/39498) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#39610](https://github.com/ClickHouse/ClickHouse/issues/39610): Fix bug with maxsplit argument for splitByChar, which was not working correctly. [#39552](https://github.com/ClickHouse/ClickHouse/pull/39552) ([filimonov](https://github.com/filimonov)).
* Backported in [#39834](https://github.com/ClickHouse/ClickHouse/issues/39834): Fix `CANNOT_READ_ALL_DATA` exception with `local_filesystem_read_method=pread_threadpool`. This bug affected only Linux kernel version 5.9 and 5.10 according to [man](https://manpages.debian.org/testing/manpages-dev/preadv2.2.en.html#BUGS). [#39800](https://github.com/ClickHouse/ClickHouse/pull/39800) ([Anton Popov](https://github.com/CurtizJ)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
* Backported in [#39238](https://github.com/ClickHouse/ClickHouse/issues/39238): Fix performance regression of scalar query optimization. [#35986](https://github.com/ClickHouse/ClickHouse/pull/35986) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#39531](https://github.com/ClickHouse/ClickHouse/issues/39531): Fix some issues with async reads from remote filesystem which happened when reading low cardinality. [#36763](https://github.com/ClickHouse/ClickHouse/pull/36763) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Replace MemoryTrackerBlockerInThread with LockMemoryExceptionInThread [#39619](https://github.com/ClickHouse/ClickHouse/pull/39619) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Change mysql-odbc url [#39702](https://github.com/ClickHouse/ClickHouse/pull/39702) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@@ -0,0 +1,20 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.11.12-lts (137c5f72657) FIXME as compared to v22.3.10.22-lts (25886f517d4)
#### Build/Testing/Packaging Improvement
* Backported in [#39881](https://github.com/ClickHouse/ClickHouse/issues/39881): Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39336](https://github.com/ClickHouse/ClickHouse/issues/39336): Fix `parallel_view_processing=1` with `optimize_trivial_insert_select=1`. Fix `max_insert_threads` while pushing to views. [#38731](https://github.com/ClickHouse/ClickHouse/pull/38731) ([Azat Khuzhin](https://github.com/azat)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Backport [#39687](https://github.com/ClickHouse/ClickHouse/issues/39687) to 22.3: Fix seeking while reading from encrypted disk"'. [#40052](https://github.com/ClickHouse/ClickHouse/pull/40052) ([Alexey Milovidov](https://github.com/alexey-milovidov)).


@@ -5,5 +5,5 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.3.2.2-lts FIXME as compared to v22.3.1.1262-prestable
+### ClickHouse release v22.3.2.2-lts (89a621679c6) FIXME as compared to v22.3.1.1262-prestable (92ab33f560e)


@@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.3.3.44-lts FIXME as compared to v22.3.2.2-lts
+### ClickHouse release v22.3.3.44-lts (abb756d3ca2) FIXME as compared to v22.3.2.2-lts (89a621679c6)
#### Bug Fix
* Backported in [#35928](https://github.com/ClickHouse/ClickHouse/issues/35928): Added settings `input_format_ipv4_default_on_conversion_error`, `input_format_ipv6_default_on_conversion_error` to allow inserting invalid IP address values as defaults into tables. Closes [#35726](https://github.com/ClickHouse/ClickHouse/issues/35726). [#35733](https://github.com/ClickHouse/ClickHouse/pull/35733) ([Maksim Kita](https://github.com/kitaisreal)).
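For context, a minimal sketch of how these settings might be used; the table and data below are hypothetical, not taken from the PR:

```sql
-- Hypothetical table with an IPv4 column.
CREATE TABLE ips (addr IPv4) ENGINE = MergeTree ORDER BY addr;

-- With the setting enabled, an unparsable address is inserted as the
-- type's default value (0.0.0.0) instead of failing the whole INSERT.
SET input_format_ipv4_default_on_conversion_error = 1;

INSERT INTO ips FORMAT CSV
not-an-ip
```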


@@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.3.4.20-lts FIXME as compared to v22.3.3.44-lts
+### ClickHouse release v22.3.4.20-lts (ecbaf001f49) FIXME as compared to v22.3.3.44-lts (abb756d3ca2)
#### Build/Testing/Packaging Improvement
* Add `_le_` method for ClickHouseVersion; fix auto_version for existing tags; docker_server now supports getting the version from tags; add Python unit tests to the backport workflow. [#36028](https://github.com/ClickHouse/ClickHouse/pull/36028) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.3.5.5-lts FIXME as compared to v22.3.4.20-lts
+### ClickHouse release v22.3.5.5-lts (438b4a81f77) FIXME as compared to v22.3.4.20-lts (ecbaf001f49)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)


@@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.3.6.5-lts FIXME as compared to v22.3.5.5-lts
+### ClickHouse release v22.3.6.5-lts (3e44e824cff) FIXME as compared to v22.3.5.5-lts (438b4a81f77)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)


@@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.3.7.28-lts FIXME as compared to v22.3.6.5-lts
+### ClickHouse release v22.3.7.28-lts (420bdfa2751) FIXME as compared to v22.3.6.5-lts (3e44e824cff)
#### Bug Fix (user-visible misbehavior in official stable or prestable release)


@@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.8.39-lts (6bcf982f58b) FIXME as compared to v22.3.7.28-lts (420bdfa2751)
#### Build/Testing/Packaging Improvement
* Backported in [#38826](https://github.com/ClickHouse/ClickHouse/issues/38826): Change `all|noarch` packages to architecture-dependent; fix some documentation for it; push aarch64|arm64 packages to artifactory and release assets. Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#38453](https://github.com/ClickHouse/ClickHouse/issues/38453): Fix bug with nested short-circuit functions that led to execution of arguments even if condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#38710](https://github.com/ClickHouse/ClickHouse/issues/38710): Fix incorrect result of distributed queries with `DISTINCT` and `LIMIT`. Fixes [#38282](https://github.com/ClickHouse/ClickHouse/issues/38282). [#38371](https://github.com/ClickHouse/ClickHouse/pull/38371) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#38689](https://github.com/ClickHouse/ClickHouse/issues/38689): Now it's possible to start a clickhouse-server and attach/detach tables even for tables with the incorrect values of IPv4/IPv6 representation. Proper fix for issue [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#38590](https://github.com/ClickHouse/ClickHouse/pull/38590) ([alesapin](https://github.com/alesapin)).
* Backported in [#38776](https://github.com/ClickHouse/ClickHouse/issues/38776): `rankCorr` function will work correctly if some arguments are NaNs. This closes [#38396](https://github.com/ClickHouse/ClickHouse/issues/38396). [#38722](https://github.com/ClickHouse/ClickHouse/pull/38722) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#38780](https://github.com/ClickHouse/ClickHouse/issues/38780): Fix use-after-free for Map combinator that leads to incorrect result. [#38748](https://github.com/ClickHouse/ClickHouse/pull/38748) ([Azat Khuzhin](https://github.com/azat)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
* Backported in [#36818](https://github.com/ClickHouse/ClickHouse/issues/36818): Fix projection analysis which might lead to wrong query result when IN subquery is used. This fixes [#35336](https://github.com/ClickHouse/ClickHouse/issues/35336). [#35631](https://github.com/ClickHouse/ClickHouse/pull/35631) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#38467](https://github.com/ClickHouse/ClickHouse/issues/38467): Fix potential error with literals in `WHERE` for join queries. Closes [#36279](https://github.com/ClickHouse/ClickHouse/issues/36279). [#36542](https://github.com/ClickHouse/ClickHouse/pull/36542) ([Vladimir C](https://github.com/vdimir)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Try to fix some trash [#37303](https://github.com/ClickHouse/ClickHouse/pull/37303) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update docker-compose to try get rid of v1 errors [#38394](https://github.com/ClickHouse/ClickHouse/pull/38394) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Trying backport useful features for CI [#38510](https://github.com/ClickHouse/ClickHouse/pull/38510) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix backports diff [#38703](https://github.com/ClickHouse/ClickHouse/pull/38703) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@@ -0,0 +1,24 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.9.19-lts (7976930b82e) FIXME as compared to v22.3.8.39-lts (6bcf982f58b)
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39097](https://github.com/ClickHouse/ClickHouse/issues/39097): Any allocations inside OvercommitTracker may lead to deadlock. Logging was not very informative so it's easier just to remove logging. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794). [#39030](https://github.com/ClickHouse/ClickHouse/pull/39030) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#39080](https://github.com/ClickHouse/ClickHouse/issues/39080): Fix bug in filesystem cache that could happen in some corner case which coincided with cache capacity hitting the limit. Closes [#39066](https://github.com/ClickHouse/ClickHouse/issues/39066). [#39070](https://github.com/ClickHouse/ClickHouse/pull/39070) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#39149](https://github.com/ClickHouse/ClickHouse/issues/39149): Fix error `Block structure mismatch` which could happen for INSERT into table with attached MATERIALIZED VIEW and enabled setting `extremes = 1`. Closes [#29759](https://github.com/ClickHouse/ClickHouse/issues/29759) and [#38729](https://github.com/ClickHouse/ClickHouse/issues/38729). [#39125](https://github.com/ClickHouse/ClickHouse/pull/39125) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#39372](https://github.com/ClickHouse/ClickHouse/issues/39372): Declare RabbitMQ queue without default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)).
* Backported in [#39379](https://github.com/ClickHouse/ClickHouse/issues/39379): Fix segmentation fault in MaterializedPostgreSQL database engine, which could happen if some exception occurred at replication initialisation. Closes [#36939](https://github.com/ClickHouse/ClickHouse/issues/36939). [#39272](https://github.com/ClickHouse/ClickHouse/pull/39272) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#39351](https://github.com/ClickHouse/ClickHouse/issues/39351): Fix incorrect query for fetching PostgreSQL tables in the PostgreSQL database engine. Closes [#33502](https://github.com/ClickHouse/ClickHouse/issues/33502). [#39283](https://github.com/ClickHouse/ClickHouse/pull/39283) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Reproduce and a little bit better fix for LC dict right offset. [#36856](https://github.com/ClickHouse/ClickHouse/pull/36856) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Retry docker buildx commands with progressive sleep in between [#38898](https://github.com/ClickHouse/ClickHouse/pull/38898) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add docker_server.py running to backport and release CIs [#39011](https://github.com/ClickHouse/ClickHouse/pull/39011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.4.1.2305-prestable FIXME as compared to v22.3.1.1262-prestable
+### ClickHouse release v22.4.1.2305-prestable (77a82cc090d) FIXME as compared to v22.3.1.1262-prestable (92ab33f560e)
#### Backward Incompatible Change
* Function `yandexConsistentHash` (consistent hashing algorithm by Konstantin "kostik" Oblakov) is renamed to `kostikConsistentHash`. The old name is left as an alias for compatibility. Although this change is backward compatible, we may remove the alias in subsequent releases, that's why it's recommended to update the usages of this function in your apps. [#35553](https://github.com/ClickHouse/ClickHouse/pull/35553) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
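As a quick illustration of the rename described above (the arguments are arbitrary; the old name keeps working as an alias for now):

```sql
-- New name.
SELECT kostikConsistentHash(16045690984833335023, 2);

-- Deprecated alias, kept for compatibility but possibly removed later.
SELECT yandexConsistentHash(16045690984833335023, 2);
```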
@@ -68,7 +68,7 @@ sidebar_label: 2022
* For lts releases packages will be pushed to both lts and stable repos. [#35382](https://github.com/ClickHouse/ClickHouse/pull/35382) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Support uuid for postgres engines. Closes [#35384](https://github.com/ClickHouse/ClickHouse/issues/35384). [#35403](https://github.com/ClickHouse/ClickHouse/pull/35403) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add arguments `--user`, `--password`, `--host`, `--port` for clickhouse-diagnostics. [#35422](https://github.com/ClickHouse/ClickHouse/pull/35422) ([李扬](https://github.com/taiyang-li)).
-* Fix `INSERT INTO table FROM INFILE` not displaying a progress bar. [#35429](https://github.com/ClickHouse/ClickHouse/pull/35429) ([xiedeyantu](https://github.com/xiedeyantu)).
+* Fix `INSERT INTO table FROM INFILE` not displaying a progress bar. [#35429](https://github.com/ClickHouse/ClickHouse/pull/35429) ([chen](https://github.com/xiedeyantu)).
* Allow server to bind to low-numbered ports (e.g. 443). ClickHouse installation script will set `cap_net_bind_service` to the binary file. [#35451](https://github.com/ClickHouse/ClickHouse/pull/35451) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add settings `input_format_orc_case_insensitive_column_matching`, `input_format_arrow_case_insensitive_column_matching`, and `input_format_parquet_case_insensitive_column_matching` which allows ClickHouse to use case insensitive matching of columns while reading data from ORC, Arrow or Parquet files. [#35459](https://github.com/ClickHouse/ClickHouse/pull/35459) ([Antonio Andelic](https://github.com/antonio2368)).
* Add explicit table info to the scan node of the query plan and pipeline. [#35460](https://github.com/ClickHouse/ClickHouse/pull/35460) ([何李夫](https://github.com/helifu)).
@@ -106,7 +106,7 @@ sidebar_label: 2022
* ASTPartition::formatImpl should output ALL while executing ALTER TABLE t DETACH PARTITION ALL. [#35987](https://github.com/ClickHouse/ClickHouse/pull/35987) ([awakeljw](https://github.com/awakeljw)).
* `clickhouse-keeper` starts answering 4-letter commands before getting the quorum. [#35992](https://github.com/ClickHouse/ClickHouse/pull/35992) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix wrong assertion in replxx which happens when navigating back the history when the first line of input is a newline. Mark as improvement because it only affects debug build. This fixes [#34511](https://github.com/ClickHouse/ClickHouse/issues/34511). [#36007](https://github.com/ClickHouse/ClickHouse/pull/36007) ([Amos Bird](https://github.com/amosbird)).
-* If someone writes DEFAULT NULL in table definition, make data type Nullable. [#35887](https://github.com/ClickHouse/ClickHouse/issues/35887). [#36058](https://github.com/ClickHouse/ClickHouse/pull/36058) ([xiedeyantu](https://github.com/xiedeyantu)).
+* If someone writes DEFAULT NULL in table definition, make data type Nullable. [#35887](https://github.com/ClickHouse/ClickHouse/issues/35887). [#36058](https://github.com/ClickHouse/ClickHouse/pull/36058) ([chen](https://github.com/xiedeyantu)).
* Added `thread_id` and `query_id` columns to `system.zookeeper_log` table. [#36074](https://github.com/ClickHouse/ClickHouse/pull/36074) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Auto assign numbers for Enum elements. [#36101](https://github.com/ClickHouse/ClickHouse/pull/36101) ([awakeljw](https://github.com/awakeljw)).
* Reset thread name in `ThreadPool` to `ThreadPoolIdle` after job is done. This is to avoid displaying the old thread name for idle threads. This closes [#36114](https://github.com/ClickHouse/ClickHouse/issues/36114). [#36115](https://github.com/ClickHouse/ClickHouse/pull/36115) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@@ -331,7 +331,7 @@ sidebar_label: 2022
* ci: replace directory system log tables artifacts with tsv [#35773](https://github.com/ClickHouse/ClickHouse/pull/35773) ([Azat Khuzhin](https://github.com/azat)).
* One more try to resurrect build hash [#35774](https://github.com/ClickHouse/ClickHouse/pull/35774) ([alesapin](https://github.com/alesapin)).
* Refactoring QueryPipeline [#35789](https://github.com/ClickHouse/ClickHouse/pull/35789) ([Amos Bird](https://github.com/amosbird)).
-* Delete duplicate code [#35798](https://github.com/ClickHouse/ClickHouse/pull/35798) ([xiedeyantu](https://github.com/xiedeyantu)).
+* Delete duplicate code [#35798](https://github.com/ClickHouse/ClickHouse/pull/35798) ([chen](https://github.com/xiedeyantu)).
* remove unused variable [#35800](https://github.com/ClickHouse/ClickHouse/pull/35800) ([flynn](https://github.com/ucasfl)).
* Make `SortDescription::column_name` always non-empty [#35805](https://github.com/ClickHouse/ClickHouse/pull/35805) ([Nikita Taranov](https://github.com/nickitat)).
* Fix latest_error referenced before assignment [#35807](https://github.com/ClickHouse/ClickHouse/pull/35807) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@@ -417,7 +417,7 @@ sidebar_label: 2022
* Revert reverting "Fix crash in ParallelReadBuffer" [#36212](https://github.com/ClickHouse/ClickHouse/pull/36212) ([Kruglov Pavel](https://github.com/Avogar)).
* Make stateless tests with s3 always green [#36214](https://github.com/ClickHouse/ClickHouse/pull/36214) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add Tyler Hannan to contributors [#36216](https://github.com/ClickHouse/ClickHouse/pull/36216) ([Tyler Hannan](https://github.com/tylerhannan)).
-* Fix the repeated call of func to get the table when dropping a table [#36248](https://github.com/ClickHouse/ClickHouse/pull/36248) ([xiedeyantu](https://github.com/xiedeyantu)).
+* Fix the repeated call of func to get the table when dropping a table [#36248](https://github.com/ClickHouse/ClickHouse/pull/36248) ([chen](https://github.com/xiedeyantu)).
* Split test 01675_data_type_coroutine into 2 tests to prevent possible timeouts [#36250](https://github.com/ClickHouse/ClickHouse/pull/36250) ([Kruglov Pavel](https://github.com/Avogar)).
* Merge TRUSTED_CONTRIBUTORS in lambda and import in check [#36252](https://github.com/ClickHouse/ClickHouse/pull/36252) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix exception "File segment can be completed only by downloader" in tests [#36253](https://github.com/ClickHouse/ClickHouse/pull/36253) ([Kseniia Sumarokova](https://github.com/kssenii)).


@@ -5,5 +5,5 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.4.2.1-stable FIXME as compared to v22.4.1.2305-prestable
+### ClickHouse release v22.4.2.1-stable (b34ebdc36ae) FIXME as compared to v22.4.1.2305-prestable (77a82cc090d)


@@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.4.3.3-stable FIXME as compared to v22.4.2.1-stable
+### ClickHouse release v22.4.3.3-stable (def956d6299) FIXME as compared to v22.4.2.1-stable (b34ebdc36ae)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)


@@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.4.4.7-stable FIXME as compared to v22.4.3.3-stable
+### ClickHouse release v22.4.4.7-stable (ba44414f9b3) FIXME as compared to v22.4.3.3-stable (def956d6299)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)


@@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.4.5.9-stable FIXME as compared to v22.4.4.7-stable
+### ClickHouse release v22.4.5.9-stable (059ef6cadcd) FIXME as compared to v22.4.4.7-stable (ba44414f9b3)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)


@@ -0,0 +1,44 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.4.6.53-stable (0625731c940) FIXME as compared to v22.4.5.9-stable (059ef6cadcd)
#### New Feature
* Backported in [#38714](https://github.com/ClickHouse/ClickHouse/issues/38714): SALT is allowed for CREATE USER <user> IDENTIFIED WITH sha256_hash. [#37377](https://github.com/ClickHouse/ClickHouse/pull/37377) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
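A hedged sketch of the syntax this enables; the user name and hex strings are placeholders, the exact clause order is an assumption, and the hash is assumed to be computed over the password concatenated with the salt:

```sql
-- Placeholder credentials, not real ones.
CREATE USER alice
IDENTIFIED WITH sha256_hash
BY '7A37B85C8918EAC19A9089C0FA5A2AB4DCE3F90528DCDEEC108B23DDF3607B99'
SALT 'E70BBB902C413F56E4F6BEF9F15A3FFA4D1E33EA63B1C2C0322A9A570DBA7D2C'
```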
#### Build/Testing/Packaging Improvement
* Backported in [#38828](https://github.com/ClickHouse/ClickHouse/issues/38828): Change `all|noarch` packages to architecture-dependent; fix some documentation for it; push aarch64|arm64 packages to artifactory and release assets. Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#37717](https://github.com/ClickHouse/ClickHouse/issues/37717): Fix unexpected errors with a clash of constant strings in aggregate function, prewhere and join. Close [#36891](https://github.com/ClickHouse/ClickHouse/issues/36891). [#37336](https://github.com/ClickHouse/ClickHouse/pull/37336) ([Vladimir C](https://github.com/vdimir)).
* Backported in [#37512](https://github.com/ClickHouse/ClickHouse/issues/37512): Fix logical error in normalizeUTF8 functions. Closes [#37298](https://github.com/ClickHouse/ClickHouse/issues/37298). [#37443](https://github.com/ClickHouse/ClickHouse/pull/37443) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#37941](https://github.com/ClickHouse/ClickHouse/issues/37941): Fix setting cast_ipv4_ipv6_default_on_conversion_error for internal cast function. Closes [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#37761](https://github.com/ClickHouse/ClickHouse/pull/37761) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#38452](https://github.com/ClickHouse/ClickHouse/issues/38452): Fix bug with nested short-circuit functions that led to execution of arguments even if condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#38711](https://github.com/ClickHouse/ClickHouse/issues/38711): Fix incorrect result of distributed queries with `DISTINCT` and `LIMIT`. Fixes [#38282](https://github.com/ClickHouse/ClickHouse/issues/38282). [#38371](https://github.com/ClickHouse/ClickHouse/pull/38371) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#38593](https://github.com/ClickHouse/ClickHouse/issues/38593): Fix parts removal (will be left forever if they had not been removed on server shutdown) after incorrect server shutdown. [#38486](https://github.com/ClickHouse/ClickHouse/pull/38486) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#38596](https://github.com/ClickHouse/ClickHouse/issues/38596): Fix table creation to avoid replication issues with pre-22.4 replicas. [#38541](https://github.com/ClickHouse/ClickHouse/pull/38541) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#38686](https://github.com/ClickHouse/ClickHouse/issues/38686): Now it's possible to start a clickhouse-server and attach/detach tables even for tables with the incorrect values of IPv4/IPv6 representation. Proper fix for issue [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#38590](https://github.com/ClickHouse/ClickHouse/pull/38590) ([alesapin](https://github.com/alesapin)).
* Backported in [#38663](https://github.com/ClickHouse/ClickHouse/issues/38663): Adapt some more nodes to avoid issues with pre-22.4 replicas. [#38627](https://github.com/ClickHouse/ClickHouse/pull/38627) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#38777](https://github.com/ClickHouse/ClickHouse/issues/38777): `rankCorr` function will work correctly if some arguments are NaNs. This closes [#38396](https://github.com/ClickHouse/ClickHouse/issues/38396). [#38722](https://github.com/ClickHouse/ClickHouse/pull/38722) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#38781](https://github.com/ClickHouse/ClickHouse/issues/38781): Fix use-after-free for Map combinator that leads to incorrect result. [#38748](https://github.com/ClickHouse/ClickHouse/pull/38748) ([Azat Khuzhin](https://github.com/azat)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
* Backported in [#37456](https://github.com/ClickHouse/ClickHouse/issues/37456): Server might fail to start if it cannot resolve hostname of external ClickHouse dictionary. It's fixed. Fixes [#36451](https://github.com/ClickHouse/ClickHouse/issues/36451). [#36463](https://github.com/ClickHouse/ClickHouse/pull/36463) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#38468](https://github.com/ClickHouse/ClickHouse/issues/38468): Fix potential error with literals in `WHERE` for join queries. Closes [#36279](https://github.com/ClickHouse/ClickHouse/issues/36279). [#36542](https://github.com/ClickHouse/ClickHouse/pull/36542) ([Vladimir C](https://github.com/vdimir)).
* Backported in [#37363](https://github.com/ClickHouse/ClickHouse/issues/37363): Fixed problem with infs in `quantileTDigest`. Fixes [#32107](https://github.com/ClickHouse/ClickHouse/issues/32107). [#37021](https://github.com/ClickHouse/ClickHouse/pull/37021) ([Vladimir Chebotarev](https://github.com/excitoon)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Integration tests [#36866](https://github.com/ClickHouse/ClickHouse/pull/36866) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Try to fix some trash [#37303](https://github.com/ClickHouse/ClickHouse/pull/37303) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update protobuf files for kafka and rabbitmq [fix integration tests] [#37884](https://github.com/ClickHouse/ClickHouse/pull/37884) ([Nikita Taranov](https://github.com/nickitat)).
* Try fix `test_grpc_protocol/test.py::test_progress` [#37908](https://github.com/ClickHouse/ClickHouse/pull/37908) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update docker-compose to try get rid of v1 errors [#38394](https://github.com/ClickHouse/ClickHouse/pull/38394) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix backports diff [#38703](https://github.com/ClickHouse/ClickHouse/pull/38703) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@@ -5,10 +5,10 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.5.1.2079-stable FIXME as compared to v22.4.1.2305-prestable
+### ClickHouse release v22.5.1.2079-stable (df0cb062098) FIXME as compared to v22.4.1.2305-prestable (77a82cc090d)
#### Backward Incompatible Change
-* Updated the BoringSSL module to the official FIPS compliant version. This makes ClickHouse FIPS compliant. [#35914](https://github.com/ClickHouse/ClickHouse/pull/35914) ([Meena-Renganathan](https://github.com/Meena-Renganathan)).
+* Updated the BoringSSL module to the official FIPS compliant version. This makes ClickHouse FIPS compliant. [#35914](https://github.com/ClickHouse/ClickHouse/pull/35914) ([Deleted user](https://github.com/ghost)).
* Now, background merges, mutations and `OPTIMIZE` will not increment `SelectedRows` and `SelectedBytes` metrics. They (still) will increment `MergedRows` and `MergedUncompressedBytes` as it was before. [#37040](https://github.com/ClickHouse/ClickHouse/pull/37040) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
#### New Feature
@@ -20,7 +20,7 @@ sidebar_label: 2022
* Parse collations in CREATE TABLE and either throw an exception or ignore them. Closes [#35892](https://github.com/ClickHouse/ClickHouse/issues/35892). [#36271](https://github.com/ClickHouse/ClickHouse/pull/36271) ([yuuch](https://github.com/yuuch)).
* Add aliases JSONLines and NDJSON for JSONEachRow. Closes [#36303](https://github.com/ClickHouse/ClickHouse/issues/36303). [#36327](https://github.com/ClickHouse/ClickHouse/pull/36327) ([flynn](https://github.com/ucasfl)).
* Set parts_to_delay_insert and parts_to_throw_insert as query-level settings. If they are defined, they can override table-level settings. [#36371](https://github.com/ClickHouse/ClickHouse/pull/36371) ([Memo](https://github.com/Joeywzr)).
-* Temporary tables can now show total rows and total bytes. [#36401](https://github.com/ClickHouse/ClickHouse/issues/36401). [#36439](https://github.com/ClickHouse/ClickHouse/pull/36439) ([xiedeyantu](https://github.com/xiedeyantu)).
+* Temporary tables can now show total rows and total bytes. [#36401](https://github.com/ClickHouse/ClickHouse/issues/36401). [#36439](https://github.com/ClickHouse/ClickHouse/pull/36439) ([chen](https://github.com/xiedeyantu)).
* Added new hash function - wyHash64. [#36467](https://github.com/ClickHouse/ClickHouse/pull/36467) ([olevino](https://github.com/olevino)).
* Window function nth_value was added. [#36601](https://github.com/ClickHouse/ClickHouse/pull/36601) ([Nikolay](https://github.com/ndchikin)).
* Add MySQLDump input format. It reads all data from INSERT queries belonging to one table in dump. If there are more than one table, by default it reads data from the first one. [#36667](https://github.com/ClickHouse/ClickHouse/pull/36667) ([Kruglov Pavel](https://github.com/Avogar)).
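For the MySQLDump entry above, a minimal sketch of reading such a dump, assuming the `file` table function and a hypothetical file name:

```sql
-- Reads rows from the INSERT statements in the dump; if the dump contains
-- several tables, data from the first one is used by default.
SELECT *
FROM file('dump.sql', 'MySQLDump')
LIMIT 10;
```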
@@ -212,7 +212,7 @@ sidebar_label: 2022
* Update version after release [#36502](https://github.com/ClickHouse/ClickHouse/pull/36502) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Followup on [#36172](https://github.com/ClickHouse/ClickHouse/issues/36172) password hash salt feature [#36510](https://github.com/ClickHouse/ClickHouse/pull/36510) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Update version_date.tsv after v22.4.2.1-stable [#36533](https://github.com/ClickHouse/ClickHouse/pull/36533) ([github-actions[bot]](https://github.com/apps/github-actions)).
-* Fix log to print the 'from' path [#36535](https://github.com/ClickHouse/ClickHouse/pull/36535) ([xiedeyantu](https://github.com/xiedeyantu)).
+* Fix log to print the 'from' path [#36535](https://github.com/ClickHouse/ClickHouse/pull/36535) ([chen](https://github.com/xiedeyantu)).
* Add function bin tests for Int/UInt128/UInt256 [#36537](https://github.com/ClickHouse/ClickHouse/pull/36537) ([Memo](https://github.com/Joeywzr)).
* Fix 01161_all_system_tables [#36539](https://github.com/ClickHouse/ClickHouse/pull/36539) ([Antonio Andelic](https://github.com/antonio2368)).
* Update PULL_REQUEST_TEMPLATE.md [#36543](https://github.com/ClickHouse/ClickHouse/pull/36543) ([Ivan Blinkov](https://github.com/blinkov)).


@@ -0,0 +1,40 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.5.2.53-stable (5fd600fda9e) FIXME as compared to v22.5.1.2079-stable (df0cb062098)
#### New Feature
* Backported in [#38713](https://github.com/ClickHouse/ClickHouse/issues/38713): SALT is allowed for CREATE USER <user> IDENTIFIED WITH sha256_hash. [#37377](https://github.com/ClickHouse/ClickHouse/pull/37377) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
#### Build/Testing/Packaging Improvement
* Backported in [#38827](https://github.com/ClickHouse/ClickHouse/issues/38827): Change `all|noarch` packages to architecture-dependent; fix some documentation for it; push aarch64|arm64 packages to artifactory and release assets. Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#37716](https://github.com/ClickHouse/ClickHouse/issues/37716): Fix unexpected errors with a clash of constant strings in aggregate function, prewhere and join. Close [#36891](https://github.com/ClickHouse/ClickHouse/issues/36891). [#37336](https://github.com/ClickHouse/ClickHouse/pull/37336) ([Vladimir C](https://github.com/vdimir)).
* Backported in [#37408](https://github.com/ClickHouse/ClickHouse/issues/37408): Throw an exception when GROUPING SETS is used with ROLLUP or CUBE. [#37367](https://github.com/ClickHouse/ClickHouse/pull/37367) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#37513](https://github.com/ClickHouse/ClickHouse/issues/37513): Fix logical error in normalizeUTF8 functions. Closes [#37298](https://github.com/ClickHouse/ClickHouse/issues/37298). [#37443](https://github.com/ClickHouse/ClickHouse/pull/37443) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#37942](https://github.com/ClickHouse/ClickHouse/issues/37942): Fix setting cast_ipv4_ipv6_default_on_conversion_error for internal cast function. Closes [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#37761](https://github.com/ClickHouse/ClickHouse/pull/37761) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#38451](https://github.com/ClickHouse/ClickHouse/issues/38451): Fix bug with nested short-circuit functions that led to execution of arguments even if condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#38544](https://github.com/ClickHouse/ClickHouse/issues/38544): Do not allow recursive usage of OvercommitTracker during logging. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794) cc @tavplubix @davenger. [#38246](https://github.com/ClickHouse/ClickHouse/pull/38246) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#38708](https://github.com/ClickHouse/ClickHouse/issues/38708): Fix incorrect result of distributed queries with `DISTINCT` and `LIMIT`. Fixes [#38282](https://github.com/ClickHouse/ClickHouse/issues/38282). [#38371](https://github.com/ClickHouse/ClickHouse/pull/38371) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#38595](https://github.com/ClickHouse/ClickHouse/issues/38595): Fix parts removal (will be left forever if they had not been removed on server shutdown) after incorrect server shutdown. [#38486](https://github.com/ClickHouse/ClickHouse/pull/38486) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#38598](https://github.com/ClickHouse/ClickHouse/issues/38598): Fix table creation to avoid replication issues with pre-22.4 replicas. [#38541](https://github.com/ClickHouse/ClickHouse/pull/38541) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#38688](https://github.com/ClickHouse/ClickHouse/issues/38688): Now it's possible to start a clickhouse-server and attach/detach tables even for tables with the incorrect values of IPv4/IPv6 representation. Proper fix for issue [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#38590](https://github.com/ClickHouse/ClickHouse/pull/38590) ([alesapin](https://github.com/alesapin)).
* Backported in [#38664](https://github.com/ClickHouse/ClickHouse/issues/38664): Adapt some more nodes to avoid issues with pre-22.4 replicas. [#38627](https://github.com/ClickHouse/ClickHouse/pull/38627) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#38779](https://github.com/ClickHouse/ClickHouse/issues/38779): `rankCorr` function will work correctly if some arguments are NaNs. This closes [#38396](https://github.com/ClickHouse/ClickHouse/issues/38396). [#38722](https://github.com/ClickHouse/ClickHouse/pull/38722) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#38783](https://github.com/ClickHouse/ClickHouse/issues/38783): Fix use-after-free for Map combinator that leads to incorrect result. [#38748](https://github.com/ClickHouse/ClickHouse/pull/38748) ([Azat Khuzhin](https://github.com/azat)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Try to fix some trash [#37303](https://github.com/ClickHouse/ClickHouse/pull/37303) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update protobuf files for kafka and rabbitmq [fix integration tests] [#37884](https://github.com/ClickHouse/ClickHouse/pull/37884) ([Nikita Taranov](https://github.com/nickitat)).
* Try fix `test_grpc_protocol/test.py::test_progress` [#37908](https://github.com/ClickHouse/ClickHouse/pull/37908) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Try to fix BC check [#38178](https://github.com/ClickHouse/ClickHouse/pull/38178) ([Kruglov Pavel](https://github.com/Avogar)).
* Update docker-compose to try get rid of v1 errors [#38394](https://github.com/ClickHouse/ClickHouse/pull/38394) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix backports diff [#38703](https://github.com/ClickHouse/ClickHouse/pull/38703) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@@ -0,0 +1,24 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.5.3.21-stable (e03724efec5) FIXME as compared to v22.5.2.53-stable (5fd600fda9e)
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#38241](https://github.com/ClickHouse/ClickHouse/issues/38241): Fix possible crash in `Distributed` async insert in case of removing a replica from config. [#38029](https://github.com/ClickHouse/ClickHouse/pull/38029) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#39098](https://github.com/ClickHouse/ClickHouse/issues/39098): Any allocations inside OvercommitTracker may lead to deadlock. Logging was not very informative so it's easier just to remove logging. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794). [#39030](https://github.com/ClickHouse/ClickHouse/pull/39030) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#39078](https://github.com/ClickHouse/ClickHouse/issues/39078): Fix bug in filesystem cache that could happen in some corner case which coincided with cache capacity hitting the limit. Closes [#39066](https://github.com/ClickHouse/ClickHouse/issues/39066). [#39070](https://github.com/ClickHouse/ClickHouse/pull/39070) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#39152](https://github.com/ClickHouse/ClickHouse/issues/39152): Fix error `Block structure mismatch` which could happen for INSERT into table with attached MATERIALIZED VIEW and enabled setting `extremes = 1`. Closes [#29759](https://github.com/ClickHouse/ClickHouse/issues/29759) and [#38729](https://github.com/ClickHouse/ClickHouse/issues/38729). [#39125](https://github.com/ClickHouse/ClickHouse/pull/39125) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#39274](https://github.com/ClickHouse/ClickHouse/issues/39274): Fixed error `Not found column Type in block` in selects with `PREWHERE` and read-in-order optimizations. [#39157](https://github.com/ClickHouse/ClickHouse/pull/39157) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#39369](https://github.com/ClickHouse/ClickHouse/issues/39369): Declare RabbitMQ queue without default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)).
* Backported in [#39350](https://github.com/ClickHouse/ClickHouse/issues/39350): Fix incorrect query for fetching PostgreSQL tables in the PostgreSQL database engine. Closes [#33502](https://github.com/ClickHouse/ClickHouse/issues/33502). [#39283](https://github.com/ClickHouse/ClickHouse/pull/39283) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Retry docker buildx commands with progressive sleep in between [#38898](https://github.com/ClickHouse/ClickHouse/pull/38898) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add docker_server.py running to backport and release CIs [#39011](https://github.com/ClickHouse/ClickHouse/pull/39011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.5.4.19-stable (c893bba830e) FIXME as compared to v22.5.3.21-stable (e03724efec5)
#### Bug Fix
* Backported in [#39748](https://github.com/ClickHouse/ClickHouse/issues/39748): Fix seeking while reading from encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Build/Testing/Packaging Improvement
* Backported in [#39882](https://github.com/ClickHouse/ClickHouse/issues/39882): Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39209](https://github.com/ClickHouse/ClickHouse/issues/39209): Fix reading of sparse columns from `MergeTree` tables that store their data in S3. [#37978](https://github.com/ClickHouse/ClickHouse/pull/37978) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#39589](https://github.com/ClickHouse/ClickHouse/issues/39589): Fix data race and possible heap-buffer-overflow in Avro format. Closes [#39094](https://github.com/ClickHouse/ClickHouse/issues/39094) Closes [#33652](https://github.com/ClickHouse/ClickHouse/issues/33652). [#39498](https://github.com/ClickHouse/ClickHouse/pull/39498) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#39611](https://github.com/ClickHouse/ClickHouse/issues/39611): Fix bug with maxsplit argument for splitByChar, which was not working correctly. [#39552](https://github.com/ClickHouse/ClickHouse/pull/39552) ([filimonov](https://github.com/filimonov)).
* Backported in [#39790](https://github.com/ClickHouse/ClickHouse/issues/39790): Fix wrong index analysis with tuples and operator `IN`, which could lead to wrong query result. [#39752](https://github.com/ClickHouse/ClickHouse/pull/39752) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#39835](https://github.com/ClickHouse/ClickHouse/issues/39835): Fix `CANNOT_READ_ALL_DATA` exception with `local_filesystem_read_method=pread_threadpool`. This bug affected only Linux kernel version 5.9 and 5.10 according to [man](https://manpages.debian.org/testing/manpages-dev/preadv2.2.en.html#BUGS). [#39800](https://github.com/ClickHouse/ClickHouse/pull/39800) ([Anton Popov](https://github.com/CurtizJ)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix reading from s3 in some corner cases [#38239](https://github.com/ClickHouse/ClickHouse/pull/38239) ([Anton Popov](https://github.com/CurtizJ)).
* Replace MemoryTrackerBlockerInThread with LockMemoryExceptionInThread [#39619](https://github.com/ClickHouse/ClickHouse/pull/39619) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Change mysql-odbc url [#39702](https://github.com/ClickHouse/ClickHouse/pull/39702) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).


@@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
-### ClickHouse release v22.6.1.1985-stable FIXME as compared to v22.5.1.2079-stable
+### ClickHouse release v22.6.1.1985-stable (7000c4e0033) FIXME as compared to v22.5.1.2079-stable (df0cb062098)
#### Backward Incompatible Change
* Changes how settings using `seconds` as type are parsed to support floating point values (for example: `max_execution_time=0.5`). Infinity or NaN values will throw an exception. [#37187](https://github.com/ClickHouse/ClickHouse/pull/37187) ([Raúl Marín](https://github.com/Algunenano)).
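A small sketch of the new parsing behavior described above (the setting values are illustrative):

```sql
-- Fractional values are now accepted for settings measured in seconds.
SET max_execution_time = 0.5;

-- Infinity or NaN values now throw an exception, e.g.:
-- SET max_execution_time = inf;
```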
@@ -78,7 +78,7 @@ sidebar_label: 2022
* Allow to use String type instead of Binary in Arrow/Parquet/ORC formats. This PR introduces 3 new settings for it: `output_format_arrow_string_as_string`, `output_format_parquet_string_as_string`, `output_format_orc_string_as_string`. Default value for all settings is `false`. [#37327](https://github.com/ClickHouse/ClickHouse/pull/37327) ([Kruglov Pavel](https://github.com/Avogar)).
* Apply setting `input_format_max_rows_to_read_for_schema_inference` for all read rows in total from all files in globs. Previously setting `input_format_max_rows_to_read_for_schema_inference` was applied for each file in glob separately and in case of huge number of nulls we could read first `input_format_max_rows_to_read_for_schema_inference` rows from each file and get nothing. Also increase default value for this setting to 25000. [#37332](https://github.com/ClickHouse/ClickHouse/pull/37332) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow providing `NULL`/`NOT NULL` right after the type in a column declaration. [#37337](https://github.com/ClickHouse/ClickHouse/pull/37337) ([Igor Nikonov](https://github.com/devcrafter)).
-* Optimize getting a read buffer for file segments in PARTIALLY_DOWNLOADED state. [#37338](https://github.com/ClickHouse/ClickHouse/pull/37338) ([xiedeyantu](https://github.com/xiedeyantu)).
+* Optimize getting a read buffer for file segments in PARTIALLY_DOWNLOADED state. [#37338](https://github.com/ClickHouse/ClickHouse/pull/37338) ([chen](https://github.com/xiedeyantu)).
* Allow to prune the list of files via virtual columns such as `_file` and `_path` when reading from S3. This is for [#37174](https://github.com/ClickHouse/ClickHouse/issues/37174) , [#23494](https://github.com/ClickHouse/ClickHouse/issues/23494). [#37356](https://github.com/ClickHouse/ClickHouse/pull/37356) ([Amos Bird](https://github.com/amosbird)).
* Try to improve short circuit functions processing to fix problems with stress tests. [#37384](https://github.com/ClickHouse/ClickHouse/pull/37384) ([Kruglov Pavel](https://github.com/Avogar)).
* Closes [#37395](https://github.com/ClickHouse/ClickHouse/issues/37395). [#37415](https://github.com/ClickHouse/ClickHouse/pull/37415) ([Memo](https://github.com/Joeywzr)).
@@ -117,7 +117,7 @@ sidebar_label: 2022
* Remove recursive submodules, because we don't need them and they can be confusing. Add style check to prevent recursive submodules. This closes [#32821](https://github.com/ClickHouse/ClickHouse/issues/32821). [#37616](https://github.com/ClickHouse/ClickHouse/pull/37616) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add docs spellcheck to CI. [#37790](https://github.com/ClickHouse/ClickHouse/pull/37790) ([Vladimir C](https://github.com/vdimir)).
* Fix overly aggressive stripping which removed the embedded hash required for checking the consistency of the executable. [#37993](https://github.com/ClickHouse/ClickHouse/pull/37993) ([Robert Schulze](https://github.com/rschu1ze)).
-* Fix macOS build failure in the compressor. [#38007](https://github.com/ClickHouse/ClickHouse/pull/38007) ([xiedeyantu](https://github.com/xiedeyantu)).
+* Fix macOS build failure in the compressor. [#38007](https://github.com/ClickHouse/ClickHouse/pull/38007) ([chen](https://github.com/xiedeyantu)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
@ -166,7 +166,7 @@ sidebar_label: 2022
* Fix possible incorrect result of `SELECT ... WITH FILL` in the case when `ORDER BY` should be applied after `WITH FILL` result (e.g. for outer query). Incorrect result was caused by optimization for `ORDER BY` expressions ([#35623](https://github.com/ClickHouse/ClickHouse/issues/35623)). Closes [#37904](https://github.com/ClickHouse/ClickHouse/issues/37904). [#37959](https://github.com/ClickHouse/ClickHouse/pull/37959) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Add missing default columns when pushing to the target table in WindowView, fix [#37815](https://github.com/ClickHouse/ClickHouse/issues/37815). [#37965](https://github.com/ClickHouse/ClickHouse/pull/37965) ([vxider](https://github.com/Vxider)).
* Fixed a stack overflow issue that would cause compilation to fail. [#37996](https://github.com/ClickHouse/ClickHouse/pull/37996) ([Han Shukai](https://github.com/KinderRiven)).
* When `enable_filesystem_query_cache_limit` is enabled, throw an exception if the reserved cache size exceeds the remaining cache size. [#38004](https://github.com/ClickHouse/ClickHouse/pull/38004) ([xiedeyantu](https://github.com/xiedeyantu)).
* When `enable_filesystem_query_cache_limit` is enabled, throw an exception if the reserved cache size exceeds the remaining cache size. [#38004](https://github.com/ClickHouse/ClickHouse/pull/38004) ([chen](https://github.com/xiedeyantu)).
* Fix a bug where a query containing `ORDER BY ... WITH FILL` could generate extra rows when multiple `WITH FILL` columns were present. [#38074](https://github.com/ClickHouse/ClickHouse/pull/38074) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.6.2.12-stable FIXME as compared to v22.6.1.1985-stable
### ClickHouse release v22.6.2.12-stable (1fc97f10cbf) FIXME as compared to v22.6.1.1985-stable (7000c4e0033)
#### Improvement
* Backported in [#38484](https://github.com/ClickHouse/ClickHouse/issues/38484): Improve the stability of the Hive storage integration test. Move the data preparation step into test.py. [#38260](https://github.com/ClickHouse/ClickHouse/pull/38260) ([lgbo](https://github.com/lgbo-ustc)).

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.6.3.35-stable FIXME as compared to v22.6.2.12-stable
### ClickHouse release v22.6.3.35-stable (d5566f2f2dd) FIXME as compared to v22.6.2.12-stable (1fc97f10cbf)
#### Bug Fix
* Backported in [#38812](https://github.com/ClickHouse/ClickHouse/issues/38812): Fix crash when executing GRANT ALL ON *.* with ON CLUSTER. It was broken in https://github.com/ClickHouse/ClickHouse/pull/35767. This closes [#38618](https://github.com/ClickHouse/ClickHouse/issues/38618). [#38674](https://github.com/ClickHouse/ClickHouse/pull/38674) ([Vitaly Baranov](https://github.com/vitlibar)).

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.6.4.35-stable FIXME as compared to v22.6.3.35-stable
### ClickHouse release v22.6.4.35-stable (b9202cae6f4) FIXME as compared to v22.6.3.35-stable (d5566f2f2dd)
#### Build/Testing/Packaging Improvement
* Backported in [#38822](https://github.com/ClickHouse/ClickHouse/issues/38822): Change `all|noarch` packages to architecture-dependent ones, fix some documentation for it, and push aarch64|arm64 packages to artifactory and release assets. Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,30 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.6.5.22-stable (47ca5f14a34) FIXME as compared to v22.6.4.35-stable (b9202cae6f4)
#### Bug Fix
* Backported in [#39749](https://github.com/ClickHouse/ClickHouse/issues/39749): Fix seeking while reading from encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Build/Testing/Packaging Improvement
* Backported in [#39883](https://github.com/ClickHouse/ClickHouse/issues/39883): Former packages used to install the systemd.service file to `/etc`. The files there are marked as `conf` and are neither cleaned out nor updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39207](https://github.com/ClickHouse/ClickHouse/issues/39207): Fix reading of sparse columns from `MergeTree` tables that store their data in S3. [#37978](https://github.com/ClickHouse/ClickHouse/pull/37978) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#38932](https://github.com/ClickHouse/ClickHouse/issues/38932): Fix `parallel_view_processing=1` with `optimize_trivial_insert_select=1`. Fix `max_insert_threads` while pushing to views. [#38731](https://github.com/ClickHouse/ClickHouse/pull/38731) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#39590](https://github.com/ClickHouse/ClickHouse/issues/39590): Fix data race and possible heap-buffer-overflow in Avro format. Closes [#39094](https://github.com/ClickHouse/ClickHouse/issues/39094) Closes [#33652](https://github.com/ClickHouse/ClickHouse/issues/33652). [#39498](https://github.com/ClickHouse/ClickHouse/pull/39498) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#39612](https://github.com/ClickHouse/ClickHouse/issues/39612): Fix a bug with the `maxsplit` argument of `splitByChar`, which was not working correctly (see the sketch after this list). [#39552](https://github.com/ClickHouse/ClickHouse/pull/39552) ([filimonov](https://github.com/filimonov)).
* Backported in [#39791](https://github.com/ClickHouse/ClickHouse/issues/39791): Fix wrong index analysis with tuples and operator `IN`, which could lead to wrong query result. [#39752](https://github.com/ClickHouse/ClickHouse/pull/39752) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#39836](https://github.com/ClickHouse/ClickHouse/issues/39836): Fix `CANNOT_READ_ALL_DATA` exception with `local_filesystem_read_method=pread_threadpool`. This bug affected only Linux kernel version 5.9 and 5.10 according to [man](https://manpages.debian.org/testing/manpages-dev/preadv2.2.en.html#BUGS). [#39800](https://github.com/ClickHouse/ClickHouse/pull/39800) ([Anton Popov](https://github.com/CurtizJ)).
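A minimal sketch of the fixed `maxsplit` behavior (result shown as of this release; handling of the remainder may differ in later versions):
``` sql
SELECT splitByChar(',', '1,2,3,4,5', 2) AS parts;
-- ['1','2']: at most two substrings are returned
```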
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix reading from s3 in some corner cases [#38239](https://github.com/ClickHouse/ClickHouse/pull/38239) ([Anton Popov](https://github.com/CurtizJ)).
* Replace MemoryTrackerBlockerInThread to LockMemoryExceptionInThread [#39619](https://github.com/ClickHouse/ClickHouse/pull/39619) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Change mysql-odbc url [#39702](https://github.com/ClickHouse/ClickHouse/pull/39702) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,18 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.7.3.5-stable (e140b8b5f3a) FIXME as compared to v22.7.2.15-stable (f843089624e)
#### Build/Testing/Packaging Improvement
* Backported in [#39884](https://github.com/ClickHouse/ClickHouse/issues/39884): Former packages used to install the systemd.service file to `/etc`. The files there are marked as `conf` and are neither cleaned out nor updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#40045](https://github.com/ClickHouse/ClickHouse/issues/40045): Fix big memory usage during fetches. Fixes [#39915](https://github.com/ClickHouse/ClickHouse/issues/39915). [#39990](https://github.com/ClickHouse/ClickHouse/pull/39990) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

View File

@ -49,10 +49,17 @@ The supported formats are:
| [JSONCompactStringsEachRowWithNamesAndTypes](#jsoncompactstringseachrowwithnamesandtypes) | ✔ | ✔ |
| [TSKV](#tskv) | ✔ | ✔ |
| [Pretty](#pretty) | ✗ | ✔ |
| [PrettyNoEscapes](#prettynoescapes) | ✗ | ✔ |
| [PrettyMonoBlock](#prettymonoblock) | ✗ | ✔ |
| [PrettyNoEscapesMonoBlock](#prettynoescapesmonoblock) | ✗ | ✔ |
| [PrettyCompact](#prettycompact) | ✗ | ✔ |
| [PrettyCompactNoEscapes](#prettycompactnoescapes) | ✗ | ✔ |
| [PrettyCompactMonoBlock](#prettycompactmonoblock) | ✗ | ✔ |
| [PrettyCompactNoEscapesMonoBlock](#prettycompactnoescapesmonoblock) | ✗ | ✔ |
| [PrettySpace](#prettyspace) | ✗ | ✔ |
| [PrettySpaceNoEscapes](#prettyspacenoescapes) | ✗ | ✔ |
| [PrettySpaceMonoBlock](#prettyspacemonoblock) | ✗ | ✔ |
| [PrettySpaceNoEscapesMonoBlock](#prettyspacenoescapesmonoblock) | ✗ | ✔ |
| [Prometheus](#prometheus) | ✗ | ✔ |
| [Protobuf](#protobuf) | ✔ | ✔ |
| [ProtobufSingle](#protobufsingle) | ✔ | ✔ |
@ -1198,18 +1205,9 @@ Extremes:
└────────────┴─────────┘
```
## PrettyNoEscapes {#prettynoescapes}
Differs from Pretty in that ANSI-escape sequences aren't used. This is necessary for displaying this format in a browser, as well as for using the watch command-line utility.
Differs from [Pretty](#pretty) in that ANSI-escape sequences aren't used. This is necessary for displaying this format in a browser, as well as for using the watch command-line utility.
Example:
@ -1219,19 +1217,49 @@ $ watch -n1 "clickhouse-client --query='SELECT event, value FROM system.events F
You can use the HTTP interface for displaying in the browser.
### PrettyCompactNoEscapes {#prettycompactnoescapes}
## PrettyMonoBlock {#prettymonoblock}
The same as the previous setting.
Differs from [Pretty](#pretty) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
### PrettySpaceNoEscapes {#prettyspacenoescapes}
## PrettyNoEscapesMonoBlock {#prettynoescapesmonoblock}
The same as the previous setting.
Differs from [PrettyNoEscapes](#prettynoescapes) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
## PrettyCompact {#prettycompact}
Differs from [Pretty](#pretty) in that the grid is drawn between rows and the result is more compact.
This format is used by default in the command-line client in interactive mode.
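For instance, a trivial query rendered in this format:
``` sql
SELECT number, number * 2 AS doubled
FROM system.numbers
LIMIT 3
FORMAT PrettyCompact;
```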
## PrettyCompactNoEscapes {#prettycompactnoescapes}
Differs from [PrettyCompact](#prettycompact) in that ANSI-escape sequences aren't used. This is necessary for displaying this format in a browser, as well as for using the watch command-line utility.
## PrettyCompactMonoBlock {#prettycompactmonoblock}
Differs from [PrettyCompact](#prettycompact) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
## PrettyCompactNoEscapesMonoBlock {#prettycompactnoescapesmonoblock}
Differs from [PrettyCompactNoEscapes](#prettycompactnoescapes) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
## PrettySpace {#prettyspace}
Differs from [PrettyCompact](#prettycompact) in that whitespace (space characters) is used instead of the grid.
### Pretty formats settings {#pretty-formats-settings}
## PrettySpaceNoEscapes {#prettyspacenoescapes}
Differs from [PrettySpace](#prettyspace) in that ANSI-escape sequences aren't used. This is necessary for displaying this format in a browser, as well as for using the watch command-line utility.
## PrettySpaceMonoBlock {#prettyspacemonoblock}
Differs from [PrettySpace](#prettyspace) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
## PrettySpaceNoEscapesMonoBlock {#prettyspacenoescapesmonoblock}
Differs from [PrettySpaceNoEscapes](#prettyspacenoescapes) in that up to 10,000 rows are buffered, then output as a single table, not by blocks.
## Pretty formats settings {#pretty-formats-settings}
- [output_format_pretty_max_rows](../operations/settings/settings.md#output_format_pretty_max_rows) - rows limit for Pretty formats. Default value - `10000`.
- [output_format_pretty_max_column_pad_width](../operations/settings/settings.md#output_format_pretty_max_column_pad_width) - maximum width to pad all values in a column in Pretty formats. Default value - `250`.
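A quick sketch of adjusting these settings for a session:
``` sql
SET output_format_pretty_max_rows = 100;              -- truncate Pretty output after 100 rows
SET output_format_pretty_max_column_pad_width = 120;  -- narrower column padding
```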

View File

@ -1072,7 +1072,7 @@ For all of the formats with separator the function parses months names expressed
Query:
``` sql
SELECT parseDateTimeBestEffort('12/12/2020 12:12:57')
SELECT parseDateTimeBestEffort('23/10/2020 12:12:57')
AS parseDateTimeBestEffort;
```
@ -1080,7 +1080,7 @@ Result:
``` text
┌─parseDateTimeBestEffort─┐
│ 2020-12-12 12:12:57 │
│ 2020-10-23 12:12:57 │
└─────────────────────────┘
```
@ -1117,7 +1117,7 @@ Result:
Query:
``` sql
SELECT parseDateTimeBestEffort('2018-12-12 10:12:12')
SELECT parseDateTimeBestEffort('2018-10-23 10:12:12')
AS parseDateTimeBestEffort;
```
@ -1125,7 +1125,7 @@ Result:
``` text
┌─parseDateTimeBestEffort─┐
│ 2018-12-12 10:12:12 │
│ 2018-10-23 10:12:12 │
└─────────────────────────┘
```
@ -1152,77 +1152,7 @@ Result:
## parseDateTimeBestEffortUS
This function is similar to [parseDateTimeBestEffort](#parsedatetimebesteffort); the only difference is that it prefers the US date format (`MM/DD/YYYY`, etc.) in case of ambiguity.
**Syntax**
``` sql
parseDateTimeBestEffortUS(time_string [, time_zone])
```
**Arguments**
- `time_string` — String containing a date and time to convert. [String](../../sql-reference/data-types/string.md).
- `time_zone` — Time zone. The function parses `time_string` according to the time zone. [String](../../sql-reference/data-types/string.md).
**Supported non-standard formats**
- A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
- A string with a date and a time component: `YYYYMMDDhhmmss`, `MM/DD/YYYY hh:mm:ss`, `MM-DD-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `MM/DD/YYYY`, `MM-DD-YY` etc.
- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case, `YYYY-MM` are substituted as `2000-01`.
- A string that includes the date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
**Returned value**
- `time_string` converted to the `DateTime` data type.
**Examples**
Query:
``` sql
SELECT parseDateTimeBestEffortUS('09/12/2020 12:12:57')
AS parseDateTimeBestEffortUS;
```
Result:
``` text
┌─parseDateTimeBestEffortUS─┐
│ 2020-09-12 12:12:57 │
└───────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUS('09-12-2020 12:12:57')
AS parseDateTimeBestEffortUS;
```
Result:
``` text
┌─parseDateTimeBestEffortUS─┐
│ 2020-09-12 12:12:57 │
└───────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUS('09.12.2020 12:12:57')
AS parseDateTimeBestEffortUS;
```
Result:
``` text
┌─parseDateTimeBestEffortUS─┐
│ 2020-09-12 12:12:57 │
└───────────────────────────┘
```
This function behaves like [parseDateTimeBestEffort](#parsedatetimebesteffort) for ISO date formats, e.g. `YYYY-MM-DD hh:mm:ss`, and other date formats where the month and day components can be unambiguously extracted, e.g. `YYYYMMDDhhmmss`, `YYYY-MM`, `DD hh`, or `YYYY-MM-DD hh:mm:ss ±h:mm`. If the month and day components cannot be unambiguously extracted, e.g. `MM/DD/YYYY`, `MM-DD-YYYY`, or `MM-DD-YY`, it prefers the US date format over `DD/MM/YYYY`, `DD-MM-YYYY`, or `DD-MM-YY`. As an exception to the latter, if the month is greater than 12 and less than or equal to 31, this function falls back to the behavior of [parseDateTimeBestEffort](#parsedatetimebesteffort), e.g. `15/08/2020` is parsed as `2020-08-15`.
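Both behaviors can be seen side by side (a minimal sketch based on the rules above):
``` sql
SELECT
    parseDateTimeBestEffortUS('02/10/2021 21:12:57') AS us_preferred,  -- ambiguous: parsed as February 10
    parseDateTimeBestEffortUS('15/08/2020 10:00:00') AS fallback;      -- month > 12: parsed as August 15
```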
## parseDateTimeBestEffortOrNull
## parseDateTime32BestEffortOrNull
@ -1238,174 +1168,10 @@ Same as for [parseDateTimeBestEffort](#parsedatetimebesteffort) except that it r
Same as the [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function, except that it returns `NULL` when it encounters a date format that cannot be processed.
**Syntax**
``` sql
parseDateTimeBestEffortUSOrNull(time_string[, time_zone])
```
**Parameters**
- `time_string` — String containing a date or date with time to convert. The date must be in the US date format (`MM/DD/YYYY`, etc). [String](../../sql-reference/data-types/string.md).
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
**Supported non-standard formats**
- A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
- A string with a date and a time components: `YYYYMMDDhhmmss`, `MM/DD/YYYY hh:mm:ss`, `MM-DD-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `MM/DD/YYYY`, `MM-DD-YY`, etc.
- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case, `YYYY-MM` are substituted with `2000-01`.
- A string that includes date and time along with timezone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
**Returned values**
- `time_string` converted to the [DateTime](../../sql-reference/data-types/datetime.md) data type.
- `NULL` if the input string cannot be converted to the `DateTime` data type.
**Examples**
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrNull('02/10/2021 21:12:57') AS parseDateTimeBestEffortUSOrNull;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrNull─┐
│ 2021-02-10 21:12:57 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrNull('02-10-2021 21:12:57 GMT', 'Asia/Istanbul') AS parseDateTimeBestEffortUSOrNull;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrNull─┐
│ 2021-02-11 00:12:57 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrNull('02.10.2021') AS parseDateTimeBestEffortUSOrNull;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrNull─┐
│ 2021-02-10 00:00:00 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrNull('10.2021') AS parseDateTimeBestEffortUSOrNull;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrNull─┐
│ ᴺᵁᴸᴸ │
└─────────────────────────────────┘
```
## parseDateTimeBestEffortUSOrZero
Same as the [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function, except that it returns a zero date (`1970-01-01`) or a zero date with time (`1970-01-01 00:00:00`) when it encounters a date format that cannot be processed.
**Syntax**
``` sql
parseDateTimeBestEffortUSOrZero(time_string[, time_zone])
```
**Parameters**
- `time_string` — String containing a date or date with time to convert. The date must be in the US date format (`MM/DD/YYYY`, etc). [String](../../sql-reference/data-types/string.md).
- `time_zone` — [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the timezone. Optional. [String](../../sql-reference/data-types/string.md).
**Supported non-standard formats**
- A string containing 9..10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
- A string with a date and a time components: `YYYYMMDDhhmmss`, `MM/DD/YYYY hh:mm:ss`, `MM-DD-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string with a date, but no time component: `YYYY`, `YYYYMM`, `YYYY*MM`, `MM/DD/YYYY`, `MM-DD-YY`, etc.
- A string with a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case, `YYYY-MM` are substituted with `2000-01`.
- A string that includes date and time along with timezone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
**Returned values**
- `time_string` converted to the [DateTime](../../sql-reference/data-types/datetime.md) data type.
- Zero date or zero date with time if the input string cannot be converted to the `DateTime` data type.
**Examples**
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrZero('02/10/2021 21:12:57') AS parseDateTimeBestEffortUSOrZero;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrZero─┐
│ 2021-02-10 21:12:57 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrZero('02-10-2021 21:12:57 GMT', 'Asia/Istanbul') AS parseDateTimeBestEffortUSOrZero;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrZero─┐
│ 2021-02-11 00:12:57 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrZero('02.10.2021') AS parseDateTimeBestEffortUSOrZero;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrZero─┐
│ 2021-02-10 00:00:00 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrZero('02.2021') AS parseDateTimeBestEffortUSOrZero;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrZero─┐
│ 1970-01-01 00:00:00 │
└─────────────────────────────────┘
```
## parseDateTime64BestEffort
Same as the [parseDateTimeBestEffort](#parsedatetimebesteffort) function, but it also parses milliseconds and microseconds and returns the [DateTime](../../sql-reference/functions/type-conversion-functions.md#data_type-datetime) data type.
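For example (a minimal sketch; the default result scale of 3 is an assumption):
``` sql
SELECT parseDateTime64BestEffort('2021-01-01 01:01:00.12346') AS value;
-- 2021-01-01 01:01:00.123: the sub-second part is parsed and truncated to the default scale
```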
@ -1452,13 +1218,25 @@ Result:
└────────────────────────────┴────────────────────────────────┘
```
## parseDateTime64BestEffortUS
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort), except that this function prefers the US date format (`MM/DD/YYYY`, etc.) in case of ambiguity.
## parseDateTime64BestEffortOrNull
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort) except that it returns `NULL` when it encounters a date format that cannot be processed.
## parseDateTime64BestEffortOrZero
Same as for [parseDateTime64BestEffort](#parsedatetimebesteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed.
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed.
## parseDateTime64BestEffortUSOrNull
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort), except that this function prefers the US date format (`MM/DD/YYYY`, etc.) in case of ambiguity and returns `NULL` when it encounters a date format that cannot be processed.
## parseDateTime64BestEffortUSOrZero
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort), except that this function prefers the US date format (`MM/DD/YYYY`, etc.) in case of ambiguity and returns a zero date or a zero date with time when it encounters a date format that cannot be processed.
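A combined sketch of the `OrNull`/`OrZero` variants described above:
``` sql
SELECT
    parseDateTime64BestEffortUSOrNull('unparsable') AS null_result,  -- NULL
    parseDateTime64BestEffortUSOrZero('unparsable') AS zero_result;  -- 1970-01-01 00:00:00.000
```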
## toLowCardinality

View File

@ -9,13 +9,13 @@ Table functions are methods for constructing tables.
You can use table functions in:
- [FROM](../../sql-reference/statements/select/from.md) clause of the `SELECT` query.
The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes.
- [CREATE TABLE AS table_function()](../../sql-reference/statements/create/table.md) query.
It's one of the methods of creating a table.
- [INSERT INTO TABLE FUNCTION](../../sql-reference/statements/insert-into.md#inserting-into-table-function) query.
@ -38,4 +38,3 @@ You cant use table functions if the [allow_ddl](../../operations/settings/per
| [s3](../../sql-reference/table-functions/s3.md) | Creates a [S3](../../engines/table-engines/integrations/s3.md)-engine table. |
| [sqlite](../../sql-reference/table-functions/sqlite.md) | Creates a [sqlite](../../engines/table-engines/integrations/sqlite.md)-engine table. |
[Original article](https://clickhouse.com/docs/en/sql-reference/table-functions/) <!--hide-->

View File

@ -8,6 +8,7 @@ sidebar_label: Сборка на Mac OS X
:::info "You do not need to build ClickHouse yourself"
You can install pre-built ClickHouse as described in the [Quick Start](https://clickhouse.com/#quick-start).
Follow the installation instructions for `macOS (Intel)` or `macOS (Apple Silicon)`.
:::
The build should work on x86_64 (Intel) machines running macOS 10.15 (Catalina) and higher, with the latest Xcode native AppleClang compiler, Homebrew's vanilla Clang, or GCC compilers.
@ -90,6 +91,7 @@ $ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/
:::info "Note"
You will need the `sudo` command.
:::
1. Create the file `/Library/LaunchDaemons/limit.maxfiles.plist` and put the following into it:

View File

@ -49,6 +49,7 @@ PostgreSQL массивы конвертируются в массивы ClickHo
:::info "Attention"
Be careful: in PostgreSQL, arrays created as `type_name[]` are multidimensional and can contain a different number of dimensions in different rows of the same table. In ClickHouse, only multidimensional arrays with the same number of dimensions in all rows of the table are allowed.
:::
Multiple replicas are supported and must be listed separated by `|`. For example:

View File

@ -40,6 +40,7 @@ ORDER BY (CounterID, StartDate, intHash32(UserID));
:::info "Info"
It is not recommended to make partitioning too granular, i.e. to partition by a column with a very wide spread of values (more than about a thousand partitions). This leads to a large number of files and file descriptors accumulating in the system, which can significantly degrade the performance of `SELECT` queries.
:::
To get the set of parts and partitions of a table, you can use the [system.parts](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#system_tables-parts) system table. As an example, consider a `visits` table partitioned by month. Let's run a `SELECT` on the `system.parts` table:
@ -80,6 +81,7 @@ WHERE table = 'visits'
:::info "Info"
Part names for old-style tables are formed as follows: `20190117_20190123_2_2_0` (minimum date _ maximum date _ minimum block number _ maximum block number _ level).
:::
As the example above shows, the table contains several separate parts for the same partition (for example, parts `201901_1_3_1` and `201901_1_9_2` both belong to partition `201901`). This means these parts have not yet been merged: in the file system they are stored separately. After the automatic data merge runs (approximately 10 minutes after data insertion), the source parts are merged into one larger part and marked as inactive.

View File

@ -14,3 +14,4 @@ sidebar_position: 10
:::info "Fun fact"
Years after ClickHouse got its name, the approach of combining two words that each carry a fitting meaning was recognized as the best way to name a database in [research by Andy Pavlo](https://www.cs.cmu.edu/~pavlo/blog/2020/03/on-naming-a-database-management-system.html), Associate Professor of Databases at Carnegie Mellon University. ClickHouse shared the "best database name" award with Postgres.
:::

View File

@ -20,5 +20,6 @@ sidebar_label: Общие вопросы
:::info "If you did not find what you were looking for:"
Check out the other F.A.Q. categories or search the rest of the documentation, using the table of contents on the left.
:::
[Original article](https://clickhouse.com/docs/ru/faq/general/) <!--hide-->

View File

@ -60,3 +60,4 @@ sidebar_position: 8
- Rely on metrics collected while working with real data.
- Test performance as part of CI.
- Measure and analyze everything you possibly can.
:::

View File

@ -15,5 +15,6 @@ sidebar_label: Интеграция
:::info "If you did not find what you were looking for"
Check out the other F.A.Q. sections or search the rest of the documentation, using the table of contents on the left.
:::
[Original article](https://clickhouse.com/docs/ru/faq/integration/)

View File

@ -14,5 +14,6 @@ sidebar_label: Операции
:::info "If you did not find what you were looking for"
Check out the other F.A.Q. sections or search the rest of the documentation, using the table of contents on the left.
:::
[Original article](https://clickhouse.com/docs/en/faq/operations/)

View File

@ -293,6 +293,7 @@ $ clickhouse-client --query "SELECT COUNT(*) FROM datasets.trips_mergetree"
:::info "Info"
If you are going to run the queries below, you must prefix the table name
with the database name: `datasets.trips_mergetree`.
:::
## Results on a single server {#rezultaty-na-odnom-servere}

View File

@ -157,6 +157,7 @@ $ clickhouse-client --query "SELECT COUNT(*) FROM datasets.ontime"
:::info "Info"
If you are going to run the queries below, you must prefix the table name
with the database name: `datasets.ontime`.
:::
## Queries: {#zaprosy}

View File

@ -99,6 +99,7 @@ ClickHouse предоставляет возможность аутентифи
:::info ""
Note once again that in addition to `users.xml`, Kerberos must also be enabled in `config.xml`.
:::
### Configuring Kerberos via SQL {#enabling-kerberos-using-sql}

View File

@ -174,6 +174,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
:::info "Note"
The hard limit is configured using system tools.
:::
**Example**
@ -706,6 +707,7 @@ ClickHouse поддерживает динамическое изменение
:::info "Note"
These settings can be changed while queries are running and take effect immediately. Queries that are already running are executed unchanged.
:::
Possible values:
@ -726,6 +728,7 @@ ClickHouse поддерживает динамическое изменение
:::info "Note"
These settings can be changed while queries are running and take effect immediately. Queries that are already running are executed unchanged.
:::
Possible values:
@ -746,6 +749,7 @@ ClickHouse поддерживает динамическое изменение
:::info "Note"
These settings can be changed while queries are running and take effect immediately. Queries that are already running are executed unchanged.
:::
Possible values:

View File

@ -30,6 +30,7 @@
:::info "Note"
Even if `parts_to_do = 0`, for a replicated table a situation is possible where the mutation has not finished yet because of a long-running `INSERT` that adds data which will need to be mutated.
:::
If problems occurred while mutating some part, the following columns are filled in:

View File

@ -8,6 +8,7 @@ sidebar_position: 141
:::info "Note"
For this function to work properly, the source data must be sorted. In a [materialized view](../../../sql-reference/statements/create/view.md#materialized), it is recommended to use [deltaSumTimestamp](../../../sql-reference/aggregate-functions/reference/deltasumtimestamp.md#agg_functions-deltasumtimestamp) instead.
:::
**Syntax**

View File

@ -20,6 +20,7 @@ intervalLengthSum(start, end)
:::info "Note"
The arguments must be of the same type. Otherwise, ClickHouse throws an exception.
:::
**Returned value**

View File

@ -26,6 +26,7 @@ sidebar_label: Nullable
:::info "Info"
Using `Nullable` almost always reduces performance; keep this in mind when designing your databases.
:::
## Finding NULL {#finding-null}

View File

@ -464,6 +464,7 @@ SOURCE(ODBC(
:::info "Note"
The `table` and `query` fields cannot be used together. One of the two data sources must be specified: either `table` or `query`.
:::
ClickHouse receives quoting information from the ODBC driver and quotes settings in queries to the driver, so the table name must be specified with the same letter case it has in the database.
@ -543,6 +544,7 @@ SOURCE(MYSQL(
:::info "Note"
The `table` or `where` fields cannot be used together with the `query` field. One of the two data sources must be specified: either `table` or `query`.
There is no explicit `secure` parameter. Both cases are handled automatically: when an SSL connection is required and when it is not.
:::
MySQL can be connected to on the local host via sockets; to do this, set `host` and `socket`.
@ -633,6 +635,7 @@ SOURCE(CLICKHOUSE(
:::info "Note"
The `table` or `where` fields cannot be used together with the `query` field. One of the two data sources must be specified: either `table` or `query`.
:::
### MongoDB {#dicts-external_dicts_dict_sources-mongodb}
@ -748,6 +751,7 @@ SOURCE(REDIS(
:::info "Note"
The `column_family` or `where` fields cannot be used together with the `query` field. One of the two data sources must be specified: either `column_family` or `query`.
:::
### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql}
@ -804,3 +808,4 @@ SOURCE(POSTGRESQL(
:::info "Note"
The `table` or `where` fields cannot be used together with the `query` field. One of the two data sources must be specified: either `table` or `query`.
:::

View File

@ -86,6 +86,7 @@ geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precisi
:::info "Note"
All coordinates passed in must be of the same type: either `Float32` or `Float64`.
:::
**Returned values**
@ -96,6 +97,7 @@ geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precisi
:::info "Note"
If the returned array contains more than 10,000,000 elements, the function throws an exception.
:::
**Example**

View File

@ -994,7 +994,7 @@ parseDateTimeBestEffort(time_string[, time_zone])
Query:
``` sql
SELECT parseDateTimeBestEffort('12/12/2020 12:12:57')
SELECT parseDateTimeBestEffort('23/10/2020 12:12:57')
AS parseDateTimeBestEffort;
```
@ -1002,7 +1002,7 @@ AS parseDateTimeBestEffort;
``` text
┌─parseDateTimeBestEffort─┐
│ 2020-12-12 12:12:57 │
│ 2020-10-23 12:12:57 │
└─────────────────────────┘
```
@ -1039,7 +1039,7 @@ AS parseDateTimeBestEffort;
Query:
``` sql
SELECT parseDateTimeBestEffort('2018-12-12 10:12:12')
SELECT parseDateTimeBestEffort('2018-10-23 10:12:12')
AS parseDateTimeBestEffort;
```
@ -1047,7 +1047,7 @@ AS parseDateTimeBestEffort;
``` text
┌─parseDateTimeBestEffort─┐
│ 2018-12-12 10:12:12 │
│ 2018-10-23 10:12:12 │
└─────────────────────────┘
```
@ -1074,77 +1074,7 @@ SELECT parseDateTimeBestEffort('10 20:19');
## parseDateTimeBestEffortUS {#parsedatetimebesteffortUS}
This function is similar to [parseDateTimeBestEffort](#parsedatetimebesteffort); the difference is that it prefers the US date format (`MM/DD/YYYY`, etc.) in case of ambiguity.
**Syntax**
``` sql
parseDateTimeBestEffortUS(time_string [, time_zone])
```
**Arguments**
- `time_string` — A string containing a date and time to convert. [String](../../sql-reference/data-types/string.md).
- `time_zone` — Time zone. The function parses `time_string` according to the time zone. [String](../../sql-reference/data-types/string.md).
**Supported non-standard formats**
- A string containing a 9-10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
- A string containing a date and time: `YYYYMMDDhhmmss`, `MM/DD/YYYY hh:mm:ss`, `MM-DD-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string with a date but no time: `YYYY`, `YYYYMM`, `YYYY*MM`, `MM/DD/YYYY`, `MM-DD-YY`, etc.
- A string containing a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` is substituted with `2000-01`.
- A string containing a date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
**Returned value**
- `time_string` converted to the `DateTime` data type.
**Examples**
Query:
``` sql
SELECT parseDateTimeBestEffortUS('09/12/2020 12:12:57')
AS parseDateTimeBestEffortUS;
```
Result:
``` text
┌─parseDateTimeBestEffortUS─┐
│ 2020-09-12 12:12:57 │
└───────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUS('09-12-2020 12:12:57')
AS parseDateTimeBestEffortUS;
```
Result:
``` text
┌─parseDateTimeBestEffortUS─┐
│ 2020-09-12 12:12:57 │
└───────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUS('09.12.2020 12:12:57')
AS parseDateTimeBestEffortUS;
```
Result:
``` text
┌─parseDateTimeBestEffortUS─┐
│ 2020-09-12 12:12:57 │
└───────────────────────────┘
```
This function behaves like [parseDateTimeBestEffort](#parsedatetimebesteffort) for ISO date formats, e.g. `YYYY-MM-DD hh:mm:ss`, and other date formats where the month and day components can be unambiguously extracted, e.g. `YYYYMMDDhhmmss`, `YYYY-MM`, `DD hh`, or `YYYY-MM-DD hh:mm:ss ±h:mm`. If the month and day cannot be unambiguously extracted, e.g. `MM/DD/YYYY`, `MM-DD-YYYY`, or `MM-DD-YY`, the US date format is preferred over `DD/MM/YYYY`, `DD-MM-YYYY`, or `DD-MM-YY`. However, if the month number is greater than 12 and less than or equal to 31, this function falls back to the behavior of [parseDateTimeBestEffort](#parsedatetimebesteffort), i.e. `15/08/2020` is parsed as `2020-08-15`.
## parseDateTimeBestEffortOrNull {#parsedatetimebesteffortornull}
## parseDateTime32BestEffortOrNull {#parsedatetime32besteffortornull}
@ -1160,174 +1090,10 @@ AS parseDateTimeBestEffortUS;
Works like the [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function, but unlike it returns `NULL` when the input string cannot be converted to the [DateTime](../../sql-reference/data-types/datetime.md) data type.
**Syntax**
``` sql
parseDateTimeBestEffortUSOrNull(time_string[, time_zone])
```
**Arguments**
- `time_string` — A string containing a date or a date with time to convert. The date must be in the US format (`MM/DD/YYYY`, etc.). [String](../../sql-reference/data-types/string.md).
- `time_zone` — [Time zone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the given time zone. Optional. [String](../../sql-reference/data-types/string.md).
**Supported non-standard formats**
- A string containing a 9-10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
- A string containing a date and time: `YYYYMMDDhhmmss`, `MM/DD/YYYY hh:mm:ss`, `MM-DD-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string containing a date without time: `YYYY`, `YYYYMM`, `YYYY*MM`, `MM/DD/YYYY`, `MM-DD-YY`, etc.
- A string containing a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` is substituted with `2000-01`.
- A string containing a date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
**Returned values**
- `time_string` converted to the `DateTime` data type.
- `NULL` if the input string cannot be converted to the `DateTime` data type.
**Examples**
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrNull('02/10/2021 21:12:57') AS parseDateTimeBestEffortUSOrNull;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrNull─┐
│ 2021-02-10 21:12:57 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrNull('02-10-2021 21:12:57 GMT', 'Europe/Moscow') AS parseDateTimeBestEffortUSOrNull;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrNull─┐
│ 2021-02-11 00:12:57 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrNull('02.10.2021') AS parseDateTimeBestEffortUSOrNull;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrNull─┐
│ 2021-02-10 00:00:00 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrNull('10.2021') AS parseDateTimeBestEffortUSOrNull;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrNull─┐
│ ᴺᵁᴸᴸ │
└─────────────────────────────────┘
```
## parseDateTimeBestEffortUSOrZero {#parsedatetimebesteffortusorzero}
Works like the [parseDateTimeBestEffortUS](#parsedatetimebesteffortUS) function, but unlike it returns a zero date (`1970-01-01`) or a zero date with time (`1970-01-01 00:00:00`) when the input string cannot be converted to the [DateTime](../../sql-reference/data-types/datetime.md) data type.
**Syntax**
``` sql
parseDateTimeBestEffortUSOrZero(time_string[, time_zone])
```
**Arguments**
- `time_string` — A string containing a date or a date with time to convert. The date must be in the US format (`MM/DD/YYYY`, etc.). [String](../../sql-reference/data-types/string.md).
- `time_zone` — [Time zone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone). The function parses `time_string` according to the given time zone. Optional. [String](../../sql-reference/data-types/string.md).
**Supported non-standard formats**
- A string containing a 9-10 digit [unix timestamp](https://en.wikipedia.org/wiki/Unix_time).
- A string containing a date and time: `YYYYMMDDhhmmss`, `MM/DD/YYYY hh:mm:ss`, `MM-DD-YY hh:mm`, `YYYY-MM-DD hh:mm:ss`, etc.
- A string containing a date without time: `YYYY`, `YYYYMM`, `YYYY*MM`, `MM/DD/YYYY`, `MM-DD-YY`, etc.
- A string containing a day and time: `DD`, `DD hh`, `DD hh:mm`. In this case `YYYY-MM` is substituted with `2000-01`.
- A string containing a date and time along with time zone offset information: `YYYY-MM-DD hh:mm:ss ±h:mm`, etc. For example, `2020-12-12 17:36:00 -5:00`.
**Returned values**
- `time_string` converted to the `DateTime` data type.
- A zero date or a zero date with time if the input string cannot be converted to the `DateTime` data type.
**Examples**
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrZero('02/10/2021 21:12:57') AS parseDateTimeBestEffortUSOrZero;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrZero─┐
│ 2021-02-10 21:12:57 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrZero('02-10-2021 21:12:57 GMT', 'Europe/Moscow') AS parseDateTimeBestEffortUSOrZero;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrZero─┐
│ 2021-02-11 00:12:57 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrZero('02.10.2021') AS parseDateTimeBestEffortUSOrZero;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrZero─┐
│ 2021-02-10 00:00:00 │
└─────────────────────────────────┘
```
Query:
``` sql
SELECT parseDateTimeBestEffortUSOrZero('02.2021') AS parseDateTimeBestEffortUSOrZero;
```
Result:
``` text
┌─parseDateTimeBestEffortUSOrZero─┐
│ 1970-01-01 00:00:00 │
└─────────────────────────────────┘
```
## parseDateTime64BestEffort {#parsedatetime64besteffort}
Works like the [parseDateTimeBestEffort](#parsedatetimebesteffort) function, but also accepts milliseconds and microseconds. Returns the [DateTime](../../sql-reference/functions/type-conversion-functions.md#data_type-datetime) data type.
@ -1374,13 +1140,25 @@ FORMAT PrettyCompactMonoBlock;
└────────────────────────────┴────────────────────────────────┘
```
## parseDateTime64BestEffortOrNull {#parsedatetime32besteffortornull}
## parseDateTime64BestEffortUS {#parsedatetime64besteffortus}
Works like the [parseDateTime64BestEffort](#parsedatetime64besteffort) function, but prefers the US date format (`MM/DD/YYYY`, etc.) in case of ambiguity.
## parseDateTime64BestEffortOrNull {#parsedatetime64besteffortornull}
Works like the [parseDateTime64BestEffort](#parsedatetime64besteffort) function, but returns `NULL` when the date format cannot be processed.
## parseDateTime64BestEffortOrZero {#parsedatetime64besteffortorzero}
Works like the [parseDateTime64BestEffort](#parsedatetimebesteffort) function, but returns a zero date and time when the date format cannot be processed.
Works like the [parseDateTime64BestEffort](#parsedatetime64besteffort) function, but returns a zero date and time when the date format cannot be processed.
## parseDateTime64BestEffortUSOrNull {#parsedatetime64besteffortusornull}
Works like the [parseDateTime64BestEffort](#parsedatetime64besteffort) function, but prefers the US date format (`MM/DD/YYYY`, etc.) in case of ambiguity and returns `NULL` when the date format cannot be processed.
## parseDateTime64BestEffortUSOrZero {#parsedatetime64besteffortusorzero}
Works like the [parseDateTime64BestEffort](#parsedatetime64besteffort) function, but prefers the US date format (`MM/DD/YYYY`, etc.) in case of ambiguity and returns a zero date and time when the date format cannot be processed.
## toLowCardinality {#tolowcardinality}
@ -1431,6 +1209,7 @@ SELECT toLowCardinality('1');
:::info "Note"
The returned value is a timestamp in UTC, not in the time zone of `DateTime64`.
:::
**Syntax**

View File

@ -25,7 +25,7 @@ ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN
- [CONSTRAINT](../../../sql-reference/statements/alter/constraint.md)
- [TTL](../../../sql-reference/statements/alter/ttl.md)
:::note
The `ALTER TABLE` query is supported only for `*MergeTree` tables, as well as `Merge` and `Distributed` tables. The query has several variants.
:::
The following `ALTER` queries manage views:
@ -77,5 +77,6 @@ ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
:::info "Note"
For all `ALTER` queries, if `replication_alter_partitions_sync = 2` and some replicas are inactive for longer than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, an `UNFINISHED` exception is thrown.
:::
For `ALTER TABLE ... UPDATE|DELETE` queries, the synchronicity of execution is defined by the [mutations_sync](../../../operations/settings/settings.md#mutations_sync) setting, as the sketch below shows.
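A sketch of controlling that synchronicity (the table and condition are hypothetical):
``` sql
SET mutations_sync = 2;  -- wait until the mutation completes on all replicas
ALTER TABLE visits UPDATE counter = 0 WHERE counter < 0;
```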

View File

@ -56,6 +56,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
:::info "Attention"
ClickHouse treats the `user_name@'address'` construct as the user name as a whole. Technically you can create several users with the same `user_name` and different constructs after the `@`, but it is better not to do so.
:::
## GRANTEES Clause {#grantees}

View File

@ -88,6 +88,7 @@ LIVE-представления работают по тому же принци
- A `LIVE VIEW` is not updated if the source query uses multiple tables.
In cases when a `LIVE VIEW` is not updated automatically, use [WITH REFRESH](#live-view-with-refresh) to refresh it forcibly at the specified interval.
:::
### Monitoring changes of LIVE views {#live-view-monitoring}

View File

@ -30,6 +30,7 @@ ClickHouse не оповещает клиента. Чтобы включить
:::info "Note"
If the `replication_alter_partitions_sync` setting is `2` and some replicas are inactive for longer than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, an `UNFINISHED` exception is thrown.
:::
## BY expression {#by-expression}

View File

@ -368,6 +368,7 @@ SHOW ACCESS
:::info "Note"
The `SHOW CLUSTER name` query returns the contents of the system.clusters table for that cluster.
:::
### Syntax {#show-cluster-syntax}

View File

@ -19,3 +19,4 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
:::info "Note"
If the `replication_alter_partitions_sync` setting is `2` and some replicas are inactive for longer than the time specified by the `replication_wait_for_inactive_replica_timeout` setting, an `UNFINISHED` exception is thrown.
:::

View File

@ -104,3 +104,4 @@ WATCH lv EVENTS LIMIT 1;
:::info "Note"
When watching a [LIVE VIEW](./create/view.md#live-view) over the HTTP interface, use the [JSONEachRowWithProgress](../../interfaces/formats.md#jsoneachrowwithprogress) format. Progress messages are added to the output stream to keep the long-lived HTTP connection alive as long as the query result is changing. The interval between progress messages is controlled by the [live_view_heartbeat_interval](./create/view.md#live-view-settings) setting.
:::

View File

@ -28,6 +28,7 @@ postgresql('host:port', 'database', 'table', 'user', 'password'[, `schema`])
:::info "Note"
In `INSERT` queries, to distinguish the table function `postgresql(...)` from a table with a list of column names, you must use the keywords `FUNCTION` or `TABLE FUNCTION`. See the examples below.
:::
## Implementation details {#implementation-details}
@ -43,6 +44,7 @@ PostgreSQL массивы конвертируются в массивы ClickHo
:::info "Note"
Be careful: in PostgreSQL, arrays created as `type_name[]` are multidimensional and can contain a different number of dimensions in different rows of the same table. In ClickHouse, only multidimensional arrays with the same number of dimensions in all rows of the table are allowed.
:::
Multiple replicas are supported and must be listed separated by `|`. For example:

View File

@ -424,7 +424,7 @@ parseDateTimeBestEffort(time_string [, time_zone]);
Query:
``` sql
SELECT parseDateTimeBestEffort('12/12/2020 12:12:57')
SELECT parseDateTimeBestEffort('23/10/2020 12:12:57')
AS parseDateTimeBestEffort;
```
@ -432,7 +432,7 @@ AS parseDateTimeBestEffort;
``` text
┌─parseDateTimeBestEffort─┐
│ 2020-12-12 12:12:57 │
│ 2020-10-23 12:12:57 │
└─────────────────────────┘
```
@ -469,7 +469,7 @@ AS parseDateTimeBestEffort
Query:
``` sql
SELECT parseDateTimeBestEffort('2018-12-12 10:12:12')
SELECT parseDateTimeBestEffort('2018-10-23 10:12:12')
AS parseDateTimeBestEffort
```
@ -477,7 +477,7 @@ AS parseDateTimeBestEffort
``` text
┌─parseDateTimeBestEffort─┐
│ 2018-12-12 10:12:12 │
│ 2018-10-23 10:12:12 │
└─────────────────────────┘
```

View File

@ -61,7 +61,7 @@ public:
Benchmark(unsigned concurrency_, double delay_,
Strings && hosts_, Ports && ports_, bool round_robin_,
bool cumulative_, bool secure_, const String & default_database_,
const String & user_, const String & password_, const String & stage,
const String & user_, const String & password_, const String & quota_key_, const String & stage,
bool randomize_, size_t max_iterations_, double max_time_,
const String & json_path_, size_t confidence_,
const String & query_id_, const String & query_to_execute_, bool continue_on_errors_,
@ -90,7 +90,7 @@ public:
connections.emplace_back(std::make_unique<ConnectionPool>(
concurrency,
cur_host, cur_port,
default_database_, user_, password_,
default_database_, user_, password_, quota_key_,
/* cluster_= */ "",
/* cluster_secret_= */ "",
/* client_name_= */ "benchmark",
@ -607,6 +607,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
/// So we copy the results to std::string.
std::optional<std::string> env_user_str;
std::optional<std::string> env_password_str;
std::optional<std::string> env_quota_key_str;
const char * env_user = getenv("CLICKHOUSE_USER");
if (env_user != nullptr)
@ -616,6 +617,10 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
if (env_password != nullptr)
env_password_str.emplace(std::string(env_password));
const char * env_quota_key = getenv("CLICKHOUSE_QUOTA_KEY");
if (env_quota_key != nullptr)
env_quota_key_str.emplace(std::string(env_quota_key));
boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
desc.add_options()
("help", "produce help message")
@ -634,6 +639,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
("secure,s", "Use TLS connection")
("user,u", value<std::string>()->default_value(env_user_str.value_or("default")), "")
("password", value<std::string>()->default_value(env_password_str.value_or("")), "")
("quota_key", value<std::string>()->default_value(env_quota_key_str.value_or("")), "")
("database", value<std::string>()->default_value("default"), "")
("stacktrace", "print stack traces of exceptions")
("confidence", value<size_t>()->default_value(5), "set the level of confidence for T-test [0=80%, 1=90%, 2=95%, 3=98%, 4=99%, 5=99.5%(default)]")
@ -682,6 +688,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
options["database"].as<std::string>(),
options["user"].as<std::string>(),
options["password"].as<std::string>(),
options["quota_key"].as<std::string>(),
options["stage"].as<std::string>(),
options["randomize"].as<bool>(),
options["iterations"].as<size_t>(),

View File

@ -2010,7 +2010,7 @@ UInt64 ClusterCopier::executeQueryOnCluster(
{
connections.emplace_back(std::make_shared<Connection>(
node.host_name, node.port, node.default_database,
node.user, node.password, node.cluster, node.cluster_secret,
node.user, node.password, node.quota_key, node.cluster, node.cluster_secret,
"ClusterCopier", node.compression, node.secure
));

View File

@ -26,13 +26,13 @@ std::unique_ptr<HTTPRequestHandler> LibraryBridgeHandlerFactory::createRequestHa
if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_GET)
{
if (uri.getPath() == "/extdict_ping")
return std::make_unique<LibraryBridgeExistsHandler>(keep_alive_timeout, getContext());
return std::make_unique<ExternalDictionaryLibraryBridgeExistsHandler>(keep_alive_timeout, getContext());
}
if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST)
{
if (uri.getPath() == "/extdict_request")
return std::make_unique<LibraryBridgeRequestHandler>(keep_alive_timeout, getContext());
return std::make_unique<ExternalDictionaryLibraryBridgeRequestHandler>(keep_alive_timeout, getContext());
}
return nullptr;

View File

@ -5,6 +5,7 @@
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <Common/BridgeProtocolVersion.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Poco/Net/HTMLForm.h>
@ -78,18 +79,39 @@ static void writeData(Block data, OutputFormatPtr format)
executor.execute();
}
LibraryBridgeRequestHandler::LibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_)
ExternalDictionaryLibraryBridgeRequestHandler::ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_)
: WithContext(context_)
, log(&Poco::Logger::get("LibraryBridgeRequestHandler"))
, log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeRequestHandler"))
, keep_alive_timeout(keep_alive_timeout_)
{
}
void LibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
{
LOG_TRACE(log, "Request URI: {}", request.getURI());
HTMLForm params(getContext()->getSettingsRef(), request);
size_t version;
if (!params.has("version"))
version = 0; /// assumed version for too old servers which do not send a version
else
{
String version_str = params.get("version");
if (!tryParse(version, version_str))
{
processError(response, "Unable to parse 'version' string in request URL: '" + version_str + "' Check if the server and library-bridge have the same version.");
return;
}
}
if (version != LIBRARY_BRIDGE_PROTOCOL_VERSION)
{
/// backwards compatibility is considered unnecessary for now, just let the user know that the server and the bridge must be upgraded together
processError(response, "Server and library-bridge have different versions: '" + std::to_string(version) + "' vs. '" + std::to_string(LIBRARY_BRIDGE_PROTOCOL_VERSION) + "'");
return;
}
if (!params.has("method"))
{
processError(response, "No 'method' in request URL");
@ -340,14 +362,14 @@ void LibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTT
}
}
LibraryBridgeExistsHandler::LibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_)
ExternalDictionaryLibraryBridgeExistsHandler::ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_)
: WithContext(context_)
, keep_alive_timeout(keep_alive_timeout_)
, log(&Poco::Logger::get("LibraryBridgeExistsHandler"))
, log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeExistsHandler"))
{
}
void LibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
void ExternalDictionaryLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
{
try
{
@ -361,6 +383,7 @@ void LibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTP
}
std::string dictionary_id = params.get("dictionary_id");
auto library_handler = ExternalDictionaryLibraryHandlerFactory::instance().get(dictionary_id);
String res = library_handler ? "1" : "0";
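The version handshake above is repeated verbatim in every bridge handler touched by this diff. A minimal sketch of a shared helper that could factor it out (hypothetical, not part of this commit; reuses HTMLForm and tryParse as above and requires <optional>):
static std::optional<size_t> parseBridgeProtocolVersion(const HTMLForm & params)
{
    /// Servers that are too old to send a version are assumed to speak version 0.
    if (!params.has("version"))
        return 0;
    size_t version;
    if (!tryParse(version, params.get("version")))
        return std::nullopt; /// caller reports the parse error
    return version;
}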

View File

@ -9,7 +9,6 @@
namespace DB
{
/// Handler for requests to Library Dictionary Source, returns response in RowBinary format.
/// When a library dictionary source is created, it sends 'extDict_libNew' request to library bridge (which is started on first
request to it, if it was not yet started). On this request a new SharedLibraryHandler is added to a
@ -17,10 +16,10 @@ namespace DB
/// names of dictionary attributes, sample block to parse block of null values, block of null values. Everything is
/// passed in binary format and is urlencoded. When dictionary is cloned, a new handler is created.
/// Each handler is unique to dictionary.
class LibraryBridgeRequestHandler : public HTTPRequestHandler, WithContext
class ExternalDictionaryLibraryBridgeRequestHandler : public HTTPRequestHandler, WithContext
{
public:
LibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_);
ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_);
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
@ -32,10 +31,10 @@ private:
};
class LibraryBridgeExistsHandler : public HTTPRequestHandler, WithContext
class ExternalDictionaryLibraryBridgeExistsHandler : public HTTPRequestHandler, WithContext
{
public:
LibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_);
ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_);
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;

View File

@ -5,6 +5,7 @@
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeNullable.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Parsers/ParserQueryWithOutput.h>
#include <Parsers/parseQuery.h>
@ -14,6 +15,7 @@
#include <Poco/NumberParser.h>
#include <Common/logger_useful.h>
#include <base/scope_guard.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/quoteString.h>
#include "getIdentifierQuote.h"
#include "validateODBCConnectionString.h"
@ -80,6 +82,27 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ
LOG_WARNING(log, fmt::runtime(message));
};
size_t version;
if (!params.has("version"))
version = 0; /// assumed version for servers that are too old to send a version
else
{
String version_str = params.get("version");
if (!tryParse(version, version_str))
{
process_error("Unable to parse 'version' string in request URL: '" + version_str + "' Check if the server and library-bridge have the same version.");
return;
}
}
if (version != XDBC_BRIDGE_PROTOCOL_VERSION)
{
/// backwards compatibility is considered unnecessary for now; just let the user know that the server and the bridge must be upgraded together
process_error("Server and odbc-bridge have different versions: '" + std::to_string(version) + "' vs. '" + std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION) + "'");
return;
}
if (!params.has("table"))
{
process_error("No 'table' param in request URL");

View File

@ -5,11 +5,13 @@
#include <DataTypes/DataTypeFactory.h>
#include <Server/HTTP/HTMLForm.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Parsers/ParserQueryWithOutput.h>
#include <Parsers/parseQuery.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/logger_useful.h>
#include <base/scope_guard.h>
#include "getIdentifierQuote.h"
@ -32,6 +34,27 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ
LOG_WARNING(log, fmt::runtime(message));
};
size_t version;
if (!params.has("version"))
version = 0; /// assumed version for servers that are too old to send a version
else
{
String version_str = params.get("version");
if (!tryParse(version, version_str))
{
process_error("Unable to parse 'version' string in request URL: '" + version_str + "' Check if the server and library-bridge have the same version.");
return;
}
}
if (version != XDBC_BRIDGE_PROTOCOL_VERSION)
{
/// backwards compatibility is considered unnecessary for now; just let the user know that the server and the bridge must be upgraded together
process_error("Server and odbc-bridge have different versions: '" + std::to_string(version) + "' vs. '" + std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION) + "'");
return;
}
if (!params.has("connection_string"))
{
process_error("No 'connection_string' in request URL");

View File

@ -17,6 +17,7 @@
#include <QueryPipeline/QueryPipeline.h>
#include <Processors/Executors/CompletedPipelineExecutor.h>
#include <Processors/Formats/IInputFormat.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/logger_useful.h>
#include <Server/HTTP/HTMLForm.h>
#include <Common/config.h>
@ -55,6 +56,28 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
HTMLForm params(getContext()->getSettingsRef(), request);
LOG_TRACE(log, "Request URI: {}", request.getURI());
size_t version;
if (!params.has("version"))
version = 0; /// assumed version for servers that are too old to send a version
else
{
String version_str = params.get("version");
if (!tryParse(version, version_str))
{
processError(response, "Unable to parse 'version' string in request URL: '" + version_str + "' Check if the server and library-bridge have the same version.");
return;
}
}
if (version != XDBC_BRIDGE_PROTOCOL_VERSION)
{
/// backwards compatibility is considered unnecessary for now; just let the user know that the server and the bridge must be upgraded together
processError(response, "Server and odbc-bridge have different versions: '" + std::to_string(version) + "' vs. '" + std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION) + "'");
return;
}
if (mode == "read")
params.read(request.getStream());

View File

@ -4,9 +4,11 @@
#include <Server/HTTP/HTMLForm.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/logger_useful.h>
#include "validateODBCConnectionString.h"
#include "ODBCPooledConnectionFactory.h"
@ -40,6 +42,28 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer
LOG_WARNING(log, fmt::runtime(message));
};
size_t version;
if (!params.has("version"))
version = 0; /// assumed version for servers that are too old to send a version
else
{
String version_str = params.get("version");
if (!tryParse(version, version_str))
{
process_error("Unable to parse 'version' string in request URL: '" + version_str + "' Check if the server and library-bridge have the same version.");
return;
}
}
if (version != XDBC_BRIDGE_PROTOCOL_VERSION)
{
/// backwards compatibility is considered unnecessary for now; just let the user know that the server and the bridge must be upgraded together
process_error("Server and odbc-bridge have different versions: '" + std::to_string(version) + "' vs. '" + std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION) + "'");
return;
}
if (!params.has("connection_string"))
{
process_error("No 'connection_string' in request URL");

View File

@ -60,6 +60,14 @@
</logger>
</levels>
-->
<!-- Structured log formatting:
You can specify a log format (for now, JSON only). In that case, the console log will be printed
in the specified format, for example:
{"date_time":"1650918987.180175","thread_name":"#1","thread_id":"254545","level":"Trace","query_id":"","logger_name":"BaseDaemon","message":"Received signal 2","source_file":"../base/daemon/BaseDaemon.cpp; virtual void SignalListener::run()","source_line":"192"}
To enable JSON logging support, uncomment the <formatting> tag below.
-->
<!-- <formatting>json</formatting> -->
</logger>
<!-- Add headers to response in options request. OPTIONS method is used in CORS preflight requests. -->

View File

@ -78,6 +78,7 @@
/* For iPad */
margin: 0;
border-radius: 0;
tab-size: 4;
}
html, body
@ -609,11 +610,13 @@
}
}
let query_area = document.getElementById('query');
window.onpopstate = function(event) {
if (!event.state) {
return;
}
document.getElementById('query').value = event.state.query;
query_area.value = event.state.query;
if (!event.state.response) {
clear();
return;
@ -622,13 +625,13 @@
};
if (window.location.hash) {
document.getElementById('query').value = window.atob(window.location.hash.substr(1));
query_area.value = window.atob(window.location.hash.substr(1));
}
function post()
{
++request_num;
let query = document.getElementById('query').value;
let query = query_area.value;
postImpl(request_num, query);
}
@ -645,6 +648,32 @@
}
}
/// Pressing Tab in textarea will increase indentation.
/// But for accessibility reasons, we will fall back to tab navigation if the user already used Tab for that.
let user_prefers_tab_navigation = false;
[...document.querySelectorAll('input')].map(elem => {
elem.onkeydown = (e) => {
if (e.key == 'Tab') { user_prefers_tab_navigation = true; }
};
});
query_area.onkeydown = (e) => {
if (e.key == 'Tab' && !e.shiftKey && !user_prefers_tab_navigation) {
let elem = e.target;
let selection_start = elem.selectionStart;
let selection_end = elem.selectionEnd;
elem.value = elem.value.substring(0, elem.selectionStart) + '    ' + elem.value.substring(elem.selectionEnd);
elem.selectionStart = selection_start + 4;
elem.selectionEnd = selection_start + 4;
e.preventDefault();
return false;
}
};
function clearElement(id)
{
let elem = document.getElementById(id);
@ -701,7 +730,7 @@
stats.innerText = `Elapsed: ${seconds} sec, read ${formatted_rows} rows, ${formatted_bytes}.`;
/// We can also render graphs if user performed EXPLAIN PIPELINE graph=1 or EXPLAIN AST graph = 1
if (response.data.length > 3 && document.getElementById('query').value.match(/^\s*EXPLAIN/i) && typeof(response.data[0][0]) === "string" && response.data[0][0].startsWith("digraph")) {
if (response.data.length > 3 && query_area.value.match(/^\s*EXPLAIN/i) && typeof(response.data[0][0]) === "string" && response.data[0][0].startsWith("digraph")) {
renderGraph(response);
} else {
renderTable(response);

View File

@ -31,16 +31,10 @@ ExternalDictionaryLibraryBridgeHelper::ExternalDictionaryLibraryBridgeHelper(
const Block & sample_block_,
const Field & dictionary_id_,
const LibraryInitData & library_data_)
: IBridgeHelper(context_->getGlobalContext())
, log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeHelper"))
: LibraryBridgeHelper(context_->getGlobalContext())
, sample_block(sample_block_)
, config(context_->getConfigRef())
, http_timeout(context_->getGlobalContext()->getSettingsRef().http_receive_timeout.value)
, library_data(library_data_)
, dictionary_id(dictionary_id_)
, bridge_host(config.getString("library_bridge.host", DEFAULT_HOST))
, bridge_port(config.getUInt("library_bridge.port", DEFAULT_PORT))
, http_timeouts(ConnectionTimeouts::getHTTPTimeouts(context_))
{
}
@ -65,28 +59,13 @@ Poco::URI ExternalDictionaryLibraryBridgeHelper::getMainURI() const
Poco::URI ExternalDictionaryLibraryBridgeHelper::createRequestURI(const String & method) const
{
auto uri = getMainURI();
uri.addQueryParameter("version", std::to_string(LIBRARY_BRIDGE_PROTOCOL_VERSION));
uri.addQueryParameter("dictionary_id", toString(dictionary_id));
uri.addQueryParameter("method", method);
return uri;
}
Poco::URI ExternalDictionaryLibraryBridgeHelper::createBaseURI() const
{
Poco::URI uri;
uri.setHost(bridge_host);
uri.setPort(bridge_port);
uri.setScheme("http");
return uri;
}
void ExternalDictionaryLibraryBridgeHelper::startBridge(std::unique_ptr<ShellCommand> cmd) const
{
getContext()->addBridgeCommand(std::move(cmd));
}
bool ExternalDictionaryLibraryBridgeHelper::bridgeHandShake()
{
String result;
@ -225,6 +204,14 @@ QueryPipeline ExternalDictionaryLibraryBridgeHelper::loadAll()
}
static String getDictIdsString(const std::vector<UInt64> & ids)
{
WriteBufferFromOwnString out;
writeVectorBinary(ids, out);
return out.str();
}
QueryPipeline ExternalDictionaryLibraryBridgeHelper::loadIds(const std::vector<uint64_t> & ids)
{
startBridgeSync();
@ -283,13 +270,4 @@ QueryPipeline ExternalDictionaryLibraryBridgeHelper::loadBase(const Poco::URI &
return QueryPipeline(std::move(source));
}
String ExternalDictionaryLibraryBridgeHelper::getDictIdsString(const std::vector<UInt64> & ids)
{
WriteBufferFromOwnString out;
writeVectorBinary(ids, out);
return out.str();
}
}

View File

@ -2,10 +2,9 @@
#include <Interpreters/Context.h>
#include <IO/ReadWriteBufferFromHTTP.h>
#include <Poco/Logger.h>
#include <Poco/Net/HTTPRequest.h>
#include <Poco/URI.h>
#include <BridgeHelper/IBridgeHelper.h>
#include <BridgeHelper/LibraryBridgeHelper.h>
#include <QueryPipeline/QueryPipeline.h>
@ -14,7 +13,8 @@ namespace DB
class Pipe;
class ExternalDictionaryLibraryBridgeHelper : public IBridgeHelper
// Class to access the external dictionary part of the clickhouse-library-bridge.
class ExternalDictionaryLibraryBridgeHelper : public LibraryBridgeHelper
{
public:
@ -25,7 +25,6 @@ public:
String dict_attributes;
};
static constexpr inline size_t DEFAULT_PORT = 9012;
static constexpr inline auto PING_HANDLER = "/extdict_ping";
static constexpr inline auto MAIN_HANDLER = "/extdict_request";
@ -56,26 +55,6 @@ protected:
bool bridgeHandShake() override;
void startBridge(std::unique_ptr<ShellCommand> cmd) const override;
String serviceAlias() const override { return "clickhouse-library-bridge"; }
String serviceFileName() const override { return serviceAlias(); }
size_t getDefaultPort() const override { return DEFAULT_PORT; }
bool startBridgeManually() const override { return false; }
String configPrefix() const override { return "library_bridge"; }
const Poco::Util::AbstractConfiguration & getConfig() const override { return config; }
Poco::Logger * getLog() const override { return log; }
Poco::Timespan getHTTPTimeout() const override { return http_timeout; }
Poco::URI createBaseURI() const override;
QueryPipeline loadBase(const Poco::URI & uri, ReadWriteBufferFromHTTP::OutStreamCallback out_stream_callback = {});
bool executeRequest(const Poco::URI & uri, ReadWriteBufferFromHTTP::OutStreamCallback out_stream_callback = {}) const;
@ -94,20 +73,10 @@ private:
Poco::URI createRequestURI(const String & method) const;
static String getDictIdsString(const std::vector<UInt64> & ids);
Poco::Logger * log;
const Block sample_block;
const Poco::Util::AbstractConfiguration & config;
const Poco::Timespan http_timeout;
LibraryInitData library_data;
Field dictionary_id;
std::string bridge_host;
size_t bridge_port;
bool library_initialized = false;
ConnectionTimeouts http_timeouts;
Poco::Net::HTTPBasicCredentials credentials{};
};
}

View File

@ -0,0 +1,34 @@
#include "LibraryBridgeHelper.h"
namespace DB
{
LibraryBridgeHelper::LibraryBridgeHelper(ContextPtr context_)
: IBridgeHelper(context_)
, config(context_->getConfigRef())
, log(&Poco::Logger::get("LibraryBridgeHelper"))
, http_timeout(context_->getGlobalContext()->getSettingsRef().http_receive_timeout.value)
, bridge_host(config.getString("library_bridge.host", DEFAULT_HOST))
, bridge_port(config.getUInt("library_bridge.port", DEFAULT_PORT))
, http_timeouts(ConnectionTimeouts::getHTTPTimeouts(context_))
{
}
void LibraryBridgeHelper::startBridge(std::unique_ptr<ShellCommand> cmd) const
{
getContext()->addBridgeCommand(std::move(cmd));
}
Poco::URI LibraryBridgeHelper::createBaseURI() const
{
Poco::URI uri;
uri.setHost(bridge_host);
uri.setPort(bridge_port);
uri.setScheme("http");
return uri;
}
}

View File

@ -0,0 +1,51 @@
#pragma once
#include <Interpreters/Context.h>
#include <IO/ReadWriteBufferFromHTTP.h>
#include <Poco/Logger.h>
#include <Poco/Net/HTTPRequest.h>
#include <Poco/URI.h>
#include <BridgeHelper/IBridgeHelper.h>
#include <Common/BridgeProtocolVersion.h>
namespace DB
{
// Common base class to access the clickhouse-library-bridge.
class LibraryBridgeHelper : public IBridgeHelper
{
protected:
explicit LibraryBridgeHelper(ContextPtr context_);
void startBridge(std::unique_ptr<ShellCommand> cmd) const override;
String serviceAlias() const override { return "clickhouse-library-bridge"; }
String serviceFileName() const override { return serviceAlias(); }
size_t getDefaultPort() const override { return DEFAULT_PORT; }
bool startBridgeManually() const override { return false; }
String configPrefix() const override { return "library_bridge"; }
const Poco::Util::AbstractConfiguration & getConfig() const override { return config; }
Poco::Logger * getLog() const override { return log; }
Poco::Timespan getHTTPTimeout() const override { return http_timeout; }
Poco::URI createBaseURI() const override;
static constexpr inline size_t DEFAULT_PORT = 9012;
const Poco::Util::AbstractConfiguration & config;
Poco::Logger * log;
const Poco::Timespan http_timeout;
std::string bridge_host;
size_t bridge_port;
ConnectionTimeouts http_timeouts;
Poco::Net::HTTPBasicCredentials credentials{};
};
}

View File

@ -9,6 +9,7 @@
#include <Poco/Net/HTTPRequest.h>
#include <Poco/URI.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/ShellCommand.h>
#include <Common/logger_useful.h>
#include <IO/ConnectionTimeoutsContext.h>
@ -86,6 +87,7 @@ protected:
{
auto uri = createBaseURI();
uri.setPath(MAIN_HANDLER);
uri.addQueryParameter("version", std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION));
return uri;
}
@ -163,6 +165,7 @@ protected:
{
auto uri = createBaseURI();
uri.setPath(COL_INFO_HANDLER);
uri.addQueryParameter("version", std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION));
return uri;
}
@ -184,6 +187,7 @@ protected:
auto uri = createBaseURI();
uri.setPath(SCHEMA_ALLOWED_HANDLER);
uri.addQueryParameter("version", std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION));
uri.addQueryParameter("connection_string", getConnectionString());
ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(getContext()), credentials);
@ -204,6 +208,7 @@ protected:
auto uri = createBaseURI();
uri.setPath(IDENTIFIER_QUOTE_HANDLER);
uri.addQueryParameter("version", std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION));
uri.addQueryParameter("connection_string", getConnectionString());
ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(getContext()), credentials);

View File

@ -64,6 +64,7 @@ Connection::~Connection() = default;
Connection::Connection(const String & host_, UInt16 port_,
const String & default_database_,
const String & user_, const String & password_,
const String & quota_key_,
const String & cluster_,
const String & cluster_secret_,
const String & client_name_,
@ -71,7 +72,7 @@ Connection::Connection(const String & host_, UInt16 port_,
Protocol::Secure secure_,
Poco::Timespan sync_request_timeout_)
: host(host_), port(port_), default_database(default_database_)
, user(user_), password(password_)
, user(user_), password(password_), quota_key(quota_key_)
, cluster(cluster_)
, cluster_secret(cluster_secret_)
, client_name(client_name_)
@ -169,6 +170,8 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
sendHello();
receiveHello();
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM)
sendAddendum();
LOG_TRACE(log_wrapper.get(), "Connected to {} server version {}.{}.{}.",
server_name, server_version_major, server_version_minor, server_version_patch);
@ -266,6 +269,14 @@ void Connection::sendHello()
}
void Connection::sendAddendum()
{
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY)
writeStringBinary(quota_key, *out);
out->next();
}
void Connection::receiveHello()
{
/// Receive hello packet.
@ -1083,6 +1094,7 @@ ServerConnectionPtr Connection::createConnection(const ConnectionParameters & pa
parameters.default_database,
parameters.user,
parameters.password,
parameters.quota_key,
"", /* cluster */
"", /* cluster_secret */
"client",

View File

@ -51,6 +51,7 @@ public:
Connection(const String & host_, UInt16 port_,
const String & default_database_,
const String & user_, const String & password_,
const String & quota_key_,
const String & cluster_,
const String & cluster_secret_,
const String & client_name_,
@ -159,6 +160,7 @@ private:
String default_database;
String user;
String password;
String quota_key;
/// For inter-server authorization
String cluster;
@ -245,6 +247,7 @@ private:
void connect(const ConnectionTimeouts & timeouts);
void sendHello();
void sendAddendum();
void receiveHello();
#if USE_SSL

View File

@ -58,6 +58,7 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
if (auto * result = readpassphrase(prompt.c_str(), buf, sizeof(buf), 0))
password = result;
}
quota_key = config.getString("quota_key", "");
/// By default compression is disabled if address looks like localhost.
compression = config.getBool("compression", !isLocalAddress(DNSResolver::instance().resolveHost(host)))
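With quota_key now read from the client configuration, it travels into every Connection. A sketch mirroring Connection::createConnection above (parameter order assumed from the constructor declaration in this diff):
Connection connection(
    "localhost", 9000,
    /* default_database_ = */ "default",
    /* user_ = */ "default",
    /* password_ = */ "",
    /* quota_key_ = */ parameters.quota_key,
    /* cluster_ = */ "",
    /* cluster_secret_ = */ "",
    "client",
    Protocol::Compression::Enable,
    Protocol::Secure::Disable);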

View File

@ -18,6 +18,7 @@ struct ConnectionParameters
std::string default_database;
std::string user;
std::string password;
std::string quota_key;
Protocol::Secure security = Protocol::Secure::Disable;
Protocol::Compression compression = Protocol::Compression::Enable;
ConnectionTimeouts timeouts;

View File

@ -12,6 +12,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
String default_database,
String user,
String password,
String quota_key,
String cluster,
String cluster_secret,
String client_name,
@ -20,7 +21,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
Int64 priority)
{
Key key{
max_connections, host, port, default_database, user, password, cluster, cluster_secret, client_name, compression, secure, priority};
max_connections, host, port, default_database, user, password, quota_key, cluster, cluster_secret, client_name, compression, secure, priority};
std::lock_guard lock(mutex);
auto [it, inserted] = pools.emplace(key, ConnectionPoolPtr{});
@ -37,6 +38,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get(
default_database,
user,
password,
quota_key,
cluster,
cluster_secret,
client_name,

View File

@ -54,6 +54,7 @@ public:
const String & default_database_,
const String & user_,
const String & password_,
const String & quota_key_,
const String & cluster_,
const String & cluster_secret_,
const String & client_name_,
@ -67,6 +68,7 @@ public:
default_database(default_database_),
user(user_),
password(password_),
quota_key(quota_key_),
cluster(cluster_),
cluster_secret(cluster_secret_),
client_name(client_name_),
@ -112,7 +114,7 @@ protected:
{
return std::make_shared<Connection>(
host, port,
default_database, user, password,
default_database, user, password, quota_key,
cluster, cluster_secret,
client_name, compression, secure);
}
@ -123,6 +125,7 @@ private:
String default_database;
String user;
String password;
String quota_key;
/// For inter-server authorization
String cluster;
@ -149,6 +152,7 @@ public:
String default_database;
String user;
String password;
String quota_key;
String cluster;
String cluster_secret;
String client_name;
@ -171,6 +175,7 @@ public:
String default_database,
String user,
String password,
String quota_key,
String cluster,
String cluster_secret,
String client_name,

View File

@ -0,0 +1,14 @@
#pragma once
#include <cstddef>
namespace DB
{
// Version of the protocol between clickhouse-server and clickhouse-library-bridge. Increment it if you change the protocol in a backward-incompatible way.
static constexpr size_t LIBRARY_BRIDGE_PROTOCOL_VERSION = 1;
// Version of the protocol between clickhouse-server and clickhouse-xdbc-bridge. Increment it if you change the protocol in a backward-incompatible way.
static constexpr size_t XDBC_BRIDGE_PROTOCOL_VERSION = 1;
}

View File

@ -17,6 +17,17 @@
eintr_wrapper_result; \
})
namespace DB
{
enum PollPidResult
{
RESTART,
FAILED
};
}
#if defined(OS_LINUX)
#include <poll.h>
@ -43,16 +54,9 @@ namespace DB
static int syscall_pidfd_open(pid_t pid)
{
// pidfd_open cannot be interrupted, no EINTR handling
return syscall(SYS_pidfd_open, pid, 0);
}
static int dir_pidfd_open(pid_t pid)
{
std::string path = "/proc/" + std::to_string(pid);
return HANDLE_EINTR(open(path.c_str(), O_DIRECTORY));
}
static bool supportsPidFdOpen()
{
VersionNumber pidfd_open_minimal_version(5, 3, 0);
@ -60,36 +64,52 @@ static bool supportsPidFdOpen()
return linux_version >= pidfd_open_minimal_version;
}
static int pidFdOpen(pid_t pid)
static PollPidResult pollPid(pid_t pid, int timeout_in_ms)
{
// use pidfd_open or just plain old /proc/[pid] open for Linux
int pid_fd = 0;
if (supportsPidFdOpen())
{
return syscall_pidfd_open(pid);
// pidfd_open cannot be interrupted, no EINTR handling
pid_fd = syscall_pidfd_open(pid);
if (pid_fd < 0)
{
if (errno == ESRCH)
return PollPidResult::RESTART;
return PollPidResult::FAILED;
}
}
else
{
return dir_pidfd_open(pid);
}
}
std::string path = "/proc/" + std::to_string(pid);
pid_fd = HANDLE_EINTR(open(path.c_str(), O_DIRECTORY));
if (pid_fd < 0)
{
if (errno == ENOENT)
return PollPidResult::RESTART;
return PollPidResult::FAILED;
}
}
static int pollPid(pid_t pid, int timeout_in_ms)
{
struct pollfd pollfd;
int pid_fd = pidFdOpen(pid);
if (pid_fd == -1)
{
return false;
}
pollfd.fd = pid_fd;
pollfd.events = POLLIN;
int ready = poll(&pollfd, 1, timeout_in_ms);
int save_errno = errno;
int ready = HANDLE_EINTR(poll(&pollfd, 1, timeout_in_ms));
close(pid_fd); /// close first so the descriptor is not leaked when poll fails
if (ready <= 0)
return PollPidResult::FAILED;
errno = save_errno;
return ready;
return PollPidResult::RESTART;
}
#elif defined(OS_DARWIN) || defined(OS_FREEBSD)
#pragma clang diagnostic ignored "-Wreserved-identifier"
@ -100,38 +120,32 @@ static int pollPid(pid_t pid, int timeout_in_ms)
namespace DB
{
static int pollPid(pid_t pid, int timeout_in_ms)
static PollPidResult pollPid(pid_t pid, int timeout_in_ms)
{
int status = 0;
int kq = HANDLE_EINTR(kqueue());
int kq = kqueue();
if (kq == -1)
{
return false;
}
return PollPidResult::FAILED;
struct kevent change = {.ident = NULL};
EV_SET(&change, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
int result = HANDLE_EINTR(kevent(kq, &change, 1, NULL, 0, NULL));
if (result == -1)
int event_add_result = HANDLE_EINTR(kevent(kq, &change, 1, NULL, 0, NULL));
if (event_add_result == -1)
{
if (errno != ESRCH)
{
return false;
}
// check if pid already died while we called kevent()
if (waitpid(pid, &status, WNOHANG) == pid)
{
return true;
}
return false;
if (errno == ESRCH)
return PollPidResult::RESTART;
return PollPidResult::FAILED;
}
struct kevent event = {.ident = NULL};
struct timespec remaining_timespec = {.tv_sec = timeout_in_ms / 1000, .tv_nsec = (timeout_in_ms % 1000) * 1000000};
int ready = kevent(kq, nullptr, 0, &event, 1, &remaining_timespec);
int save_errno = errno;
int ready = HANDLE_EINTR(kevent(kq, nullptr, 0, &event, 1, &remaining_timespec));
PollPidResult result = ready < 0 ? PollPidResult::FAILED : PollPidResult::RESTART;
close(kq);
errno = save_errno;
return ready;
return result;
}
#else
#error "Unsupported OS type"
@ -148,7 +162,7 @@ bool waitForPid(pid_t pid, size_t timeout_in_seconds)
/// If there is no timeout before signal try to waitpid 1 time without block so we can avoid sending
/// signal if process is already normally terminated.
int waitpid_res = waitpid(pid, &status, WNOHANG);
int waitpid_res = HANDLE_EINTR(waitpid(pid, &status, WNOHANG));
bool process_terminated_normally = (waitpid_res == pid);
return process_terminated_normally;
}
@ -159,34 +173,24 @@ bool waitForPid(pid_t pid, size_t timeout_in_seconds)
int timeout_in_ms = timeout_in_seconds * 1000;
while (timeout_in_ms > 0)
{
int waitpid_res = waitpid(pid, &status, WNOHANG);
int waitpid_res = HANDLE_EINTR(waitpid(pid, &status, WNOHANG));
bool process_terminated_normally = (waitpid_res == pid);
if (process_terminated_normally)
{
return true;
}
else if (waitpid_res == 0)
{
watch.restart();
int ready = pollPid(pid, timeout_in_ms);
if (ready <= 0)
{
if (errno == EINTR || errno == EAGAIN)
{
timeout_in_ms -= watch.elapsedMilliseconds();
}
else
{
return false;
}
}
continue;
}
else if (waitpid_res == -1 && errno != EINTR)
{
if (waitpid_res != 0)
return false;
}
watch.restart();
PollPidResult result = pollPid(pid, timeout_in_ms);
if (result == PollPidResult::FAILED)
return false;
timeout_in_ms -= watch.elapsedMilliseconds();
}
return false;
}

View File

@ -4,8 +4,8 @@
namespace DB
{
/*
* Waits for a specific pid with timeout, using modern Linux and OSX facilities
* Returns `true` if process terminated successfully or `false` otherwise
* Waits for a specific pid with a timeout
* Returns `true` if the process terminated normally within the specified timeout, `false` otherwise
*/
bool waitForPid(pid_t pid, size_t timeout_in_seconds);
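A hypothetical call site (the SIGKILL escalation is an illustration, not taken from this diff):
/// Give the child process 10 seconds to exit normally; escalate otherwise.
if (!waitForPid(child_pid, /* timeout_in_seconds = */ 10))
    ::kill(child_pid, SIGKILL);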

View File

@ -292,7 +292,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
const char * dest_start = dest;
const UInt32 items_count = source_size / sizeof(ValueType);
unalignedStore<UInt32>(dest, items_count);
unalignedStoreLE<UInt32>(dest, items_count);
dest += sizeof(items_count);
ValueType prev_value{};
@ -300,8 +300,8 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
if (source < source_end)
{
prev_value = unalignedLoad<ValueType>(source);
unalignedStore<ValueType>(dest, prev_value);
prev_value = unalignedLoadLE<ValueType>(source);
unalignedStoreLE<ValueType>(dest, prev_value);
source += sizeof(prev_value);
dest += sizeof(prev_value);
@ -309,10 +309,10 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
if (source < source_end)
{
const ValueType curr_value = unalignedLoad<ValueType>(source);
const ValueType curr_value = unalignedLoadLE<ValueType>(source);
prev_delta = curr_value - prev_value;
unalignedStore<UnsignedDeltaType>(dest, prev_delta);
unalignedStoreLE<UnsignedDeltaType>(dest, prev_delta);
source += sizeof(curr_value);
dest += sizeof(prev_delta);
@ -324,7 +324,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest)
int item = 2;
for (; source < source_end; source += sizeof(ValueType), ++item)
{
const ValueType curr_value = unalignedLoad<ValueType>(source);
const ValueType curr_value = unalignedLoadLE<ValueType>(source);
const UnsignedDeltaType delta = curr_value - prev_value;
const UnsignedDeltaType double_delta = delta - prev_delta;
@ -368,7 +368,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest,
if (source + sizeof(UInt32) > source_end)
return;
const UInt32 items_count = unalignedLoad<UInt32>(source);
const UInt32 items_count = unalignedLoadLE<UInt32>(source);
source += sizeof(items_count);
ValueType prev_value{};
@ -378,10 +378,10 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest,
if (source + sizeof(ValueType) > source_end || items_count < 1)
return;
prev_value = unalignedLoad<ValueType>(source);
prev_value = unalignedLoadLE<ValueType>(source);
if (dest + sizeof(prev_value) > output_end)
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
unalignedStore<ValueType>(dest, prev_value);
unalignedStoreLE<ValueType>(dest, prev_value);
source += sizeof(prev_value);
dest += sizeof(prev_value);
@ -390,11 +390,11 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest,
if (source + sizeof(UnsignedDeltaType) > source_end || items_count < 2)
return;
prev_delta = unalignedLoad<UnsignedDeltaType>(source);
prev_delta = unalignedLoadLE<UnsignedDeltaType>(source);
prev_value = prev_value + static_cast<ValueType>(prev_delta);
if (dest + sizeof(prev_value) > output_end)
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
unalignedStore<ValueType>(dest, prev_value);
unalignedStoreLE<ValueType>(dest, prev_value);
source += sizeof(prev_delta);
dest += sizeof(prev_value);
@ -427,7 +427,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest,
const ValueType curr_value = prev_value + delta;
if (dest + sizeof(curr_value) > output_end)
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
unalignedStore<ValueType>(dest, curr_value);
unalignedStoreLE<ValueType>(dest, curr_value);
dest += sizeof(curr_value);
prev_delta = curr_value - prev_value;
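Switching from unalignedStore/unalignedLoad to the LE variants pins the compressed block layout to little-endian byte order regardless of host endianness, so data written on a little-endian machine decompresses correctly on a big-endian one. A minimal sketch of such a store (assumed semantics, not the actual helper):
#include <bit>
#include <cstring>

/// Sketch (assumed semantics): write `src` at an unaligned address in
/// little-endian byte order, byte-swapping first on big-endian hosts.
template <typename T>
void unalignedStoreLE(void * address, T src)
{
    if constexpr (std::endian::native == std::endian::big)
        src = std::byteswap(src); /// C++23 for brevity; a compiler builtin works too
    std::memcpy(address, &src, sizeof(src));
}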

View File

@ -208,7 +208,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest,
const UInt32 items_count = source_size / sizeof(T);
unalignedStore<UInt32>(dest, items_count);
unalignedStoreLE<UInt32>(dest, items_count);
dest += sizeof(items_count);
T prev_value{};
@ -217,8 +217,8 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest,
if (source < source_end)
{
prev_value = unalignedLoad<T>(source);
unalignedStore<T>(dest, prev_value);
prev_value = unalignedLoadLE<T>(source);
unalignedStoreLE<T>(dest, prev_value);
source += sizeof(prev_value);
dest += sizeof(prev_value);
@ -228,7 +228,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest,
while (source < source_end)
{
const T curr_value = unalignedLoad<T>(source);
const T curr_value = unalignedLoadLE<T>(source);
source += sizeof(curr_value);
const auto xored_data = curr_value ^ prev_value;
@ -274,7 +274,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
if (source + sizeof(UInt32) > source_end)
return;
const UInt32 items_count = unalignedLoad<UInt32>(source);
const UInt32 items_count = unalignedLoadLE<UInt32>(source);
source += sizeof(items_count);
T prev_value{};
@ -283,8 +283,8 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
if (source + sizeof(T) > source_end || items_count < 1)
return;
prev_value = unalignedLoad<T>(source);
unalignedStore<T>(dest, prev_value);
prev_value = unalignedLoadLE<T>(source);
unalignedStoreLE<T>(dest, prev_value);
source += sizeof(prev_value);
dest += sizeof(prev_value);
@ -326,7 +326,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
}
// else: 0b0 prefix - use prev_value
unalignedStore<T>(dest, curr_value);
unalignedStoreLE<T>(dest, curr_value);
dest += sizeof(curr_value);
prev_xored_info = curr_xored_info;

View File

@ -86,8 +86,8 @@ UInt32 ICompressionCodec::compress(const char * source, UInt32 source_size, char
UInt8 header_size = getHeaderSize();
/// Write data from header_size
UInt32 compressed_bytes_written = doCompressData(source, source_size, &dest[header_size]);
unalignedStore<UInt32>(&dest[1], compressed_bytes_written + header_size);
unalignedStore<UInt32>(&dest[5], source_size);
unalignedStoreLE<UInt32>(&dest[1], compressed_bytes_written + header_size);
unalignedStoreLE<UInt32>(&dest[5], source_size);
return header_size + compressed_bytes_written;
}
@ -112,7 +112,7 @@ UInt32 ICompressionCodec::decompress(const char * source, UInt32 source_size, ch
UInt32 ICompressionCodec::readCompressedBlockSize(const char * source)
{
UInt32 compressed_block_size = unalignedLoad<UInt32>(&source[1]);
UInt32 compressed_block_size = unalignedLoadLE<UInt32>(&source[1]);
if (compressed_block_size == 0)
throw Exception(ErrorCodes::CORRUPTED_DATA, "Can't decompress data: header is corrupt with compressed block size 0");
return compressed_block_size;
@ -121,7 +121,7 @@ UInt32 ICompressionCodec::readCompressedBlockSize(const char * source)
UInt32 ICompressionCodec::readDecompressedBlockSize(const char * source)
{
UInt32 decompressed_block_size = unalignedLoad<UInt32>(&source[5]);
UInt32 decompressed_block_size = unalignedLoadLE<UInt32>(&source[5]);
if (decompressed_block_size == 0)
throw Exception(ErrorCodes::CORRUPTED_DATA, "Can't decompress data: header is corrupt with decompressed block size 0");
return decompressed_block_size;
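For orientation, the generic codec header layout can be inferred from the offsets used above (the method byte at offset 0 is assumed from the surrounding codec framework):
/// byte 0      - codec method byte
/// bytes 1..4  - compressed block size, little-endian, including the header
/// bytes 5..8  - decompressed block size, little-endian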

View File

@ -175,7 +175,7 @@ private:
throw std::runtime_error("No more data to read");
}
current_value = unalignedLoad<T>(data);
current_value = unalignedLoadLE<T>(data);
data = reinterpret_cast<const char *>(data) + sizeof(T);
}
};
@ -371,7 +371,7 @@ CodecTestSequence makeSeq(Args && ... args)
char * write_pos = data.data();
for (const auto & v : vals)
{
unalignedStore<T>(write_pos, v);
unalignedStoreLE<T>(write_pos, v);
write_pos += sizeof(v);
}
@ -393,7 +393,7 @@ CodecTestSequence generateSeq(Generator gen, const char* gen_name, B Begin = 0,
{
const T v = gen(static_cast<T>(i));
unalignedStore<T>(write_pos, v);
unalignedStoreLE<T>(write_pos, v);
write_pos += sizeof(v);
}

View File

@ -52,10 +52,14 @@
/// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing in common with VERSION_REVISION;
/// the latter is just a number for the server version (one number instead of a commit SHA)
/// for simplicity (sometimes it may be more convenient in some use cases).
#define DBMS_TCP_PROTOCOL_VERSION 54457
#define DBMS_TCP_PROTOCOL_VERSION 54458
#define DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME 54449
#define DBMS_MIN_PROTOCOL_VERSION_WITH_PROFILE_EVENTS_IN_INSERT 54456
#define DBMS_MIN_PROTOCOL_VERSION_WITH_VIEW_IF_PERMITTED 54457
#define DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM 54458
#define DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY 54458
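New wire fields are gated on these minimum revisions at both ends; the pattern, taken from Connection::sendAddendum earlier in this diff:
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY)
    writeStringBinary(quota_key, *out);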

View File

@ -705,6 +705,10 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Bool, input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference, false, "Skip columns with unsupported types during schema inference for the Arrow format", 0) \
M(String, column_names_for_schema_inference, "", "The list of column names to use in schema inference for formats without column names. The format: 'column1,column2,column3,...'", 0) \
M(Bool, input_format_json_read_bools_as_numbers, true, "Allow to parse bools as numbers in JSON input formats", 0) \
M(Bool, input_format_json_try_infer_numbers_from_strings, true, "Try to infer numbers from string fields during schema inference", 0) \
M(Bool, input_format_try_infer_integers, true, "Try to infer integers instead of floats during schema inference in text formats", 0) \
M(Bool, input_format_try_infer_dates, true, "Try to infer dates from string fields during schema inference in text formats", 0) \
M(Bool, input_format_try_infer_datetimes, true, "Try to infer datetimes from string fields during schema inference in text formats", 0) \
M(Bool, input_format_protobuf_flatten_google_wrappers, false, "Enable Google wrappers for regular non-nested columns, e.g. google.protobuf.StringValue 'str' for String column 'str'. For Nullable columns empty wrappers are recognized as defaults, and missing as nulls", 0) \
M(Bool, output_format_protobuf_nullables_with_google_wrappers, false, "When serializing Nullable columns with Google wrappers, serialize default values as empty wrappers. If turned off, default and null values are not serialized", 0) \
M(UInt64, input_format_csv_skip_first_lines, 0, "Skip specified number of lines at the beginning of data in CSV format", 0) \

View File

@ -1013,7 +1013,11 @@ void BaseDaemon::setupWatchdog()
/// If streaming compression of logs is used then we write watchdog logs to cerr
if (config().getRawString("logger.stream_compress", "false") == "true")
{
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter;
Poco::AutoPtr<OwnPatternFormatter> pf;
if (config().getString("logger.formatting", "") == "json")
pf = new OwnJSONPatternFormatter;
else
pf = new OwnPatternFormatter;
Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel(std::cerr));
logger().setChannel(log);
}

Some files were not shown because too many files have changed in this diff.