Merge branch 'master' into iaadeflate_upgrade_qpl030

commit 2b43942fe6, by jasperzhu, 2022-12-08 11:19:20 +08:00, committed by GitHub
No known key found for this signature in database (GPG Key ID: 4AEE18F83AFDEB23)
109 changed files with 2787 additions and 473 deletions

View File

@@ -51,7 +51,7 @@ jobs:
--gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \
--output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}"
git add "./docs/changelogs/${GITHUB_TAG}.md"
python ./utils/security-generator/generate_security.py > SECURITY.md
python3 ./utils/security-generator/generate_security.py > SECURITY.md
git diff HEAD
- name: Create Pull Request
uses: peter-evans/create-pull-request@v3

View File

@@ -21,12 +21,12 @@ set (APPLE_CLANG_MINIMUM_VERSION 12.0.0)
set (GCC_MINIMUM_VERSION 11)
if (COMPILER_GCC)
message (FATAL_ERROR "Compilation with GCC is unsupported. Please use Clang instead.")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION})
message (FATAL_ERROR "Compilation with GCC version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, the minimum required version is ${GCC_MINIMUM_VERSION}.")
endif ()
message (WARNING "Compilation with GCC is unsupported. Please use Clang instead.")
elseif (COMPILER_CLANG)
if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
# (Experimental!) Specify "-DALLOW_APPLECLANG=ON" when running CMake configuration step, if you want to experiment with using it.
@@ -83,7 +83,7 @@ if ((OS_LINUX OR OS_DARWIN) AND NOT LINKER_NAME)
if (NOT LINKER_NAME)
if (GOLD_PATH)
message (WARNING "Linking with gold is not recommended. Please use lld.")
message (FATAL_ERROR "Linking with gold is unsupported. Please use lld.")
if (COMPILER_GCC)
set (LINKER_NAME "gold")
else ()

View File

@@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.10.3.27-stable (6d3b2985724) FIXME as compared to v22.10.2.11-stable (d2bfcaba002)
#### Improvement
* Backported in [#42842](https://github.com/ClickHouse/ClickHouse/issues/42842): Update tzdata to 2022f. Mexico will no longer observe DST except near the US border: https://www.timeanddate.com/news/time/mexico-abolishes-dst-2022.html. Chihuahua moves to year-round UTC-6 on 2022-10-30. Fiji no longer observes DST. See https://github.com/google/cctz/pull/235 and https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1995209. [#42796](https://github.com/ClickHouse/ClickHouse/pull/42796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### Build/Testing/Packaging Improvement
* Backported in [#42959](https://github.com/ClickHouse/ClickHouse/issues/42959): Before the fix, RPM replaced the user-defined config and preserved the old file only as `$file.rpmsave`. The PR fixes this so that packages no longer replace the user's files. [#42936](https://github.com/ClickHouse/ClickHouse/pull/42936) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#43042](https://github.com/ClickHouse/ClickHouse/issues/43042): Add a CI step to mark commits as ready for release; soft-forbid launching a release script from branches other than master. [#43017](https://github.com/ClickHouse/ClickHouse/pull/43017) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#42864](https://github.com/ClickHouse/ClickHouse/issues/42864): Fix lowerUTF8()/upperUTF8() in the case when a symbol lies on a 16-byte boundary (a very frequent case if you have strings longer than 16 bytes). [#42812](https://github.com/ClickHouse/ClickHouse/pull/42812) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#43173](https://github.com/ClickHouse/ClickHouse/issues/43173): Fix a rare possible hang on query cancellation. [#42874](https://github.com/ClickHouse/ClickHouse/pull/42874) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#43064](https://github.com/ClickHouse/ClickHouse/issues/43064): Fix a rare NOT_FOUND_COLUMN_IN_BLOCK error that occurred when a projection could be used but no projection was available. This fixes [#42771](https://github.com/ClickHouse/ClickHouse/issues/42771). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/25563. [#42938](https://github.com/ClickHouse/ClickHouse/pull/42938) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#43075](https://github.com/ClickHouse/ClickHouse/issues/43075): Fix lambda parsing. Closes [#41848](https://github.com/ClickHouse/ClickHouse/issues/41848). [#42979](https://github.com/ClickHouse/ClickHouse/pull/42979) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#43444](https://github.com/ClickHouse/ClickHouse/issues/43444): Fix several buffer over-reads. [#43159](https://github.com/ClickHouse/ClickHouse/pull/43159) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#43430](https://github.com/ClickHouse/ClickHouse/issues/43430): Fixed queries with `SAMPLE BY` with prewhere optimization on tables using `Merge` engine. [#43315](https://github.com/ClickHouse/ClickHouse/pull/43315) ([Antonio Andelic](https://github.com/antonio2368)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix a bug in CAST function parser [#42980](https://github.com/ClickHouse/ClickHouse/pull/42980) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix TSan errors (correctly ignore _exit interception) [#43009](https://github.com/ClickHouse/ClickHouse/pull/43009) ([Azat Khuzhin](https://github.com/azat)).
* Update SECURITY.md on new stable tags [#43365](https://github.com/ClickHouse/ClickHouse/pull/43365) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use all parameters with prefixes from ssm [#43467](https://github.com/ClickHouse/ClickHouse/pull/43467) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.10.4.23-stable (352772987f4) FIXME as compared to v22.10.3.27-stable (6d3b2985724)
#### Backward Incompatible Change
* Backported in [#43487](https://github.com/ClickHouse/ClickHouse/issues/43487): Fixed backward incompatibility in (de)serialization of states of `min`, `max`, `any*`, `argMin`, `argMax` aggregate functions with `String` argument. The incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/41431 and affects the 22.9, 22.10 and 22.11 branches (fixed since 22.9.6, 22.10.4 and 22.11.2 respectively). Some minor releases of the 22.3, 22.7 and 22.8 branches are also affected: 22.3.13...22.3.14 (fixed since 22.3.15), 22.8.6...22.8.9 (fixed since 22.8.10), 22.7.6 and newer (will not be fixed in 22.7; we recommend upgrading from 22.7.* to 22.8.10 or newer). This release note does not concern users that have never used the affected versions. Incompatible versions append an extra `'\0'` to strings when reading states of the aggregate functions mentioned above. For example, if an older version saved the state of `anyState('foobar')` to `state_column`, then an incompatible version will print `'foobar\0'` on `anyMerge(state_column)`. Incompatible versions also write states of the aggregate functions without a trailing `'\0'`. Newer versions (that have the fix) can correctly read data written by all versions, including incompatible ones, except for one corner case: if an incompatible version saved a state with a string that actually ends with a null character, then a newer version will trim the trailing `'\0'` when reading the state of the affected aggregate function. For example, if an incompatible version saved the state of `anyState('abrac\0dabra\0')` to `state_column`, then a newer version will print `'abrac\0dabra'` on `anyMerge(state_column)`. The issue also affects distributed queries when an incompatible version works in a cluster together with older or newer versions. [#43038](https://github.com/ClickHouse/ClickHouse/pull/43038) ([Raúl Marín](https://github.com/Algunenano)).
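The behavior described above can be reproduced with a small sketch (the table name `state_store` is illustrative, not taken from the original report):
``` sql
-- Store an aggregate-function state, then merge it back.
CREATE TABLE state_store
(
    state_column AggregateFunction(any, String)
)
ENGINE = AggregatingMergeTree
ORDER BY tuple();

INSERT INTO state_store SELECT anyState('foobar');

-- Fixed versions print 'foobar'; an affected version appends the extra
-- terminator and prints 'foobar\0'.
SELECT anyMerge(state_column) FROM state_store;
```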
#### Build/Testing/Packaging Improvement
* Backported in [#43053](https://github.com/ClickHouse/ClickHouse/issues/43053): Wait until all files are in sync before archiving them in integration tests. [#42891](https://github.com/ClickHouse/ClickHouse/pull/42891) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#43715](https://github.com/ClickHouse/ClickHouse/issues/43715): Fixed an exception that was reported while trying to read a Parquet file from S3 into ClickHouse. [#43297](https://github.com/ClickHouse/ClickHouse/pull/43297) ([Arthur Passos](https://github.com/arthurpassos)).
* Backported in [#43576](https://github.com/ClickHouse/ClickHouse/issues/43576): Fix possible `Cannot create non-empty column with type Nothing` in functions if/multiIf. Closes [#43356](https://github.com/ClickHouse/ClickHouse/issues/43356). [#43368](https://github.com/ClickHouse/ClickHouse/pull/43368) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#43506](https://github.com/ClickHouse/ClickHouse/issues/43506): Fix a bug where a row-level filter used the default value of a column. [#43387](https://github.com/ClickHouse/ClickHouse/pull/43387) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#43723](https://github.com/ClickHouse/ClickHouse/issues/43723): Fixed primary key analysis with conditions involving `toString(enum)`. [#43596](https://github.com/ClickHouse/ClickHouse/pull/43596) ([Nikita Taranov](https://github.com/nickitat)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Temporarily disable `test_hive_query` [#43542](https://github.com/ClickHouse/ClickHouse/pull/43542) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not checkout submodules recursively [#43637](https://github.com/ClickHouse/ClickHouse/pull/43637) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use docker images cache from merged PRs in master and release branches [#43664](https://github.com/ClickHouse/ClickHouse/pull/43664) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix pagination issue in GITHUB_JOB_ID() [#43681](https://github.com/ClickHouse/ClickHouse/pull/43681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@@ -0,0 +1,33 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.11.2.30-stable (28f72d8ab09) FIXME as compared to v22.11.1.1360-stable (0d211ed1984)
#### Backward Incompatible Change
* Backported in [#43488](https://github.com/ClickHouse/ClickHouse/issues/43488): Fixed backward incompatibility in (de)serialization of states of `min`, `max`, `any*`, `argMin`, `argMax` aggregate functions with `String` argument. The incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/41431 and affects the 22.9, 22.10 and 22.11 branches (fixed since 22.9.6, 22.10.4 and 22.11.2 respectively). Some minor releases of the 22.3, 22.7 and 22.8 branches are also affected: 22.3.13...22.3.14 (fixed since 22.3.15), 22.8.6...22.8.9 (fixed since 22.8.10), 22.7.6 and newer (will not be fixed in 22.7; we recommend upgrading from 22.7.* to 22.8.10 or newer). This release note does not concern users that have never used the affected versions. Incompatible versions append an extra `'\0'` to strings when reading states of the aggregate functions mentioned above. For example, if an older version saved the state of `anyState('foobar')` to `state_column`, then an incompatible version will print `'foobar\0'` on `anyMerge(state_column)`. Incompatible versions also write states of the aggregate functions without a trailing `'\0'`. Newer versions (that have the fix) can correctly read data written by all versions, including incompatible ones, except for one corner case: if an incompatible version saved a state with a string that actually ends with a null character, then a newer version will trim the trailing `'\0'` when reading the state of the affected aggregate function. For example, if an incompatible version saved the state of `anyState('abrac\0dabra\0')` to `state_column`, then a newer version will print `'abrac\0dabra'` on `anyMerge(state_column)`. The issue also affects distributed queries when an incompatible version works in a cluster together with older or newer versions. [#43038](https://github.com/ClickHouse/ClickHouse/pull/43038) ([Raúl Marín](https://github.com/Algunenano)).
#### Improvement
* Backported in [#43511](https://github.com/ClickHouse/ClickHouse/issues/43511): Restrict default access to named collections for users defined in the config. A user must have the explicit `show_named_collections=1` setting to be able to see them. [#43325](https://github.com/ClickHouse/ClickHouse/pull/43325) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#43716](https://github.com/ClickHouse/ClickHouse/issues/43716): Fixed an exception that was reported while trying to read a Parquet file from S3 into ClickHouse. [#43297](https://github.com/ClickHouse/ClickHouse/pull/43297) ([Arthur Passos](https://github.com/arthurpassos)).
* Backported in [#43431](https://github.com/ClickHouse/ClickHouse/issues/43431): Fixed queries with `SAMPLE BY` with prewhere optimization on tables using `Merge` engine. [#43315](https://github.com/ClickHouse/ClickHouse/pull/43315) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#43577](https://github.com/ClickHouse/ClickHouse/issues/43577): Fix possible `Cannot create non-empty column with type Nothing` in functions if/multiIf. Closes [#43356](https://github.com/ClickHouse/ClickHouse/issues/43356). [#43368](https://github.com/ClickHouse/ClickHouse/pull/43368) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#43507](https://github.com/ClickHouse/ClickHouse/issues/43507): Fix a bug where a row-level filter used the default value of a column. [#43387](https://github.com/ClickHouse/ClickHouse/pull/43387) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#43724](https://github.com/ClickHouse/ClickHouse/issues/43724): Fixed primary key analysis with conditions involving `toString(enum)`. [#43596](https://github.com/ClickHouse/ClickHouse/pull/43596) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#43807](https://github.com/ClickHouse/ClickHouse/issues/43807): Optimized number of List requests to ZooKeeper when selecting a part to merge. Previously it could produce thousands of requests in some cases. Fixes [#43647](https://github.com/ClickHouse/ClickHouse/issues/43647). [#43675](https://github.com/ClickHouse/ClickHouse/pull/43675) ([Alexander Tokmakov](https://github.com/tavplubix)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Update SECURITY.md on new stable tags [#43365](https://github.com/ClickHouse/ClickHouse/pull/43365) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use all parameters with prefixes from ssm [#43467](https://github.com/ClickHouse/ClickHouse/pull/43467) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Temporarily disable `test_hive_query` [#43542](https://github.com/ClickHouse/ClickHouse/pull/43542) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not checkout submodules recursively [#43637](https://github.com/ClickHouse/ClickHouse/pull/43637) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use docker images cache from merged PRs in master and release branches [#43664](https://github.com/ClickHouse/ClickHouse/pull/43664) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix pagination issue in GITHUB_JOB_ID() [#43681](https://github.com/ClickHouse/ClickHouse/pull/43681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@@ -0,0 +1,34 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.15.33-lts (4ef30f2c4b6) FIXME as compared to v22.3.14.23-lts (74956bfee4d)
#### Backward Incompatible Change
* Backported in [#43484](https://github.com/ClickHouse/ClickHouse/issues/43484): Fixed backward incompatibility in (de)serialization of states of `min`, `max`, `any*`, `argMin`, `argMax` aggregate functions with `String` argument. The incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/41431 and affects the 22.9, 22.10 and 22.11 branches (fixed since 22.9.6, 22.10.4 and 22.11.2 respectively). Some minor releases of the 22.3, 22.7 and 22.8 branches are also affected: 22.3.13...22.3.14 (fixed since 22.3.15), 22.8.6...22.8.9 (fixed since 22.8.10), 22.7.6 and newer (will not be fixed in 22.7; we recommend upgrading from 22.7.* to 22.8.10 or newer). This release note does not concern users that have never used the affected versions. Incompatible versions append an extra `'\0'` to strings when reading states of the aggregate functions mentioned above. For example, if an older version saved the state of `anyState('foobar')` to `state_column`, then an incompatible version will print `'foobar\0'` on `anyMerge(state_column)`. Incompatible versions also write states of the aggregate functions without a trailing `'\0'`. Newer versions (that have the fix) can correctly read data written by all versions, including incompatible ones, except for one corner case: if an incompatible version saved a state with a string that actually ends with a null character, then a newer version will trim the trailing `'\0'` when reading the state of the affected aggregate function. For example, if an incompatible version saved the state of `anyState('abrac\0dabra\0')` to `state_column`, then a newer version will print `'abrac\0dabra'` on `anyMerge(state_column)`. The issue also affects distributed queries when an incompatible version works in a cluster together with older or newer versions. [#43038](https://github.com/ClickHouse/ClickHouse/pull/43038) ([Raúl Marín](https://github.com/Algunenano)).
#### Improvement
* Backported in [#42839](https://github.com/ClickHouse/ClickHouse/issues/42839): Update tzdata to 2022f. Mexico will no longer observe DST except near the US border: https://www.timeanddate.com/news/time/mexico-abolishes-dst-2022.html. Chihuahua moves to year-round UTC-6 on 2022-10-30. Fiji no longer observes DST. See https://github.com/google/cctz/pull/235 and https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1995209. [#42796](https://github.com/ClickHouse/ClickHouse/pull/42796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### Build/Testing/Packaging Improvement
* Backported in [#43050](https://github.com/ClickHouse/ClickHouse/issues/43050): Wait until all files are in sync before archiving them in integration tests. [#42891](https://github.com/ClickHouse/ClickHouse/pull/42891) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#42963](https://github.com/ClickHouse/ClickHouse/issues/42963): Before the fix, RPM replaced the user-defined config and preserved the old file only as `$file.rpmsave`. The PR fixes this so that packages no longer replace the user's files. [#42936](https://github.com/ClickHouse/ClickHouse/pull/42936) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#43039](https://github.com/ClickHouse/ClickHouse/issues/43039): Add a CI step to mark commits as ready for release; soft-forbid launching a release script from branches other than master. [#43017](https://github.com/ClickHouse/ClickHouse/pull/43017) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#43427](https://github.com/ClickHouse/ClickHouse/issues/43427): Fixed queries with `SAMPLE BY` with prewhere optimization on tables using `Merge` engine. [#43315](https://github.com/ClickHouse/ClickHouse/pull/43315) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#43720](https://github.com/ClickHouse/ClickHouse/issues/43720): Fixed primary key analysis with conditions involving `toString(enum)`. [#43596](https://github.com/ClickHouse/ClickHouse/pull/43596) ([Nikita Taranov](https://github.com/nickitat)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Always run `BuilderReport` and `BuilderSpecialReport` in all CI types [#42684](https://github.com/ClickHouse/ClickHouse/pull/42684) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update SECURITY.md on new stable tags [#43365](https://github.com/ClickHouse/ClickHouse/pull/43365) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use all parameters with prefixes from ssm [#43467](https://github.com/ClickHouse/ClickHouse/pull/43467) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Temporarily disable `test_hive_query` [#43542](https://github.com/ClickHouse/ClickHouse/pull/43542) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not checkout submodules recursively [#43637](https://github.com/ClickHouse/ClickHouse/pull/43637) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use docker images cache from merged PRs in master and release branches [#43664](https://github.com/ClickHouse/ClickHouse/pull/43664) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.8.10.29-lts (d568a57f7af) FIXME as compared to v22.8.9.24-lts (a1b69551d40)
#### Backward Incompatible Change
* Backported in [#43485](https://github.com/ClickHouse/ClickHouse/issues/43485): Fixed backward incompatibility in (de)serialization of states of `min`, `max`, `any*`, `argMin`, `argMax` aggregate functions with `String` argument. The incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/41431 and affects the 22.9, 22.10 and 22.11 branches (fixed since 22.9.6, 22.10.4 and 22.11.2 respectively). Some minor releases of the 22.3, 22.7 and 22.8 branches are also affected: 22.3.13...22.3.14 (fixed since 22.3.15), 22.8.6...22.8.9 (fixed since 22.8.10), 22.7.6 and newer (will not be fixed in 22.7; we recommend upgrading from 22.7.* to 22.8.10 or newer). This release note does not concern users that have never used the affected versions. Incompatible versions append an extra `'\0'` to strings when reading states of the aggregate functions mentioned above. For example, if an older version saved the state of `anyState('foobar')` to `state_column`, then an incompatible version will print `'foobar\0'` on `anyMerge(state_column)`. Incompatible versions also write states of the aggregate functions without a trailing `'\0'`. Newer versions (that have the fix) can correctly read data written by all versions, including incompatible ones, except for one corner case: if an incompatible version saved a state with a string that actually ends with a null character, then a newer version will trim the trailing `'\0'` when reading the state of the affected aggregate function. For example, if an incompatible version saved the state of `anyState('abrac\0dabra\0')` to `state_column`, then a newer version will print `'abrac\0dabra'` on `anyMerge(state_column)`. The issue also affects distributed queries when an incompatible version works in a cluster together with older or newer versions. [#43038](https://github.com/ClickHouse/ClickHouse/pull/43038) ([Raúl Marín](https://github.com/Algunenano)).
#### Build/Testing/Packaging Improvement
* Backported in [#43051](https://github.com/ClickHouse/ClickHouse/issues/43051): Wait until all files are in sync before archiving them in integration tests. [#42891](https://github.com/ClickHouse/ClickHouse/pull/42891) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#43513](https://github.com/ClickHouse/ClickHouse/issues/43513): Fix several buffer over-reads. [#43159](https://github.com/ClickHouse/ClickHouse/pull/43159) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#43428](https://github.com/ClickHouse/ClickHouse/issues/43428): Fixed queries with `SAMPLE BY` with prewhere optimization on tables using `Merge` engine. [#43315](https://github.com/ClickHouse/ClickHouse/pull/43315) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#43580](https://github.com/ClickHouse/ClickHouse/issues/43580): Fix a bug where a row-level filter used the default value of a column. [#43387](https://github.com/ClickHouse/ClickHouse/pull/43387) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#43721](https://github.com/ClickHouse/ClickHouse/issues/43721): Fixed primary key analysis with conditions involving `toString(enum)`. [#43596](https://github.com/ClickHouse/ClickHouse/pull/43596) ([Nikita Taranov](https://github.com/nickitat)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix 02267_file_globs_schema_inference.sql flakiness [#41877](https://github.com/ClickHouse/ClickHouse/pull/41877) ([Kruglov Pavel](https://github.com/Avogar)).
* Update SECURITY.md on new stable tags [#43365](https://github.com/ClickHouse/ClickHouse/pull/43365) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use all parameters with prefixes from ssm [#43467](https://github.com/ClickHouse/ClickHouse/pull/43467) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Temporarily disable `test_hive_query` [#43542](https://github.com/ClickHouse/ClickHouse/pull/43542) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not checkout submodules recursively [#43637](https://github.com/ClickHouse/ClickHouse/pull/43637) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use docker images cache from merged PRs in master and release branches [#43664](https://github.com/ClickHouse/ClickHouse/pull/43664) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix pagination issue in GITHUB_JOB_ID() [#43681](https://github.com/ClickHouse/ClickHouse/pull/43681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@@ -0,0 +1,30 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.9.5.25-stable (68ba857aa82) FIXME as compared to v22.9.4.32-stable (3db8bcf1a70)
#### Improvement
* Backported in [#42841](https://github.com/ClickHouse/ClickHouse/issues/42841): Update tzdata to 2022f. Mexico will no longer observe DST except near the US border: https://www.timeanddate.com/news/time/mexico-abolishes-dst-2022.html. Chihuahua moves to year-round UTC-6 on 2022-10-30. Fiji no longer observes DST. See https://github.com/google/cctz/pull/235 and https://bugs.launchpad.net/ubuntu/+source/tzdata/+bug/1995209. [#42796](https://github.com/ClickHouse/ClickHouse/pull/42796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### Build/Testing/Packaging Improvement
* Backported in [#42965](https://github.com/ClickHouse/ClickHouse/issues/42965): Before the fix, RPM replaced the user-defined config and preserved the old file only as `$file.rpmsave`. The PR fixes this so that packages no longer replace the user's files. [#42936](https://github.com/ClickHouse/ClickHouse/pull/42936) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#43041](https://github.com/ClickHouse/ClickHouse/issues/43041): Add a CI step to mark commits as ready for release; soft-forbid launching a release script from branches other than master. [#43017](https://github.com/ClickHouse/ClickHouse/pull/43017) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#42749](https://github.com/ClickHouse/ClickHouse/issues/42749): A segmentation fault related to DNS & c-ares was reported: multiple threads crashed with `Received signal Segmentation fault (11)`, `Address: 0xf Access: write. Address not mapped to object`, inside `Poco::Net::IPAddress::family()` (version 22.8.5.29, official build). [#42234](https://github.com/ClickHouse/ClickHouse/pull/42234) ([Arthur Passos](https://github.com/arthurpassos)).
* Backported in [#42863](https://github.com/ClickHouse/ClickHouse/issues/42863): Fix lowerUTF8()/upperUTF8() in the case when a symbol lies on a 16-byte boundary (a very frequent case if you have strings longer than 16 bytes). [#42812](https://github.com/ClickHouse/ClickHouse/pull/42812) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#43063](https://github.com/ClickHouse/ClickHouse/issues/43063): Fix a rare NOT_FOUND_COLUMN_IN_BLOCK error that occurred when a projection could be used but no projection was available. This fixes [#42771](https://github.com/ClickHouse/ClickHouse/issues/42771). The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/25563. [#42938](https://github.com/ClickHouse/ClickHouse/pull/42938) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#43443](https://github.com/ClickHouse/ClickHouse/issues/43443): Fix several buffer over-reads. [#43159](https://github.com/ClickHouse/ClickHouse/pull/43159) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#43429](https://github.com/ClickHouse/ClickHouse/issues/43429): Fixed queries with `SAMPLE BY` with prewhere optimization on tables using `Merge` engine. [#43315](https://github.com/ClickHouse/ClickHouse/pull/43315) ([Antonio Andelic](https://github.com/antonio2368)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Always run `BuilderReport` and `BuilderSpecialReport` in all CI types [#42684](https://github.com/ClickHouse/ClickHouse/pull/42684) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update SECURITY.md on new stable tags [#43365](https://github.com/ClickHouse/ClickHouse/pull/43365) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use all parameters with prefixes from ssm [#43467](https://github.com/ClickHouse/ClickHouse/pull/43467) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@@ -0,0 +1,28 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.9.6.20-stable (ef6343f9579) FIXME as compared to v22.9.5.25-stable (68ba857aa82)
#### Backward Incompatible Change
* Backported in [#43486](https://github.com/ClickHouse/ClickHouse/issues/43486): Fixed backward incompatibility in (de)serialization of states of `min`, `max`, `any*`, `argMin`, `argMax` aggregate functions with `String` argument. The incompatibility was introduced in https://github.com/ClickHouse/ClickHouse/pull/41431 and affects the 22.9, 22.10 and 22.11 branches (fixed since 22.9.6, 22.10.4 and 22.11.2 respectively). Some minor releases of the 22.3, 22.7 and 22.8 branches are also affected: 22.3.13...22.3.14 (fixed since 22.3.15), 22.8.6...22.8.9 (fixed since 22.8.10), 22.7.6 and newer (will not be fixed in 22.7; we recommend upgrading from 22.7.* to 22.8.10 or newer). This release note does not concern users that have never used the affected versions. Incompatible versions append an extra `'\0'` to strings when reading states of the aggregate functions mentioned above. For example, if an older version saved the state of `anyState('foobar')` to `state_column`, then an incompatible version will print `'foobar\0'` on `anyMerge(state_column)`. Incompatible versions also write states of the aggregate functions without a trailing `'\0'`. Newer versions (that have the fix) can correctly read data written by all versions, including incompatible ones, except for one corner case: if an incompatible version saved a state with a string that actually ends with a null character, then a newer version will trim the trailing `'\0'` when reading the state of the affected aggregate function. For example, if an incompatible version saved the state of `anyState('abrac\0dabra\0')` to `state_column`, then a newer version will print `'abrac\0dabra'` on `anyMerge(state_column)`. The issue also affects distributed queries when an incompatible version works in a cluster together with older or newer versions. [#43038](https://github.com/ClickHouse/ClickHouse/pull/43038) ([Raúl Marín](https://github.com/Algunenano)).
#### Build/Testing/Packaging Improvement
* Backported in [#43052](https://github.com/ClickHouse/ClickHouse/issues/43052): Wait until all files are in sync before archiving them in integration tests. [#42891](https://github.com/ClickHouse/ClickHouse/pull/42891) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#43505](https://github.com/ClickHouse/ClickHouse/issues/43505): Fix a bug where a row-level filter used the default value of a column. [#43387](https://github.com/ClickHouse/ClickHouse/pull/43387) ([Alexander Gololobov](https://github.com/davenger)).
* Backported in [#43722](https://github.com/ClickHouse/ClickHouse/issues/43722): Fixed primary key analysis with conditions involving `toString(enum)`. [#43596](https://github.com/ClickHouse/ClickHouse/pull/43596) ([Nikita Taranov](https://github.com/nickitat)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix 02267_file_globs_schema_inference.sql flakiness [#41877](https://github.com/ClickHouse/ClickHouse/pull/41877) ([Kruglov Pavel](https://github.com/Avogar)).
* Temporarily disable `test_hive_query` [#43542](https://github.com/ClickHouse/ClickHouse/pull/43542) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not checkout submodules recursively [#43637](https://github.com/ClickHouse/ClickHouse/pull/43637) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Use docker images cache from merged PRs in master and release branches [#43664](https://github.com/ClickHouse/ClickHouse/pull/43664) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix pagination issue in GITHUB_JOB_ID() [#43681](https://github.com/ClickHouse/ClickHouse/pull/43681) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@@ -16,7 +16,7 @@ import SupersetDocker from '@site/docs/en/_snippets/_add_superset_detail.md';
## Goal
In this guide you will learn how to:
- Load the OpenCelliD data in Clickhouse
- Load the OpenCelliD data in ClickHouse
- Connect Apache Superset to ClickHouse
- Build a dashboard based on data available in the dataset

View File

@@ -56,6 +56,7 @@ As of November 8th, 2022, each TSV is approximately the following size and numbe
- [Line by line commit history of a file](#line-by-line-commit-history-of-a-file)
- [Unsolved Questions](#unsolved-questions)
- [Git blame](#git-blame)
- [Related Content](#related-content)
# Generating the data
@@ -2497,3 +2498,7 @@ LIMIT 20
We welcome exact and improved solutions here.
# Related Content
- [Git commits and our community](https://clickhouse.com/blog/clickhouse-git-community-commits)
- [Window and array functions for Git commit sequences](https://clickhouse.com/blog/clickhouse-window-array-functions-git-commits)

View File

@@ -22,5 +22,8 @@ functions in ClickHouse. The sample datasets include:
- The [Cell Towers dataset](../getting-started/example-datasets/cell-towers.md) imports a CSV into ClickHouse
- The [NYPD Complaint Data](../getting-started/example-datasets/nypd_complaint_data.md) demonstrates how to use data inference to simplify creating tables
- The ["What's on the Menu?" dataset](../getting-started/example-datasets/menus.md) has an example of denormalizing data
- The [Getting Data Into ClickHouse - Part 1](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1) provides examples of defining a schema and loading a small Hacker News dataset
- The [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) shows how JSON data can be loaded
- The [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3) has examples of loading data from S3
View the **Tutorials and Datasets** menu for a complete list of sample datasets.

View File

@@ -8,8 +8,8 @@ slug: /en/install
You have two options for getting up and running with ClickHouse:
- **[ClickHouse Cloud](https://clickhouse.cloud/):** the official ClickHouse as a service, built by, maintained, and supported by the creators of ClickHouse
- **Self-managed ClickHouse:** ClickHouse can run on any Linux, FreeBSD, or Mac OS X with x86_64, AArch64, or PowerPC64LE CPU architecture
- **[ClickHouse Cloud](https://clickhouse.com/cloud/):** the official ClickHouse as a service, built by, maintained, and supported by the creators of ClickHouse
- **[Self-managed ClickHouse](https://github.com/ClickHouse/ClickHouse):** ClickHouse can run on any Linux, FreeBSD, or Mac OS X with x86_64, AArch64, or PowerPC64LE CPU architecture
## ClickHouse Cloud
@@ -406,4 +406,3 @@ SELECT 1
**Congratulations, the system works!**
To continue experimenting, you can download one of the test data sets or go through the [tutorial](/docs/en/tutorial.md).

View File

@@ -1,5 +1,8 @@
---
slug: /en/operations/backup
---
[//]: # (This file is included in Manage > Backups)
# Backup and Restore
- [Backup to a local disk](#backup-to-a-local-disk)
- [Configuring backup/restore to use an S3 endpoint](#configuring-backuprestore-to-use-an-s3-endpoint)
@@ -356,4 +359,3 @@ Data can be restored from backup using the `ALTER TABLE ... ATTACH PARTITION ...
For more information about queries related to partition manipulations, see the [ALTER documentation](../sql-reference/statements/alter/partition.md#alter_manipulations-with-partitions).
A third-party tool is available to automate this approach: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup).

View File

@@ -286,3 +286,7 @@ end script
If you use antivirus software, configure it to skip folders with ClickHouse data files (`/var/lib/clickhouse`); otherwise, performance may be reduced and you may experience unexpected errors during data ingestion and background merges.
[Original article](https://clickhouse.com/docs/en/operations/tips/)
## Related Content
- [Getting started with ClickHouse? Here are 13 "Deadly Sins" and how to avoid them](https://clickhouse.com/blog/common-getting-started-issues-with-clickhouse)

View File

@@ -1,5 +1,8 @@
---
slug: /en/operations/update
---
[//]: # (This file is included in Manage > Updates)
# Update
## Self-managed ClickHouse Upgrade

View File

@@ -117,3 +117,8 @@ Read 186 rows, 4.15 KiB in 0.035 sec., 5302 rows/sec., 118.34 KiB/sec.
```
[Original article](https://clickhouse.com/docs/en/operations/utils/clickhouse-local/) <!--hide-->
## Related Content
- [Getting Data Into ClickHouse - Part 1](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1)
- [Exploring massive, real-world data sets: 100+ Years of Weather Records in ClickHouse](https://clickhouse.com/blog/real-world-data-noaa-climate-data)

View File

@@ -95,3 +95,6 @@ Result:
└─────────────────────────────────────────────────────────────────────────────────────────────────┴─────────────────┘
```
## Related Content
- [Exploring massive, real-world data sets: 100+ Years of Weather Records in ClickHouse](https://clickhouse.com/blog/real-world-data-noaa-climate-data)

View File

@@ -75,3 +75,7 @@ SELECT * FROM json FORMAT JSONEachRow
```text
{"o":{"a":1,"b":{"c":2,"d":[1,2,3]}}}
```
## Related Content
- [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json)

View File

@@ -1,4 +1,4 @@
:::tip
If you are using a dictionary with ClickHouse Cloud, please use the DDL query option to create your dictionaries, and create your dictionary as the user `default`.
Also, verify the list of supported dictionary sources in the [Cloud Compatibility guide](/docs/en/whats-new/cloud-capabilities.md).
Also, verify the list of supported dictionary sources in the [Cloud Compatibility guide](/docs/en/cloud/reference/cloud-compatibility.md).
:::

View File

@@ -134,3 +134,7 @@ Result:
│ [[[(3,1),(0,1),(0,-1),(3,-1)]]] │ Value │
└─────────────────────────────────┴───────┘
```
## Related Content
- [Exploring massive, real-world data sets: 100+ Years of Weather Records in ClickHouse](https://clickhouse.com/blog/real-world-data-noaa-climate-data)

View File

@@ -410,35 +410,35 @@ Converts a date with time to a certain fixed date, while preserving the time.
## toRelativeYearNum
Converts a date with time or date to the number of the year, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the year, starting from a certain fixed point in the past.
## toRelativeQuarterNum
Converts a date with time or date to the number of the quarter, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the quarter, starting from a certain fixed point in the past.
## toRelativeMonthNum
Converts a date with time or date to the number of the month, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the month, starting from a certain fixed point in the past.
## toRelativeWeekNum
Converts a date with time or date to the number of the week, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the week, starting from a certain fixed point in the past.
## toRelativeDayNum
Converts a date with time or date to the number of the day, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the day, starting from a certain fixed point in the past.
## toRelativeHourNum
Converts a date with time or date to the number of the hour, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the hour, starting from a certain fixed point in the past.
## toRelativeMinuteNum
Converts a date with time or date to the number of the minute, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the minute, starting from a certain fixed point in the past.
## toRelativeSecondNum
Converts a date with time or date to the number of the second, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the second, starting from a certain fixed point in the past.
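Taken together, these functions are convenient for distance arithmetic between dates. A minimal sketch (not part of the original docs) that reproduces the 3-day gap used in the `age`/`date_diff` examples below:
``` sql
SELECT
    toRelativeDayNum(toDate('2022-01-01')) - toRelativeDayNum(toDate('2021-12-29')) AS days_between; -- 3
```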
## toISOYear
@@ -517,6 +517,154 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
└────────────┴───────────┴───────────┴───────────┘
```
## age
Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 second.
For example, the difference between `2021-12-29` and `2022-01-01` is 3 days for the `day` unit, 0 months for the `month` unit, and 0 years for the `year` unit.
**Syntax**
``` sql
age('unit', startdate, enddate, [timezone])
```
**Arguments**
- `unit` — The type of interval for the result. [String](../../sql-reference/data-types/string.md).
Possible values:
- `second` (possible abbreviations: `ss`, `s`)
- `minute` (possible abbreviations: `mi`, `n`)
- `hour` (possible abbreviations: `hh`, `h`)
- `day` (possible abbreviations: `dd`, `d`)
- `week` (possible abbreviations: `wk`, `ww`)
- `month` (possible abbreviations: `mm`, `m`)
- `quarter` (possible abbreviations: `qq`, `q`)
- `year` (possible abbreviations: `yyyy`, `yy`)
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
**Returned value**
Difference between `enddate` and `startdate` expressed in `unit`.
Type: [Int](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'));
```
Result:
``` text
┌─age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 24 │
└───────────────────────────────────────────────────────────────────────────────────┘
```
Query:
``` sql
SELECT
toDate('2022-01-01') AS e,
toDate('2021-12-29') AS s,
age('day', s, e) AS day_age,
age('month', s, e) AS month__age,
age('year', s, e) AS year_age;
```
Result:
``` text
┌──────────e─┬──────────s─┬─day_age─┬─month__age─┬─year_age─┐
│ 2022-01-01 │ 2021-12-29 │ 3 │ 0 │ 0 │
└────────────┴────────────┴─────────┴────────────┴──────────┘
```
## date\_diff
Returns the count of the specified `unit` boundaries crossed between `startdate` and `enddate`.
The difference is calculated using relative units; e.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for the `day` unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for the `month` unit (see [toRelativeMonthNum](#torelativemonthnum)), and 1 year for the `year` unit (see [toRelativeYearNum](#torelativeyearnum)).
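To make the contrast with `age` concrete, the following sketch puts the two functions side by side on the dates from the descriptions above:
``` sql
SELECT
    age('year', toDate('2021-12-29'), toDate('2022-01-01')) AS age_year,       -- 0: a full year has not elapsed
    dateDiff('year', toDate('2021-12-29'), toDate('2022-01-01')) AS diff_year; -- 1: one year boundary was crossed
```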
**Syntax**
``` sql
date_diff('unit', startdate, enddate, [timezone])
```
Aliases: `dateDiff`, `DATE_DIFF`.
**Arguments**
- `unit` — The type of interval for the result. [String](../../sql-reference/data-types/string.md).
Possible values:
- `second` (possible abbreviations: `ss`, `s`)
- `minute` (possible abbreviations: `mi`, `n`)
- `hour` (possible abbreviations: `hh`, `h`)
- `day` (possible abbreviations: `dd`, `d`)
- `week` (possible abbreviations: `wk`, `ww`)
- `month` (possible abbreviations: `mm`, `m`)
- `quarter` (possible abbreviations: `qq`, `q`)
- `year` (possible abbreviations: `yyyy`, `yy`)
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
**Returned value**
Difference between `enddate` and `startdate` expressed in `unit`.
Type: [Int](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```
Result:
``` text
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 25 │
└────────────────────────────────────────────────────────────────────────────────────────┘
```
Query:
``` sql
SELECT
toDate('2022-01-01') AS e,
toDate('2021-12-29') AS s,
dateDiff('day', s, e) AS day_diff,
dateDiff('month', s, e) AS month__diff,
dateDiff('year', s, e) AS year_diff;
```
Result:
``` text
┌──────────e─┬──────────s─┬─day_diff─┬─month__diff─┬─year_diff─┐
│ 2022-01-01 │ 2021-12-29 │ 3 │ 1 │ 1 │
└────────────┴────────────┴──────────┴─────────────┴───────────┘
```
## date\_trunc
Truncates a date and time value to the specified part of the date.
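For instance, a one-line sketch (input value illustrative) of truncating to the start of the month:
``` sql
SELECT date_trunc('month', toDateTime('2022-03-17 11:20:05')); -- 2022-03-01 00:00:00
```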
@@ -637,80 +785,6 @@ Result:
└───────────────────────────────────────────────┘
```
## date\_diff
Returns the difference between two dates or dates with time values.
The difference is calculated using relative units, e.g. the difference between `2022-01-01` and `2021-12-29` is 3 days for day unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for month unit (see [toRelativeMonthNum](#torelativemonthnum)), 1 year for year unit (see [toRelativeYearNum](#torelativeyearnum)).
**Syntax**
``` sql
date_diff('unit', startdate, enddate, [timezone])
```
Aliases: `dateDiff`, `DATE_DIFF`.
**Arguments**
- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
Possible values:
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
**Returned value**
Difference between `enddate` and `startdate` expressed in `unit`.
Type: [Int](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```
Result:
``` text
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 25 │
└────────────────────────────────────────────────────────────────────────────────────────┘
```
Query:
``` sql
SELECT
toDate('2022-01-01') AS e,
toDate('2021-12-29') AS s,
dateDiff('day', s, e) AS day_diff,
dateDiff('month', s, e) AS month__diff,
dateDiff('year', s, e) AS year_diff;
```
Result:
``` text
┌──────────e─┬──────────s─┬─day_diff─┬─month__diff─┬─year_diff─┐
│ 2022-01-01 │ 2021-12-29 │ 3 │ 1 │ 1 │
└────────────┴────────────┴──────────┴─────────────┴───────────┘
```
## date\_sub
Subtracts the time interval or date interval from the provided date or date with time.

View File

@@ -464,5 +464,39 @@ Removes the query string and fragment identifier. The question mark and number s
### cutURLParameter(URL, name)
Removes the name URL parameter, if present. This function works under the assumption that the parameter name is encoded in the URL exactly the same way as in the passed argument.
Removes the `name` parameter from a URL, if present. This function does not encode or decode characters in parameter names; e.g. `Client ID` and `Client%20ID` are treated as different parameter names.
**Syntax**
``` sql
cutURLParameter(URL, name)
```
**Arguments**
- `url` — URL. [String](../../sql-reference/data-types/string.md).
- `name` — the name of the URL parameter. [String](../../sql-reference/data-types/string.md) or [Array](../../sql-reference/data-types/array.md) of Strings.
**Returned value**
- URL with `name` URL parameter removed.
Type: `String`.
**Example**
Query:
``` sql
SELECT
cutURLParameter('http://bigmir.net/?a=b&c=d&e=f#g', 'a') as url_without_a,
cutURLParameter('http://bigmir.net/?a=b&c=d&e=f#g', ['c', 'e']) as url_without_c_and_e;
```
Result:
``` text
┌─url_without_a────────────────┬─url_without_c_and_e──────┐
│ http://bigmir.net/?c=d&e=f#g │ http://bigmir.net/?a=b#g │
└──────────────────────────────┴──────────────────────────┘
```
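Because parameter names are matched verbatim, the encoded and decoded spellings of the same name do not match each other. A sketch of this corner case (the URL is illustrative):
``` sql
SELECT
    cutURLParameter('http://example.com/?Client%20ID=42', 'Client ID') AS name_not_encoded,   -- no verbatim match, parameter kept
    cutURLParameter('http://example.com/?Client%20ID=42', 'Client%20ID') AS name_encoded;     -- exact match, parameter removed
```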

View File

@@ -194,7 +194,7 @@ To restore data from a backup, do the following:
Restoring from a backup does not require stopping the server.
For more information about backups and restoring data, see the [Data Backup](/docs/en/manage/backups.mdx) section.
For more information about backups and restoring data, see the [Data Backup](/docs/en/operations/backup.md) section.
## UNFREEZE PARTITION

View File

@@ -587,3 +587,8 @@ ORDER BY
│ ambient_temp │ 2020-03-01 12:00:00 │ 16 │ 16 │
└──────────────┴─────────────────────┴───────┴─────────────────────────┘
```
## Related Content
- [Window and array functions for Git commit sequences](https://clickhouse.com/blog/clickhouse-window-array-functions-git-commits)
- [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3)

View File

@@ -424,23 +424,23 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
## toRelativeYearNum {#torelativeyearnum}
Converts a date with time or date to the number of the year, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the year, starting from a certain fixed point in the past.
## toRelativeQuarterNum {#torelativequarternum}
Converts a date with time or date to the number of the quarter, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the quarter, starting from a certain fixed point in the past.
## toRelativeMonthNum {#torelativemonthnum}
Converts a date with time or date to the number of the month, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the month, starting from a certain fixed point in the past.
## toRelativeWeekNum {#torelativeweeknum}
Converts a date with time or date to the number of the week, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the week, starting from a certain fixed point in the past.
## toRelativeDayNum {#torelativedaynum}
Converts a date with time or date to the number of the day, starting from a certain fixed point in the past.
Converts a date or date with time to the number of the day, starting from a certain fixed point in the past.
## toRelativeHourNum {#torelativehournum}
@@ -456,7 +456,7 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
## toISOYear {#toisoyear}
Converts a date with time or date to a UInt16 number containing the ISO year number. The ISO year differs from the ordinary year because, in accordance with [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601), the ISO year does not necessarily begin on January 1.
Converts a date or date with time to a UInt16 number containing the ISO year number. The ISO year differs from the ordinary year because, in accordance with [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601), the ISO year does not necessarily begin on January 1.
**Example**
@@ -479,7 +479,7 @@ SELECT
## toISOWeek {#toisoweek}
Converts a date with time or date to a UInt8 number containing the ISO week number.
Converts a date or date with time to a UInt8 number containing the ISO week number.
The start of the ISO year differs from the start of the ordinary year because, in accordance with [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601), the first week of the year is the week that contains four or more days of that year.
January 1, 2017 was a Sunday, so the first ISO week of 2017 began on Monday, January 2, and January 1, 2017 therefore belongs to the last week of 2016.
@@ -503,7 +503,7 @@
```
## toWeek(date\[, mode\]\[, timezone\]) {#toweek}
Converts a date with time or date to a UInt8 number containing the week number. The second argument, `mode`, specifies whether the week starts on Sunday or Monday and whether the return value should be in the range from 0 to 53 or from 1 to 53. If the `mode` argument is omitted, mode 0 is used.
Converts a date or date with time to a UInt8 number containing the week number. The second argument, `mode`, specifies whether the week starts on Sunday or Monday and whether the return value should be in the range from 0 to 53 or from 1 to 53. If the `mode` argument is omitted, mode 0 is used.
`toISOWeek()` is equivalent to `toWeek(date,3)`.
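A small sketch of the `mode` argument (result values not shown; per the statement above, mode 3 matches `toISOWeek`):
``` sql
SELECT
    toDate('2016-12-27') AS date,
    toWeek(date) AS week_mode0,    -- default mode 0: the week starts on Sunday
    toWeek(date, 1) AS week_mode1, -- mode 1: the week starts on Monday
    toWeek(date, 3) AS week_mode3; -- mode 3: equivalent to toISOWeek(date)
```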
@ -569,6 +569,132 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
└────────────┴───────────┴───────────┴───────────┘
```
## age
Computes the `unit` component of the difference between `startdate` and `enddate`. The difference is computed with a precision of 1 second.
For example, the difference between `2021-12-29` and `2022-01-01` is 3 days for the `day` unit, 0 months for the `month` unit, and 0 years for the `year` unit.
**Syntax**
``` sql
age('unit', startdate, enddate, [timezone])
```
**Arguments**
- `unit` — the unit of time in which the returned value is expressed. [String](../../sql-reference/data-types/string.md).
Possible values:
- `second` (possible abbreviations: `ss`, `s`)
- `minute` (possible abbreviations: `mi`, `n`)
- `hour` (possible abbreviations: `hh`, `h`)
- `day` (possible abbreviations: `dd`, `d`)
- `week` (possible abbreviations: `wk`, `ww`)
- `month` (possible abbreviations: `mm`, `m`)
- `quarter` (possible abbreviations: `qq`, `q`)
- `year` (possible abbreviations: `yyyy`, `yy`)
- `startdate` — the first date or date with time, which is subtracted from `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `enddate` — the second date or date with time, from which `startdate` is subtracted. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — [time zone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, the time zones of `startdate` and `enddate` are used; if they do not match, the result is undefined. [String](../../sql-reference/data-types/string.md).
**Returned value**
The difference between `enddate` and `startdate`, expressed in `unit`.
Type: [Int](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'));
```
Result:
``` text
┌─age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 24 │
└───────────────────────────────────────────────────────────────────────────────────┘
```
Query:
``` sql
SELECT
toDate('2022-01-01') AS e,
toDate('2021-12-29') AS s,
age('day', s, e) AS day_age,
age('month', s, e) AS month_age,
age('year', s, e) AS year_age;
```
Result:
``` text
┌──────────e─┬──────────s─┬─day_age─┬─month_age──┬─year_age─┐
│ 2022-01-01 │ 2021-12-29 │ 3 │ 0 │ 0 │
└────────────┴────────────┴─────────┴────────────┴──────────┘
```
## date\_diff {#date_diff}
Computes the number of `unit` boundaries crossed between `startdate` and `enddate`.
**Syntax**
``` sql
date_diff('unit', startdate, enddate, [timezone])
```
Aliases: `dateDiff`, `DATE_DIFF`.
**Arguments**
- `unit` — the unit of time in which the returned value is expressed. [String](../../sql-reference/data-types/string.md).
Possible values:
- `second` (possible abbreviations: `ss`, `s`)
- `minute` (possible abbreviations: `mi`, `n`)
- `hour` (possible abbreviations: `hh`, `h`)
- `day` (possible abbreviations: `dd`, `d`)
- `week` (possible abbreviations: `wk`, `ww`)
- `month` (possible abbreviations: `mm`, `m`)
- `quarter` (possible abbreviations: `qq`, `q`)
- `year` (possible abbreviations: `yyyy`, `yy`)
- `startdate` — the first date or date with time, which is subtracted from `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `enddate` — the second date or date with time, from which `startdate` is subtracted. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — [time zone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, the time zones of `startdate` and `enddate` are used; if they do not match, the result is undefined. [String](../../sql-reference/data-types/string.md).
**Returned value**
The difference between `enddate` and `startdate`, expressed in `unit`.
Type: [Int](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```
Result:
``` text
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 25 │
└────────────────────────────────────────────────────────────────────────────────────────┘
```
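To make the boundary-counting semantics concrete, here is a minimal sketch contrasting `date_diff` with `age` (the expected values follow from the definitions above):

``` sql
SELECT
    dateDiff('year', toDate('2021-12-31'), toDate('2022-01-01')) AS boundaries_crossed, -- 1: one year boundary is crossed
    age('year', toDate('2021-12-31'), toDate('2022-01-01')) AS full_years_elapsed;      -- 0: less than a full year has passed
```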
## date_trunc {#date_trunc}
Truncates a date and time value, dropping the parts smaller than the specified part.
@ -689,60 +815,6 @@ SELECT date_add(YEAR, 3, toDate('2018-01-01'));
└───────────────────────────────────────────────┘
```
## date\_diff {#date_diff}
Computes the difference between two date or date-with-time values.
**Syntax**
``` sql
date_diff('unit', startdate, enddate, [timezone])
```
Aliases: `dateDiff`, `DATE_DIFF`.
**Arguments**
- `unit` — the unit of time in which the returned value is expressed. [String](../../sql-reference/data-types/string.md).
Possible values:
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
- `startdate` — the first date or date with time, which is subtracted from `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `enddate` — the second date or date with time, from which `startdate` is subtracted. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — [time zone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, the time zones of `startdate` and `enddate` are used; if they do not match, the result is undefined. [String](../../sql-reference/data-types/string.md).
**Returned value**
The difference between `enddate` and `startdate`, expressed in `unit`.
Type: [Int](../../sql-reference/data-types/int-uint.md).
**Example**
Query:
``` sql
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```
Result:
``` text
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
│ 25 │
└────────────────────────────────────────────────────────────────────────────────────────┘
```
## date\_sub {#date_sub}
Subtracts a time or date interval from the given date or date with time.

View File

@ -404,5 +404,39 @@ SELECT netloc('http://paul@www.example.com:80/');
### cutURLParameter(URL, name) {#cuturlparameterurl-name}
Removes the URL parameter named name, if present. The function works under the assumption that the parameter name is encoded in the URL exactly as in the passed argument.
Removes the parameter named `name` from the URL, if present. The function does not encode or decode characters in parameter names; for example, `Client ID` and `Client%20ID` are treated as different parameter names.
**Syntax**
``` sql
cutURLParameter(URL, name)
```
**Arguments**
- `url` — URL. [String](../../sql-reference/data-types/string.md).
- `name` — the name of the URL parameter. [String](../../sql-reference/data-types/string.md) or [Array](../../sql-reference/data-types/array.md) of strings.
**Returned value**
- The URL with the parameter named `name` removed.
Type: `String`.
**Example**
Query:
``` sql
SELECT
cutURLParameter('http://bigmir.net/?a=b&c=d&e=f#g', 'a') as url_without_a,
cutURLParameter('http://bigmir.net/?a=b&c=d&e=f#g', ['c', 'e']) as url_without_c_and_e;
```
Result:
``` text
┌─url_without_a────────────────┬─url_without_c_and_e──────┐
│ http://bigmir.net/?c=d&e=f#g │ http://bigmir.net/?a=b#g │
└──────────────────────────────┴──────────────────────────┘
```
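Because names are matched without encoding or decoding, an encoded name only matches its encoded form. A small sketch (the URL is illustrative):

``` sql
SELECT
    cutURLParameter('http://example.com/?Client%20ID=1&x=2', 'Client ID')   AS no_match,    -- URL returned unchanged
    cutURLParameter('http://example.com/?Client%20ID=1&x=2', 'Client%20ID') AS exact_match; -- returns 'http://example.com/?x=2'
```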

View File

@ -1 +0,0 @@
../../../en/sql-reference/table-functions/format.md

View File

@ -0,0 +1,75 @@
---
slug: /ru/sql-reference/table-functions/format
sidebar_position: 56
sidebar_label: format
---
# format
Extracts a table structure from the data and parses the data according to the specified input format.
**Syntax**
``` sql
format(format_name, data)
```
**Parameters**
- `format_name` — The [format](../../interfaces/formats.md#formats) of the data.
- `data` — A string literal or constant expression that returns a string containing data in the specified format.
**Returned value**
A table with data parsed from the `data` argument according to the specified format, using the extracted schema.
**Examples**
**Query:**
``` sql
:) select * from format(JSONEachRow,
$$
{"a": "Hello", "b": 111}
{"a": "World", "b": 123}
{"a": "Hello", "b": 112}
{"a": "World", "b": 124}
$$)
```
**Result:**
```text
┌───b─┬─a─────┐
│ 111 │ Hello │
│ 123 │ World │
│ 112 │ Hello │
│ 124 │ World │
└─────┴───────┘
```
**Query:**
```sql
:) desc format(JSONEachRow,
$$
{"a": "Hello", "b": 111}
{"a": "World", "b": 123}
{"a": "Hello", "b": 112}
{"a": "World", "b": 124}
$$)
```
**Result:**
```text
┌─name─┬─type──────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ b │ Nullable(Float64) │ │ │ │ │ │
│ a │ Nullable(String) │ │ │ │ │ │
└──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
**See Also**
- [Formats](../../interfaces/formats.md)
[Original article](https://clickhouse.com/docs/en/sql-reference/table-functions/format) <!--hide-->

View File

@ -1 +0,0 @@
../../../en/sql-reference/table-functions/format.md

View File

@ -0,0 +1,75 @@
---
slug: /zh/sql-reference/table-functions/format
sidebar_position: 56
sidebar_label: format
---
# format
Extracts a table structure from the data and parses the data according to the specified input format.
**Syntax**
``` sql
format(format_name, data)
```
**Parameters**
- `format_name` — The [format](../../interfaces/formats.md#formats) of the data.
- `data` — A string literal or constant expression that returns a string containing data in the specified format.
**Returned value**
A table with data parsed from the `data` argument according to the specified format, using the extracted schema.
**Examples**
**Query:**
``` sql
:) select * from format(JSONEachRow,
$$
{"a": "Hello", "b": 111}
{"a": "World", "b": 123}
{"a": "Hello", "b": 112}
{"a": "World", "b": 124}
$$)
```
**Result:**
```text
┌───b─┬─a─────┐
│ 111 │ Hello │
│ 123 │ World │
│ 112 │ Hello │
│ 124 │ World │
└─────┴───────┘
```
**Query:**
```sql
:) desc format(JSONEachRow,
$$
{"a": "Hello", "b": 111}
{"a": "World", "b": 123}
{"a": "Hello", "b": 112}
{"a": "World", "b": 124}
$$)
```
**Result:**
```text
┌─name─┬─type──────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ b │ Nullable(Float64) │ │ │ │ │ │
│ a │ Nullable(String) │ │ │ │ │ │
└──────┴───────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```
**See Also**
- [Formats](../../interfaces/formats.md)
[Original article](https://clickhouse.com/docs/en/sql-reference/table-functions/format) <!--hide-->

View File

@ -58,22 +58,52 @@ namespace ErrorCodes
class Benchmark : public Poco::Util::Application
{
public:
Benchmark(unsigned concurrency_, double delay_,
Strings && hosts_, Ports && ports_, bool round_robin_,
bool cumulative_, bool secure_, const String & default_database_,
const String & user_, const String & password_, const String & quota_key_, const String & stage,
bool randomize_, size_t max_iterations_, double max_time_,
const String & json_path_, size_t confidence_,
const String & query_id_, const String & query_to_execute_, bool continue_on_errors_,
bool reconnect_, bool display_client_side_time_, bool print_stacktrace_, const Settings & settings_)
Benchmark(unsigned concurrency_,
double delay_,
Strings && hosts_,
Ports && ports_,
bool round_robin_,
bool cumulative_,
bool secure_,
const String & default_database_,
const String & user_,
const String & password_,
const String & quota_key_,
const String & stage,
bool randomize_,
size_t max_iterations_,
double max_time_,
const String & json_path_,
size_t confidence_,
const String & query_id_,
const String & query_to_execute_,
size_t max_consecutive_errors_,
bool continue_on_errors_,
bool reconnect_,
bool display_client_side_time_,
bool print_stacktrace_,
const Settings & settings_)
:
round_robin(round_robin_), concurrency(concurrency_), delay(delay_), queue(concurrency), randomize(randomize_),
cumulative(cumulative_), max_iterations(max_iterations_), max_time(max_time_),
json_path(json_path_), confidence(confidence_), query_id(query_id_),
query_to_execute(query_to_execute_), continue_on_errors(continue_on_errors_), reconnect(reconnect_),
round_robin(round_robin_),
concurrency(concurrency_),
delay(delay_),
queue(concurrency),
randomize(randomize_),
cumulative(cumulative_),
max_iterations(max_iterations_),
max_time(max_time_),
json_path(json_path_),
confidence(confidence_),
query_id(query_id_),
query_to_execute(query_to_execute_),
continue_on_errors(continue_on_errors_),
max_consecutive_errors(max_consecutive_errors_),
reconnect(reconnect_),
display_client_side_time(display_client_side_time_),
print_stacktrace(print_stacktrace_), settings(settings_),
shared_context(Context::createShared()), global_context(Context::createGlobal(shared_context.get())),
print_stacktrace(print_stacktrace_),
settings(settings_),
shared_context(Context::createShared()),
global_context(Context::createGlobal(shared_context.get())),
pool(concurrency)
{
const auto secure = secure_ ? Protocol::Secure::Enable : Protocol::Secure::Disable;
@ -166,6 +196,7 @@ private:
String query_id;
String query_to_execute;
bool continue_on_errors;
size_t max_consecutive_errors;
bool reconnect;
bool display_client_side_time;
bool print_stacktrace;
@ -174,6 +205,8 @@ private:
ContextMutablePtr global_context;
QueryProcessingStage::Enum query_processing_stage;
std::atomic<size_t> consecutive_errors{0};
/// Don't execute new queries after timelimit or SIGINT or exception
std::atomic<bool> shutdown{false};
@ -393,13 +426,14 @@ private:
try
{
execute(connection_entries, query, connection_index);
consecutive_errors = 0;
}
catch (...)
{
std::lock_guard lock(mutex);
std::cerr << "An error occurred while processing the query " << "'" << query << "'"
<< ": " << getCurrentExceptionMessage(false) << std::endl;
if (!continue_on_errors)
if (!(continue_on_errors || max_consecutive_errors > ++consecutive_errors))
{
shutdown = true;
throw;
@ -648,6 +682,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
("stacktrace", "print stack traces of exceptions")
("confidence", value<size_t>()->default_value(5), "set the level of confidence for T-test [0=80%, 1=90%, 2=95%, 3=98%, 4=99%, 5=99.5%(default)")
("query_id", value<std::string>()->default_value(""), "")
("max-consecutive-errors", value<size_t>()->default_value(0), "set number of allowed consecutive errors")
("continue_on_errors", "continue testing even if a query fails")
("reconnect", "establish new connection for every query")
("client-side-time", "display the time including network communication instead of server-side time; note that for server versions before 22.8 we always display client-side time")
@ -702,6 +737,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
options["confidence"].as<size_t>(),
options["query_id"].as<std::string>(),
options["query"].as<std::string>(),
options["max-consecutive-errors"].as<size_t>(),
options.count("continue_on_errors"),
options.count("reconnect"),
options.count("client-side-time"),

View File

@ -348,17 +348,9 @@ void Client::connect()
}
catch (const Exception & e)
{
/// It is typical when users install ClickHouse, type some password and instantly forget it.
/// This problem can't be fixed with reconnection so it is not attempted
if ((connection_parameters.user.empty() || connection_parameters.user == "default")
&& e.code() == DB::ErrorCodes::AUTHENTICATION_FAILED)
if (e.code() == DB::ErrorCodes::AUTHENTICATION_FAILED)
{
std::cerr << std::endl
<< "If you have installed ClickHouse and forgot password you can reset it in the configuration file." << std::endl
<< "The password for default user is typically located at /etc/clickhouse-server/users.d/default-password.xml" << std::endl
<< "and deleting this file will reset the password." << std::endl
<< "See also /etc/clickhouse-server/users.xml on the server where ClickHouse is installed." << std::endl
<< std::endl;
/// This problem can't be fixed with reconnection so it is not attempted
throw;
}
else

View File

@ -670,24 +670,30 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
}
/// Create node to signal that we finished moving
/// Also increment a counter of processed partitions
{
String state_finished = TaskStateWithOwner::getData(TaskState::Finished, host_id);
zookeeper->set(current_partition_attach_is_done, state_finished, 0);
/// Also increment a counter of processed partitions
const auto state_finished = TaskStateWithOwner::getData(TaskState::Finished, host_id);
const auto task_status = task_zookeeper_path + "/status";
/// Try until success
while (true)
{
Coordination::Stat stat;
auto status_json = zookeeper->get(task_zookeeper_path + "/status", &stat);
auto status_json = zookeeper->get(task_status, &stat);
auto statuses = StatusAccumulator::fromJSON(status_json);
/// Increment status for table.
auto status_for_table = (*statuses)[task_table.name_in_config];
status_for_table.processed_partitions_count += 1;
(*statuses)[task_table.name_in_config] = status_for_table;
(*statuses)[task_table.name_in_config].processed_partitions_count += 1;
auto statuses_to_commit = StatusAccumulator::serializeToJSON(statuses);
auto error = zookeeper->trySet(task_zookeeper_path + "/status", statuses_to_commit, stat.version, &stat);
if (error == Coordination::Error::ZOK)
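/// Commit the per-partition 'finished' marker and the updated status JSON atomically,
/// so concurrent workers never observe one without the other.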
Coordination::Requests ops;
ops.emplace_back(zkutil::makeSetRequest(current_partition_attach_is_done, state_finished, 0));
ops.emplace_back(zkutil::makeSetRequest(task_status, statuses_to_commit, stat.version));
Coordination::Responses responses;
Coordination::Error code = zookeeper->tryMulti(ops, responses);
if (code == Coordination::Error::ZOK)
break;
}
}

View File

@ -20,7 +20,7 @@
#include <Backups/RestorerFromBackup.h>
#include <Core/Settings.h>
#include <base/defines.h>
#include <base/find_symbols.h>
#include <IO/Operators.h>
#include <Poco/AccessExpireCache.h>
#include <boost/algorithm/string/join.hpp>
#include <boost/algorithm/string/split.hpp>
@ -454,9 +454,21 @@ UUID AccessControl::authenticate(const Credentials & credentials, const Poco::Ne
{
tryLogCurrentException(getLogger(), "from: " + address.toString() + ", user: " + credentials.getUserName() + ": Authentication failed");
WriteBufferFromOwnString message;
message << credentials.getUserName() << ": Authentication failed: password is incorrect, or there is no user with such a name.";
/// Better exception message for usability.
/// It is typical when users install ClickHouse, type some password and instantly forget it.
if (credentials.getUserName().empty() || credentials.getUserName() == "default")
message << "\n\n"
<< "If you have installed ClickHouse and forgot password you can reset it in the configuration file.\n"
<< "The password for default user is typically located at /etc/clickhouse-server/users.d/default-password.xml\n"
<< "and deleting this file will reset the password.\n"
<< "See also /etc/clickhouse-server/users.xml on the server where ClickHouse is installed.\n\n";
/// We use the same message for all authentication failures because we don't want to give away any unnecessary information for security reasons;
/// only the log will show the exact reason.
throw Exception(credentials.getUserName() + ": Authentication failed: password is incorrect or there is no user with such name", ErrorCodes::AUTHENTICATION_FAILED);
throw Exception(message.str(), ErrorCodes::AUTHENTICATION_FAILED);
}
}

View File

@ -77,7 +77,10 @@ protected:
static bool getFlag(ConstAggregateDataPtr __restrict place) noexcept
{
return result_is_nullable ? place[0] : true;
if constexpr (result_is_nullable)
return place[0];
else
return true;
}
public:
@ -98,9 +101,10 @@ public:
DataTypePtr getReturnType() const override
{
return result_is_nullable
? makeNullable(nested_function->getReturnType())
: nested_function->getReturnType();
if constexpr (result_is_nullable)
return makeNullable(nested_function->getReturnType());
else
return nested_function->getReturnType();
}
void create(AggregateDataPtr __restrict place) const override
@ -136,8 +140,9 @@ public:
void merge(AggregateDataPtr __restrict place, ConstAggregateDataPtr rhs, Arena * arena) const override
{
if (result_is_nullable && getFlag(rhs))
setFlag(place);
if constexpr (result_is_nullable)
if (getFlag(rhs))
setFlag(place);
nested_function->merge(nestedPlace(place), nestedPlace(rhs), arena);
}

View File

@ -2843,6 +2843,14 @@ IdentifierResolveResult QueryAnalyzer::tryResolveIdentifierInParentScopes(const
}
}
/** Nested subqueries cannot access table expressions from the JOIN trees of outer subqueries, because
 * that could prevent resolving a table expression that comes from a CTE.
*
* Example: WITH a AS (SELECT number FROM numbers(1)), b AS (SELECT number FROM a) SELECT * FROM a as l, b as r;
*/
if (identifier_lookup.isTableExpressionLookup())
identifier_resolve_settings.allow_to_check_join_tree = false;
while (scope_to_check != nullptr)
{
auto lookup_result = tryResolveIdentifier(identifier_lookup, *scope_to_check, identifier_resolve_settings);

View File

@ -1204,6 +1204,11 @@ public:
return res;
}
template <typename DateOrTime>
inline DateTimeComponents toDateTimeComponents(DateOrTime v) const
{
return toDateTimeComponents(lut[toLUTIndex(v)].date);
}
inline UInt64 toNumYYYYMMDDhhmmss(Time t) const
{

View File

@ -325,7 +325,7 @@ void KeeperServer::launchRaftServer(const Poco::Util::AbstractConfiguration & co
{
auto asio_listener = asio_service->create_rpc_listener(state_manager->getPort(), logger, enable_ipv6);
if (!asio_listener)
return;
throw Exception(ErrorCodes::RAFT_ERROR, "Cannot create interserver listener on port {}", state_manager->getPort());
asio_listeners.emplace_back(std::move(asio_listener));
}
else
@ -924,14 +924,22 @@ KeeperLogInfo KeeperServer::getKeeperLogInfo()
{
KeeperLogInfo log_info;
auto log_store = state_manager->load_log_store();
log_info.first_log_idx = log_store->start_index();
log_info.first_log_term = log_store->term_at(log_info.first_log_idx);
log_info.last_log_idx = raft_instance->get_last_log_idx();
log_info.last_log_term = raft_instance->get_last_log_term();
log_info.last_committed_log_idx = raft_instance->get_committed_log_idx();
log_info.leader_committed_log_idx = raft_instance->get_leader_committed_log_idx();
log_info.target_committed_log_idx = raft_instance->get_target_committed_log_idx();
log_info.last_snapshot_idx = raft_instance->get_last_snapshot_idx();
if (log_store)
{
log_info.first_log_idx = log_store->start_index();
log_info.first_log_term = log_store->term_at(log_info.first_log_idx);
}
if (raft_instance)
{
log_info.last_log_idx = raft_instance->get_last_log_idx();
log_info.last_log_term = raft_instance->get_last_log_term();
log_info.last_committed_log_idx = raft_instance->get_committed_log_idx();
log_info.leader_committed_log_idx = raft_instance->get_leader_committed_log_idx();
log_info.target_committed_log_idx = raft_instance->get_target_committed_log_idx();
log_info.last_snapshot_idx = raft_instance->get_last_snapshot_idx();
}
return log_info;
}

View File

@ -546,7 +546,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
\
M(Bool, collect_hash_table_stats_during_aggregation, true, "Enable collecting hash table statistics to optimize memory allocation", 0) \
M(UInt64, max_entries_for_hash_table_stats, 10'000, "How many entries hash table statistics collected during aggregation is allowed to have", 0) \
M(UInt64, max_size_to_preallocate_for_aggregation, 10'000'000, "For how many elements it is allowed to preallocate space in all hash tables in total before aggregation", 0) \
M(UInt64, max_size_to_preallocate_for_aggregation, 100'000'000, "For how many elements it is allowed to preallocate space in all hash tables in total before aggregation", 0) \
\
M(Bool, kafka_disable_num_consumers_limit, false, "Disable limit on kafka_num_consumers that depends on the number of available CPU cores", 0) \
M(Bool, enable_software_prefetch_in_aggregation, true, "Enable use of software prefetch in aggregation", 0) \
@ -583,6 +583,7 @@ static constexpr UInt64 operator""_GiB(unsigned long long value)
M(Bool, query_plan_filter_push_down, true, "Allow to push down filter by predicate query plan step", 0) \
M(Bool, query_plan_optimize_primary_key, true, "Analyze primary key using query plan (instead of AST)", 0) \
M(Bool, query_plan_read_in_order, true, "Use query plan for read-in-order optimisation", 0) \
M(Bool, query_plan_aggregation_in_order, true, "Use query plan for aggregation-in-order optimisation", 0) \
M(UInt64, regexp_max_matches_per_row, 1000, "Max matches of any single regexp per row, used to safeguard 'extractAllGroupsHorizontal' against consuming too much memory with greedy RE.", 0) \
\
M(UInt64, limit, 0, "Limit on read rows from the most 'end' result for select query, default 0 means no limit length", 0) \
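A sketch of how the new `query_plan_aggregation_in_order` setting is exercised (the table and key names below are illustrative assumptions):

``` sql
-- assumes a table whose sorting key starts with CounterID
SET optimize_aggregation_in_order = 1, query_plan_aggregation_in_order = 1;
EXPLAIN PLAN SELECT CounterID, count() FROM hits GROUP BY CounterID;
```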

View File

@ -51,13 +51,13 @@ struct SortColumnDescription
SortColumnDescription() = default;
explicit SortColumnDescription(
const std::string & column_name_,
std::string column_name_,
int direction_ = 1,
int nulls_direction_ = 1,
const std::shared_ptr<Collator> & collator_ = nullptr,
bool with_fill_ = false,
const FillColumnDescription & fill_description_ = {})
: column_name(column_name_)
: column_name(std::move(column_name_))
, direction(direction_)
, nulls_direction(nulls_direction_)
, collator(collator_)

View File

@ -1343,6 +1343,30 @@ struct ToYYYYMMDDhhmmssImpl
using FactorTransform = ZeroTransform;
};
struct ToDateTimeComponentsImpl
{
static constexpr auto name = "toDateTimeComponents";
static inline DateLUTImpl::DateTimeComponents execute(Int64 t, const DateLUTImpl & time_zone)
{
return time_zone.toDateTimeComponents(t);
}
static inline DateLUTImpl::DateTimeComponents execute(UInt32 t, const DateLUTImpl & time_zone)
{
return time_zone.toDateTimeComponents(static_cast<DateLUTImpl::Time>(t));
}
static inline DateLUTImpl::DateTimeComponents execute(Int32 d, const DateLUTImpl & time_zone)
{
return time_zone.toDateTimeComponents(ExtendedDayNum(d));
}
static inline DateLUTImpl::DateTimeComponents execute(UInt16 d, const DateLUTImpl & time_zone)
{
return time_zone.toDateTimeComponents(DayNum(d));
}
using FactorTransform = ZeroTransform;
};
template <typename FromType, typename ToType, typename Transform, bool is_extended_result = false>
struct Transformer

View File

@ -48,6 +48,10 @@ public:
: scale_multiplier(DecimalUtils::scaleMultiplier<DateTime64::NativeType>(scale_))
{}
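/// Constructs the transform from a precomputed scale multiplier,
/// e.g. one taken from another transform via getScaleMultiplier().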
TransformDateTime64(DateTime64::NativeType scale_multiplier_ = 1) /// NOLINT(google-explicit-constructor)
: scale_multiplier(scale_multiplier_)
{}
template <typename ... Args>
inline auto NO_SANITIZE_UNDEFINED execute(const DateTime64 & t, Args && ... args) const
{
@ -127,6 +131,8 @@ public:
return wrapped_transform.executeExtendedResult(t, std::forward<Args>(args)...);
}
DateTime64::NativeType getScaleMultiplier() const { return scale_multiplier; }
private:
DateTime64::NativeType scale_multiplier = 1;
Transform wrapped_transform = {};

View File

@ -1,82 +1,174 @@
#include <Columns/ColumnString.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeString.h>
#include <Functions/FunctionHelpers.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionsStringSearchToString.h>
#include <base/find_symbols.h>
namespace DB
{
struct CutURLParameterImpl
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ILLEGAL_COLUMN;
}
class FunctionCutURLParameter : public IFunction
{
public:
static constexpr auto name = "cutURLParameter";
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionCutURLParameter>(); }
String getName() const override { return name; }
size_t getNumberOfArguments() const override { return 2; }
bool useDefaultImplementationForConstants() const override { return true; }
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{
if (!isString(arguments[0]))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of argument of function {}",
arguments[0]->getName(), getName());
if (!isString(arguments[1]) && !isArray(arguments[1]))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type {} of argument of function {}",
arguments[1]->getName(), getName());
return std::make_shared<DataTypeString>();
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
{
const ColumnPtr column = arguments[0].column;
const ColumnPtr column_needle = arguments[1].column;
const ColumnConst * col_needle = typeid_cast<const ColumnConst *>(&*column_needle);
const ColumnArray * col_needle_const_array = checkAndGetColumnConstData<ColumnArray>(column_needle.get());
if (!col_needle && !col_needle_const_array)
throw Exception(ErrorCodes::ILLEGAL_COLUMN,
"Second argument of function {} must be constant string or constant array",
getName());
if (col_needle_const_array)
{
if (!col_needle_const_array->getData().empty() && typeid_cast<const DataTypeArray &>(*arguments[1].type).getNestedType()->getTypeId() != TypeIndex::String)
throw Exception(ErrorCodes::ILLEGAL_COLUMN,
"Second argument of function {} must be constant array of strings",
getName());
}
if (const ColumnString * col = checkAndGetColumn<ColumnString>(column.get()))
{
auto col_res = ColumnString::create();
ColumnString::Chars & vec_res = col_res->getChars();
ColumnString::Offsets & offsets_res = col_res->getOffsets();
vector(col->getChars(), col->getOffsets(), col_needle, col_needle_const_array, vec_res, offsets_res);
return col_res;
}
else
throw Exception(ErrorCodes::ILLEGAL_COLUMN,
"Illegal column {} of argument of function {}",
arguments[0].column->getName(), getName());
}
static void cutURL(ColumnString::Chars & data, String pattern, size_t prev_offset, size_t & cur_offset)
{
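/// Removes a single 'pattern=value' occurrence from the URL stored in
/// data[prev_offset, cur_offset) and shrinks cur_offset by the length cut.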
pattern += '=';
const char * param_str = pattern.c_str();
size_t param_len = pattern.size();
const char * url_begin = reinterpret_cast<const char *>(&data[prev_offset]);
const char * url_end = reinterpret_cast<const char *>(&data[cur_offset - 2]);
const char * begin_pos = url_begin;
const char * end_pos = begin_pos;
do
{
const char * query_string_begin = find_first_symbols<'?', '#'>(url_begin, url_end);
if (query_string_begin + 1 >= url_end)
break;
const char * pos = static_cast<const char *>(memmem(query_string_begin + 1, url_end - query_string_begin - 1, param_str, param_len));
if (pos == nullptr)
break;
if (pos[-1] != '?' && pos[-1] != '#' && pos[-1] != '&')
{
pos = nullptr;
break;
}
begin_pos = pos;
end_pos = begin_pos + param_len;
/// Skip the value.
while (*end_pos && *end_pos != '&' && *end_pos != '#')
++end_pos;
/// Capture '&' before or after the parameter.
if (*end_pos == '&')
++end_pos;
else if (begin_pos[-1] == '&')
--begin_pos;
} while (false);
size_t cut_length = end_pos - begin_pos;
cur_offset -= cut_length;
data.erase(data.begin() + prev_offset + (begin_pos - url_begin), data.begin() + prev_offset + (end_pos - url_begin));
}
static void vector(const ColumnString::Chars & data,
const ColumnString::Offsets & offsets,
std::string pattern,
const ColumnConst * col_needle,
const ColumnArray * col_needle_const_array,
ColumnString::Chars & res_data, ColumnString::Offsets & res_offsets)
{
res_data.reserve(data.size());
res_offsets.resize(offsets.size());
pattern += '=';
const char * param_str = pattern.c_str();
size_t param_len = pattern.size();
size_t prev_offset = 0;
size_t cur_offset;
size_t cur_len;
size_t res_offset = 0;
size_t cur_res_offset;
for (size_t i = 0; i < offsets.size(); ++i)
{
size_t cur_offset = offsets[i];
cur_offset = offsets[i];
cur_len = cur_offset - prev_offset;
cur_res_offset = res_offset + cur_len;
res_data.resize(cur_res_offset);
memcpySmallAllowReadWriteOverflow15(&res_data[res_offset], &data[prev_offset], cur_len);
const char * url_begin = reinterpret_cast<const char *>(&data[prev_offset]);
const char * url_end = reinterpret_cast<const char *>(&data[cur_offset]) - 1;
const char * begin_pos = url_begin;
const char * end_pos = begin_pos;
do
if (col_needle_const_array)
{
const char * query_string_begin = find_first_symbols<'?', '#'>(url_begin, url_end);
if (query_string_begin + 1 >= url_end)
break;
const char * pos = static_cast<const char *>(memmem(query_string_begin + 1, url_end - query_string_begin - 1, param_str, param_len));
if (pos == nullptr)
break;
if (pos[-1] != '?' && pos[-1] != '#' && pos[-1] != '&')
size_t num_needles = col_needle_const_array->getData().size();
for (size_t j = 0; j < num_needles; ++j)
{
pos = nullptr;
break;
auto field = col_needle_const_array->getData()[j];
cutURL(res_data, field.get<String>(), res_offset, cur_res_offset);
}
begin_pos = pos;
end_pos = begin_pos + param_len;
/// Skip the value.
while (*end_pos && *end_pos != '&' && *end_pos != '#')
++end_pos;
/// Capture '&' before or after the parameter.
if (*end_pos == '&')
++end_pos;
else if (begin_pos[-1] == '&')
--begin_pos;
} while (false);
size_t cut_length = (url_end - url_begin) - (end_pos - begin_pos);
res_data.resize(res_offset + cut_length + 1);
memcpySmallAllowReadWriteOverflow15(&res_data[res_offset], url_begin, begin_pos - url_begin);
memcpySmallAllowReadWriteOverflow15(&res_data[res_offset] + (begin_pos - url_begin), end_pos, url_end - end_pos);
res_offset += cut_length + 1;
res_data[res_offset - 1] = 0;
res_offsets[i] = res_offset;
}
else
{
cutURL(res_data, col_needle->getValue<String>(), res_offset, cur_res_offset);
}
res_offsets[i] = cur_res_offset;
res_offset = cur_res_offset;
prev_offset = cur_offset;
}
}
};
struct NameCutURLParameter { static constexpr auto name = "cutURLParameter"; };
using FunctionCutURLParameter = FunctionsStringSearchToString<CutURLParameterImpl, NameCutURLParameter>;
REGISTER_FUNCTION(CutURLParameter)
{
factory.registerFunction<FunctionCutURLParameter>();

View File

@ -1,6 +1,7 @@
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeDateTime64.h>
#include <DataTypes/DataTypesNumber.h>
#include <Common/IntervalKind.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnsDateTime.h>
#include <Columns/ColumnsNumber.h>
@ -34,6 +35,7 @@ namespace ErrorCodes
namespace
{
template <bool is_diff>
class DateDiffImpl
{
public:
@ -165,8 +167,92 @@ public:
template <typename TransformX, typename TransformY, typename T1, typename T2>
Int64 calculate(const TransformX & transform_x, const TransformY & transform_y, T1 x, T2 y, const DateLUTImpl & timezone_x, const DateLUTImpl & timezone_y) const
{
return static_cast<Int64>(transform_y.execute(y, timezone_y))
if constexpr (is_diff)
return static_cast<Int64>(transform_y.execute(y, timezone_y))
- static_cast<Int64>(transform_x.execute(x, timezone_x));
else
{
auto res = static_cast<Int64>(transform_y.execute(y, timezone_y))
- static_cast<Int64>(transform_x.execute(x, timezone_x));
DateLUTImpl::DateTimeComponents a_comp;
DateLUTImpl::DateTimeComponents b_comp;
Int64 adjust_value;
auto x_seconds = TransformDateTime64<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
auto y_seconds = TransformDateTime64<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(transform_y.getScaleMultiplier()).execute(y, timezone_y);
if (x_seconds <= y_seconds)
{
a_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
b_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_y.getScaleMultiplier()).execute(y, timezone_y);
adjust_value = -1;
}
else
{
a_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_y.getScaleMultiplier()).execute(y, timezone_y);
b_comp = TransformDateTime64<ToDateTimeComponentsImpl>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
adjust_value = 1;
}
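/// a_comp now holds the components of the earlier timestamp and b_comp those of the later one.
/// If the earlier timestamp's lower-order components exceed the later's, a full 'unit' has not yet
/// elapsed, and the raw boundary-count difference is adjusted one step toward zero.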
if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeYearNumImpl<ResultPrecision::Extended>>>)
{
if ((a_comp.date.month > b_comp.date.month)
|| ((a_comp.date.month == b_comp.date.month) && ((a_comp.date.day > b_comp.date.day)
|| ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour)
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second))))
)))))
res += adjust_value;
}
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeQuarterNumImpl<ResultPrecision::Extended>>>)
{
auto x_month_in_quarter = (a_comp.date.month - 1) % 3;
auto y_month_in_quarter = (b_comp.date.month - 1) % 3;
if ((x_month_in_quarter > y_month_in_quarter)
|| ((x_month_in_quarter == y_month_in_quarter) && ((a_comp.date.day > b_comp.date.day)
|| ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour)
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second))))
)))))
res += adjust_value;
}
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeMonthNumImpl<ResultPrecision::Extended>>>)
{
if ((a_comp.date.day > b_comp.date.day)
|| ((a_comp.date.day == b_comp.date.day) && ((a_comp.time.hour > b_comp.time.hour)
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second))))
)))
res += adjust_value;
}
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeWeekNumImpl<ResultPrecision::Extended>>>)
{
auto x_day_of_week = TransformDateTime64<ToDayOfWeekImpl>(transform_x.getScaleMultiplier()).execute(x, timezone_x);
auto y_day_of_week = TransformDateTime64<ToDayOfWeekImpl>(transform_y.getScaleMultiplier()).execute(y, timezone_y);
if ((x_day_of_week > y_day_of_week)
|| ((x_day_of_week == y_day_of_week) && (a_comp.time.hour > b_comp.time.hour))
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second)))))
res += adjust_value;
}
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeDayNumImpl<ResultPrecision::Extended>>>)
{
if ((a_comp.time.hour > b_comp.time.hour)
|| ((a_comp.time.hour == b_comp.time.hour) && ((a_comp.time.minute > b_comp.time.minute)
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second)))))
res += adjust_value;
}
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeHourNumImpl<ResultPrecision::Extended>>>)
{
if ((a_comp.time.minute > b_comp.time.minute)
|| ((a_comp.time.minute == b_comp.time.minute) && (a_comp.time.second > b_comp.time.second)))
res += adjust_value;
}
else if constexpr (std::is_same_v<TransformX, TransformDateTime64<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>>)
{
if (a_comp.time.second > b_comp.time.second)
res += adjust_value;
}
return res;
}
}
template <typename T>
@ -193,7 +279,8 @@ private:
/** dateDiff('unit', t1, t2, [timezone])
* t1 and t2 can be Date or DateTime
* age('unit', t1, t2, [timezone])
* t1 and t2 can be Date, Date32, DateTime or DateTime64
*
* If timezone is specified, it applied to both arguments.
* If not, timezones from datatypes t1 and t2 are used.
@ -201,10 +288,11 @@ private:
*
* Timezone matters because days can have different length.
*/
template <bool is_relative>
class FunctionDateDiff : public IFunction
{
public:
static constexpr auto name = "dateDiff";
static constexpr auto name = is_relative ? "dateDiff" : "age";
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionDateDiff>(); }
String getName() const override
@ -270,21 +358,21 @@ public:
const auto & timezone_y = extractTimeZoneFromFunctionArguments(arguments, 3, 2);
if (unit == "year" || unit == "yy" || unit == "yyyy")
impl.dispatchForColumns<ToRelativeYearNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
impl.template dispatchForColumns<ToRelativeYearNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "quarter" || unit == "qq" || unit == "q")
impl.dispatchForColumns<ToRelativeQuarterNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
impl.template dispatchForColumns<ToRelativeQuarterNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "month" || unit == "mm" || unit == "m")
impl.dispatchForColumns<ToRelativeMonthNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
impl.template dispatchForColumns<ToRelativeMonthNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "week" || unit == "wk" || unit == "ww")
impl.dispatchForColumns<ToRelativeWeekNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
impl.template dispatchForColumns<ToRelativeWeekNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "day" || unit == "dd" || unit == "d")
impl.dispatchForColumns<ToRelativeDayNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
impl.template dispatchForColumns<ToRelativeDayNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "hour" || unit == "hh" || unit == "h")
impl.dispatchForColumns<ToRelativeHourNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
impl.template dispatchForColumns<ToRelativeHourNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "minute" || unit == "mi" || unit == "n")
impl.dispatchForColumns<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
impl.template dispatchForColumns<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "second" || unit == "ss" || unit == "s")
impl.dispatchForColumns<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
impl.template dispatchForColumns<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Function {} does not support '{}' unit", getName(), unit);
@ -292,7 +380,7 @@ public:
return res;
}
private:
DateDiffImpl impl{name};
DateDiffImpl<is_relative> impl{name};
};
@ -352,14 +440,14 @@ public:
return res;
}
private:
DateDiffImpl impl{name};
DateDiffImpl<true> impl{name};
};
}
REGISTER_FUNCTION(DateDiff)
{
factory.registerFunction<FunctionDateDiff>({}, FunctionFactory::CaseInsensitive);
factory.registerFunction<FunctionDateDiff<true>>({}, FunctionFactory::CaseInsensitive);
}
REGISTER_FUNCTION(TimeDiff)
@ -376,4 +464,9 @@ Example:
Documentation::Categories{"Dates and Times"}}, FunctionFactory::CaseInsensitive);
}
REGISTER_FUNCTION(Age)
{
factory.registerFunction<FunctionDateDiff<false>>({}, FunctionFactory::CaseInsensitive);
}
}

View File

@ -334,7 +334,7 @@ std::string HTTPException::makeExceptionMessage(
"Received error from remote server {}. "
"HTTP status code: {} {}, "
"body: {}",
uri, http_status, reason, body);
uri, static_cast<int>(http_status), reason, body);
}
}

View File

@ -221,16 +221,31 @@ void initDataVariantsWithSizeHint(
const auto max_threads = params.group_by_two_level_threshold != 0 ? std::max(params.max_threads, 1ul) : 1;
const auto lower_limit = hint->sum_of_sizes / max_threads;
const auto upper_limit = stats_collecting_params.max_size_to_preallocate_for_aggregation / max_threads;
const auto adjusted = std::min(std::max(lower_limit, hint->median_size), upper_limit);
if (worthConvertToTwoLevel(
params.group_by_two_level_threshold,
hint->sum_of_sizes,
/*group_by_two_level_threshold_bytes*/ 0,
/*result_size_bytes*/ 0))
method_chosen = convertToTwoLevelTypeIfPossible(method_chosen);
result.init(method_chosen, adjusted);
ProfileEvents::increment(ProfileEvents::AggregationHashTablesInitializedAsTwoLevel, result.isTwoLevel());
return;
if (hint->median_size > upper_limit)
{
/// Since we cannot afford to preallocate as much as we want, we will likely need to do resize anyway.
/// But we will also work with the big (i.e. not so cache friendly) HT from the beginning which may result in a slight slowdown.
/// So let's just do nothing.
LOG_TRACE(
&Poco::Logger::get("Aggregator"),
"No space were preallocated in hash tables because 'max_size_to_preallocate_for_aggregation' has too small value: {}, "
"should be at least {}",
stats_collecting_params.max_size_to_preallocate_for_aggregation,
hint->median_size * max_threads);
}
else
{
const auto adjusted = std::max(lower_limit, hint->median_size);
if (worthConvertToTwoLevel(
params.group_by_two_level_threshold,
hint->sum_of_sizes,
/*group_by_two_level_threshold_bytes*/ 0,
/*result_size_bytes*/ 0))
method_chosen = convertToTwoLevelTypeIfPossible(method_chosen);
result.init(method_chosen, adjusted);
ProfileEvents::increment(ProfileEvents::AggregationHashTablesInitializedAsTwoLevel, result.isTwoLevel());
return;
}
}
}
result.init(method_chosen);
@ -488,7 +503,6 @@ Aggregator::AggregateColumnsConstData Aggregator::Params::makeAggregateColumnsDa
void Aggregator::Params::explain(WriteBuffer & out, size_t indent) const
{
Strings res;
String prefix(indent, ' ');
{
@ -931,7 +945,10 @@ void Aggregator::executeOnBlockSmall(
/// How to perform the aggregation?
if (result.empty())
{
initDataVariantsWithSizeHint(result, method_chosen, params);
if (method_chosen != AggregatedDataVariants::Type::without_key)
initDataVariantsWithSizeHint(result, method_chosen, params);
else
result.init(method_chosen);
result.keys_size = params.keys_size;
result.key_sizes = key_sizes;
}

View File

@ -16,31 +16,6 @@
#include <QueryPipeline/Pipe.h>
#include <Storages/SelectQueryInfo.h>
using namespace DB;
namespace
{
/// We determine output stream sort properties by a local plan (local because otherwise table could be unknown).
/// If no local shard exist for this cluster, no sort properties will be provided, c'est la vie.
auto getRemoteShardsOutputStreamSortingProperties(const std::vector<QueryPlanPtr> & plans, ContextMutablePtr context)
{
SortDescription sort_description;
DataStream::SortScope sort_scope = DataStream::SortScope::None;
if (!plans.empty())
{
if (const auto * step = dynamic_cast<const ITransformingStep *>(plans.front()->getRootNode()->step.get());
step && step->getDataStreamTraits().can_enforce_sorting_properties_in_distributed_query)
{
step->adjustSettingsToEnforceSortingPropertiesInDistributedQuery(context);
sort_description = step->getOutputStream().sort_description;
sort_scope = step->getOutputStream().sort_scope;
}
}
return std::make_pair(sort_description, sort_scope);
}
}
namespace DB
{
@ -216,8 +191,6 @@ void executeQuery(
"_shard_count", Block{{DataTypeUInt32().createColumnConst(1, shards), std::make_shared<DataTypeUInt32>(), "_shard_count"}});
auto external_tables = context->getExternalTables();
auto && [sort_description, sort_scope] = getRemoteShardsOutputStreamSortingProperties(plans, new_context);
auto plan = std::make_unique<QueryPlan>();
auto read_from_remote = std::make_unique<ReadFromRemote>(
std::move(remote_shards),
@ -231,9 +204,7 @@ void executeQuery(
std::move(external_tables),
log,
shards,
query_info.storage_limits,
std::move(sort_description),
std::move(sort_scope));
query_info.storage_limits);
read_from_remote->setStepDescription("Read from remote replica");
plan->addStep(std::move(read_from_remote));
@ -329,7 +300,6 @@ void executeQueryWithParallelReplicas(
if (!remote_shards.empty())
{
auto new_context = Context::createCopy(context);
auto && [sort_description, sort_scope] = getRemoteShardsOutputStreamSortingProperties(plans, new_context);
for (const auto & shard : remote_shards)
{
@ -345,9 +315,7 @@ void executeQueryWithParallelReplicas(
scalars,
external_tables,
&Poco::Logger::get("ReadFromParallelRemoteReplicasStep"),
query_info.storage_limits,
sort_description,
sort_scope);
query_info.storage_limits);
auto remote_plan = std::make_unique<QueryPlan>();
remote_plan->addStep(std::move(read_from_remote));

View File

@ -1525,9 +1525,9 @@ void Context::setCurrentQueryId(const String & query_id)
client_info.initial_query_id = client_info.current_query_id;
}
void Context::killCurrentQuery()
void Context::killCurrentQuery() const
{
if (auto elem = process_list_elem.lock())
if (auto elem = getProcessListElement())
elem->cancelQuery(true);
}
@ -1782,11 +1782,16 @@ void Context::setProcessListElement(QueryStatusPtr elem)
{
/// Set to a session or query. In the session, only one query is processed at a time. Therefore, the lock is not needed.
process_list_elem = elem;
has_process_list_elem = elem.get();
}
QueryStatusPtr Context::getProcessListElement() const
{
return process_list_elem.lock();
if (!has_process_list_elem)
return {};
if (auto res = process_list_elem.lock())
return res;
throw Exception(ErrorCodes::LOGICAL_ERROR, "Weak pointer to process_list_elem expired during query execution, it's a bug");
}

View File

@ -239,6 +239,7 @@ private:
FileProgressCallback file_progress_callback; /// Callback for tracking progress of file loading.
std::weak_ptr<QueryStatus> process_list_elem; /// For tracking total resource usage for query.
bool has_process_list_elem = false; /// Whether process_list_elem was ever set: a weak_ptr alone cannot distinguish "never initialized" from "expired"
StorageID insertion_table = StorageID::createEmpty(); /// Saved insertion table in query context
bool is_distributed = false; /// Whether the current context is used for a distributed query
@ -629,7 +630,7 @@ public:
void setCurrentDatabaseNameInGlobalContext(const String & name);
void setCurrentQueryId(const String & query_id);
void killCurrentQuery();
void killCurrentQuery() const;
bool hasInsertionTable() const { return !insertion_table.empty(); }
void setInsertionTable(StorageID db_and_table) { insertion_table = std::move(db_and_table); }

View File

@ -1960,6 +1960,7 @@ ExpressionAnalysisResult::ExpressionAnalysisResult(
/// TODO correct conditions
optimize_aggregation_in_order =
context->getSettingsRef().optimize_aggregation_in_order
&& (!context->getSettingsRef().query_plan_aggregation_in_order)
&& storage && query.groupBy();
query_analyzer.appendGroupBy(chain, only_types || !first_stage, optimize_aggregation_in_order, group_by_elements_actions);

View File

@ -1963,7 +1963,7 @@ IBlocksStreamPtr HashJoin::getNonJoinedBlocks(const Block & left_sample_block,
/// ... calculate `left_columns_count` ...
size_t left_columns_count = left_sample_block.columns();
auto non_joined = std::make_unique<NotJoinedHash<true>>(*this, max_block_size);
return std::make_unique<NotJoinedBlocks>(std::move(non_joined), result_sample_block, left_columns_count, table_join->leftToRightKeyRemap());
return std::make_unique<NotJoinedBlocks>(std::move(non_joined), result_sample_block, left_columns_count, *table_join);
}
else
@ -1971,7 +1971,7 @@ IBlocksStreamPtr HashJoin::getNonJoinedBlocks(const Block & left_sample_block,
size_t left_columns_count = left_sample_block.columns();
assert(left_columns_count == result_sample_block.columns() - required_right_keys.columns() - sample_block_with_columns_to_add.columns());
auto non_joined = std::make_unique<NotJoinedHash<false>>(*this, max_block_size);
return std::make_unique<NotJoinedBlocks>(std::move(non_joined), result_sample_block, left_columns_count, table_join->leftToRightKeyRemap());
return std::make_unique<NotJoinedBlocks>(std::move(non_joined), result_sample_block, left_columns_count, *table_join);
}
}

View File

@ -2457,9 +2457,13 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac
auto grouping_sets_params = getAggregatorGroupingSetsParams(*query_analyzer, keys);
SortDescription group_by_sort_description;
SortDescription sort_description_for_merging;
if (group_by_info && settings.optimize_aggregation_in_order && !query_analyzer->useGroupingSetKey())
{
group_by_sort_description = getSortDescriptionFromGroupBy(getSelectQuery());
sort_description_for_merging = group_by_info->sort_description_for_merging;
}
else
group_by_info = nullptr;
@ -2481,6 +2485,8 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac
group_by_info = std::make_shared<InputOrderInfo>(
group_by_sort_description, group_by_sort_description.size(), 1 /* direction */, 0 /* limit */);
sort_description_for_merging = group_by_info->sort_description_for_merging;
}
auto merge_threads = max_streams;
@ -2504,7 +2510,7 @@ void InterpreterSelectQuery::executeAggregation(QueryPlan & query_plan, const Ac
temporary_data_merge_threads,
storage_has_evenly_distributed_read,
settings.group_by_use_nulls,
std::move(group_by_info),
std::move(sort_description_for_merging),
std::move(group_by_sort_description),
should_produce_results_in_order_of_bucket_number,
settings.enable_memory_bound_merging_of_aggregation_results);

View File

@ -111,7 +111,15 @@ String InterpreterShowTablesQuery::getRewrittenQuery()
DatabaseCatalog::instance().assertDatabaseExists(database);
WriteBufferFromOwnString rewritten_query;
rewritten_query << "SELECT name FROM system.";
if (query.full)
{
rewritten_query << "SELECT name, engine FROM system.";
}
else
{
rewritten_query << "SELECT name FROM system.";
}
if (query.dictionaries)
rewritten_query << "dictionaries ";

View File

@ -718,11 +718,12 @@ ColumnPtr filterWithBlanks(ColumnPtr src_column, const IColumn::Filter & filter,
NotJoinedBlocks::NotJoinedBlocks(std::unique_ptr<RightColumnsFiller> filler_,
const Block & result_sample_block_,
size_t left_columns_count,
const LeftToRightKeyRemap & left_to_right_key_remap)
const TableJoin & table_join)
: filler(std::move(filler_))
, saved_block_sample(filler->getEmptyBlock())
, result_sample_block(materializeBlock(result_sample_block_))
{
const auto & left_to_right_key_remap = table_join.leftToRightKeyRemap();
for (size_t left_pos = 0; left_pos < left_columns_count; ++left_pos)
{
/// We need right 'x' for 'RIGHT JOIN ... USING(x)'
@ -739,14 +740,21 @@ NotJoinedBlocks::NotJoinedBlocks(std::unique_ptr<RightColumnsFiller> filler_,
/// `saved_block_sample` may contain non-unique column names, get any of them
/// (e.g. in case of `... JOIN (SELECT a, a, b FROM table) as t2`)
for (const auto & [name, right_pos] : saved_block_sample.getNamesToIndexesMap())
for (const auto & [right_name, right_pos] : saved_block_sample.getNamesToIndexesMap())
{
String column_name(right_name);
if (table_join.getStorageJoin())
{
/// StorageJoin operates with original non qualified column names, so apply renaming here
column_name = table_join.renamedRightColumnName(column_name);
}
/// Start from left_columns_count so as not to remap left keys twice. We need only qualified right keys here
/// `result_sample_block` may contain non-unique column names, so set the index for all of them
for (size_t result_pos = left_columns_count; result_pos < result_sample_block.columns(); ++result_pos)
{
const auto & result_name = result_sample_block.getByPosition(result_pos).name;
if (result_name == name)
if (result_name == column_name)
setRightIndex(right_pos, result_pos);
}
}

View File

@ -138,7 +138,7 @@ public:
NotJoinedBlocks(std::unique_ptr<RightColumnsFiller> filler_,
const Block & result_sample_block_,
size_t left_columns_count,
const LeftToRightKeyRemap & left_to_right_key_remap);
const TableJoin & table_join);
Block nextImpl() override;

View File

@ -1122,7 +1122,7 @@ IBlocksStreamPtr MergeJoin::getNonJoinedBlocks(
size_t left_columns_count = left_sample_block.columns();
assert(left_columns_count == result_sample_block.columns() - right_columns_to_add.columns());
auto non_joined = std::make_unique<NotJoinedMerge>(*this, max_block_size);
return std::make_unique<NotJoinedBlocks>(std::move(non_joined), result_sample_block, left_columns_count, table_join->leftToRightKeyRemap());
return std::make_unique<NotJoinedBlocks>(std::move(non_joined), result_sample_block, left_columns_count, *table_join);
}
return nullptr;
}

View File

@ -346,7 +346,7 @@ public:
void setStorageJoin(std::shared_ptr<const IKeyValueEntity> storage);
void setStorageJoin(std::shared_ptr<StorageJoin> storage);
std::shared_ptr<StorageJoin> getStorageJoin() { return right_storage_join; }
std::shared_ptr<StorageJoin> getStorageJoin() const { return right_storage_join; }
bool isSpecialStorage() const { return !right_storage_name.empty() || right_storage_join || right_kv_storage; }

View File

@ -417,6 +417,8 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
throw;
}
/// Avoid early destruction of process_list_entry if it was not saved to `res` yet (in case of exception)
ProcessList::EntryPtr process_list_entry;
BlockIO res;
std::shared_ptr<InterpreterTransactionControlQuery> implicit_txn_control{};
String query_database;
@ -509,7 +511,6 @@ static std::tuple<ASTPtr, BlockIO> executeQueryImpl(
checkASTSizeLimits(*ast, settings);
/// Put query to process list. But don't put SHOW PROCESSLIST query itself.
ProcessList::EntryPtr process_list_entry;
if (!internal && !ast->as<ASTShowProcesslistQuery>())
{
/// processlist also has the query masked now, to avoid secret leaks through SHOW PROCESSLIST by other users.

View File

@ -22,6 +22,7 @@ public:
bool changed{false};
bool temporary{false};
bool caches{false};
bool full{false};
String cluster_str;
String from;

View File

@ -18,6 +18,7 @@ namespace DB
bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
{
ParserKeyword s_show("SHOW");
ParserKeyword s_full("FULL");
ParserKeyword s_temporary("TEMPORARY");
ParserKeyword s_tables("TABLES");
ParserKeyword s_databases("DATABASES");
@ -46,6 +47,11 @@ bool ParserShowTablesQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expec
if (!s_show.ignore(pos, expected))
return false;
if (s_full.ignore(pos, expected))
{
query->full = true;
}
if (s_databases.ignore(pos, expected))
{
query->databases = true;

View File

@ -14,7 +14,7 @@ namespace DB
class ParserShowTablesQuery : public IParserBase
{
protected:
const char * getName() const override { return "SHOW [TEMPORARY] TABLES|DATABASES|CLUSTERS|CLUSTER 'name' [[NOT] [I]LIKE 'str'] [LIMIT expr]"; }
const char * getName() const override { return "SHOW [FULL] [TEMPORARY] TABLES|DATABASES|CLUSTERS|CLUSTER 'name' [[NOT] [I]LIKE 'str'] [LIMIT expr]"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override;
};

View File

@ -455,6 +455,7 @@ void Planner::buildQueryPlanIfNeeded()
);
SortDescription group_by_sort_description;
SortDescription sort_description_for_merging;
auto merge_threads = settings.max_threads;
auto temporary_data_merge_threads = settings.aggregation_memory_efficient_merge_threads
@ -477,7 +478,6 @@ void Planner::buildQueryPlanIfNeeded()
const bool should_produce_results_in_order_of_bucket_number
= select_query_options.to_stage == QueryProcessingStage::WithMergeableState && settings.distributed_aggregation_memory_efficient;
InputOrderInfoPtr input_order_info;
bool aggregate_final =
select_query_options.to_stage > QueryProcessingStage::WithMergeableState &&
!query_node.isGroupByWithTotals() && !query_node.isGroupByWithRollup() && !query_node.isGroupByWithCube();
@ -493,7 +493,7 @@ void Planner::buildQueryPlanIfNeeded()
temporary_data_merge_threads,
storage_has_evenly_distributed_read,
settings.group_by_use_nulls,
std::move(input_order_info),
std::move(sort_description_for_merging),
std::move(group_by_sort_description),
should_produce_results_in_order_of_bucket_number,
settings.enable_memory_bound_merging_of_aggregation_results);

View File

@ -18,6 +18,8 @@
#include <Processors/Transforms/MemoryBoundMerging.h>
#include <Processors/Transforms/MergingAggregatedMemoryEfficientTransform.h>
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <IO/Operators.h>
#include <Common/JSONBuilder.h>
namespace DB
{
@ -25,21 +27,20 @@ namespace DB
static bool memoryBoundMergingWillBeUsed(
bool should_produce_results_in_order_of_bucket_number,
bool memory_bound_merging_of_aggregation_results_enabled,
InputOrderInfoPtr group_by_info)
SortDescription sort_description_for_merging)
{
return should_produce_results_in_order_of_bucket_number && memory_bound_merging_of_aggregation_results_enabled && group_by_info;
return should_produce_results_in_order_of_bucket_number && memory_bound_merging_of_aggregation_results_enabled && !sort_description_for_merging.empty();
}
static ITransformingStep::Traits getTraits(bool should_produce_results_in_order_of_bucket_number, bool memory_bound_merging_will_be_used)
static ITransformingStep::Traits getTraits(bool should_produce_results_in_order_of_bucket_number)
{
return ITransformingStep::Traits
{
{
.preserves_distinct_columns = false, /// Actually, we may check that distinct names are in aggregation keys
.returns_single_stream = should_produce_results_in_order_of_bucket_number || memory_bound_merging_will_be_used,
.returns_single_stream = should_produce_results_in_order_of_bucket_number,
.preserves_number_of_streams = false,
.preserves_sorting = false,
.can_enforce_sorting_properties_in_distributed_query = memory_bound_merging_will_be_used,
},
{
.preserves_number_of_rows = false,
@ -97,17 +98,14 @@ AggregatingStep::AggregatingStep(
size_t temporary_data_merge_threads_,
bool storage_has_evenly_distributed_read_,
bool group_by_use_nulls_,
InputOrderInfoPtr group_by_info_,
SortDescription sort_description_for_merging_,
SortDescription group_by_sort_description_,
bool should_produce_results_in_order_of_bucket_number_,
bool memory_bound_merging_of_aggregation_results_enabled_)
: ITransformingStep(
input_stream_,
appendGroupingColumn(params_.getHeader(input_stream_.header, final_), params_.keys, grouping_sets_params_, group_by_use_nulls_),
getTraits(
should_produce_results_in_order_of_bucket_number_,
DB::memoryBoundMergingWillBeUsed(
should_produce_results_in_order_of_bucket_number_, memory_bound_merging_of_aggregation_results_enabled_, group_by_info_)),
getTraits(should_produce_results_in_order_of_bucket_number_),
false)
, params(std::move(params_))
, grouping_sets_params(std::move(grouping_sets_params_))
@ -118,7 +116,7 @@ AggregatingStep::AggregatingStep(
, temporary_data_merge_threads(temporary_data_merge_threads_)
, storage_has_evenly_distributed_read(storage_has_evenly_distributed_read_)
, group_by_use_nulls(group_by_use_nulls_)
, group_by_info(std::move(group_by_info_))
, sort_description_for_merging(std::move(sort_description_for_merging_))
, group_by_sort_description(std::move(group_by_sort_description_))
, should_produce_results_in_order_of_bucket_number(should_produce_results_in_order_of_bucket_number_)
, memory_bound_merging_of_aggregation_results_enabled(memory_bound_merging_of_aggregation_results_enabled_)
@ -130,6 +128,19 @@ AggregatingStep::AggregatingStep(
}
}
void AggregatingStep::applyOrder(SortDescription sort_description_for_merging_, SortDescription group_by_sort_description_)
{
sort_description_for_merging = std::move(sort_description_for_merging_);
group_by_sort_description = std::move(group_by_sort_description_);
if (memoryBoundMergingWillBeUsed())
{
output_stream->sort_description = group_by_sort_description;
output_stream->sort_scope = DataStream::SortScope::Global;
output_stream->has_single_port = true;
}
}
void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings & settings)
{
QueryPipelineProcessorsCollector collector(pipeline, this);
@ -140,7 +151,7 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B
bool allow_to_use_two_level_group_by = pipeline.getNumStreams() > 1 || params.max_bytes_before_external_group_by != 0;
/// optimize_aggregation_in_order
if (group_by_info)
if (!sort_description_for_merging.empty())
{
/// Two-level aggregation is not supported for in-order aggregation anyway.
allow_to_use_two_level_group_by = false;
@ -320,7 +331,7 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B
return;
}
if (group_by_info)
if (!sort_description_for_merging.empty())
{
if (pipeline.getNumStreams() > 1)
{
@ -340,7 +351,7 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B
/// So, we reduce the 'max_bytes' value for aggregation by a factor of 'merge_threads'.
return std::make_shared<AggregatingInOrderTransform>(
header, transform_params,
group_by_info, group_by_sort_description,
sort_description_for_merging, group_by_sort_description,
max_block_size, aggregation_in_order_max_block_bytes / merge_threads,
many_data, counter++);
});
@ -379,7 +390,7 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B
{
return std::make_shared<AggregatingInOrderTransform>(
header, transform_params,
group_by_info, group_by_sort_description,
sort_description_for_merging, group_by_sort_description,
max_block_size, aggregation_in_order_max_block_bytes);
});
@ -427,11 +438,18 @@ void AggregatingStep::transformPipeline(QueryPipelineBuilder & pipeline, const B
void AggregatingStep::describeActions(FormatSettings & settings) const
{
params.explain(settings.out, settings.offset);
if (!sort_description_for_merging.empty())
{
String prefix(settings.offset, settings.indent_char);
settings.out << prefix << "Order: " << dumpSortDescription(sort_description_for_merging) << '\n';
}
}
void AggregatingStep::describeActions(JSONBuilder::JSONMap & map) const
{
params.explain(map);
if (!sort_description_for_merging.empty())
map.add("Order", dumpSortDescription(sort_description_for_merging));
}
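With this, the chosen merge prefix becomes visible in EXPLAIN output. A minimal sketch, assuming a hypothetical MergeTree table tab sorted by (a, b) and the in-order optimisation enabled:

CREATE TABLE tab (a Int32, b Int32, c Int32) ENGINE = MergeTree ORDER BY (a, b);
EXPLAIN actions = 1
SELECT a, b, sum(c) FROM tab GROUP BY a, b
SETTINGS optimize_aggregation_in_order = 1;
-- the Aggregating step is expected to print a line like:
--   Order: a ASC, b ASC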
void AggregatingStep::describePipeline(FormatSettings & settings) const
@ -455,17 +473,10 @@ void AggregatingStep::updateOutputStream()
getDataStreamTraits());
}
void AggregatingStep::adjustSettingsToEnforceSortingPropertiesInDistributedQuery(ContextMutablePtr context) const
{
context->setSetting("enable_memory_bound_merging_of_aggregation_results", true);
context->setSetting("optimize_aggregation_in_order", true);
context->setSetting("force_aggregation_in_order", true);
}
bool AggregatingStep::memoryBoundMergingWillBeUsed() const
{
return DB::memoryBoundMergingWillBeUsed(
should_produce_results_in_order_of_bucket_number, memory_bound_merging_of_aggregation_results_enabled, group_by_info);
should_produce_results_in_order_of_bucket_number, memory_bound_merging_of_aggregation_results_enabled, sort_description_for_merging);
}
}

View File

@ -37,7 +37,7 @@ public:
size_t temporary_data_merge_threads_,
bool storage_has_evenly_distributed_read_,
bool group_by_use_nulls_,
InputOrderInfoPtr group_by_info_,
SortDescription sort_description_for_merging_,
SortDescription group_by_sort_description_,
bool should_produce_results_in_order_of_bucket_number_,
bool memory_bound_merging_of_aggregation_results_enabled_);
@ -53,13 +53,14 @@ public:
const Aggregator::Params & getParams() const { return params; }
void adjustSettingsToEnforceSortingPropertiesInDistributedQuery(ContextMutablePtr context) const override;
bool inOrder() const { return !sort_description_for_merging.empty(); }
bool isGroupingSets() const { return !grouping_sets_params.empty(); }
void applyOrder(SortDescription sort_description_for_merging_, SortDescription group_by_sort_description_);
bool memoryBoundMergingWillBeUsed() const;
private:
void updateOutputStream() override;
bool memoryBoundMergingWillBeUsed() const;
Aggregator::Params params;
GroupingSetsParamsList grouping_sets_params;
bool final;
@ -71,7 +72,11 @@ private:
bool storage_has_evenly_distributed_read;
bool group_by_use_nulls;
InputOrderInfoPtr group_by_info;
/// Both sort descriptions are needed for the aggregate-in-order optimisation.
/// Both sort descriptions are subsets of the GROUP BY key columns (or monotonic functions over them).
/// The sort description for merging is the sort description of the input and a prefix of group_by_sort_description.
/// group_by_sort_description contains all GROUP BY keys and is used for the final merging of aggregated data.
SortDescription sort_description_for_merging;
SortDescription group_by_sort_description;
/// These settings are used to determine if we should resize pipeline to 1 at the end.

View File

@ -34,9 +34,6 @@ public:
/// Doesn't change row order.
/// Examples: true for FilterStep, false for PartialSortingStep
bool preserves_sorting;
/// See adjustSettingsToEnforceSortingPropertiesInDistributedQuery().
bool can_enforce_sorting_properties_in_distributed_query = false;
};
/// This flags are used by QueryPlan optimizers.

View File

@ -19,7 +19,7 @@ static bool memoryBoundMergingWillBeUsed(
&& input_stream.sort_scope >= DataStream::SortScope::Stream && input_stream.sort_description.hasPrefix(group_by_sort_description);
}
static ITransformingStep::Traits getTraits(bool should_produce_results_in_order_of_bucket_number, bool memory_bound_merging_will_be_used)
static ITransformingStep::Traits getTraits(bool should_produce_results_in_order_of_bucket_number)
{
return ITransformingStep::Traits
{
@ -28,7 +28,6 @@ static ITransformingStep::Traits getTraits(bool should_produce_results_in_order_
.returns_single_stream = should_produce_results_in_order_of_bucket_number,
.preserves_number_of_streams = false,
.preserves_sorting = false,
.can_enforce_sorting_properties_in_distributed_query = memory_bound_merging_will_be_used,
},
{
.preserves_number_of_rows = false,
@ -51,10 +50,7 @@ MergingAggregatedStep::MergingAggregatedStep(
: ITransformingStep(
input_stream_,
params_.getHeader(input_stream_.header, final_),
getTraits(
should_produce_results_in_order_of_bucket_number_,
DB::memoryBoundMergingWillBeUsed(
input_stream_, memory_bound_merging_of_aggregation_results_enabled_, group_by_sort_description_)))
getTraits(should_produce_results_in_order_of_bucket_number_))
, params(std::move(params_))
, final(final_)
, memory_efficient_aggregation(memory_efficient_aggregation_)
@ -77,6 +73,19 @@ MergingAggregatedStep::MergingAggregatedStep(
}
}
void MergingAggregatedStep::updateInputSortDescription(SortDescription sort_description, DataStream::SortScope sort_scope)
{
auto & input_stream = input_streams.front();
input_stream.sort_scope = sort_scope;
input_stream.sort_description = sort_description;
if (memoryBoundMergingWillBeUsed() && should_produce_results_in_order_of_bucket_number)
{
output_stream->sort_description = group_by_sort_description;
output_stream->sort_scope = DataStream::SortScope::Global;
}
}
void MergingAggregatedStep::transformPipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
{
auto transform_params = std::make_shared<AggregatingTransformParams>(pipeline.getHeader(), std::move(params), final);
@ -151,11 +160,6 @@ void MergingAggregatedStep::updateOutputStream()
output_stream->distinct_columns.insert(key);
}
void MergingAggregatedStep::adjustSettingsToEnforceSortingPropertiesInDistributedQuery(ContextMutablePtr context) const
{
context->setSetting("enable_memory_bound_merging_of_aggregation_results", true);
}
bool MergingAggregatedStep::memoryBoundMergingWillBeUsed() const
{
return DB::memoryBoundMergingWillBeUsed(

View File

@ -33,12 +33,13 @@ public:
void describeActions(JSONBuilder::JSONMap & map) const override;
void describeActions(FormatSettings & settings) const override;
void adjustSettingsToEnforceSortingPropertiesInDistributedQuery(ContextMutablePtr context) const override;
void updateInputSortDescription(SortDescription input_sort_description, DataStream::SortScope sort_scope);
bool memoryBoundMergingWillBeUsed() const;
private:
void updateOutputStream() override;
bool memoryBoundMergingWillBeUsed() const;
Aggregator::Params params;
bool final;

View File

@ -92,6 +92,11 @@ using Stack = std::vector<Frame>;
/// Second pass optimizations
void optimizePrimaryKeyCondition(const Stack & stack);
void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes);
void optimizeAggregationInOrder(QueryPlan::Node & node, QueryPlan::Nodes &);
/// Enable memory-bound merging of aggregation states for remote queries
/// in case it was enabled for the local plan.
void enableMemoryBoundMerging(QueryPlan::Node & node, QueryPlan::Nodes &);
}

View File

@ -13,6 +13,7 @@ QueryPlanOptimizationSettings QueryPlanOptimizationSettings::fromSettings(const
settings.filter_push_down = from.query_plan_filter_push_down;
settings.distinct_in_order = from.optimize_distinct_in_order;
settings.read_in_order = from.optimize_read_in_order && from.query_plan_read_in_order;
settings.aggregation_in_order = from.optimize_aggregation_in_order && from.query_plan_aggregation_in_order;
return settings;
}

View File

@ -27,6 +27,9 @@ struct QueryPlanOptimizationSettings
/// If read-in-order optimisation is enabled
bool read_in_order = true;
/// If aggregation-in-order optimisation is enabled
bool aggregation_in_order = false;
static QueryPlanOptimizationSettings fromSettings(const Settings & from);
static QueryPlanOptimizationSettings fromContext(ContextPtr from);
};

View File

@ -0,0 +1,94 @@
#include <Processors/QueryPlan/Optimizations/Optimizations.h>
#include <Processors/QueryPlan/ReadFromRemote.h>
#include <Processors/QueryPlan/AggregatingStep.h>
#include <Processors/QueryPlan/MergingAggregatedStep.h>
#include <Processors/QueryPlan/UnionStep.h>
namespace DB::QueryPlanOptimizations
{
/// We are trying to find a part of the plan like
///
/// - ReadFromRemote (x N)
/// - Union - ReadFromParallelRemoteReplicasStep (x M)
/// - Aggregating/MergingAggregated
///
/// and enable memory bound merging for remote steps if it was enabled for local aggregation.
void enableMemoryBoundMerging(QueryPlan::Node & node, QueryPlan::Nodes &)
{
auto * root_merging_aggregated = typeid_cast<MergingAggregatedStep *>(node.step.get());
if (!root_merging_aggregated)
return;
const auto & union_node = *node.children.front();
auto * union_step = typeid_cast<UnionStep *>(union_node.step.get());
if (!union_step)
return;
std::vector<ReadFromRemote *> reading_steps;
std::vector<ReadFromParallelRemoteReplicasStep *> async_reading_steps;
IQueryPlanStep * local_plan = nullptr;
reading_steps.reserve(union_node.children.size());
async_reading_steps.reserve(union_node.children.size());
for (const auto & child : union_node.children)
{
auto * child_node = child->step.get();
if (auto * reading_step = typeid_cast<ReadFromRemote *>(child_node))
reading_steps.push_back(reading_step);
else if (auto * async_reading_step = typeid_cast<ReadFromParallelRemoteReplicasStep *>(child_node))
async_reading_steps.push_back(async_reading_step);
else if (local_plan)
/// Usually there is a single local plan.
/// TODO: we could support many local plans and calculate a common sort-description prefix. Do we need it?
return;
else
local_plan = child_node;
}
/// We determine the output stream's sort properties from the local plan (local because otherwise the table could be unknown).
/// If no local shard exists for this cluster, no sort properties will be provided; so be it.
if (local_plan == nullptr || (reading_steps.empty() && async_reading_steps.empty()))
return;
SortDescription sort_description;
bool enforce_aggregation_in_order = false;
if (auto * aggregating_step = typeid_cast<AggregatingStep *>(local_plan))
{
if (aggregating_step->memoryBoundMergingWillBeUsed())
{
sort_description = aggregating_step->getOutputStream().sort_description;
enforce_aggregation_in_order = true;
}
}
else if (auto * merging_aggregated = typeid_cast<MergingAggregatedStep *>(local_plan))
{
if (merging_aggregated->memoryBoundMergingWillBeUsed())
{
sort_description = mergine_aggeregated->getOutputStream().sort_description;
}
}
if (sort_description.empty())
return;
for (auto & reading : reading_steps)
{
reading->enforceSorting(sort_description);
if (enforce_aggregation_in_order)
reading->enforceAggregationInOrder();
}
for (auto & reading : async_reading_steps)
{
reading->enforceSorting(sort_description);
if (enforce_aggregation_in_order)
reading->enforceAggregationInOrder();
}
root_merging_aggregated->updateInputSortDescription(sort_description, DataStream::SortScope::Stream);
}
}
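A hedged end-to-end sketch of the shape this optimisation targets (cluster, database, and table names are illustrative): a distributed aggregation whose local plan can merge in order lets the remote reads enforce the same sorting:

SELECT a, sum(b)
FROM cluster('default', 'db', 'tab') -- assuming 'tab' is a MergeTree table ORDER BY a on every shard
GROUP BY a
SETTINGS optimize_aggregation_in_order = 1,
         distributed_aggregation_memory_efficient = 1,
         enable_memory_bound_merging_of_aggregation_results = 1;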

View File

@ -679,6 +679,210 @@ InputOrderInfoPtr buildInputOrderInfo(
return std::make_shared<InputOrderInfo>(order_key_prefix_descr, next_sort_key, read_direction, limit);
}
/// We really need three different sort descriptions here.
/// For example:
///
/// create table tab (a Int32, b Int32, c Int32, d Int32) engine = MergeTree order by (a, b, c);
/// select a, any(b), c, d from tab where b = 1 group by a, c, d order by c, d;
///
/// We would like to have:
/// (a, b, c) - a sort description for reading from the table (it goes into input_order)
/// (a, c) - a sort description for merging (the input of AggregatingInOrderTransform is sorted by this prefix of the GROUP BY keys)
/// (a, c, d) - a GROUP BY sort description (the input of FinishAggregatingInOrderTransform is sorted by all GROUP BY keys)
///
/// The sort description from input_order is not actually used; ReadFromMergeTree uses only the PK prefix size.
/// We should remove it later.
struct AggregationInputOrder
{
InputOrderInfoPtr input_order;
SortDescription sort_description_for_merging;
SortDescription group_by_sort_description;
};
AggregationInputOrder buildInputOrderInfo(
const FixedColumns & fixed_columns,
const ActionsDAGPtr & dag,
const Names & group_by_keys,
const ActionsDAG & sorting_key_dag,
const Names & sorting_key_columns)
{
MatchedTrees::Matches matches;
FixedColumns fixed_key_columns;
/// For every column in the PK, find any match among the GROUP BY keys.
using ReverseMatches = std::unordered_map<const ActionsDAG::Node *, MatchedTrees::Matches::const_iterator>;
ReverseMatches reverse_matches;
if (dag)
{
matches = matchTrees(sorting_key_dag, *dag);
for (const auto & [node, match] : matches)
{
if (!match.monotonicity || match.monotonicity->strict)
{
if (match.node && fixed_columns.contains(node))
fixed_key_columns.insert(match.node);
}
}
enreachFixedColumns(sorting_key_dag, fixed_key_columns);
for (auto it = matches.cbegin(); it != matches.cend(); ++it)
{
const MatchedTrees::Match * match = &it->second;
if (match->node)
{
auto [jt, inserted] = reverse_matches.emplace(match->node, it);
if (!inserted)
{
/// Find the best match for PK node.
/// Direct match > strict monotonic > monotonic.
const MatchedTrees::Match * prev_match = &jt->second->second;
bool is_better = prev_match->monotonicity && !match->monotonicity;
if (!is_better)
{
bool both_monotonic = prev_match->monotonicity && match->monotonicity;
is_better = both_monotonic && match->monotonicity->strict && !prev_match->monotonicity->strict;
}
if (is_better)
jt->second = it;
}
}
}
}
/// This is the resulting direction in which we will read from MergeTree:
/// 1 - in order,
/// -1 - in reverse order,
/// 0 - usual read, don't apply optimization
///
/// So far, 0 means any direction is possible. It is OK for a constant prefix.
int read_direction = 0;
size_t next_sort_key = 0;
std::unordered_set<std::string_view> not_matched_group_by_keys(group_by_keys.begin(), group_by_keys.end());
SortDescription group_by_sort_description;
group_by_sort_description.reserve(group_by_keys.size());
SortDescription order_key_prefix_descr;
order_key_prefix_descr.reserve(sorting_key_columns.size());
while (!not_matched_group_by_keys.empty() && next_sort_key < sorting_key_columns.size())
{
const auto & sorting_key_column = sorting_key_columns[next_sort_key];
/// Direction for current sort key.
int current_direction = 0;
bool strict_monotonic = true;
std::unordered_set<std::string_view>::iterator group_by_key_it;
const ActionsDAG::Node * sort_column_node = sorting_key_dag.tryFindInOutputs(sorting_key_column);
/// This should not happen.
if (!sort_column_node)
break;
if (!dag)
{
/// This is possible if there were no Expression or Filter steps in the plan.
/// Example: SELECT * FROM tab ORDER BY a, b
if (sort_column_node->type != ActionsDAG::ActionType::INPUT)
break;
group_by_key_it = not_matched_group_by_keys.find(sorting_key_column);
if (group_by_key_it == not_matched_group_by_keys.end())
break;
current_direction = 1;
//std::cerr << "====== (no dag) Found direct match" << std::endl;
++next_sort_key;
}
else
{
const MatchedTrees::Match * match = nullptr;
const ActionsDAG::Node * group_by_key_node = nullptr;
if (const auto match_it = reverse_matches.find(sort_column_node); match_it != reverse_matches.end())
{
group_by_key_node = match_it->second->first;
match = &match_it->second->second;
}
//std::cerr << "====== Finding match for " << sort_column_node->result_name << ' ' << static_cast<const void *>(sort_column_node) << std::endl;
if (match && match->node)
group_by_key_it = not_matched_group_by_keys.find(group_by_key_node->result_name);
if (match && match->node && group_by_key_it != not_matched_group_by_keys.end())
{
//std::cerr << "====== Found direct match" << std::endl;
current_direction = 1;
if (match->monotonicity)
{
current_direction *= match->monotonicity->direction;
strict_monotonic = match->monotonicity->strict;
}
++next_sort_key;
}
else if (fixed_key_columns.contains(sort_column_node))
{
//std::cerr << "+++++++++ Found fixed key by match" << std::endl;
++next_sort_key;
}
else
break;
}
/// read_direction == 0 means we can choose any global direction.
/// current_direction == 0 means the current key is fixed and any direction is possible for it.
if (current_direction && read_direction && current_direction != read_direction)
break;
if (read_direction == 0 && current_direction != 0)
read_direction = current_direction;
if (current_direction)
{
/// Aggregation in order will always read in table order.
/// Here, current_direction is a direction which will be applied to every key.
/// Example:
/// CREATE TABLE t (x, y, z) ENGINE = MergeTree ORDER BY (x, y)
/// SELECT ... FROM t GROUP BY negate(y), negate(x), z
/// Here, current_direction will be -1 because negate() is negatively monotonic,
/// Prefix sort description for reading will be (negate(y) DESC, negate(x) DESC),
/// Sort description for GROUP BY will be (negate(y) DESC, negate(x) DESC, z).
//std::cerr << "---- adding " << std::string(*group_by_key_it) << std::endl;
group_by_sort_description.emplace_back(SortColumnDescription(std::string(*group_by_key_it), current_direction));
order_key_prefix_descr.emplace_back(SortColumnDescription(std::string(*group_by_key_it), current_direction));
not_matched_group_by_keys.erase(group_by_key_it);
}
else
{
/// If the column is fixed, we will read it in table order as well.
//std::cerr << "---- adding " << sorting_key_column << std::endl;
order_key_prefix_descr.emplace_back(SortColumnDescription(sorting_key_column, 1));
}
if (current_direction && !strict_monotonic)
break;
}
if (read_direction == 0 || group_by_sort_description.empty())
return {};
SortDescription sort_description_for_merging = group_by_sort_description;
for (const auto & key : not_matched_group_by_keys)
group_by_sort_description.emplace_back(SortColumnDescription(std::string(key)));
auto input_order = std::make_shared<InputOrderInfo>(order_key_prefix_descr, next_sort_key, /*read_direction*/ 1, /* limit */ 0);
return { std::move(input_order), std::move(sort_description_for_merging), std::move(group_by_sort_description) };
}
InputOrderInfoPtr buildInputOrderInfo(
ReadFromMergeTree * reading,
const FixedColumns & fixed_columns,
@ -733,6 +937,56 @@ InputOrderInfoPtr buildInputOrderInfo(
return order_info;
}
AggregationInputOrder buildInputOrderInfo(
ReadFromMergeTree * reading,
const FixedColumns & fixed_columns,
const ActionsDAGPtr & dag,
const Names & group_by_keys)
{
const auto & sorting_key = reading->getStorageMetadata()->getSortingKey();
const auto & sorting_key_columns = sorting_key.column_names;
return buildInputOrderInfo(
fixed_columns,
dag, group_by_keys,
sorting_key.expression->getActionsDAG(), sorting_key_columns);
}
AggregationInputOrder buildInputOrderInfo(
ReadFromMerge * merge,
const FixedColumns & fixed_columns,
const ActionsDAGPtr & dag,
const Names & group_by_keys)
{
const auto & tables = merge->getSelectedTables();
AggregationInputOrder order_info;
for (const auto & table : tables)
{
auto storage = std::get<StoragePtr>(table);
const auto & sorting_key = storage->getInMemoryMetadataPtr()->getSortingKey();
const auto & sorting_key_columns = sorting_key.column_names;
if (sorting_key_columns.empty())
return {};
auto table_order_info = buildInputOrderInfo(
fixed_columns,
dag, group_by_keys,
sorting_key.expression->getActionsDAG(), sorting_key_columns);
if (!table_order_info.input_order)
return {};
if (!order_info.input_order)
order_info = table_order_info;
else if (*order_info.input_order != *table_order_info.input_order)
return {};
}
return order_info;
}
InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & node)
{
QueryPlan::Node * reading_node = findReadingStep(node);
@ -781,6 +1035,53 @@ InputOrderInfoPtr buildInputOrderInfo(SortingStep & sorting, QueryPlan::Node & n
return nullptr;
}
AggregationInputOrder buildInputOrderInfo(AggregatingStep & aggregating, QueryPlan::Node & node)
{
QueryPlan::Node * reading_node = findReadingStep(node);
if (!reading_node)
return {};
const auto & keys = aggregating.getParams().keys;
size_t limit = 0;
ActionsDAGPtr dag;
FixedColumns fixed_columns;
buildSortingDAG(node, dag, fixed_columns, limit);
if (dag && !fixed_columns.empty())
enreachFixedColumns(*dag, fixed_columns);
if (auto * reading = typeid_cast<ReadFromMergeTree *>(reading_node->step.get()))
{
auto order_info = buildInputOrderInfo(
reading,
fixed_columns,
dag, keys);
if (order_info.input_order)
reading->requestReadingInOrder(
order_info.input_order->used_prefix_of_sorting_key_size,
order_info.input_order->direction,
order_info.input_order->limit);
return order_info;
}
else if (auto * merge = typeid_cast<ReadFromMerge *>(reading_node->step.get()))
{
auto order_info = buildInputOrderInfo(
merge,
fixed_columns,
dag, keys);
if (order_info.input_order)
merge->requestReadingInOrder(order_info.input_order);
return order_info;
}
return {};
}
void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes)
{
if (node.children.size() != 1)
@ -860,6 +1161,25 @@ void optimizeReadInOrder(QueryPlan::Node & node, QueryPlan::Nodes & nodes)
}
}
void optimizeAggregationInOrder(QueryPlan::Node & node, QueryPlan::Nodes &)
{
if (node.children.size() != 1)
return;
auto * aggregating = typeid_cast<AggregatingStep *>(node.step.get());
if (!aggregating)
return;
if (aggregating->inOrder() || aggregating->isGroupingSets())
return;
/// TODO: maybe add support for UNION later.
if (auto order_info = buildInputOrderInfo(*aggregating, *node.children.front()); order_info.input_order)
{
aggregating->applyOrder(std::move(order_info.sort_description_for_merging), std::move(order_info.group_by_sort_description));
}
}
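A short sketch of how this rewrite is gated (the table name is hypothetical): both the classic setting and the new plan-level flag from QueryPlanOptimizationSettings must be enabled:

SET optimize_aggregation_in_order = 1;
SET query_plan_aggregation_in_order = 1;
SELECT a, count() FROM tab GROUP BY a; -- assuming tab is a MergeTree table ORDER BY a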
/// This optimisation is obsolete and will be removed.
/// optimizeReadInOrder covers it.
size_t tryReuseStorageOrderingForWindowFunctions(QueryPlan::Node * parent_node, QueryPlan::Nodes & /*nodes*/)

View File

@ -1,6 +1,8 @@
#include <Processors/QueryPlan/Optimizations/Optimizations.h>
#include <Processors/QueryPlan/Optimizations/QueryPlanOptimizationSettings.h>
#include <Common/Exception.h>
#include <Processors/QueryPlan/MergingAggregatedStep.h>
#include <Processors/QueryPlan/UnionStep.h>
#include <stack>
namespace DB
@ -112,6 +114,9 @@ void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_s
if (optimization_settings.read_in_order)
optimizeReadInOrder(*frame.node, nodes);
if (optimization_settings.aggregation_in_order)
optimizeAggregationInOrder(*frame.node, nodes);
if (optimization_settings.distinct_in_order)
tryDistinctReadInOrder(frame.node);
}
@ -126,6 +131,7 @@ void optimizeTreeSecondPass(const QueryPlanOptimizationSettings & optimization_s
}
optimizePrimaryKeyCondition(stack);
enableMemoryBoundMerging(*frame.node, nodes);
stack.pop_back();
}

View File

@ -25,6 +25,7 @@ namespace DB
namespace ErrorCodes
{
extern const int ALL_CONNECTION_TRIES_FAILED;
extern const int LOGICAL_ERROR;
}
static void addConvertingActions(Pipe & pipe, const Block & header)
@ -51,6 +52,32 @@ static void addConvertingActions(Pipe & pipe, const Block & header)
});
}
static void enforceSorting(QueryProcessingStage::Enum stage, DataStream & output_stream, Context & context, SortDescription output_sort_description)
{
if (stage != QueryProcessingStage::WithMergeableState)
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Cannot enforce sorting for ReadFromRemote step up to stage {}",
QueryProcessingStage::toString(stage));
context.setSetting("enable_memory_bound_merging_of_aggregation_results", true);
output_stream.sort_description = std::move(output_sort_description);
output_stream.sort_scope = DataStream::SortScope::Stream;
}
static void enforceAggregationInOrder(QueryProcessingStage::Enum stage, Context & context)
{
if (stage != QueryProcessingStage::WithMergeableState)
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Cannot enforce aggregation in order for ReadFromRemote step up to stage {}",
QueryProcessingStage::toString(stage));
context.setSetting("optimize_aggregation_in_order", true);
context.setSetting("force_aggregation_in_order", true);
}
static String formattedAST(const ASTPtr & ast)
{
if (!ast)
@ -70,15 +97,13 @@ ReadFromRemote::ReadFromRemote(
QueryProcessingStage::Enum stage_,
StorageID main_table_,
ASTPtr table_func_ptr_,
ContextPtr context_,
ContextMutablePtr context_,
ThrottlerPtr throttler_,
Scalars scalars_,
Tables external_tables_,
Poco::Logger * log_,
UInt32 shard_count_,
std::shared_ptr<const StorageLimitsList> storage_limits_,
SortDescription output_sort_description_,
DataStream::SortScope output_sort_scope_)
std::shared_ptr<const StorageLimitsList> storage_limits_)
: ISourceStep(DataStream{.header = std::move(header_)})
, shards(std::move(shards_))
, stage(stage_)
@ -92,8 +117,16 @@ ReadFromRemote::ReadFromRemote(
, log(log_)
, shard_count(shard_count_)
{
output_stream->sort_description = std::move(output_sort_description_);
output_stream->sort_scope = output_sort_scope_;
}
void ReadFromRemote::enforceSorting(SortDescription output_sort_description)
{
DB::enforceSorting(stage, *output_stream, *context, output_sort_description);
}
void ReadFromRemote::enforceAggregationInOrder()
{
DB::enforceAggregationInOrder(stage, *context);
}
void ReadFromRemote::addLazyPipe(Pipes & pipes, const ClusterProxy::SelectStreamFactory::Shard & shard)
@ -238,14 +271,12 @@ ReadFromParallelRemoteReplicasStep::ReadFromParallelRemoteReplicasStep(
QueryProcessingStage::Enum stage_,
StorageID main_table_,
ASTPtr table_func_ptr_,
ContextPtr context_,
ContextMutablePtr context_,
ThrottlerPtr throttler_,
Scalars scalars_,
Tables external_tables_,
Poco::Logger * log_,
std::shared_ptr<const StorageLimitsList> storage_limits_,
SortDescription output_sort_description_,
DataStream::SortScope output_sort_scope_)
std::shared_ptr<const StorageLimitsList> storage_limits_)
: ISourceStep(DataStream{.header = std::move(header_)})
, coordinator(std::move(coordinator_))
, shard(std::move(shard_))
@ -266,11 +297,17 @@ ReadFromParallelRemoteReplicasStep::ReadFromParallelRemoteReplicasStep(
description.push_back(fmt::format("Replica: {}", address.host_name));
setStepDescription(boost::algorithm::join(description, ", "));
output_stream->sort_description = std::move(output_sort_description_);
output_stream->sort_scope = output_sort_scope_;
}
void ReadFromParallelRemoteReplicasStep::enforceSorting(SortDescription output_sort_description)
{
DB::enforceSorting(stage, *output_stream, *context, output_sort_description);
}
void ReadFromParallelRemoteReplicasStep::enforceAggregationInOrder()
{
DB::enforceAggregationInOrder(stage, *context);
}
void ReadFromParallelRemoteReplicasStep::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &)
{

View File

@ -27,34 +27,29 @@ public:
QueryProcessingStage::Enum stage_,
StorageID main_table_,
ASTPtr table_func_ptr_,
ContextPtr context_,
ContextMutablePtr context_,
ThrottlerPtr throttler_,
Scalars scalars_,
Tables external_tables_,
Poco::Logger * log_,
UInt32 shard_count_,
std::shared_ptr<const StorageLimitsList> storage_limits_,
SortDescription output_sort_description_,
DataStream::SortScope output_sort_scope_);
std::shared_ptr<const StorageLimitsList> storage_limits_);
String getName() const override { return "ReadFromRemote"; }
void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override;
private:
enum class Mode
{
PerReplica,
PerShard
};
void enforceSorting(SortDescription output_sort_description);
void enforceAggregationInOrder();
private:
ClusterProxy::SelectStreamFactory::Shards shards;
QueryProcessingStage::Enum stage;
StorageID main_table;
ASTPtr table_func_ptr;
ContextPtr context;
ContextMutablePtr context;
ThrottlerPtr throttler;
Scalars scalars;
@ -80,19 +75,20 @@ public:
QueryProcessingStage::Enum stage_,
StorageID main_table_,
ASTPtr table_func_ptr_,
ContextPtr context_,
ContextMutablePtr context_,
ThrottlerPtr throttler_,
Scalars scalars_,
Tables external_tables_,
Poco::Logger * log_,
std::shared_ptr<const StorageLimitsList> storage_limits_,
SortDescription output_sort_description_,
DataStream::SortScope output_sort_scope_);
std::shared_ptr<const StorageLimitsList> storage_limits_);
String getName() const override { return "ReadFromRemoteParallelReplicas"; }
void initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) override;
void enforceSorting(SortDescription output_sort_description);
void enforceAggregationInOrder();
private:
void addPipeForSingeReplica(Pipes & pipes, std::shared_ptr<ConnectionPoolWithFailover> pool, IConnections::ReplicaInfo replica_info);
@ -104,7 +100,7 @@ private:
StorageID main_table;
ASTPtr table_func_ptr;
ContextPtr context;
ContextMutablePtr context;
ThrottlerPtr throttler;
Scalars scalars;

View File

@ -37,6 +37,11 @@ UnionStep::UnionStep(DataStreams input_streams_, size_t max_threads_)
else
output_stream = DataStream{.header = header};
updateOutputSortDescription();
}
void UnionStep::updateOutputSortDescription()
{
SortDescription common_sort_description = input_streams.front().sort_description;
DataStream::SortScope sort_scope = input_streams.front().sort_scope;
for (const auto & input_stream : input_streams)

View File

@ -19,6 +19,8 @@ public:
size_t getMaxThreads() const { return max_threads; }
void updateOutputSortDescription();
private:
Block header;
size_t max_threads;

View File

@ -11,11 +11,11 @@ namespace DB
AggregatingInOrderTransform::AggregatingInOrderTransform(
Block header,
AggregatingTransformParamsPtr params_,
InputOrderInfoPtr group_by_info_,
const SortDescription & sort_description_for_merging,
const SortDescription & group_by_description_,
size_t max_block_size_, size_t max_block_bytes_)
: AggregatingInOrderTransform(std::move(header), std::move(params_),
group_by_info_, group_by_description_,
sort_description_for_merging, group_by_description_,
max_block_size_, max_block_bytes_,
std::make_unique<ManyAggregatedData>(1), 0)
{
@ -23,7 +23,7 @@ AggregatingInOrderTransform::AggregatingInOrderTransform(
AggregatingInOrderTransform::AggregatingInOrderTransform(
Block header, AggregatingTransformParamsPtr params_,
InputOrderInfoPtr group_by_info_,
const SortDescription & sort_description_for_merging,
const SortDescription & group_by_description_,
size_t max_block_size_, size_t max_block_bytes_,
ManyAggregatedDataPtr many_data_, size_t current_variant)
@ -32,7 +32,6 @@ AggregatingInOrderTransform::AggregatingInOrderTransform(
, max_block_bytes(max_block_bytes_)
, params(std::move(params_))
, aggregates_mask(getAggregatesMask(params->getHeader(), params->params.aggregates))
, group_by_info(group_by_info_)
, sort_description(group_by_description_)
, aggregate_columns(params->params.aggregates_size)
, many_data(std::move(many_data_))
@ -41,13 +40,13 @@ AggregatingInOrderTransform::AggregatingInOrderTransform(
/// We won't finalize states, so that states for the same keys (generated due to multi-threaded execution) can be merged in AggregatingSortedTransform
res_header = params->getCustomHeader(/* final_= */ false);
for (size_t i = 0; i < group_by_info->sort_description_for_merging.size(); ++i)
for (size_t i = 0; i < sort_description_for_merging.size(); ++i)
{
const auto & column_description = group_by_description_[i];
group_by_description.emplace_back(column_description, res_header.getPositionByName(column_description.column_name));
}
if (group_by_info->sort_description_for_merging.size() < group_by_description_.size())
if (sort_description_for_merging.size() < group_by_description_.size())
{
group_by_key = true;
/// group_by_description may contain duplicates, so we use keys_size from Aggregator::params

View File

@ -23,13 +23,13 @@ class AggregatingInOrderTransform : public IProcessor
{
public:
AggregatingInOrderTransform(Block header, AggregatingTransformParamsPtr params,
InputOrderInfoPtr group_by_info_,
const SortDescription & sort_description_for_merging,
const SortDescription & group_by_description_,
size_t max_block_size_, size_t max_block_bytes_,
ManyAggregatedDataPtr many_data, size_t current_variant);
AggregatingInOrderTransform(Block header, AggregatingTransformParamsPtr params,
InputOrderInfoPtr group_by_info_,
const SortDescription & sort_description_for_merging,
const SortDescription & group_by_description_,
size_t max_block_size_, size_t max_block_bytes_);
@ -58,7 +58,6 @@ private:
AggregatingTransformParamsPtr params;
ColumnsMask aggregates_mask;
InputOrderInfoPtr group_by_info;
/// For sortBlock()
SortDescription sort_description;
SortDescriptionWithPositions group_by_description;

View File

@ -572,7 +572,6 @@ ConnectionPoolPtr StorageDistributedDirectoryMonitor::createPool(const std::stri
std::map<UInt64, std::string> StorageDistributedDirectoryMonitor::getFiles()
{
std::map<UInt64, std::string> files;
size_t new_bytes_count = 0;
fs::directory_iterator end;
for (fs::directory_iterator it{path}; it != end; ++it)
@ -581,23 +580,9 @@ std::map<UInt64, std::string> StorageDistributedDirectoryMonitor::getFiles()
if (!it->is_directory() && startsWith(fs::path(file_path_str).extension(), ".bin"))
{
files[parse<UInt64>(fs::path(file_path_str).stem())] = file_path_str;
new_bytes_count += fs::file_size(fs::path(file_path_str));
}
}
{
std::lock_guard status_lock(status_mutex);
if (status.files_count != files.size())
LOG_TRACE(log, "Files set to {} (was {})", files.size(), status.files_count);
if (status.bytes_count != new_bytes_count)
LOG_TRACE(log, "Bytes set to {} (was {})", new_bytes_count, status.bytes_count);
metric_pending_files.changeTo(files.size());
status.files_count = files.size();
status.bytes_count = new_bytes_count;
}
return files;
}
bool StorageDistributedDirectoryMonitor::processFiles(const std::map<UInt64, std::string> & files)

View File

@ -1643,6 +1643,12 @@ void IMergeTreeDataPart::remove()
return CanRemoveDescription{.can_remove_anything = true, .files_not_to_remove = {} };
}
if (getState() == MergeTreeDataPartState::Temporary)
{
LOG_TRACE(storage.log, "Part {} in temporary state can be removed without unlocking shared state", name);
return CanRemoveDescription{.can_remove_anything = false, .files_not_to_remove = {} };
}
auto [can_remove, files_not_to_remove] = canRemovePart();
if (!can_remove)
LOG_TRACE(storage.log, "Blobs of part {} cannot be removed", name);

View File

@ -385,9 +385,13 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(
: static_cast<size_t>(settings.max_threads);
InputOrderInfoPtr group_by_info = query_info.projection->input_order_info;
SortDescription sort_description_for_merging;
SortDescription group_by_sort_description;
if (group_by_info && settings.optimize_aggregation_in_order)
{
group_by_sort_description = getSortDescriptionFromGroupBy(select_query);
sort_description_for_merging = group_by_info->sort_description_for_merging;
}
else
group_by_info = nullptr;
@ -406,7 +410,7 @@ QueryPlanPtr MergeTreeDataSelectExecutor::read(
temporary_data_merge_threads,
/* storage_has_evenly_distributed_read_= */ false,
/* group_by_use_nulls */ false,
std::move(group_by_info),
std::move(sort_description_for_merging),
std::move(group_by_sort_description),
should_produce_results_in_order_of_bucket_number,
settings.enable_memory_bound_merging_of_aggregation_results);

View File

@ -91,12 +91,17 @@ void MergeTreeIndexGranuleBloomFilter::deserializeBinary(ReadBuffer & istr, Merg
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown index version {}.", version);
readVarUInt(total_rows, istr);
static size_t atom_size = 8;
size_t bytes_size = (bits_per_row * total_rows + atom_size - 1) / atom_size;
size_t read_size = bytes_size;
for (auto & filter : bloom_filters)
{
static size_t atom_size = 8;
size_t bytes_size = (bits_per_row * total_rows + atom_size - 1) / atom_size;
filter = std::make_shared<BloomFilter>(bytes_size, hash_functions, 0);
istr.readStrict(reinterpret_cast<char *>(filter->getFilter().data()), bytes_size);
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
read_size = filter->getFilter().size() * sizeof(BloomFilter::UnderType);
#endif
istr.readStrict(reinterpret_cast<char *>(filter->getFilter().data()), read_size);
}
}
@ -105,11 +110,17 @@ void MergeTreeIndexGranuleBloomFilter::serializeBinary(WriteBuffer & ostr) const
if (empty())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to write empty bloom filter index.");
static size_t atom_size = 8;
writeVarUInt(total_rows, ostr);
size_t bytes_size = (bits_per_row * total_rows + atom_size - 1) / atom_size;
static size_t atom_size = 8;
size_t write_size = (bits_per_row * total_rows + atom_size - 1) / atom_size;
for (const auto & bloom_filter : bloom_filters)
ostr.write(reinterpret_cast<const char *>(bloom_filter->getFilter().data()), bytes_size);
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
write_size = bloom_filter->getFilter().size() * sizeof(BloomFilter::UnderType);
#endif
ostr.write(reinterpret_cast<const char *>(bloom_filter->getFilter().data()), write_size);
}
}
void MergeTreeIndexGranuleBloomFilter::fillingBloomFilter(BloomFilterPtr & bf, const Block & granule_index_block, size_t index_hash_column) const
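For context, these granules back the bloom_filter skip index; a minimal, illustrative definition whose granules go through the (de)serialization above:

-- illustrative table and index names
CREATE TABLE bf (s String, INDEX ix s TYPE bloom_filter(0.01) GRANULARITY 1)
ENGINE = MergeTree ORDER BY s;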

View File

@ -41,7 +41,7 @@ def process_result(file_path):
if is_ok and report_url == "null":
return is_ok, None
status = f'OK: Bug reproduced (<a href="{report_url}">Report</a>'
status = f'OK: Bug reproduced (<a href="{report_url}">Report</a>)'
if not is_ok:
status = f'Bug is not reproduced (<a href="{report_url}">Report</a>)'
test_results.append([f"{prefix}: {description}", status])

View File

@ -13,14 +13,16 @@ import jwt
import requests # type: ignore
import boto3 # type: ignore
PULL_REQUEST_CI = "PullRequestCI"
NEED_RERUN_ON_EDITED = {
"PullRequestCI",
"DocsCheck",
}
NEED_RERUN_OR_CANCELL_WORKFLOWS = {
PULL_REQUEST_CI,
"DocsCheck",
"DocsReleaseChecks",
"BackportPR",
}
}.union(NEED_RERUN_ON_EDITED)
MAX_RETRY = 5
@ -334,7 +336,7 @@ def main(event):
most_recent_workflow = workflow_descriptions[-1]
if (
most_recent_workflow.status == "completed"
and most_recent_workflow.name == PULL_REQUEST_CI
and most_recent_workflow.name in NEED_RERUN_ON_EDITED
):
print(
"The PR's body is changed and workflow is finished. "

View File

@ -185,9 +185,9 @@ def test_mysql_client(started_cluster):
)
assert (
stderr.decode()
== "mysql: [Warning] Using a password on the command line interface can be insecure.\n"
"ERROR 516 (00000): default: Authentication failed: password is incorrect or there is no user with such name\n"
"mysql: [Warning] Using a password on the command line interface can be insecure.\n"
"ERROR 516 (00000): default: Authentication failed: password is incorrect, or there is no user with such name"
in stderr.decode()
)
code, (stdout, stderr) = started_cluster.mysql_client_container.exec_run(
@ -585,9 +585,10 @@ def test_python_client(started_cluster):
port=server_port,
)
assert exc_info.value.args == (
516,
"default: Authentication failed: password is incorrect or there is no user with such name",
assert exc_info.value.args[0] == 516
assert (
"default: Authentication failed: password is incorrect, or there is no user with such name"
in exc_info.value.args[1]
)
client = pymysql.connections.Connection(

View File

@ -13,7 +13,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# In this test we do the opposite: we pass invalid credentials while the server accepts the default user without a password.
# If the bug exists, they will be ignored (treated as empty credentials) and the query will succeed.
for i in {3950..4100}; do ${CLICKHOUSE_CURL} --user default:12345 "${CLICKHOUSE_URL}&query=SELECT+1"$(perl -e "print '+'x$i") | grep -v -F 'password' ||:; done
for i in {3950..4100}; do ${CLICKHOUSE_CURL} --user default:12345 "${CLICKHOUSE_URL}&query=SELECT+1"$(perl -e "print '+'x$i") | tr -d '\n' | grep -v -F 'password' ||:; done
# You can check that the bug exists in old version by running the old server in Docker:
# docker run --network host -it --rm clickhouse/clickhouse-server:1.1.54385

View File

@ -82,6 +82,7 @@ addYears
addressToLine
addressToLineWithInlines
addressToSymbol
age
alphaTokens
and
appendTrailingCharIfAbsent

View File

@ -0,0 +1,76 @@
Various intervals
-1
0
0
-7
-3
0
-23
-11
0
-103
-52
0
-730
-364
1
-17520
-8736
24
-1051200
-524160
1440
-63072000
-31449600
86400
DateTime arguments
0
23
1439
86399
Date and DateTime arguments
-63072000
-31449600
86400
Constant and non-constant arguments
-1051200
-524160
1440
Case insensitive
-10
Dependence of timezones
0
0
1
25
1500
90000
0
0
1
24
1440
86400
0
0
1
25
1500
90000
0
0
1
24
1440
86400
Additional test
1
1
1
1
1
1
1
1
1
1

View File

@ -0,0 +1,82 @@
SELECT 'Various intervals';
SELECT age('year', toDate('2017-12-31'), toDate('2016-01-01'));
SELECT age('year', toDate('2017-12-31'), toDate('2017-01-01'));
SELECT age('year', toDate('2017-12-31'), toDate('2018-01-01'));
SELECT age('quarter', toDate('2017-12-31'), toDate('2016-01-01'));
SELECT age('quarter', toDate('2017-12-31'), toDate('2017-01-01'));
SELECT age('quarter', toDate('2017-12-31'), toDate('2018-01-01'));
SELECT age('month', toDate('2017-12-31'), toDate('2016-01-01'));
SELECT age('month', toDate('2017-12-31'), toDate('2017-01-01'));
SELECT age('month', toDate('2017-12-31'), toDate('2018-01-01'));
SELECT age('week', toDate('2017-12-31'), toDate('2016-01-01'));
SELECT age('week', toDate('2017-12-31'), toDate('2017-01-01'));
SELECT age('week', toDate('2017-12-31'), toDate('2018-01-01'));
SELECT age('day', toDate('2017-12-31'), toDate('2016-01-01'));
SELECT age('day', toDate('2017-12-31'), toDate('2017-01-01'));
SELECT age('day', toDate('2017-12-31'), toDate('2018-01-01'));
SELECT age('hour', toDate('2017-12-31'), toDate('2016-01-01'), 'UTC');
SELECT age('hour', toDate('2017-12-31'), toDate('2017-01-01'), 'UTC');
SELECT age('hour', toDate('2017-12-31'), toDate('2018-01-01'), 'UTC');
SELECT age('minute', toDate('2017-12-31'), toDate('2016-01-01'), 'UTC');
SELECT age('minute', toDate('2017-12-31'), toDate('2017-01-01'), 'UTC');
SELECT age('minute', toDate('2017-12-31'), toDate('2018-01-01'), 'UTC');
SELECT age('second', toDate('2017-12-31'), toDate('2016-01-01'), 'UTC');
SELECT age('second', toDate('2017-12-31'), toDate('2017-01-01'), 'UTC');
SELECT age('second', toDate('2017-12-31'), toDate('2018-01-01'), 'UTC');
SELECT 'DateTime arguments';
SELECT age('day', toDateTime('2016-01-01 00:00:01', 'UTC'), toDateTime('2016-01-02 00:00:00', 'UTC'), 'UTC');
SELECT age('hour', toDateTime('2016-01-01 00:00:01', 'UTC'), toDateTime('2016-01-02 00:00:00', 'UTC'), 'UTC');
SELECT age('minute', toDateTime('2016-01-01 00:00:01', 'UTC'), toDateTime('2016-01-02 00:00:00', 'UTC'), 'UTC');
SELECT age('second', toDateTime('2016-01-01 00:00:01', 'UTC'), toDateTime('2016-01-02 00:00:00', 'UTC'), 'UTC');
SELECT 'Date and DateTime arguments';
SELECT age('second', toDate('2017-12-31'), toDateTime('2016-01-01 00:00:00', 'UTC'), 'UTC');
SELECT age('second', toDateTime('2017-12-31 00:00:00', 'UTC'), toDate('2017-01-01'), 'UTC');
SELECT age('second', toDateTime('2017-12-31 00:00:00', 'UTC'), toDateTime('2018-01-01 00:00:00', 'UTC'));
SELECT 'Constant and non-constant arguments';
SELECT age('minute', materialize(toDate('2017-12-31')), toDate('2016-01-01'), 'UTC');
SELECT age('minute', toDate('2017-12-31'), materialize(toDate('2017-01-01')), 'UTC');
SELECT age('minute', materialize(toDate('2017-12-31')), materialize(toDate('2018-01-01')), 'UTC');
SELECT 'Case insensitive';
SELECT age('year', today(), today() - INTERVAL 10 YEAR);
SELECT 'Dependence of timezones';
SELECT age('month', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
SELECT age('week', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
SELECT age('day', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
SELECT age('hour', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
SELECT age('minute', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
SELECT age('second', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
SELECT age('month', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
SELECT age('week', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
SELECT age('day', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
SELECT age('hour', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
SELECT age('minute', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
SELECT age('second', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
SELECT age('month', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
SELECT age('week', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
SELECT age('day', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
SELECT age('hour', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
SELECT age('minute', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
SELECT age('second', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
SELECT age('month', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
SELECT age('week', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
SELECT age('day', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
SELECT age('hour', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
SELECT age('minute', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
SELECT age('second', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
SELECT 'Additional test';
SELECT number = age('month', now() - INTERVAL number MONTH, now()) FROM system.numbers LIMIT 10;
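For contrast with dateDiff, which counts crossed unit boundaries, age counts complete elapsed units; a minimal pair (the first result matches the reference output above):

SELECT age('year', toDate('2017-12-31'), toDate('2018-01-01'));      -- 0: a full year has not elapsed
SELECT dateDiff('year', toDate('2017-12-31'), toDate('2018-01-01')); -- 1: the year boundary was crossed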

View File

@ -0,0 +1,169 @@
-- { echo }
-- Date32 vs Date32
SELECT age('second', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
86400
SELECT age('minute', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
1440
SELECT age('hour', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
24
SELECT age('day', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
1
SELECT age('week', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-08', 'UTC'), 'UTC');
1
SELECT age('month', toDate32('1927-01-01', 'UTC'), toDate32('1927-02-01', 'UTC'), 'UTC');
1
SELECT age('quarter', toDate32('1927-01-01', 'UTC'), toDate32('1927-04-01', 'UTC'), 'UTC');
1
SELECT age('year', toDate32('1927-01-01', 'UTC'), toDate32('1928-01-01', 'UTC'), 'UTC');
1
-- With DateTime64
-- Date32 vs DateTime64
SELECT age('second', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
86400
SELECT age('minute', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
1440
SELECT age('hour', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
24
SELECT age('day', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
1
SELECT age('week', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-08 00:00:00', 3, 'UTC'), 'UTC');
1
SELECT age('month', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-02-01 00:00:00', 3, 'UTC'), 'UTC');
1
SELECT age('quarter', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-04-01 00:00:00', 3, 'UTC'), 'UTC');
1
SELECT age('year', toDate32('1927-01-01', 'UTC'), toDateTime64('1928-01-01 00:00:00', 3, 'UTC'), 'UTC');
1
-- DateTime64 vs Date32
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
86400
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
1440
SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
24
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
1
SELECT age('week', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-08', 'UTC'), 'UTC');
1
SELECT age('month', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-02-01', 'UTC'), 'UTC');
1
SELECT age('quarter', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-04-01', 'UTC'), 'UTC');
1
SELECT age('year', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1928-01-01', 'UTC'), 'UTC');
1
-- With DateTime
-- Date32 vs DateTime
SELECT age('second', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
86400
SELECT age('minute', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
1440
SELECT age('hour', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
24
SELECT age('day', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
1
SELECT age('week', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-25 00:00:00', 'UTC'), 'UTC');
1
SELECT age('month', toDate32('2015-08-18', 'UTC'), toDateTime('2015-09-18 00:00:00', 'UTC'), 'UTC');
1
SELECT age('quarter', toDate32('2015-08-18', 'UTC'), toDateTime('2015-11-18 00:00:00', 'UTC'), 'UTC');
1
SELECT age('year', toDate32('2015-08-18', 'UTC'), toDateTime('2016-08-18 00:00:00', 'UTC'), 'UTC');
1
-- DateTime vs Date32
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
86400
SELECT age('minute', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
1440
SELECT age('hour', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
24
SELECT age('day', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
1
SELECT age('week', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-25', 'UTC'), 'UTC');
1
SELECT age('month', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-09-18', 'UTC'), 'UTC');
1
SELECT age('quarter', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-11-18', 'UTC'), 'UTC');
1
SELECT age('year', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2016-08-18', 'UTC'), 'UTC');
1
-- With Date
-- Date32 vs Date
SELECT age('second', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
86400
SELECT age('minute', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
1440
SELECT age('hour', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
24
SELECT age('day', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
1
SELECT age('week', toDate32('2015-08-18', 'UTC'), toDate('2015-08-25', 'UTC'), 'UTC');
1
SELECT age('month', toDate32('2015-08-18', 'UTC'), toDate('2015-09-18', 'UTC'), 'UTC');
1
SELECT age('quarter', toDate32('2015-08-18', 'UTC'), toDate('2015-11-18', 'UTC'), 'UTC');
1
SELECT age('year', toDate32('2015-08-18', 'UTC'), toDate('2016-08-18', 'UTC'), 'UTC');
1
-- Date vs Date32
SELECT age('second', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
86400
SELECT age('minute', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
1440
SELECT age('hour', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
24
SELECT age('day', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
1
SELECT age('week', toDate('2015-08-18', 'UTC'), toDate32('2015-08-25', 'UTC'), 'UTC');
1
SELECT age('month', toDate('2015-08-18', 'UTC'), toDate32('2015-09-18', 'UTC'), 'UTC');
1
SELECT age('quarter', toDate('2015-08-18', 'UTC'), toDate32('2015-11-18', 'UTC'), 'UTC');
1
SELECT age('year', toDate('2015-08-18', 'UTC'), toDate32('2016-08-18', 'UTC'), 'UTC');
1
-- Const vs non-const columns
SELECT age('day', toDate32('1927-01-01', 'UTC'), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
1
SELECT age('day', toDate32('1927-01-01', 'UTC'), materialize(toDateTime64('1927-01-02 00:00:00', 3, 'UTC')), 'UTC');
1
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
1
SELECT age('day', toDate32('2015-08-18', 'UTC'), materialize(toDateTime('2015-08-19 00:00:00', 'UTC')), 'UTC');
1
SELECT age('day', toDateTime('2015-08-18 00:00:00', 'UTC'), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
1
SELECT age('day', toDate32('2015-08-18', 'UTC'), materialize(toDate('2015-08-19', 'UTC')), 'UTC');
1
SELECT age('day', toDate('2015-08-18', 'UTC'), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
1
-- Non-const vs const columns
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), toDate32('1927-01-02', 'UTC'), 'UTC');
1
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
1
SELECT age('day', materialize(toDateTime64('1927-01-01 00:00:00', 3, 'UTC')), toDate32('1927-01-02', 'UTC'), 'UTC');
1
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
1
SELECT age('day', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), toDate32('2015-08-19', 'UTC'), 'UTC');
1
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), toDate('2015-08-19', 'UTC'), 'UTC');
1
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), toDate32('2015-08-19', 'UTC'), 'UTC');
1
-- Non-const vs non-const columns
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
1
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), materialize(toDateTime64('1927-01-02 00:00:00', 3, 'UTC')), 'UTC');
1
SELECT age('day', materialize(toDateTime64('1927-01-01 00:00:00', 3, 'UTC')), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
1
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), materialize(toDateTime('2015-08-19 00:00:00', 'UTC')), 'UTC');
1
SELECT age('day', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
1
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), materialize(toDate('2015-08-19', 'UTC')), 'UTC');
1
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
1
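The const/non-const matrix above exists because constant arguments take a separate, folded code path; materialize() forces a literal into an ordinary column so the vectorized path is exercised too. A quick way to see the distinction (assumed output: 1, then 0):

SELECT isConstant(toDate32('1927-01-01', 'UTC')), isConstant(materialize(toDate32('1927-01-01', 'UTC')));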

View File

@ -0,0 +1,101 @@
-- { echo }
-- Date32 vs Date32
SELECT age('second', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
SELECT age('minute', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
SELECT age('hour', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
SELECT age('day', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
SELECT age('week', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-08', 'UTC'), 'UTC');
SELECT age('month', toDate32('1927-01-01', 'UTC'), toDate32('1927-02-01', 'UTC'), 'UTC');
SELECT age('quarter', toDate32('1927-01-01', 'UTC'), toDate32('1927-04-01', 'UTC'), 'UTC');
SELECT age('year', toDate32('1927-01-01', 'UTC'), toDate32('1928-01-01', 'UTC'), 'UTC');
-- With DateTime64
-- Date32 vs DateTime64
SELECT age('second', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
SELECT age('minute', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
SELECT age('hour', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
SELECT age('day', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
SELECT age('week', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-08 00:00:00', 3, 'UTC'), 'UTC');
SELECT age('month', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-02-01 00:00:00', 3, 'UTC'), 'UTC');
SELECT age('quarter', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-04-01 00:00:00', 3, 'UTC'), 'UTC');
SELECT age('year', toDate32('1927-01-01', 'UTC'), toDateTime64('1928-01-01 00:00:00', 3, 'UTC'), 'UTC');
-- DateTime64 vs Date32
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC');
SELECT age('week', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-08', 'UTC'), 'UTC');
SELECT age('month', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-02-01', 'UTC'), 'UTC');
SELECT age('quarter', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-04-01', 'UTC'), 'UTC');
SELECT age('year', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1928-01-01', 'UTC'), 'UTC');
-- With DateTime
-- Date32 vs DateTime
SELECT age('second', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
SELECT age('minute', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
SELECT age('hour', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
SELECT age('day', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
SELECT age('week', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-25 00:00:00', 'UTC'), 'UTC');
SELECT age('month', toDate32('2015-08-18', 'UTC'), toDateTime('2015-09-18 00:00:00', 'UTC'), 'UTC');
SELECT age('quarter', toDate32('2015-08-18', 'UTC'), toDateTime('2015-11-18 00:00:00', 'UTC'), 'UTC');
SELECT age('year', toDate32('2015-08-18', 'UTC'), toDateTime('2016-08-18 00:00:00', 'UTC'), 'UTC');
-- DateTime vs Date32
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
SELECT age('minute', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
SELECT age('hour', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
SELECT age('day', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
SELECT age('week', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-25', 'UTC'), 'UTC');
SELECT age('month', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-09-18', 'UTC'), 'UTC');
SELECT age('quarter', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-11-18', 'UTC'), 'UTC');
SELECT age('year', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2016-08-18', 'UTC'), 'UTC');
-- With Date
-- Date32 vs Date
SELECT age('second', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
SELECT age('minute', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
SELECT age('hour', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
SELECT age('day', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC');
SELECT age('week', toDate32('2015-08-18', 'UTC'), toDate('2015-08-25', 'UTC'), 'UTC');
SELECT age('month', toDate32('2015-08-18', 'UTC'), toDate('2015-09-18', 'UTC'), 'UTC');
SELECT age('quarter', toDate32('2015-08-18', 'UTC'), toDate('2015-11-18', 'UTC'), 'UTC');
SELECT age('year', toDate32('2015-08-18', 'UTC'), toDate('2016-08-18', 'UTC'), 'UTC');
-- Date vs Date32
SELECT age('second', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
SELECT age('minute', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
SELECT age('hour', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
SELECT age('day', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC');
SELECT age('week', toDate('2015-08-18', 'UTC'), toDate32('2015-08-25', 'UTC'), 'UTC');
SELECT age('month', toDate('2015-08-18', 'UTC'), toDate32('2015-09-18', 'UTC'), 'UTC');
SELECT age('quarter', toDate('2015-08-18', 'UTC'), toDate32('2015-11-18', 'UTC'), 'UTC');
SELECT age('year', toDate('2015-08-18', 'UTC'), toDate32('2016-08-18', 'UTC'), 'UTC');
-- Const vs non-const columns
SELECT age('day', toDate32('1927-01-01', 'UTC'), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
SELECT age('day', toDate32('1927-01-01', 'UTC'), materialize(toDateTime64('1927-01-02 00:00:00', 3, 'UTC')), 'UTC');
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
SELECT age('day', toDate32('2015-08-18', 'UTC'), materialize(toDateTime('2015-08-19 00:00:00', 'UTC')), 'UTC');
SELECT age('day', toDateTime('2015-08-18 00:00:00', 'UTC'), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
SELECT age('day', toDate32('2015-08-18', 'UTC'), materialize(toDate('2015-08-19', 'UTC')), 'UTC');
SELECT age('day', toDate('2015-08-18', 'UTC'), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
-- Non-const vs const columns
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), toDate32('1927-01-02', 'UTC'), 'UTC');
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC');
SELECT age('day', materialize(toDateTime64('1927-01-01 00:00:00', 3, 'UTC')), toDate32('1927-01-02', 'UTC'), 'UTC');
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC');
SELECT age('day', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), toDate32('2015-08-19', 'UTC'), 'UTC');
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), toDate('2015-08-19', 'UTC'), 'UTC');
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), toDate32('2015-08-19', 'UTC'), 'UTC');
-- Non-const vs non-const columns
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), materialize(toDateTime64('1927-01-02 00:00:00', 3, 'UTC')), 'UTC');
SELECT age('day', materialize(toDateTime64('1927-01-01 00:00:00', 3, 'UTC')), materialize(toDate32('1927-01-02', 'UTC')), 'UTC');
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), materialize(toDateTime('2015-08-19 00:00:00', 'UTC')), 'UTC');
SELECT age('day', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');
SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), materialize(toDate('2015-08-19', 'UTC')), 'UTC');
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDate32('2015-08-19', 'UTC')), 'UTC');

View File

@ -0,0 +1,113 @@
-- { echo }
-- DateTime64 vs DateTime64 same scale
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:00:10', 0, 'UTC'));
10
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:10:00', 0, 'UTC'));
600
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 01:00:00', 0, 'UTC'));
3600
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 01:10:10', 0, 'UTC'));
4210
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:10:00', 0, 'UTC'));
10
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 10:00:00', 0, 'UTC'));
600
SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 10:00:00', 0, 'UTC'));
10
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-02 00:00:00', 0, 'UTC'));
1
SELECT age('month', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-02-01 00:00:00', 0, 'UTC'));
1
SELECT age('year', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1928-01-01 00:00:00', 0, 'UTC'));
1
-- DateTime64 vs DateTime64 different scale
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:00:10', 3, 'UTC'));
10
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:10:00', 3, 'UTC'));
600
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 01:00:00', 3, 'UTC'));
3600
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 01:10:10', 3, 'UTC'));
4210
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:10:00', 3, 'UTC'));
10
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 10:00:00', 3, 'UTC'));
600
SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 10:00:00', 3, 'UTC'));
10
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'));
1
SELECT age('month', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-02-01 00:00:00', 3, 'UTC'));
1
SELECT age('year', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1928-01-01 00:00:00', 3, 'UTC'));
1
-- With DateTime
-- DateTime64 vs DateTime
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:00:00', 'UTC'));
0
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:00:10', 'UTC'));
10
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:10:00', 'UTC'));
600
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 01:00:00', 'UTC'));
3600
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 01:10:10', 'UTC'));
4210
-- DateTime vs DateTime64
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:00:00', 3, 'UTC'));
0
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:00:10', 3, 'UTC'));
10
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:10:00', 3, 'UTC'));
600
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 01:00:00', 3, 'UTC'));
3600
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 01:10:10', 3, 'UTC'));
4210
-- With Date
-- DateTime64 vs Date
SELECT age('day', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDate('2015-08-19', 'UTC'));
1
-- Date vs DateTime64
SELECT age('day', toDate('2015-08-18', 'UTC'), toDateTime64('2015-08-19 00:00:00', 3, 'UTC'));
1
-- Same thing but const vs non-const columns
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), materialize(toDateTime64('1927-01-01 00:00:10', 0, 'UTC')));
10
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), materialize(toDateTime64('1927-01-01 00:00:10', 3, 'UTC')));
10
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), materialize(toDateTime('2015-08-18 00:00:10', 'UTC')));
10
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC')));
10
SELECT age('day', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), materialize(toDate('2015-08-19', 'UTC')));
1
SELECT age('day', toDate('2015-08-18', 'UTC'), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC')));
1
-- Same thing but non-const vs const columns
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 0, 'UTC')), toDateTime64('1927-01-01 00:00:10', 0, 'UTC'));
10
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 6, 'UTC')), toDateTime64('1927-01-01 00:00:10', 3, 'UTC'));
10
SELECT age('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), toDateTime('2015-08-18 00:00:10', 'UTC'));
10
SELECT age('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), toDateTime64('2015-08-18 00:00:10', 3, 'UTC'));
10
SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), toDate('2015-08-19', 'UTC'));
1
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), toDateTime64('2015-08-19 00:00:00', 3, 'UTC'));
1
-- Same thing but non-const vs non-const columns
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 0, 'UTC')), materialize(toDateTime64('1927-01-01 00:00:10', 0, 'UTC')));
10
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 6, 'UTC')), materialize(toDateTime64('1927-01-01 00:00:10', 3, 'UTC')));
10
SELECT age('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDateTime('2015-08-18 00:00:10', 'UTC')));
10
SELECT age('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC')));
10
SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDate('2015-08-19', 'UTC')));
1
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC')));
1

View File

@ -0,0 +1,77 @@
-- { echo }
-- DateTime64 vs DateTime64 same scale
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:00:10', 0, 'UTC'));
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:10:00', 0, 'UTC'));
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 01:00:00', 0, 'UTC'));
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 01:10:10', 0, 'UTC'));
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:10:00', 0, 'UTC'));
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 10:00:00', 0, 'UTC'));
SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 10:00:00', 0, 'UTC'));
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-02 00:00:00', 0, 'UTC'));
SELECT age('month', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-02-01 00:00:00', 0, 'UTC'));
SELECT age('year', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1928-01-01 00:00:00', 0, 'UTC'));
-- DateTime64 vs DateTime64 different scale
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:00:10', 3, 'UTC'));
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:10:00', 3, 'UTC'));
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 01:00:00', 3, 'UTC'));
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 01:10:10', 3, 'UTC'));
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:10:00', 3, 'UTC'));
SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 10:00:00', 3, 'UTC'));
SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 10:00:00', 3, 'UTC'));
SELECT age('day', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'));
SELECT age('month', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-02-01 00:00:00', 3, 'UTC'));
SELECT age('year', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1928-01-01 00:00:00', 3, 'UTC'));
-- With DateTime
-- DateTime64 vs DateTime
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:00:00', 'UTC'));
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:00:10', 'UTC'));
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:10:00', 'UTC'));
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 01:00:00', 'UTC'));
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 01:10:10', 'UTC'));
-- DateTime vs DateTime64
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:00:00', 3, 'UTC'));
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:00:10', 3, 'UTC'));
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:10:00', 3, 'UTC'));
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 01:00:00', 3, 'UTC'));
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 01:10:10', 3, 'UTC'));
-- With Date
-- DateTime64 vs Date
SELECT age('day', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDate('2015-08-19', 'UTC'));
-- Date vs DateTime64
SELECT age('day', toDate('2015-08-18', 'UTC'), toDateTime64('2015-08-19 00:00:00', 3, 'UTC'));
-- Same thing but const vs non-const columns
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), materialize(toDateTime64('1927-01-01 00:00:10', 0, 'UTC')));
SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), materialize(toDateTime64('1927-01-01 00:00:10', 3, 'UTC')));
SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), materialize(toDateTime('2015-08-18 00:00:10', 'UTC')));
SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC')));
SELECT age('day', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), materialize(toDate('2015-08-19', 'UTC')));
SELECT age('day', toDate('2015-08-18', 'UTC'), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC')));
-- Same thing but non-const vs const columns
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 0, 'UTC')), toDateTime64('1927-01-01 00:00:10', 0, 'UTC'));
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 6, 'UTC')), toDateTime64('1927-01-01 00:00:10', 3, 'UTC'));
SELECT age('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), toDateTime('2015-08-18 00:00:10', 'UTC'));
SELECT age('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), toDateTime64('2015-08-18 00:00:10', 3, 'UTC'));
SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), toDate('2015-08-19', 'UTC'));
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), toDateTime64('2015-08-19 00:00:00', 3, 'UTC'));
-- Same thing but non-const vs non-const columns
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 0, 'UTC')), materialize(toDateTime64('1927-01-01 00:00:10', 0, 'UTC')));
SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 6, 'UTC')), materialize(toDateTime64('1927-01-01 00:00:10', 3, 'UTC')));
SELECT age('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDateTime('2015-08-18 00:00:10', 'UTC')));
SELECT age('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC')));
SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDate('2015-08-19', 'UTC')));
SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC')));
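The mixed-scale cases above work because both operands are brought to a common scale before the difference is computed; the scale argument is simply the number of fractional-second digits kept. A short sketch (outputs in comments are assumed):

SELECT toDateTime64('2015-08-18 00:00:00.123456', 0, 'UTC'); -- 2015-08-18 00:00:00
SELECT toDateTime64('2015-08-18 00:00:00.123456', 3, 'UTC'); -- 2015-08-18 00:00:00.123
SELECT toDateTime64('2015-08-18 00:00:00.123456', 6, 'UTC'); -- 2015-08-18 00:00:00.123456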

View File

@ -0,0 +1,2 @@
test_02480_table MergeTree
test_02480_view View

View File

@ -0,0 +1,18 @@
#!/usr/bin/env bash
CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh
# Resolve the database assigned to this test run by the harness.
database=$($CLICKHOUSE_CLIENT -q 'SELECT currentDatabase()')

# -n: run multiple queries per invocation; -m: allow multiline queries.
$CLICKHOUSE_CLIENT -nm -q "
DROP TABLE IF EXISTS test_02480_table;
DROP VIEW IF EXISTS test_02480_view;
CREATE TABLE test_02480_table (id Int64) ENGINE=MergeTree ORDER BY id;
CREATE VIEW test_02480_view AS SELECT * FROM test_02480_table;
SHOW FULL TABLES FROM $database LIKE '%';
DROP TABLE IF EXISTS test_02480_table;
DROP VIEW IF EXISTS test_02480_view;
"

View File

@ -0,0 +1,8 @@
0 1 1 20
0 1 1 200
0 1 2 20
0 1 2 200
Aggregating
Order: a ASC, c ASC
ReadFromMergeTree (default.tab)
Sorting (Stream): a ASC, b ASC, c ASC

View File

@ -0,0 +1,7 @@
create table tab (a Int32, b Int32, c Int32, d Int32) engine = MergeTree order by (a, b, c);
insert into tab select 0, number % 3, 2 - intDiv(number, 3), (number % 3 + 1) * 10 from numbers(6);
insert into tab select 0, number % 3, 2 - intDiv(number, 3), (number % 3 + 1) * 100 from numbers(6);
select a, any(b), c, d from tab where b = 1 group by a, c, d order by c, d
    settings optimize_aggregation_in_order=1, query_plan_aggregation_in_order=1;
select * from
(
    explain actions = 1, sorting = 1
    select a, any(b), c, d from tab where b = 1 group by a, c, d
    settings optimize_aggregation_in_order=1, query_plan_aggregation_in_order=1
)
where explain like '%Sorting (Stream)%'
   or explain like '%ReadFromMergeTree%'
   or explain like '%Aggregating%'
   or explain like '%Order:%';
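One non-obvious point in the expected plan: the table is sorted by (a, b, c), yet the Aggregating step reports Order: a ASC, c ASC. Since the WHERE clause pins b to a single value, a stream sorted by (a, b, c) is also sorted by (a, c), so in-order aggregation can still ride the primary-key prefix. A stripped-down check (plan shape assumed):

-- With b fixed by the filter, no re-sort is needed before aggregation.
EXPLAIN sorting = 1
SELECT a, any(b), c, d FROM tab WHERE b = 1 GROUP BY a, c, d
SETTINGS optimize_aggregation_in_order = 1, query_plan_aggregation_in_order = 1;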

View File

@ -0,0 +1,117 @@
-- { echoOn }
SELECT
cutURLParameter('http://bigmir.net/?a=b&c=d', []),
cutURLParameter('http://bigmir.net/?a=b&c=d', ['a']),
cutURLParameter('http://bigmir.net/?a=b&c=d', ['a', 'c']),
cutURLParameter('http://bigmir.net/?a=b&c=d', ['c']),
cutURLParameter('http://bigmir.net/?a=b&c=d#e=f', ['a', 'e']),
cutURLParameter('http://bigmir.net/?a&c=d#e=f', ['c', 'e']),
cutURLParameter('http://bigmir.net/?a&c=d#e=f', ['e']),
cutURLParameter('http://bigmir.net/?a=b&c=d#e=f&g=h', ['b', 'g']),
cutURLParameter('http://bigmir.net/?a=b&c=d#e', ['a', 'e']),
cutURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', ['c', 'g']),
cutURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', ['e', 'g']),
cutURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', ['test', 'e']),
cutURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', ['test', 'g']),
cutURLParameter('//bigmir.net/?a=b&c=d', []),
cutURLParameter('//bigmir.net/?a=b&c=d', ['a']),
cutURLParameter('//bigmir.net/?a=b&c=d', ['a', 'c']),
cutURLParameter('//bigmir.net/?a=b&c=d#e=f', ['a', 'e']),
cutURLParameter('//bigmir.net/?a&c=d#e=f', ['a']),
cutURLParameter('//bigmir.net/?a&c=d#e=f', ['a', 'c']),
cutURLParameter('//bigmir.net/?a&c=d#e=f', ['a', 'e']),
cutURLParameter('//bigmir.net/?a=b&c=d#e=f&g=h', ['c', 'g']),
cutURLParameter('//bigmir.net/?a=b&c=d#e', ['a', 'c']),
cutURLParameter('//bigmir.net/?a=b&c=d#e', ['a', 'e']),
cutURLParameter('//bigmir.net/?a=b&c=d#e&g=h', ['c', 'e']),
cutURLParameter('//bigmir.net/?a=b&c=d#e&g=h', ['e', 'g']),
cutURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', ['test', 'e']),
cutURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', ['test', 'g'])
FORMAT Vertical;
Row 1:
──────
cutURLParameter('http://bigmir.net/?a=b&c=d', []): http://bigmir.net/?a=b&c=d
cutURLParameter('http://bigmir.net/?a=b&c=d', ['a']): http://bigmir.net/?c=d
cutURLParameter('http://bigmir.net/?a=b&c=d', ['a', 'c']): http://bigmir.net/?
cutURLParameter('http://bigmir.net/?a=b&c=d', ['c']): http://bigmir.net/?a=b
cutURLParameter('http://bigmir.net/?a=b&c=d#e=f', ['a', 'e']): http://bigmir.net/?c=d#
cutURLParameter('http://bigmir.net/?a&c=d#e=f', ['c', 'e']): http://bigmir.net/?a#
cutURLParameter('http://bigmir.net/?a&c=d#e=f', ['e']): http://bigmir.net/?a&c=d#
cutURLParameter('http://bigmir.net/?a=b&c=d#e=f&g=h', ['b', 'g']): http://bigmir.net/?a=b&c=d#e=f
cutURLParameter('http://bigmir.net/?a=b&c=d#e', ['a', 'e']): http://bigmir.net/?c=d#e
cutURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', ['c', 'g']): http://bigmir.net/?a=b#e
cutURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', ['e', 'g']): http://bigmir.net/?a=b&c=d#e
cutURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', ['test', 'e']): http://bigmir.net/?a=b&c=d#test?g=h
cutURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', ['test', 'g']): http://bigmir.net/?a=b&c=d#test?e=f
cutURLParameter('//bigmir.net/?a=b&c=d', []): //bigmir.net/?a=b&c=d
cutURLParameter('//bigmir.net/?a=b&c=d', ['a']): //bigmir.net/?c=d
cutURLParameter('//bigmir.net/?a=b&c=d', ['a', 'c']): //bigmir.net/?
cutURLParameter('//bigmir.net/?a=b&c=d#e=f', ['a', 'e']): //bigmir.net/?c=d#
cutURLParameter('//bigmir.net/?a&c=d#e=f', ['a']): //bigmir.net/?a&c=d#e=f
cutURLParameter('//bigmir.net/?a&c=d#e=f', ['a', 'c']): //bigmir.net/?a#e=f
cutURLParameter('//bigmir.net/?a&c=d#e=f', ['a', 'e']): //bigmir.net/?a&c=d#
cutURLParameter('//bigmir.net/?a=b&c=d#e=f&g=h', ['c', 'g']): //bigmir.net/?a=b#e=f
cutURLParameter('//bigmir.net/?a=b&c=d#e', ['a', 'c']): //bigmir.net/?#e
cutURLParameter('//bigmir.net/?a=b&c=d#e', ['a', 'e']): //bigmir.net/?c=d#e
cutURLParameter('//bigmir.net/?a=b&c=d#e&g=h', ['c', 'e']): //bigmir.net/?a=b#e&g=h
cutURLParameter('//bigmir.net/?a=b&c=d#e&g=h', ['e', 'g']): //bigmir.net/?a=b&c=d#e
cutURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', ['test', 'e']): //bigmir.net/?a=b&c=d#test?g=h
cutURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', ['test', 'g']): //bigmir.net/?a=b&c=d#test?e=f
SELECT
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), []),
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), ['a']),
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), ['a', 'c']),
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), ['c']),
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f'), ['a', 'e']),
cutURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), ['c', 'e']),
cutURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), ['e']),
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f&g=h'), ['b', 'g']),
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), ['a', 'e']),
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), ['c', 'g']),
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), ['e', 'g']),
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), ['test', 'e']),
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), ['test', 'g']),
cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), []),
cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), ['a']),
cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), ['a', 'c']),
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f'), ['a', 'e']),
cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), ['a']),
cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), ['a', 'c']),
cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), ['a', 'e']),
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f&g=h'), ['c', 'g']),
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), ['a', 'c']),
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), ['a', 'e']),
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), ['c', 'e']),
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), ['e', 'g']),
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), ['test', 'e']),
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), ['test', 'g'])
FORMAT Vertical;
Row 1:
──────
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), []): http://bigmir.net/?a=b&c=d
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), ['a']): http://bigmir.net/?c=d
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), ['a', 'c']): http://bigmir.net/?
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), ['c']): http://bigmir.net/?a=b
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f'), ['a', 'e']): http://bigmir.net/?c=d#
cutURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), ['c', 'e']): http://bigmir.net/?a#
cutURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), ['e']): http://bigmir.net/?a&c=d#
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f&g=h'), ['b', 'g']): http://bigmir.net/?a=b&c=d#e=f
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), ['a', 'e']): http://bigmir.net/?c=d#e
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), ['c', 'g']): http://bigmir.net/?a=b#e
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), ['e', 'g']): http://bigmir.net/?a=b&c=d#e
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), ['test', 'e']): http://bigmir.net/?a=b&c=d#test?g=h
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), ['test', 'g']): http://bigmir.net/?a=b&c=d#test?e=f
cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), []): //bigmir.net/?a=b&c=d
cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), ['a']): //bigmir.net/?c=d
cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), ['a', 'c']): //bigmir.net/?
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f'), ['a', 'e']): //bigmir.net/?c=d#
cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), ['a']): //bigmir.net/?a&c=d#e=f
cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), ['a', 'c']): //bigmir.net/?a#e=f
cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), ['a', 'e']): //bigmir.net/?a&c=d#
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f&g=h'), ['c', 'g']): //bigmir.net/?a=b#e=f
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), ['a', 'c']): //bigmir.net/?#e
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), ['a', 'e']): //bigmir.net/?c=d#e
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), ['c', 'e']): //bigmir.net/?a=b#e&g=h
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), ['e', 'g']): //bigmir.net/?a=b&c=d#e
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), ['test', 'e']): //bigmir.net/?a=b&c=d#test?g=h
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), ['test', 'g']): //bigmir.net/?a=b&c=d#test?e=f
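The Array form exercised above removes every listed name=value pair in a single call, whether it sits in the query string or in the fragment; bare parameters without '=' are left alone, as the '?a' cases show. A compact sketch (output in the comment is assumed, following the cases above):

SELECT cutURLParameter('http://example.com/?a=b&c=d&e=f', ['a', 'e']); -- http://example.com/?c=d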

Some files were not shown because too many files have changed in this diff.