diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 74a6f95dbb3..e045170561d 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -6,6 +6,7 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
-->
### Changelog category (leave one):
- New Feature
+- Experimental Feature
- Improvement
- Performance Improvement
- Backward Incompatible Change
@@ -49,7 +50,6 @@ At a minimum, the following information should be added (but add more as needed)
- [ ] Allow: Integration Tests
- [ ] Allow: Performance tests
- [ ] Allow: All Builds
-- [ ] Allow: All NOT Required Checks
- [ ] Allow: batch 1, 2 for multi-batch jobs
- [ ] Allow: batch 3, 4, 5, 6 for multi-batch jobs
---
@@ -60,6 +60,7 @@ At a minimum, the following information should be added (but add more as needed)
- [ ] Exclude: All with aarch64, release, debug
---
- [ ] Do not test
+- [ ] Woolen Wolfdog
- [ ] Upload binaries for special builds
- [ ] Disable merge-commit
- [ ] Disable CI cache
diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml
index e1980ec9ef2..48e8fbbba05 100644
--- a/.github/workflows/backport_branches.yml
+++ b/.github/workflows/backport_branches.yml
@@ -159,33 +159,18 @@ jobs:
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
- BuilderReport:
+ Builds_Report:
# run report check for failed builds to indicate the CI error
- if: ${{ !cancelled() }}
- needs:
- - RunConfig
- - BuilderDebAarch64
- - BuilderDebAsan
- - BuilderDebDebug
- - BuilderDebRelease
- - BuilderDebTsan
- uses: ./.github/workflows/reusable_test.yml
- with:
- test_name: ClickHouse build check
- runner_type: style-checker-aarch64
- data: ${{ needs.RunConfig.outputs.data }}
- BuilderSpecialReport:
- # run report check for failed builds to indicate the CI error
- if: ${{ !cancelled() }}
- needs:
- - RunConfig
- - BuilderBinDarwin
- - BuilderBinDarwinAarch64
- uses: ./.github/workflows/reusable_test.yml
- with:
- test_name: ClickHouse special build check
- runner_type: style-checker-aarch64
- data: ${{ needs.RunConfig.outputs.data }}
+ if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
+ needs: [RunConfig, BuilderDebAarch64, BuilderDebAsan, BuilderDebDebug, BuilderDebRelease, BuilderDebTsan, BuilderBinDarwin, BuilderBinDarwinAarch64]
+ runs-on: [self-hosted, style-checker-aarch64]
+ steps:
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ - name: Builds report
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 ./build_report_check.py --reports package_release package_aarch64 package_asan package_tsan package_debug binary_darwin binary_darwin_aarch64
############################################################################################
#################################### INSTALL PACKAGES ######################################
############################################################################################
@@ -256,8 +241,7 @@ jobs:
FinishCheck:
if: ${{ !failure() && !cancelled() }}
needs:
- - BuilderReport
- - BuilderSpecialReport
+ - Builds_Report
- FunctionalStatelessTestAsan
- FunctionalStatefulTestDebug
- StressTestTsan
@@ -273,5 +257,8 @@ jobs:
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
+ # update mergeable check
+ python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+ # update overall ci report
python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
python3 merge_pr.py
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index 88bc50a729d..2a7e6f737ab 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -104,10 +104,9 @@ jobs:
with:
stage: Tests_2
data: ${{ needs.RunConfig.outputs.data }}
- # stage for jobs that do not prohibit merge
Tests_3:
# Test_3 should not wait for Test_1/Test_2 and should not be blocked by them on master branch since all jobs need to run there.
- needs: [RunConfig, Builds_1, Builds_2]
+ needs: [RunConfig, Builds_1]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }}
uses: ./.github/workflows/reusable_test_stage.yml
with:
@@ -118,11 +117,11 @@ jobs:
# Reports should run even if Builds_1/2 fail - run them separately, not in Tests_1/2/3
Builds_Report:
# run report check for failed builds to indicate the CI error
- if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse build check') }}
+ if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
needs: [RunConfig, Builds_1, Builds_2]
uses: ./.github/workflows/reusable_test.yml
with:
- test_name: ClickHouse build check
+ test_name: Builds
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
diff --git a/.github/workflows/merge_queue.yml b/.github/workflows/merge_queue.yml
index cfa01b0e8f3..01685ee1f5a 100644
--- a/.github/workflows/merge_queue.yml
+++ b/.github/workflows/merge_queue.yml
@@ -96,20 +96,15 @@ jobs:
stage: Tests_1
data: ${{ needs.RunConfig.outputs.data }}
- ################################# Stage Final #################################
- #
- FinishCheck:
- if: ${{ !cancelled() }}
+ CheckReadyForMerge:
+ if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
+ # Test_2 or Test_3 must not have jobs required for the Mergeable check
needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Tests_1]
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
uses: ClickHouse/checkout@v1
- - name: Check sync status
+ - name: Check and set merge status
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
- python3 sync_pr.py --status
- - name: Finish label
- run: |
- cd "$GITHUB_WORKSPACE/tests/ci"
- python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+ python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index 70b71da8fa5..4764e6d3c1a 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -126,16 +126,16 @@ jobs:
with:
stage: Builds_2
data: ${{ needs.RunConfig.outputs.data }}
+ # stage for running non-required checks without being blocked by required checks (Test_1) if the corresponding setting is selected
Tests_2:
- needs: [RunConfig, Builds_2]
+ needs: [RunConfig, Builds_1]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
uses: ./.github/workflows/reusable_test_stage.yml
with:
stage: Tests_2
data: ${{ needs.RunConfig.outputs.data }}
- # stage for jobs that do not prohibit merge
Tests_3:
- needs: [RunConfig, Builds_1, Tests_1, Builds_2, Tests_2]
+ needs: [RunConfig, Builds_1, Tests_1]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_3') }}
uses: ./.github/workflows/reusable_test_stage.yml
with:
@@ -146,17 +146,18 @@ jobs:
# Reports should run even if Builds_1/2 fail - run them separately (not in Tests_1/2/3)
Builds_Report:
# run report check for failed builds to indicate the CI error
- if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse build check') }}
+ if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
needs: [RunConfig, StyleCheck, Builds_1, Builds_2]
uses: ./.github/workflows/reusable_test.yml
with:
- test_name: ClickHouse build check
+ test_name: Builds
runner_type: style-checker-aarch64
data: ${{ needs.RunConfig.outputs.data }}
CheckReadyForMerge:
if: ${{ !cancelled() && needs.StyleCheck.result == 'success' }}
- needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1, Tests_2]
+ # Test_2 or Test_3 must not have jobs required for the Mergeable check
+ needs: [RunConfig, BuildDockers, StyleCheck, FastTest, Builds_1, Builds_2, Builds_Report, Tests_1]
runs-on: [self-hosted, style-checker-aarch64]
steps:
- name: Check out repository code
@@ -195,8 +196,7 @@ jobs:
concurrency:
group: jepsen
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'ClickHouse Keeper Jepsen') }}
- # jepsen needs binary_release build which is in Builds_2
- needs: [RunConfig, Builds_2]
+ needs: [RunConfig, Builds_1]
uses: ./.github/workflows/reusable_test.yml
with:
test_name: ClickHouse Keeper Jepsen
diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml
index 45eb7431bb4..3e898c69ade 100644
--- a/.github/workflows/release_branches.yml
+++ b/.github/workflows/release_branches.yml
@@ -176,35 +176,18 @@ jobs:
############################################################################################
##################################### BUILD REPORTER #######################################
############################################################################################
- BuilderReport:
+ Builds_Report:
# run report check for failed builds to indicate the CI error
- if: ${{ !cancelled() }}
- needs:
- - RunConfig
- - BuilderDebRelease
- - BuilderDebAarch64
- - BuilderDebAsan
- - BuilderDebTsan
- - BuilderDebUBsan
- - BuilderDebMsan
- - BuilderDebDebug
- uses: ./.github/workflows/reusable_test.yml
- with:
- test_name: ClickHouse build check
- runner_type: style-checker-aarch64
- data: ${{ needs.RunConfig.outputs.data }}
- BuilderSpecialReport:
- # run report check for failed builds to indicate the CI error
- if: ${{ !cancelled() }}
- needs:
- - RunConfig
- - BuilderBinDarwin
- - BuilderBinDarwinAarch64
- uses: ./.github/workflows/reusable_test.yml
- with:
- test_name: ClickHouse special build check
- runner_type: style-checker-aarch64
- data: ${{ needs.RunConfig.outputs.data }}
+ if: ${{ !cancelled() && needs.RunConfig.result == 'success' && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Builds') }}
+ needs: [RunConfig, BuilderDebRelease, BuilderDebAarch64, BuilderDebAsan, BuilderDebUBsan, BuilderDebMsan, BuilderDebTsan, BuilderDebDebug, BuilderBinDarwin, BuilderBinDarwinAarch64]
+ runs-on: [self-hosted, style-checker-aarch64]
+ steps:
+ - name: Check out repository code
+ uses: ClickHouse/checkout@v1
+ - name: Builds report
+ run: |
+ cd "$GITHUB_WORKSPACE/tests/ci"
+ python3 ./build_report_check.py --reports package_release package_aarch64 package_asan package_msan package_ubsan package_tsan package_debug binary_darwin binary_darwin_aarch64
MarkReleaseReady:
if: ${{ !failure() && !cancelled() }}
needs:
@@ -460,8 +443,7 @@ jobs:
needs:
- DockerServerImage
- DockerKeeperImage
- - BuilderReport
- - BuilderSpecialReport
+ - Builds_Report
- MarkReleaseReady
- FunctionalStatelessTestDebug
- FunctionalStatelessTestRelease
@@ -496,4 +478,7 @@ jobs:
- name: Finish label
run: |
cd "$GITHUB_WORKSPACE/tests/ci"
+ # update mergeable check
+ python3 merge_pr.py --set-ci-status --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
+ # update overall ci report
python3 finish_check.py --wf-status ${{ contains(needs.*.result, 'failure') && 'failure' || 'success' }}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4891b79e4c7..c4935f88245 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,5 @@
### Table of Contents
+**[ClickHouse release v24.6, 2024-06-27](#246)**
**[ClickHouse release v24.5, 2024-05-30](#245)**
**[ClickHouse release v24.4, 2024-04-30](#244)**
**[ClickHouse release v24.3 LTS, 2024-03-26](#243)**
@@ -8,6 +9,179 @@
# 2024 Changelog
+### ClickHouse release 24.6, 2024-06-27
+
+#### Backward Incompatible Change
+* Enable asynchronous load of databases and tables by default. See the `async_load_databases` in config.xml. While this change is fully compatible, it can introduce a difference in behavior. When `async_load_databases` is false, as in the previous versions, the server will not accept connections until all tables are loaded. When `async_load_databases` is true, as in the new version, the server can accept connections before all the tables are loaded. If a query is made to a table that is not yet loaded, it will wait for the table's loading, which can take considerable time. It can change the behavior of the server if it is part of a large distributed system under a load balancer. In the first case, the load balancer can get a connection refusal and quickly failover to another server. In the second case, the load balancer can connect to a server that is still loading the tables, and the query will have a higher latency. Moreover, if many queries accumulate in the waiting state, it can lead to a "thundering herd" problem when they start processing simultaneously. This can make a difference only for highly loaded distributed backends. You can set the value of `async_load_databases` to false to avoid this problem. [#57695](https://github.com/ClickHouse/ClickHouse/pull/57695) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Some invalid queries will fail earlier during parsing. Note: disabled the support for inline KQL expressions (the experimental Kusto language) when they are put into a `kql` table function without a string literal, e.g. `kql(garbage | trash)` instead of `kql('garbage | trash')` or `kql($$garbage | trash$$)`. This feature was introduced unintentionally and should not exist. [#61500](https://github.com/ClickHouse/ClickHouse/pull/61500) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
+* Rework parallel processing in `Ordered` mode of storage `S3Queue`. This PR is backward incompatible for Ordered mode if you used settings `s3queue_processing_threads_num` or `s3queue_total_shards_num`. Setting `s3queue_total_shards_num` is deleted, previously it was allowed to use only under `s3queue_allow_experimental_sharded_mode`, which is now deprecated. A new setting is added - `s3queue_buckets`. [#64349](https://github.com/ClickHouse/ClickHouse/pull/64349) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* New functions `snowflakeIDToDateTime`, `snowflakeIDToDateTime64`, `dateTimeToSnowflakeID`, and `dateTime64ToSnowflakeID` were added. Unlike the existing functions `snowflakeToDateTime`, `snowflakeToDateTime64`, `dateTimeToSnowflake`, and `dateTime64ToSnowflake`, the new functions are compatible with function `generateSnowflakeID`, i.e. they accept the snowflake IDs generated by `generateSnowflakeID` and produce snowflake IDs of the same type as `generateSnowflakeID` (i.e. `UInt64`). Furthermore, the new functions default to the UNIX epoch (aka. 1970-01-01), just like `generateSnowflakeID`. If necessary, a different epoch, e.g. Twitter's/X's epoch 2010-11-04 aka. 1288834974657 msec since UNIX epoch, can be passed. The old conversion functions are deprecated and will be removed after a transition period: to use them regardless, enable setting `allow_deprecated_snowflake_conversion_functions`. [#64948](https://github.com/ClickHouse/ClickHouse/pull/64948) ([Robert Schulze](https://github.com/rschu1ze)).
+
+#### New Feature
+* Introduce statistics of type "number of distinct values". [#59357](https://github.com/ClickHouse/ClickHouse/pull/59357) ([Han Fei](https://github.com/hanfei1991)).
+* Add Hilbert Curve encode and decode functions. [#60156](https://github.com/ClickHouse/ClickHouse/pull/60156) ([Artem Mustafin](https://github.com/Artemmm91)).
+* Added support for reading LINESTRING geometry in WKT format using function `readWKTLineString`. [#62519](https://github.com/ClickHouse/ClickHouse/pull/62519) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Allow to attach parts from a different disk. [#63087](https://github.com/ClickHouse/ClickHouse/pull/63087) ([Unalian](https://github.com/Unalian)).
+* Allow proxy to be bypassed for hosts specified in `no_proxy` env variable and ClickHouse proxy configuration. [#63314](https://github.com/ClickHouse/ClickHouse/pull/63314) ([Arthur Passos](https://github.com/arthurpassos)).
+* Added a new table function `loop` to support returning query results in an infinite loop. [#63452](https://github.com/ClickHouse/ClickHouse/pull/63452) ([Sariel](https://github.com/sarielwxm)).
+* Added new SQL functions `generateSnowflakeID` for generating Twitter-style Snowflake IDs. [#63577](https://github.com/ClickHouse/ClickHouse/pull/63577) ([Danila Puzov](https://github.com/kazalika)).
+* Add the ability to reshuffle rows during insert to optimize for size without violating the order set by `PRIMARY KEY`. It's controlled by the setting `optimize_row_order` (off by default). [#63578](https://github.com/ClickHouse/ClickHouse/pull/63578) ([Igor Markelov](https://github.com/ElderlyPassionFruit)).
+* Added `merge_workload` and `mutation_workload` settings to regulate how resources are utilized and shared between merges, mutations and other workloads. [#64061](https://github.com/ClickHouse/ClickHouse/pull/64061) ([Sergei Trifonov](https://github.com/serxa)).
+* Add support for comparing IPv4 and IPv6 types using the `=` operator. [#64292](https://github.com/ClickHouse/ClickHouse/pull/64292) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)).
+* Allow to store named collections in zookeeper. [#64574](https://github.com/ClickHouse/ClickHouse/pull/64574) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Support decimal arguments in binary math functions (pow, atan2, max2, min2, hypot). [#64582](https://github.com/ClickHouse/ClickHouse/pull/64582) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
+* Add support for index analysis over `hilbertEncode`. [#64662](https://github.com/ClickHouse/ClickHouse/pull/64662) ([Artem Mustafin](https://github.com/Artemmm91)).
+* Added SQL functions `parseReadableSize` (along with `OrNull` and `OrZero` variants). [#64742](https://github.com/ClickHouse/ClickHouse/pull/64742) ([Francisco J. Jurado Moreno](https://github.com/Beetelbrox)).
+* Add server settings `max_table_num_to_throw` and `max_database_num_to_throw` to limit the number of databases or tables on `CREATE` queries. [#64781](https://github.com/ClickHouse/ClickHouse/pull/64781) ([Xu Jia](https://github.com/XuJia0210)).
+* Add _time virtual column to file alike storages (s3/file/hdfs/url/azureBlobStorage). [#64947](https://github.com/ClickHouse/ClickHouse/pull/64947) ([Ilya Golshtein](https://github.com/ilejn)).
+* Introduced new functions `base64URLEncode`, `base64URLDecode` and `tryBase64URLDecode`. [#64991](https://github.com/ClickHouse/ClickHouse/pull/64991) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
+* Add new function `editDistanceUTF8`, which calculates the [edit distance](https://en.wikipedia.org/wiki/Edit_distance) between two UTF8 strings. [#65269](https://github.com/ClickHouse/ClickHouse/pull/65269) ([LiuNeng](https://github.com/liuneng1994)).
+
+#### Performance Improvement
+* Add a native parquet reader, which can read parquet binary to ClickHouse Columns directly. It's controlled by the setting `input_format_parquet_use_native_reader` (disabled by default). [#60361](https://github.com/ClickHouse/ClickHouse/pull/60361) ([ZhiHong Zhang](https://github.com/copperybean)).
+* Reduce the number of virtual function calls in ColumnNullable::size. [#60556](https://github.com/ClickHouse/ClickHouse/pull/60556) ([HappenLee](https://github.com/HappenLee)).
+* Speedup `splitByRegexp` when the regular expression argument is a single character. [#62696](https://github.com/ClickHouse/ClickHouse/pull/62696) ([Robert Schulze](https://github.com/rschu1ze)).
+* Speed up FixedHashTable by keeping track of the min and max keys used. This allows to reduce the number of cells that need to be verified. [#62746](https://github.com/ClickHouse/ClickHouse/pull/62746) ([Jiebin Sun](https://github.com/jiebinn)).
+* Optimize the resolution of in(LowCardinality, ConstantSet). [#64060](https://github.com/ClickHouse/ClickHouse/pull/64060) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
+* Use a thread pool to initialize and destroy hash tables inside `ConcurrentHashJoin`. [#64241](https://github.com/ClickHouse/ClickHouse/pull/64241) ([Nikita Taranov](https://github.com/nickitat)).
+* Optimized vertical merges in tables with sparse columns. [#64311](https://github.com/ClickHouse/ClickHouse/pull/64311) ([Anton Popov](https://github.com/CurtizJ)).
+* Enabled prefetches of data from remote filesystem during vertical merges. It improves latency of vertical merges in tables with data stored on remote filesystem. [#64314](https://github.com/ClickHouse/ClickHouse/pull/64314) ([Anton Popov](https://github.com/CurtizJ)).
+* Reduce redundant calls to `isDefault()` of `ColumnSparse::filter` to improve performance. [#64426](https://github.com/ClickHouse/ClickHouse/pull/64426) ([Jiebin Sun](https://github.com/jiebinn)).
+* Speedup `find_super_nodes` and `find_big_family` keeper-client commands by making multiple asynchronous getChildren requests. [#64628](https://github.com/ClickHouse/ClickHouse/pull/64628) ([Alexander Gololobov](https://github.com/davenger)).
+* Improve function least/greatest for nullable numeric type arguments. [#64668](https://github.com/ClickHouse/ClickHouse/pull/64668) ([KevinyhZou](https://github.com/KevinyhZou)).
+* Allow merging two consequent `FilterSteps` of a query plan. This improves filter-push-down optimization if the filter condition can be pushed down from the parent step. [#64760](https://github.com/ClickHouse/ClickHouse/pull/64760) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Remove bad optimization in vertical final implementation and re-enable vertical final algorithm by default. [#64783](https://github.com/ClickHouse/ClickHouse/pull/64783) ([Duc Canh Le](https://github.com/canhld94)).
+* Remove ALIAS nodes from the filter expression. This slightly improves performance for queries with `PREWHERE` (with the new analyzer). [#64793](https://github.com/ClickHouse/ClickHouse/pull/64793) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix performance regression in cross join introduced in [#60459](https://github.com/ClickHouse/ClickHouse/issues/60459) (24.5). [#65243](https://github.com/ClickHouse/ClickHouse/pull/65243) ([Nikita Taranov](https://github.com/nickitat)).
+
+#### Improvement
+* Support empty tuples. [#55061](https://github.com/ClickHouse/ClickHouse/pull/55061) ([Amos Bird](https://github.com/amosbird)).
+* Hot reload storage policy for distributed tables when adding a new disk. [#58285](https://github.com/ClickHouse/ClickHouse/pull/58285) ([Duc Canh Le](https://github.com/canhld94)).
+* Avoid possible deadlock during MergeTree index analysis when scheduling threads in a saturated service. [#59427](https://github.com/ClickHouse/ClickHouse/pull/59427) ([Sean Haynes](https://github.com/seandhaynes)).
+* Support partial trivial count optimization when the query filter is able to select exact ranges from merge tree tables. [#60463](https://github.com/ClickHouse/ClickHouse/pull/60463) ([Amos Bird](https://github.com/amosbird)).
+* Reduce max memory usage of multithreaded `INSERT`s by collecting chunks of multiple threads in a single transform. [#61047](https://github.com/ClickHouse/ClickHouse/pull/61047) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
+* Reduce the memory usage when using Azure object storage by using fixed memory allocation, avoiding the allocation of an extra buffer. [#63160](https://github.com/ClickHouse/ClickHouse/pull/63160) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
+* Several minor corner case fixes to proxy support & tunneling. [#63427](https://github.com/ClickHouse/ClickHouse/pull/63427) ([Arthur Passos](https://github.com/arthurpassos)).
+* Add `http_response_headers` setting to support custom response headers in custom HTTP handlers. [#63562](https://github.com/ClickHouse/ClickHouse/pull/63562) ([Grigorii](https://github.com/GSokol)).
+* Improve io_uring resubmit visibility. Rename profile event `IOUringSQEsResubmits` -> `IOUringSQEsResubmitsAsync` and add a new one `IOUringSQEsResubmitsSync`. [#63699](https://github.com/ClickHouse/ClickHouse/pull/63699) ([Tomer Shafir](https://github.com/tomershafir)).
+* Introduce assertions to verify all functions are called with columns of the right size. [#63723](https://github.com/ClickHouse/ClickHouse/pull/63723) ([Raúl Marín](https://github.com/Algunenano)).
+* `SHOW CREATE TABLE` executed on top of system tables will now show the super handy comment unique for each table which will explain why this table is needed. [#63788](https://github.com/ClickHouse/ClickHouse/pull/63788) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Added setting `metadata_storage_type` to keep free space on metadata storage disk. [#64128](https://github.com/ClickHouse/ClickHouse/pull/64128) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+* Add metrics to track the number of directories created and removed by the plain_rewritable metadata storage, and the number of entries in the local-to-remote in-memory map. [#64175](https://github.com/ClickHouse/ClickHouse/pull/64175) ([Julia Kartseva](https://github.com/jkartseva)).
+* The query cache now considers identical queries with different settings as different. This increases robustness in cases where different settings (e.g. `limit` or `additional_table_filters`) would affect the query result. [#64205](https://github.com/ClickHouse/ClickHouse/pull/64205) ([Robert Schulze](https://github.com/rschu1ze)).
+* Better exception message when deleting a table with a projection, so users can understand the error and the steps that should be taken. [#64212](https://github.com/ClickHouse/ClickHouse/pull/64212) ([jsc0218](https://github.com/jsc0218)).
+* Support the non-standard error code `QpsLimitExceeded` in object storage as a retryable error. [#64225](https://github.com/ClickHouse/ClickHouse/pull/64225) ([Sema Checherinda](https://github.com/CheSema)).
+* Forbid converting a MergeTree table to replicated if the zookeeper path for this table already exists. [#64244](https://github.com/ClickHouse/ClickHouse/pull/64244) ([Kirill](https://github.com/kirillgarbar)).
+* If "replica group" is configured for a `Replicated` database, automatically create a cluster that includes replicas from all groups. [#64312](https://github.com/ClickHouse/ClickHouse/pull/64312) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Added settings to disable materialization of skip indexes and statistics on inserts (`materialize_skip_indexes_on_insert` and `materialize_statistics_on_insert`). [#64391](https://github.com/ClickHouse/ClickHouse/pull/64391) ([Anton Popov](https://github.com/CurtizJ)).
+* Use the allocated memory size to calculate the row group size and reduce the peak memory of the parquet writer in single-threaded mode. [#64424](https://github.com/ClickHouse/ClickHouse/pull/64424) ([LiuNeng](https://github.com/liuneng1994)).
+* Added new configuration input_format_parquet_prefer_block_bytes to control the average output block bytes, and modified the default value of input_format_parquet_max_block_size to 65409. [#64427](https://github.com/ClickHouse/ClickHouse/pull/64427) ([LiuNeng](https://github.com/liuneng1994)).
+* Always start Keeper with sufficient amount of threads in global thread pool. [#64444](https://github.com/ClickHouse/ClickHouse/pull/64444) ([Duc Canh Le](https://github.com/canhld94)).
+* Settings from user config doesn't affect merges and mutations for MergeTree on top of object storage. [#64456](https://github.com/ClickHouse/ClickHouse/pull/64456) ([alesapin](https://github.com/alesapin)).
+* Setting `replace_long_file_name_to_hash` is enabled by default for `MergeTree` tables. [#64457](https://github.com/ClickHouse/ClickHouse/pull/64457) ([Anton Popov](https://github.com/CurtizJ)).
+* Improve the iterator of sparse column to reduce call of size(). [#64497](https://github.com/ClickHouse/ClickHouse/pull/64497) ([Jiebin Sun](https://github.com/jiebinn)).
+* Update condition to use copy for azure blob storage. [#64518](https://github.com/ClickHouse/ClickHouse/pull/64518) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
+* Support the non-standard error code `TotalQpsLimitExceeded` in object storage as a retryable error. [#64520](https://github.com/ClickHouse/ClickHouse/pull/64520) ([Sema Checherinda](https://github.com/CheSema)).
+* Optimized memory usage of vertical merges for tables with high number of skip indexes. [#64580](https://github.com/ClickHouse/ClickHouse/pull/64580) ([Anton Popov](https://github.com/CurtizJ)).
+* Introduced two additional columns in the `system.query_log`: `used_privileges` and `missing_privileges`. `used_privileges` is populated with the privileges that were checked during query execution, and `missing_privileges` contains required privileges that are missing. [#64597](https://github.com/ClickHouse/ClickHouse/pull/64597) ([Alexey Katsman](https://github.com/alexkats)).
+* Add settings `parallel_replicas_custom_key_range_lower` and `parallel_replicas_custom_key_range_upper` to control how parallel replicas with dynamic shards parallelizes queries when using a range filter. [#64604](https://github.com/ClickHouse/ClickHouse/pull/64604) ([josh-hildred](https://github.com/josh-hildred)).
+* Updated Advanced Dashboard for both open-source and ClickHouse Cloud versions to include a chart for 'Maximum concurrent network connections'. [#64610](https://github.com/ClickHouse/ClickHouse/pull/64610) ([Thom O'Connor](https://github.com/thomoco)).
+* The second argument (scale) of functions `round()`, `roundBankers()`, `floor()`, `ceil()` and `trunc()` can now be non-const. [#64798](https://github.com/ClickHouse/ClickHouse/pull/64798) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
+* Improve progress report on zeros_mt and generateRandom. [#64804](https://github.com/ClickHouse/ClickHouse/pull/64804) ([Raúl Marín](https://github.com/Algunenano)).
+* Add an asynchronous metric jemalloc.profile.active to show whether sampling is currently active. This is an activation mechanism in addition to prof.active; both must be active for the calling thread to sample. [#64842](https://github.com/ClickHouse/ClickHouse/pull/64842) ([Unalian](https://github.com/Unalian)).
+* Support statistics with ReplicatedMergeTree. [#64934](https://github.com/ClickHouse/ClickHouse/pull/64934) ([Han Fei](https://github.com/hanfei1991)).
+* Remove mark of `allow_experimental_join_condition` as important. This mark may have prevented distributed queries in a mixed versions cluster from being executed successfully. [#65008](https://github.com/ClickHouse/ClickHouse/pull/65008) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Added server Asynchronous metrics `DiskGetObjectThrottler*` and `DiskPutObjectThrottler*` reflecting request per second rate limit defined with `s3_max_get_rps` and `s3_max_put_rps` disk settings and currently available number of requests that could be sent without hitting throttling limit on the disk. Metrics are defined for every disk that has a configured limit. [#65050](https://github.com/ClickHouse/ClickHouse/pull/65050) ([Sergei Trifonov](https://github.com/serxa)).
+* Added a setting `output_format_pretty_display_footer_column_names` which when enabled displays column names at the end of the table for long tables (50 rows by default), with the threshold value for minimum number of rows controlled by `output_format_pretty_display_footer_column_names_min_rows`. [#65144](https://github.com/ClickHouse/ClickHouse/pull/65144) ([Shaun Struwig](https://github.com/Blargian)).
+* Returned back the behaviour of how ClickHouse works and interprets Tuples in CSV format. This change effectively reverts https://github.com/ClickHouse/ClickHouse/pull/60994 and makes it available only under a few settings: `output_format_csv_serialize_tuple_into_separate_columns`, `input_format_csv_deserialize_separate_columns_into_tuple` and `input_format_csv_try_infer_strings_from_quoted_tuples`. [#65170](https://github.com/ClickHouse/ClickHouse/pull/65170) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Initialize global trace collector for Poco::ThreadPool (needed for keeper, etc). [#65239](https://github.com/ClickHouse/ClickHouse/pull/65239) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Add validation when creating a user with bcrypt_hash. [#65242](https://github.com/ClickHouse/ClickHouse/pull/65242) ([Raúl Marín](https://github.com/Algunenano)).
+* Unite s3/hdfs/azure storage implementations into a single class working with IObjectStorage. Same for *Cluster, data lakes and Queue storages. [#59767](https://github.com/ClickHouse/ClickHouse/pull/59767) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Refactor data part writer to remove dependencies on MergeTreeData and DataPart. [#63620](https://github.com/ClickHouse/ClickHouse/pull/63620) ([Alexander Gololobov](https://github.com/davenger)).
+* Add profile events for number of rows read during/after prewhere. [#64198](https://github.com/ClickHouse/ClickHouse/pull/64198) ([Nikita Taranov](https://github.com/nickitat)).
+* Print query in explain plan with parallel replicas. [#64298](https://github.com/ClickHouse/ClickHouse/pull/64298) ([vdimir](https://github.com/vdimir)).
+* Rename `allow_deprecated_functions` to `allow_deprecated_error_prone_window_functions`. [#64358](https://github.com/ClickHouse/ClickHouse/pull/64358) ([Raúl Marín](https://github.com/Algunenano)).
+* Respect `max_read_buffer_size` setting for file descriptors as well in file() table function. [#64532](https://github.com/ClickHouse/ClickHouse/pull/64532) ([Azat Khuzhin](https://github.com/azat)).
+* Disable transactions for unsupported storages even for materialized views. [#64918](https://github.com/ClickHouse/ClickHouse/pull/64918) ([alesapin](https://github.com/alesapin)).
+* Refactor `KeyCondition` and key analysis to improve PartitionPruner and trivial count optimization. This is separated from [#60463](https://github.com/ClickHouse/ClickHouse/issues/60463) . [#61459](https://github.com/ClickHouse/ClickHouse/pull/61459) ([Amos Bird](https://github.com/amosbird)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Fix a permission error where a user in a specific situation can escalate their privileges on the default database without necessary grants. [#64769](https://github.com/ClickHouse/ClickHouse/pull/64769) ([pufit](https://github.com/pufit)).
+* Fix crash with UniqInjectiveFunctionsEliminationPass and uniqCombined. [#65188](https://github.com/ClickHouse/ClickHouse/pull/65188) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix a bug in ClickHouse Keeper that causes digest mismatch during closing session. [#65198](https://github.com/ClickHouse/ClickHouse/pull/65198) ([Aleksei Filatov](https://github.com/aalexfvk)).
+* Forbid `QUALIFY` clause in the old analyzer. The old analyzer ignored `QUALIFY`, so it could lead to unexpected data removal in mutations. [#65356](https://github.com/ClickHouse/ClickHouse/pull/65356) ([Dmitry Novik](https://github.com/novikd)).
+* Use correct memory alignment for Distinct combinator. Previously, crash could happen because of invalid memory allocation when the combinator was used. [#65379](https://github.com/ClickHouse/ClickHouse/pull/65379) ([Antonio Andelic](https://github.com/antonio2368)).
+* Fix crash with `DISTINCT` and window functions. [#64767](https://github.com/ClickHouse/ClickHouse/pull/64767) ([Igor Nikonov](https://github.com/devcrafter)).
+* Fixed 'set' skip index not working with IN and indexHint(). [#62083](https://github.com/ClickHouse/ClickHouse/pull/62083) ([Michael Kolupaev](https://github.com/al13n321)).
+* Support executing function during assignment of parameterized view value. [#63502](https://github.com/ClickHouse/ClickHouse/pull/63502) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
+* Fixed parquet memory tracking. [#63584](https://github.com/ClickHouse/ClickHouse/pull/63584) ([Michael Kolupaev](https://github.com/al13n321)).
+* Fixed reading of columns of type `Tuple(Map(LowCardinality(String), String), ...)`. [#63956](https://github.com/ClickHouse/ClickHouse/pull/63956) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix a `Cyclic aliases` error for cyclic aliases of different type (expression and function). [#63993](https://github.com/ClickHouse/ClickHouse/pull/63993) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* This fix will use a proper redefined context with the correct definer for each individual view in the query pipeline. [#64079](https://github.com/ClickHouse/ClickHouse/pull/64079) ([pufit](https://github.com/pufit)).
+* Fix analyzer: "Not found column" error is fixed when using INTERPOLATE. [#64096](https://github.com/ClickHouse/ClickHouse/pull/64096) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
+* Fix creating backups to S3 buckets with different credentials from the disk containing the file. [#64153](https://github.com/ClickHouse/ClickHouse/pull/64153) ([Antonio Andelic](https://github.com/antonio2368)).
+* The query cache now considers two identical queries against different databases as different. The previous behavior could be used to bypass missing privileges to read from a table. [#64199](https://github.com/ClickHouse/ClickHouse/pull/64199) ([Robert Schulze](https://github.com/rschu1ze)).
+* Fix possible abort on uncaught exception in ~WriteBufferFromFileDescriptor in StatusFile. [#64206](https://github.com/ClickHouse/ClickHouse/pull/64206) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix `duplicate alias` error for distributed queries with `ARRAY JOIN`. [#64226](https://github.com/ClickHouse/ClickHouse/pull/64226) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix unexpected accurateCast from string to integer. [#64255](https://github.com/ClickHouse/ClickHouse/pull/64255) ([wudidapaopao](https://github.com/wudidapaopao)).
+* Fixed CNF simplification, in case any OR group contains mutually exclusive atoms. [#64256](https://github.com/ClickHouse/ClickHouse/pull/64256) ([Eduard Karacharov](https://github.com/korowa)).
+* Fix Query Tree size validation. [#64377](https://github.com/ClickHouse/ClickHouse/pull/64377) ([Dmitry Novik](https://github.com/novikd)).
+* Fix `Logical error: Bad cast` for `Buffer` table with `PREWHERE`. [#64388](https://github.com/ClickHouse/ClickHouse/pull/64388) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Prevent recursive logging in `blob_storage_log` when it's stored on object storage. [#64393](https://github.com/ClickHouse/ClickHouse/pull/64393) ([vdimir](https://github.com/vdimir)).
+* Fixed `CREATE TABLE AS` queries for tables with default expressions. [#64455](https://github.com/ClickHouse/ClickHouse/pull/64455) ([Anton Popov](https://github.com/CurtizJ)).
+* Fixed `optimize_read_in_order` behaviour for ORDER BY ... NULLS FIRST / LAST on tables with nullable keys. [#64483](https://github.com/ClickHouse/ClickHouse/pull/64483) ([Eduard Karacharov](https://github.com/korowa)).
+* Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN.`. [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix an error `Cannot find column` in distributed queries with constant CTE in the `GROUP BY` key. [#64519](https://github.com/ClickHouse/ClickHouse/pull/64519) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fixed ORC statistics calculation, when writing, for unsigned types on all platforms and Int8 on ARM. [#64563](https://github.com/ClickHouse/ClickHouse/pull/64563) ([Michael Kolupaev](https://github.com/al13n321)).
+* Fix the crash loop when restoring from backup is blocked by creating an MV with a definer that hasn't been restored yet. [#64595](https://github.com/ClickHouse/ClickHouse/pull/64595) ([pufit](https://github.com/pufit)).
+* Fix the output of function `formatDateTimeInJodaSyntax` when a formatter generates an uneven number of characters and the last character is `0`. For example, `SELECT formatDateTimeInJodaSyntax(toDate('2012-05-29'), 'D')` now correctly returns `150` instead of previously `15`. [#64614](https://github.com/ClickHouse/ClickHouse/pull/64614) ([LiuNeng](https://github.com/liuneng1994)).
+* Do not rewrite aggregation if `-If` combinator is already used. [#64638](https://github.com/ClickHouse/ClickHouse/pull/64638) ([Dmitry Novik](https://github.com/novikd)).
+* Fix type inference for float (in case of small buffer, i.e. `--max_read_buffer_size 1`). [#64641](https://github.com/ClickHouse/ClickHouse/pull/64641) ([Azat Khuzhin](https://github.com/azat)).
+* Fix bug which could lead to non-working TTLs with expressions. [#64694](https://github.com/ClickHouse/ClickHouse/pull/64694) ([alesapin](https://github.com/alesapin)).
+* Fix removing the `WHERE` and `PREWHERE` expressions, which are always true (for the new analyzer). [#64695](https://github.com/ClickHouse/ClickHouse/pull/64695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fixed excessive part elimination by token-based text indexes (`ngrambf` , `full_text`) when filtering by result of `startsWith`, `endsWith`, `match`, `multiSearchAny`. [#64720](https://github.com/ClickHouse/ClickHouse/pull/64720) ([Eduard Karacharov](https://github.com/korowa)).
+* Fixes incorrect behaviour of ANSI CSI escaping in the `UTF8::computeWidth` function. [#64756](https://github.com/ClickHouse/ClickHouse/pull/64756) ([Shaun Struwig](https://github.com/Blargian)).
+* Fix a case of incorrect removal of `ORDER BY` / `LIMIT BY` across subqueries. [#64766](https://github.com/ClickHouse/ClickHouse/pull/64766) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix (experimental) unequal join with subqueries for sets which are in the mixed join conditions. [#64775](https://github.com/ClickHouse/ClickHouse/pull/64775) ([lgbo](https://github.com/lgbo-ustc)).
+* Fix crash in a local cache over `plain_rewritable` disk. [#64778](https://github.com/ClickHouse/ClickHouse/pull/64778) ([Julia Kartseva](https://github.com/jkartseva)).
+* Keeper fix: return correct value for `zk_latest_snapshot_size` in `mntr` command. [#64784](https://github.com/ClickHouse/ClickHouse/pull/64784) ([Antonio Andelic](https://github.com/antonio2368)).
+* Fix `Cannot find column` in distributed query with `ARRAY JOIN` by `Nested` column. Fixes [#64755](https://github.com/ClickHouse/ClickHouse/issues/64755). [#64801](https://github.com/ClickHouse/ClickHouse/pull/64801) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix memory leak in slru cache policy. [#64803](https://github.com/ClickHouse/ClickHouse/pull/64803) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fixed possible incorrect memory tracking in several kinds of queries: queries that read any data from S3, queries via http protocol, asynchronous inserts. [#64844](https://github.com/ClickHouse/ClickHouse/pull/64844) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix the `Block structure mismatch` error for queries reading with `PREWHERE` from the materialized view when the materialized view has columns of different types than the source table. Fixes [#64611](https://github.com/ClickHouse/ClickHouse/issues/64611). [#64855](https://github.com/ClickHouse/ClickHouse/pull/64855) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix rare crash when table has TTL with subquery + database replicated + parallel replicas + analyzer. It's really rare, but please don't use TTLs with subqueries. [#64858](https://github.com/ClickHouse/ClickHouse/pull/64858) ([alesapin](https://github.com/alesapin)).
+* Fix duplicating `Delete` events in `blob_storage_log` in case of large batch to delete. [#64924](https://github.com/ClickHouse/ClickHouse/pull/64924) ([vdimir](https://github.com/vdimir)).
+* Fixed `Session moved to another server` error from [Zoo]Keeper that might happen after server startup when the config has includes from [Zoo]Keeper. [#64986](https://github.com/ClickHouse/ClickHouse/pull/64986) ([Alexander Tokmakov](https://github.com/tavplubix)).
+* Fix `ALTER MODIFY COMMENT` query that was broken for parameterized VIEWs in https://github.com/ClickHouse/ClickHouse/pull/54211. [#65031](https://github.com/ClickHouse/ClickHouse/pull/65031) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Fix `host_id` in DatabaseReplicated when `cluster_secure_connection` parameter is enabled. Previously all the connections within the cluster created by DatabaseReplicated were not secure, even if the parameter was enabled. [#65054](https://github.com/ClickHouse/ClickHouse/pull/65054) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Fixing the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Avoid writing to finalized buffer in File-like storages. [#65063](https://github.com/ClickHouse/ClickHouse/pull/65063) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix possible infinite query duration in case of cyclic aliases. Fixes [#64849](https://github.com/ClickHouse/ClickHouse/issues/64849). [#65081](https://github.com/ClickHouse/ClickHouse/pull/65081) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix the `Unknown expression identifier` error for remote queries with `INTERPOLATE (alias)` (new analyzer). Fixes [#64636](https://github.com/ClickHouse/ClickHouse/issues/64636). [#65090](https://github.com/ClickHouse/ClickHouse/pull/65090) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix pushing arithmetic operations out of aggregation. In the new analyzer, optimization was applied only once. [#65104](https://github.com/ClickHouse/ClickHouse/pull/65104) ([Dmitry Novik](https://github.com/novikd)).
+* Fix aggregate function name rewriting in the new analyzer. [#65110](https://github.com/ClickHouse/ClickHouse/pull/65110) ([Dmitry Novik](https://github.com/novikd)).
+* Respond with 5xx instead of 200 OK in case of receive timeout while reading (parts of) the request body from the client socket. [#65118](https://github.com/ClickHouse/ClickHouse/pull/65118) ([Julian Maicher](https://github.com/jmaicher)).
+* Fix possible crash for hedged requests. [#65206](https://github.com/ClickHouse/ClickHouse/pull/65206) ([Azat Khuzhin](https://github.com/azat)).
+* Fix the bug in Hashed and Hashed_Array dictionary short circuit evaluation, which may read uninitialized number, leading to various errors. [#65256](https://github.com/ClickHouse/ClickHouse/pull/65256) ([jsc0218](https://github.com/jsc0218)).
+* Ensure that the type of the constant (the `IN` operator's second parameter) is always visible during the `IN` operator's type conversion process. Otherwise, losing type information may cause some conversions to fail, such as the conversion from DateTime to Date. Fixes [#64487](https://github.com/ClickHouse/ClickHouse/issues/64487). [#65315](https://github.com/ClickHouse/ClickHouse/pull/65315) ([pn](https://github.com/chloro-pn)).
+
+#### Build/Testing/Packaging Improvement
+* Make `network` service be required when using the rc init script to start the ClickHouse server daemon. [#60650](https://github.com/ClickHouse/ClickHouse/pull/60650) ([Chun-Sheng, Li](https://github.com/peter279k)).
+* Fix typo in test_hdfsCluster_unset_skip_unavailable_shards. The test writes data to unskip_unavailable_shards, but uses skip_unavailable_shards from the previous test. [#64243](https://github.com/ClickHouse/ClickHouse/pull/64243) ([Mikhail Artemenko](https://github.com/Michicosun)).
+* Reduce the size of some slow tests. [#64387](https://github.com/ClickHouse/ClickHouse/pull/64387) ([Raúl Marín](https://github.com/Algunenano)).
+* Reduce the size of some slow tests. [#64452](https://github.com/ClickHouse/ClickHouse/pull/64452) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix test_lost_part_other_replica. [#64512](https://github.com/ClickHouse/ClickHouse/pull/64512) ([Raúl Marín](https://github.com/Algunenano)).
+* Add tests for experimental unequal joins and randomize new settings in clickhouse-test. [#64535](https://github.com/ClickHouse/ClickHouse/pull/64535) ([Nikita Fomichev](https://github.com/fm4v)).
+* Upgrade tests: Update config and work with release candidates. [#64542](https://github.com/ClickHouse/ClickHouse/pull/64542) ([Raúl Marín](https://github.com/Algunenano)).
+* Add support for LLVM XRay. [#64592](https://github.com/ClickHouse/ClickHouse/pull/64592) ([Tomer Shafir](https://github.com/tomershafir)).
+* Speed up 02995_forget_partition. [#64761](https://github.com/ClickHouse/ClickHouse/pull/64761) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix 02790_async_queries_in_query_log. [#64764](https://github.com/ClickHouse/ClickHouse/pull/64764) ([Raúl Marín](https://github.com/Algunenano)).
+* Support LLVM XRay on Linux amd64 only. [#64837](https://github.com/ClickHouse/ClickHouse/pull/64837) ([Tomer Shafir](https://github.com/tomershafir)).
+* Get rid of custom code in `tests/ci/download_release_packages.py` and `tests/ci/get_previous_release_tag.py` to avoid issues after the https://github.com/ClickHouse/ClickHouse/pull/64759 is merged. [#64848](https://github.com/ClickHouse/ClickHouse/pull/64848) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Decrease the `unit-test` image a few times. [#65102](https://github.com/ClickHouse/ClickHouse/pull/65102) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Replay ZooKeeper logs using keeper-bench. [#62481](https://github.com/ClickHouse/ClickHouse/pull/62481) ([Antonio Andelic](https://github.com/antonio2368)).
+* Re-enable OpenSSL session caching. [#65111](https://github.com/ClickHouse/ClickHouse/pull/65111) ([Robert Schulze](https://github.com/rschu1ze)).
+
### ClickHouse release 24.5, 2024-05-30
#### Backward Incompatible Change
diff --git a/README.md b/README.md
index 73d989210b5..dc253d4db2d 100644
--- a/README.md
+++ b/README.md
@@ -34,20 +34,18 @@ curl https://clickhouse.com/ | sh
Every month we get together with the community (users, contributors, customers, those interested in learning more about ClickHouse) to discuss what is coming in the latest release. If you are interested in sharing what you've built on ClickHouse, let us know.
-* [v24.5 Community Call](https://clickhouse.com/company/events/v24-5-community-release-call) - May 30
+* [v24.6 Community Call](https://clickhouse.com/company/events/v24-6-community-release-call) - Jul 2
## Upcoming Events
Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `` clickhouse `` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.
-* [ClickHouse Happy Hour @ Tom's Watch Bar - Los Angeles](https://www.meetup.com/clickhouse-los-angeles-user-group/events/300740584/) - May 22
-* [ClickHouse & Confluent Meetup in Dubai](https://www.meetup.com/clickhouse-dubai-meetup-group/events/299629189/) - May 28
-* [ClickHouse Meetup in Stockholm](https://www.meetup.com/clickhouse-stockholm-user-group/events/299752651/) - Jun 3
-* [ClickHouse Meetup @ Cloudflare - San Francisco](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/300523061/) - Jun 4
-* [ClickHouse (クリックハウス) Meetup Tokyo](https://www.meetup.com/clickhouse-tokyo-user-group/events/300798053/) - Jun 5
+* [AWS Summit in DC](https://clickhouse.com/company/events/2024-06-aws-summit-dc) - Jun 26
* [ClickHouse Meetup in Amsterdam](https://www.meetup.com/clickhouse-netherlands-user-group/events/300781068/) - Jun 27
* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/300783448/) - Jul 9
+* [ClickHouse Cloud - Live Update Call](https://clickhouse.com/company/events/202407-cloud-update-live) - Jul 9
* [ClickHouse Meetup @ Ramp - New York City](https://www.meetup.com/clickhouse-new-york-user-group/events/300595845/) - Jul 9
+* [AWS Summit in New York](https://clickhouse.com/company/events/2024-07-awssummit-nyc) - Jul 10
* [ClickHouse Meetup @ Klaviyo - Boston](https://www.meetup.com/clickhouse-boston-user-group/events/300907870) - Jul 11
## Recent Recordings
diff --git a/base/poco/Crypto/src/OpenSSLInitializer.cpp b/base/poco/Crypto/src/OpenSSLInitializer.cpp
index 23447760b47..31798e8dd7e 100644
--- a/base/poco/Crypto/src/OpenSSLInitializer.cpp
+++ b/base/poco/Crypto/src/OpenSSLInitializer.cpp
@@ -23,9 +23,6 @@
#include
#endif
-#if __has_feature(address_sanitizer)
-#include <sanitizer/lsan_interface.h>
-#endif
using Poco::RandomInputStream;
using Poco::Thread;
@@ -70,18 +67,12 @@ void OpenSSLInitializer::initialize()
SSL_library_init();
SSL_load_error_strings();
OpenSSL_add_all_algorithms();
-
+
char seed[SEEDSIZE];
RandomInputStream rnd;
rnd.read(seed, sizeof(seed));
- {
-# if __has_feature(address_sanitizer)
- /// Leak sanitizer (part of address sanitizer) thinks that a few bytes of memory in OpenSSL are allocated during but never released.
- __lsan::ScopedDisabler lsan_disabler;
-#endif
- RAND_seed(seed, SEEDSIZE);
- }
-
+ RAND_seed(seed, SEEDSIZE);
+
int nMutexes = CRYPTO_num_locks();
_mutexes = new Poco::FastMutex[nMutexes];
CRYPTO_set_locking_callback(&OpenSSLInitializer::lock);
@@ -89,8 +80,8 @@ void OpenSSLInitializer::initialize()
// https://sourceforge.net/p/poco/bugs/110/
//
// From http://www.openssl.org/docs/crypto/threads.html :
-// "If the application does not register such a callback using CRYPTO_THREADID_set_callback(),
-// then a default implementation is used - on Windows and BeOS this uses the system's
+// "If the application does not register such a callback using CRYPTO_THREADID_set_callback(),
+// then a default implementation is used - on Windows and BeOS this uses the system's
// default thread identifying APIs"
CRYPTO_set_id_callback(&OpenSSLInitializer::id);
CRYPTO_set_dynlock_create_callback(&OpenSSLInitializer::dynlockCreate);
@@ -109,7 +100,7 @@ void OpenSSLInitializer::uninitialize()
CRYPTO_set_locking_callback(0);
CRYPTO_set_id_callback(0);
delete [] _mutexes;
-
+
CONF_modules_free();
}
}
diff --git a/base/poco/Foundation/CMakeLists.txt b/base/poco/Foundation/CMakeLists.txt
index dfb41a33fb1..324a0170bdd 100644
--- a/base/poco/Foundation/CMakeLists.txt
+++ b/base/poco/Foundation/CMakeLists.txt
@@ -213,6 +213,7 @@ target_compile_definitions (_poco_foundation
)
target_include_directories (_poco_foundation SYSTEM PUBLIC "include")
+target_link_libraries (_poco_foundation PRIVATE clickhouse_common_io)
target_link_libraries (_poco_foundation
PRIVATE
diff --git a/base/poco/Foundation/include/Poco/ThreadPool.h b/base/poco/Foundation/include/Poco/ThreadPool.h
index b9506cc5b7f..e2187bfeb66 100644
--- a/base/poco/Foundation/include/Poco/ThreadPool.h
+++ b/base/poco/Foundation/include/Poco/ThreadPool.h
@@ -48,7 +48,13 @@ class Foundation_API ThreadPool
/// from the pool.
{
public:
- ThreadPool(int minCapacity = 2, int maxCapacity = 16, int idleTime = 60, int stackSize = POCO_THREAD_STACK_SIZE);
+ explicit ThreadPool(
+ int minCapacity = 2,
+ int maxCapacity = 16,
+ int idleTime = 60,
+ int stackSize = POCO_THREAD_STACK_SIZE,
+ size_t global_profiler_real_time_period_ns_ = 0,
+ size_t global_profiler_cpu_time_period_ns_ = 0);
/// Creates a thread pool with minCapacity threads.
/// If required, up to maxCapacity threads are created
/// a NoThreadAvailableException exception is thrown.
@@ -56,8 +62,14 @@ public:
/// and more than minCapacity threads are running, the thread
/// is killed. Threads are created with given stack size.
- ThreadPool(
- const std::string & name, int minCapacity = 2, int maxCapacity = 16, int idleTime = 60, int stackSize = POCO_THREAD_STACK_SIZE);
+ explicit ThreadPool(
+ const std::string & name,
+ int minCapacity = 2,
+ int maxCapacity = 16,
+ int idleTime = 60,
+ int stackSize = POCO_THREAD_STACK_SIZE,
+ size_t global_profiler_real_time_period_ns_ = 0,
+ size_t global_profiler_cpu_time_period_ns_ = 0);
/// Creates a thread pool with the given name and minCapacity threads.
/// If required, up to maxCapacity threads are created
/// a NoThreadAvailableException exception is thrown.
@@ -171,6 +183,8 @@ private:
int _serial;
int _age;
int _stackSize;
+ size_t _globalProfilerRealTimePeriodNs;
+ size_t _globalProfilerCPUTimePeriodNs;
ThreadVec _threads;
mutable FastMutex _mutex;
};
diff --git a/base/poco/Foundation/src/ThreadPool.cpp b/base/poco/Foundation/src/ThreadPool.cpp
index 6335ee82b47..f57c81e4128 100644
--- a/base/poco/Foundation/src/ThreadPool.cpp
+++ b/base/poco/Foundation/src/ThreadPool.cpp
@@ -20,6 +20,7 @@
#include "Poco/ErrorHandler.h"
#include <sstream>
#include <ctime>
+#include <Common/ThreadStatus.h>
namespace Poco {
@@ -28,7 +29,11 @@ namespace Poco {
class PooledThread: public Runnable
{
public:
- PooledThread(const std::string& name, int stackSize = POCO_THREAD_STACK_SIZE);
+ explicit PooledThread(
+ const std::string& name,
+ int stackSize = POCO_THREAD_STACK_SIZE,
+ size_t globalProfilerRealTimePeriodNs_ = 0,
+ size_t globalProfilerCPUTimePeriodNs_ = 0);
~PooledThread();
void start();
@@ -51,16 +56,24 @@ private:
Event _targetCompleted;
Event _started;
FastMutex _mutex;
+ size_t _globalProfilerRealTimePeriodNs;
+ size_t _globalProfilerCPUTimePeriodNs;
};
-PooledThread::PooledThread(const std::string& name, int stackSize):
- _idle(true),
- _idleTime(0),
- _pTarget(0),
- _name(name),
+PooledThread::PooledThread(
+ const std::string& name,
+ int stackSize,
+ size_t globalProfilerRealTimePeriodNs_,
+ size_t globalProfilerCPUTimePeriodNs_) :
+ _idle(true),
+ _idleTime(0),
+ _pTarget(0),
+ _name(name),
_thread(name),
- _targetCompleted(false)
+ _targetCompleted(false),
+ _globalProfilerRealTimePeriodNs(globalProfilerRealTimePeriodNs_),
+ _globalProfilerCPUTimePeriodNs(globalProfilerCPUTimePeriodNs_)
{
poco_assert_dbg (stackSize >= 0);
_thread.setStackSize(stackSize);
@@ -83,7 +96,7 @@ void PooledThread::start()
void PooledThread::start(Thread::Priority priority, Runnable& target)
{
FastMutex::ScopedLock lock(_mutex);
-
+
poco_assert (_pTarget == 0);
_pTarget = ⌖
@@ -109,7 +122,7 @@ void PooledThread::start(Thread::Priority priority, Runnable& target, const std:
}
_thread.setName(fullName);
_thread.setPriority(priority);
-
+
poco_assert (_pTarget == 0);
_pTarget = ⌖
@@ -145,7 +158,7 @@ void PooledThread::join()
void PooledThread::activate()
{
FastMutex::ScopedLock lock(_mutex);
-
+
poco_assert (_idle);
_idle = false;
_targetCompleted.reset();
@@ -155,7 +168,7 @@ void PooledThread::activate()
void PooledThread::release()
{
const long JOIN_TIMEOUT = 10000;
-
+
_mutex.lock();
_pTarget = 0;
_mutex.unlock();
@@ -174,6 +187,10 @@ void PooledThread::release()
void PooledThread::run()
{
+ DB::ThreadStatus thread_status;
+ if (unlikely(_globalProfilerRealTimePeriodNs != 0 || _globalProfilerCPUTimePeriodNs != 0))
+ thread_status.initGlobalProfiler(_globalProfilerRealTimePeriodNs, _globalProfilerCPUTimePeriodNs);
+
_started.set();
for (;;)
{
@@ -220,13 +237,17 @@ void PooledThread::run()
ThreadPool::ThreadPool(int minCapacity,
int maxCapacity,
int idleTime,
- int stackSize):
- _minCapacity(minCapacity),
- _maxCapacity(maxCapacity),
+ int stackSize,
+ size_t globalProfilerRealTimePeriodNs_,
+ size_t globalProfilerCPUTimePeriodNs_) :
+ _minCapacity(minCapacity),
+ _maxCapacity(maxCapacity),
_idleTime(idleTime),
_serial(0),
_age(0),
- _stackSize(stackSize)
+ _stackSize(stackSize),
+ _globalProfilerRealTimePeriodNs(globalProfilerRealTimePeriodNs_),
+ _globalProfilerCPUTimePeriodNs(globalProfilerCPUTimePeriodNs_)
{
poco_assert (minCapacity >= 1 && maxCapacity >= minCapacity && idleTime > 0);
@@ -243,14 +264,18 @@ ThreadPool::ThreadPool(const std::string& name,
int minCapacity,
int maxCapacity,
int idleTime,
- int stackSize):
+ int stackSize,
+ size_t globalProfilerRealTimePeriodNs_,
+ size_t globalProfilerCPUTimePeriodNs_) :
_name(name),
- _minCapacity(minCapacity),
- _maxCapacity(maxCapacity),
+ _minCapacity(minCapacity),
+ _maxCapacity(maxCapacity),
_idleTime(idleTime),
_serial(0),
_age(0),
- _stackSize(stackSize)
+ _stackSize(stackSize),
+ _globalProfilerRealTimePeriodNs(globalProfilerRealTimePeriodNs_),
+ _globalProfilerCPUTimePeriodNs(globalProfilerCPUTimePeriodNs_)
{
poco_assert (minCapacity >= 1 && maxCapacity >= minCapacity && idleTime > 0);
@@ -393,15 +418,15 @@ void ThreadPool::housekeep()
ThreadVec activeThreads;
idleThreads.reserve(_threads.size());
activeThreads.reserve(_threads.size());
-
+
for (ThreadVec::iterator it = _threads.begin(); it != _threads.end(); ++it)
{
if ((*it)->idle())
{
if ((*it)->idleTime() < _idleTime)
idleThreads.push_back(*it);
- else
- expiredThreads.push_back(*it);
+ else
+ expiredThreads.push_back(*it);
}
else activeThreads.push_back(*it);
}
@@ -463,7 +488,7 @@ PooledThread* ThreadPool::createThread()
{
std::ostringstream name;
name << _name << "[#" << ++_serial << "]";
- return new PooledThread(name.str(), _stackSize);
+ return new PooledThread(name.str(), _stackSize, _globalProfilerRealTimePeriodNs, _globalProfilerCPUTimePeriodNs);
}
@@ -481,7 +506,7 @@ public:
ThreadPool* pool()
{
FastMutex::ScopedLock lock(_mutex);
-
+
if (!_pPool)
{
_pPool = new ThreadPool("default");
@@ -490,7 +515,7 @@ public:
}
return _pPool;
}
-
+
private:
ThreadPool* _pPool;
FastMutex _mutex;
diff --git a/base/poco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h b/base/poco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h
index e4037c87927..25dc133fb20 100644
--- a/base/poco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h
+++ b/base/poco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h
@@ -17,6 +17,7 @@
#ifndef NetSSL_SSLManager_INCLUDED
#define NetSSL_SSLManager_INCLUDED
+#include <unordered_map>
#include <openssl/ssl.h>
#include "Poco/BasicEvent.h"
@@ -219,6 +220,13 @@ namespace Net
/// Unless initializeClient() has been called, the first call to this method initializes the default Context
/// from the application configuration.
+ Context::Ptr getCustomServerContext(const std::string & name);
+ /// Return custom Context used by the server.
+
+ Context::Ptr setCustomServerContext(const std::string & name, Context::Ptr ctx);
+ /// Set custom Context used by the server.
+ /// Return pointer on inserted Context or on old Context if exists.
+
PrivateKeyPassphraseHandlerPtr serverPassphraseHandler();
/// Returns the configured passphrase handler of the server. If none is set, the method will create a default one
/// from an application configuration.
@@ -258,6 +266,40 @@ namespace Net
static const std::string CFG_SERVER_PREFIX;
static const std::string CFG_CLIENT_PREFIX;
+ static const std::string CFG_PRIV_KEY_FILE;
+ static const std::string CFG_CERTIFICATE_FILE;
+ static const std::string CFG_CA_LOCATION;
+ static const std::string CFG_VER_MODE;
+ static const Context::VerificationMode VAL_VER_MODE;
+ static const std::string CFG_VER_DEPTH;
+ static const int VAL_VER_DEPTH;
+ static const std::string CFG_ENABLE_DEFAULT_CA;
+ static const bool VAL_ENABLE_DEFAULT_CA;
+ static const std::string CFG_CIPHER_LIST;
+ static const std::string CFG_CYPHER_LIST; // for backwards compatibility
+ static const std::string VAL_CIPHER_LIST;
+ static const std::string CFG_PREFER_SERVER_CIPHERS;
+ static const std::string CFG_DELEGATE_HANDLER;
+ static const std::string VAL_DELEGATE_HANDLER;
+ static const std::string CFG_CERTIFICATE_HANDLER;
+ static const std::string VAL_CERTIFICATE_HANDLER;
+ static const std::string CFG_CACHE_SESSIONS;
+ static const std::string CFG_SESSION_ID_CONTEXT;
+ static const std::string CFG_SESSION_CACHE_SIZE;
+ static const std::string CFG_SESSION_TIMEOUT;
+ static const std::string CFG_EXTENDED_VERIFICATION;
+ static const std::string CFG_REQUIRE_TLSV1;
+ static const std::string CFG_REQUIRE_TLSV1_1;
+ static const std::string CFG_REQUIRE_TLSV1_2;
+ static const std::string CFG_DISABLE_PROTOCOLS;
+ static const std::string CFG_DH_PARAMS_FILE;
+ static const std::string CFG_ECDH_CURVE;
+
+#ifdef OPENSSL_FIPS
+ static const std::string CFG_FIPS_MODE;
+ static const bool VAL_FIPS_MODE;
+#endif
+
protected:
static int verifyClientCallback(int ok, X509_STORE_CTX * pStore);
/// The return value of this method defines how errors in
@@ -314,39 +356,7 @@ namespace Net
InvalidCertificateHandlerPtr _ptrClientCertificateHandler;
Poco::FastMutex _mutex;
- static const std::string CFG_PRIV_KEY_FILE;
- static const std::string CFG_CERTIFICATE_FILE;
- static const std::string CFG_CA_LOCATION;
- static const std::string CFG_VER_MODE;
- static const Context::VerificationMode VAL_VER_MODE;
- static const std::string CFG_VER_DEPTH;
- static const int VAL_VER_DEPTH;
- static const std::string CFG_ENABLE_DEFAULT_CA;
- static const bool VAL_ENABLE_DEFAULT_CA;
- static const std::string CFG_CIPHER_LIST;
- static const std::string CFG_CYPHER_LIST; // for backwards compatibility
- static const std::string VAL_CIPHER_LIST;
- static const std::string CFG_PREFER_SERVER_CIPHERS;
- static const std::string CFG_DELEGATE_HANDLER;
- static const std::string VAL_DELEGATE_HANDLER;
- static const std::string CFG_CERTIFICATE_HANDLER;
- static const std::string VAL_CERTIFICATE_HANDLER;
- static const std::string CFG_CACHE_SESSIONS;
- static const std::string CFG_SESSION_ID_CONTEXT;
- static const std::string CFG_SESSION_CACHE_SIZE;
- static const std::string CFG_SESSION_TIMEOUT;
- static const std::string CFG_EXTENDED_VERIFICATION;
- static const std::string CFG_REQUIRE_TLSV1;
- static const std::string CFG_REQUIRE_TLSV1_1;
- static const std::string CFG_REQUIRE_TLSV1_2;
- static const std::string CFG_DISABLE_PROTOCOLS;
- static const std::string CFG_DH_PARAMS_FILE;
- static const std::string CFG_ECDH_CURVE;
-
-#ifdef OPENSSL_FIPS
- static const std::string CFG_FIPS_MODE;
- static const bool VAL_FIPS_MODE;
-#endif
+ std::unordered_map<std::string, Context::Ptr> _mapPtrServerContexts;
friend class Poco::SingletonHolder<SSLManager>;
friend class Context;
diff --git a/base/poco/NetSSL_OpenSSL/src/SSLManager.cpp b/base/poco/NetSSL_OpenSSL/src/SSLManager.cpp
index 7f6cc9abcb2..ae04a994786 100644
--- a/base/poco/NetSSL_OpenSSL/src/SSLManager.cpp
+++ b/base/poco/NetSSL_OpenSSL/src/SSLManager.cpp
@@ -330,27 +330,26 @@ void SSLManager::initDefaultContext(bool server)
else
_ptrDefaultClientContext->disableProtocols(disabledProtocols);
- /// Temporarily disabled during the transition from boringssl to OpenSSL due to tsan issues.
- /// bool cacheSessions = config.getBool(prefix + CFG_CACHE_SESSIONS, false);
- /// if (server)
- /// {
- /// std::string sessionIdContext = config.getString(prefix + CFG_SESSION_ID_CONTEXT, config.getString("application.name", ""));
- /// _ptrDefaultServerContext->enableSessionCache(cacheSessions, sessionIdContext);
- /// if (config.hasProperty(prefix + CFG_SESSION_CACHE_SIZE))
- /// {
- /// int cacheSize = config.getInt(prefix + CFG_SESSION_CACHE_SIZE);
- /// _ptrDefaultServerContext->setSessionCacheSize(cacheSize);
- /// }
- /// if (config.hasProperty(prefix + CFG_SESSION_TIMEOUT))
- /// {
- /// int timeout = config.getInt(prefix + CFG_SESSION_TIMEOUT);
- /// _ptrDefaultServerContext->setSessionTimeout(timeout);
- /// }
- /// }
- /// else
- /// {
- /// _ptrDefaultClientContext->enableSessionCache(cacheSessions);
- /// }
+ bool cacheSessions = config.getBool(prefix + CFG_CACHE_SESSIONS, false);
+ if (server)
+ {
+ std::string sessionIdContext = config.getString(prefix + CFG_SESSION_ID_CONTEXT, config.getString("application.name", ""));
+ _ptrDefaultServerContext->enableSessionCache(cacheSessions, sessionIdContext);
+ if (config.hasProperty(prefix + CFG_SESSION_CACHE_SIZE))
+ {
+ int cacheSize = config.getInt(prefix + CFG_SESSION_CACHE_SIZE);
+ _ptrDefaultServerContext->setSessionCacheSize(cacheSize);
+ }
+ if (config.hasProperty(prefix + CFG_SESSION_TIMEOUT))
+ {
+ int timeout = config.getInt(prefix + CFG_SESSION_TIMEOUT);
+ _ptrDefaultServerContext->setSessionTimeout(timeout);
+ }
+ }
+ else
+ {
+ _ptrDefaultClientContext->enableSessionCache(cacheSessions);
+ }
bool extendedVerification = config.getBool(prefix + CFG_EXTENDED_VERIFICATION, false);
if (server)
_ptrDefaultServerContext->enableExtendedCertificateVerification(extendedVerification);
@@ -429,6 +428,23 @@ void SSLManager::initCertificateHandler(bool server)
}
+Context::Ptr SSLManager::getCustomServerContext(const std::string & name)
+{
+ Poco::FastMutex::ScopedLock lock(_mutex);
+ auto it = _mapPtrServerContexts.find(name);
+ if (it != _mapPtrServerContexts.end())
+ return it->second;
+ return nullptr;
+}
+
+Context::Ptr SSLManager::setCustomServerContext(const std::string & name, Context::Ptr ctx)
+{
+ Poco::FastMutex::ScopedLock lock(_mutex);
+ ctx = _mapPtrServerContexts.insert({name, ctx}).first->second;
+ return ctx;
+}
+
+
Poco::Util::AbstractConfiguration& SSLManager::appConfig()
{
try
diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt
index dfbbb66a1e9..bb776fa9506 100644
--- a/cmake/autogenerated_versions.txt
+++ b/cmake/autogenerated_versions.txt
@@ -1,12 +1,12 @@
# This variables autochanged by tests/ci/version_helper.py:
-# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
+# NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54487)
+SET(VERSION_REVISION 54488)
SET(VERSION_MAJOR 24)
-SET(VERSION_MINOR 6)
+SET(VERSION_MINOR 7)
SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 70a1d3a63d47f0be077d67b8deb907230fc7cfb0)
-SET(VERSION_DESCRIBE v24.6.1.1-testing)
-SET(VERSION_STRING 24.6.1.1)
+SET(VERSION_GITHASH aa023477a9265e403982fca5ee29a714db5133d9)
+SET(VERSION_DESCRIBE v24.7.1.1-testing)
+SET(VERSION_STRING 24.7.1.1)
# end of autochange
diff --git a/contrib/openssl b/contrib/openssl
index 67c0b63e578..5d81fa7068f 160000
--- a/contrib/openssl
+++ b/contrib/openssl
@@ -1 +1 @@
-Subproject commit 67c0b63e578e4c751ac9edf490f5a96124fff8dc
+Subproject commit 5d81fa7068fc8c07f4d0997d5b703f3c541a637c
diff --git a/contrib/re2 b/contrib/re2
index a807e8a3aac..85dd7ad833a 160000
--- a/contrib/re2
+++ b/contrib/re2
@@ -1 +1 @@
-Subproject commit a807e8a3aac2cc33c77b7071efea54fcabe38e0c
+Subproject commit 85dd7ad833a73095ecf3e3baea608ba051bbe2c7
diff --git a/contrib/re2-cmake/CMakeLists.txt b/contrib/re2-cmake/CMakeLists.txt
index f773bc65a69..99d61839b30 100644
--- a/contrib/re2-cmake/CMakeLists.txt
+++ b/contrib/re2-cmake/CMakeLists.txt
@@ -28,16 +28,20 @@ set(RE2_SOURCES
add_library(_re2 ${RE2_SOURCES})
target_include_directories(_re2 PUBLIC "${SRC_DIR}")
target_link_libraries(_re2 PRIVATE
+ absl::absl_check
+ absl::absl_log
absl::base
absl::core_headers
absl::fixed_array
+ absl::flags
absl::flat_hash_map
absl::flat_hash_set
+ absl::hash
absl::inlined_vector
- absl::strings
- absl::str_format
- absl::synchronization
absl::optional
- absl::span)
+ absl::span
+ absl::str_format
+ absl::strings
+ absl::synchronization)
add_library(ch_contrib::re2 ALIAS _re2)
diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh
index f94621ba092..3ce489b9e0e 100755
--- a/docker/test/stateless/run.sh
+++ b/docker/test/stateless/run.sh
@@ -254,7 +254,7 @@ function run_tests()
set +e
clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
- --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
+ --no-drop-if-fail --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
| ts '%Y-%m-%d %H:%M:%S' \
| tee -a test_output/test_result.txt
set -e
@@ -285,7 +285,7 @@ stop_logs_replication
# Try to get logs while server is running
failed_to_save_logs=0
-for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log
+for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log
do
err=$(clickhouse-client -q "select * from system.$table into outfile '/test_output/$table.tsv.gz' format TSVWithNamesAndTypes")
echo "$err"
@@ -339,7 +339,7 @@ if [ $failed_to_save_logs -ne 0 ]; then
# directly
# - even though ci auto-compress some files (but not *.tsv) it does this only
# for files >64MB, we want this files to be compressed explicitly
- for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log
+ for table in query_log zookeeper_log trace_log transactions_info_log metric_log blob_storage_log error_log
do
clickhouse-local "$data_path_config" --only-system-tables --stacktrace -q "select * from system.$table format TSVWithNamesAndTypes" | zstd --threads=0 > /test_output/$table.tsv.zst ||:
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
@@ -379,6 +379,10 @@ fi
tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:
+rm -rf /var/lib/clickhouse/data/system/*/
+tar -chf /test_output/store.tar /var/lib/clickhouse/store ||:
+tar -chf /test_output/metadata.tar /var/lib/clickhouse/metadata/*.sql ||:
+
if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server1.log ||:
rg -Fa "<Fatal>" /var/log/clickhouse-server/clickhouse-server2.log ||:
diff --git a/docker/test/style/Dockerfile b/docker/test/style/Dockerfile
index 6ad03852b66..7cd712b73f6 100644
--- a/docker/test/style/Dockerfile
+++ b/docker/test/style/Dockerfile
@@ -37,6 +37,7 @@ RUN pip3 install \
tqdm==4.66.4 \
types-requests \
unidiff \
+ jwt \
&& rm -rf /root/.cache/pip
RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
diff --git a/docs/changelogs/v24.4.3.25-stable.md b/docs/changelogs/v24.4.3.25-stable.md
new file mode 100644
index 00000000000..9582753c731
--- /dev/null
+++ b/docs/changelogs/v24.4.3.25-stable.md
@@ -0,0 +1,30 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.4.3.25-stable (a915dd4eda4) FIXME as compared to v24.4.2.141-stable (9e23d27bd11)
+
+#### Build/Testing/Packaging Improvement
+* Backported in [#65130](https://github.com/ClickHouse/ClickHouse/issues/65130): Decrease the `unit-test` image a few times. [#65102](https://github.com/ClickHouse/ClickHouse/pull/65102) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+
+* Backported in [#64982](https://github.com/ClickHouse/ClickHouse/issues/64982): Fix the `Block structure mismatch` error for queries reading with `PREWHERE` from the materialized view when the materialized view has columns of different types than the source table. Fixes [#64611](https://github.com/ClickHouse/ClickHouse/issues/64611). [#64855](https://github.com/ClickHouse/ClickHouse/pull/64855) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#64974](https://github.com/ClickHouse/ClickHouse/issues/64974): Fix rare crash when table has TTL with subquery + database replicated + parallel replicas + analyzer. It's really rare, but please don't use TTLs with subqueries. [#64858](https://github.com/ClickHouse/ClickHouse/pull/64858) ([alesapin](https://github.com/alesapin)).
+* Backported in [#65072](https://github.com/ClickHouse/ClickHouse/issues/65072): Fix `ALTER MODIFY COMMENT` query that was broken for parameterized VIEWs in https://github.com/ClickHouse/ClickHouse/pull/54211. [#65031](https://github.com/ClickHouse/ClickHouse/pull/65031) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Backported in [#65177](https://github.com/ClickHouse/ClickHouse/issues/65177): Fix the `Unknown expression identifier` error for remote queries with `INTERPOLATE (alias)` (new analyzer). Fixes [#64636](https://github.com/ClickHouse/ClickHouse/issues/64636). [#65090](https://github.com/ClickHouse/ClickHouse/pull/65090) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#65263](https://github.com/ClickHouse/ClickHouse/issues/65263): Fix the bug in Hashed and Hashed_Array dictionary short circuit evaluation, which may read uninitialized number, leading to various errors. [#65256](https://github.com/ClickHouse/ClickHouse/pull/65256) ([jsc0218](https://github.com/jsc0218)).
+
+#### Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
+
+* Backported in [#65285](https://github.com/ClickHouse/ClickHouse/issues/65285): Fix crash with UniqInjectiveFunctionsEliminationPass and uniqCombined. [#65188](https://github.com/ClickHouse/ClickHouse/pull/65188) ([Raúl Marín](https://github.com/Algunenano)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Backported in [#65114](https://github.com/ClickHouse/ClickHouse/issues/65114): Adjust the `version_helper` and script to a new release scheme. [#64759](https://github.com/ClickHouse/ClickHouse/pull/64759) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Backported in [#65225](https://github.com/ClickHouse/ClickHouse/issues/65225): Capture weak_ptr of ContextAccess for safety. [#65051](https://github.com/ClickHouse/ClickHouse/pull/65051) ([Alexander Gololobov](https://github.com/davenger)).
+* Backported in [#65217](https://github.com/ClickHouse/ClickHouse/issues/65217): Fix false positives leaky memory warnings in OpenSSL. [#65125](https://github.com/ClickHouse/ClickHouse/pull/65125) ([Robert Schulze](https://github.com/rschu1ze)).
+
diff --git a/docs/en/development/developer-instruction.md b/docs/en/development/developer-instruction.md
index ec5760541e8..0a1fe58b16f 100644
--- a/docs/en/development/developer-instruction.md
+++ b/docs/en/development/developer-instruction.md
@@ -267,7 +267,7 @@ A pull request can be created even if the work is not completed yet. In this cas
Testing will commence as soon as ClickHouse employees label your PR with a tag “can be tested”. The results of some first checks (e.g. code style) will come in within several minutes. Build check results will arrive within half an hour. And the main set of tests will report itself within an hour.
-The system will prepare ClickHouse binary builds for your pull request individually. To retrieve these builds click the “Details” link next to “ClickHouse build check” entry in the list of checks. There you will find direct links to the built .deb packages of ClickHouse which you can deploy even on your production servers (if you have no fear).
+The system will prepare ClickHouse binary builds for your pull request individually. To retrieve these builds click the “Details” link next to “Builds” entry in the list of checks. There you will find direct links to the built .deb packages of ClickHouse which you can deploy even on your production servers (if you have no fear).
Most probably some of the builds will fail at first times. This is due to the fact that we check builds both with gcc as well as with clang, with almost all of existing warnings (always with the `-Werror` flag) enabled for clang. On that same page, you can find all of the build logs so that you do not have to build ClickHouse in all of the possible ways.
diff --git a/docs/en/engines/table-engines/integrations/iceberg.md b/docs/en/engines/table-engines/integrations/iceberg.md
index 9d6395f73ac..21fdbc0b1a5 100644
--- a/docs/en/engines/table-engines/integrations/iceberg.md
+++ b/docs/en/engines/table-engines/integrations/iceberg.md
@@ -37,7 +37,7 @@ Using named collections:
http://test.s3.amazonaws.com/clickhouse-bucket/
- test
+ test
test
diff --git a/docs/en/engines/table-engines/integrations/s3queue.md b/docs/en/engines/table-engines/integrations/s3queue.md
index 0958680dc56..97ca79501a7 100644
--- a/docs/en/engines/table-engines/integrations/s3queue.md
+++ b/docs/en/engines/table-engines/integrations/s3queue.md
@@ -13,7 +13,7 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec
CREATE TABLE s3_queue_engine_table (name String, value UInt32)
ENGINE = S3Queue(path, [NOSIGN, | aws_access_key_id, aws_secret_access_key,] format, [compression])
[SETTINGS]
- [mode = 'unordered',]
+ [mode = '',]
[after_processing = 'keep',]
[keeper_path = '',]
[s3queue_loading_retries = 0,]
diff --git a/docs/en/getting-started/install.md b/docs/en/getting-started/install.md
index 67752f223ce..98e73dec451 100644
--- a/docs/en/getting-started/install.md
+++ b/docs/en/getting-started/install.md
@@ -314,7 +314,7 @@ For example, to download a aarch64 binary for ClickHouse v23.4, follow these ste
- Find the GitHub pull request for release v23.4: [Release pull request for branch 23.4](https://github.com/ClickHouse/ClickHouse/pull/49238)
- Click "Commits", then click a commit similar to "Update autogenerated version to 23.4.2.1 and contributors" for the particular version you like to install.
- Click the green check / yellow dot / red cross to open the list of CI checks.
-- Click "Details" next to "ClickHouse Build Check" in the list, it will open a page similar to [this page](https://s3.amazonaws.com/clickhouse-test-reports/46793/b460eb70bf29b19eadd19a1f959b15d186705394/clickhouse_build_check/report.html)
+- Click "Details" next to "Builds" in the list, it will open a page similar to [this page](https://s3.amazonaws.com/clickhouse-test-reports/46793/b460eb70bf29b19eadd19a1f959b15d186705394/clickhouse_build_check/report.html)
- Find the rows with compiler = "clang-*-aarch64" - there are multiple rows.
- Download the artifacts for these builds.
diff --git a/docs/en/interfaces/cli.md b/docs/en/interfaces/cli.md
index 1eb426af617..e18ff6f1a3f 100644
--- a/docs/en/interfaces/cli.md
+++ b/docs/en/interfaces/cli.md
@@ -193,6 +193,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
- `--hardware-utilization` — Print hardware utilization information in progress bar.
- `--print-profile-events` – Print `ProfileEvents` packets.
- `--profile-events-delay-ms` – Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet).
+- `--jwt` – If specified, enables authorization via JSON Web Token. Server JWT authorization is available only in ClickHouse Cloud.
Instead of `--host`, `--port`, `--user` and `--password` options, ClickHouse client also supports connection strings (see next section).
diff --git a/docs/en/interfaces/formats.md b/docs/en/interfaces/formats.md
index ffdd7e2ca25..a81a17e65d6 100644
--- a/docs/en/interfaces/formats.md
+++ b/docs/en/interfaces/formats.md
@@ -2169,6 +2169,7 @@ To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/t
- [output_format_parquet_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_compression_method) - compression method used in output Parquet format. Default value - `lz4`.
- [input_format_parquet_max_block_size](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_max_block_size) - Max block row size for parquet reader. Default value - `65409`.
- [input_format_parquet_prefer_block_bytes](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_prefer_block_bytes) - Average block bytes output by parquet reader. Default value - `16744704`.
+- [output_format_parquet_write_page_index](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_max_block_size) - Adds a possibility to write the page index into parquet files. `output_format_parquet_use_custom_encoder` needs to be disabled at present. Default value - `true`.
## ParquetMetadata {data-format-parquet-metadata}
diff --git a/docs/en/interfaces/mysql.md b/docs/en/interfaces/mysql.md
index ce5ab24ecb0..42820505406 100644
--- a/docs/en/interfaces/mysql.md
+++ b/docs/en/interfaces/mysql.md
@@ -31,6 +31,56 @@ Alternatively, in order to enable the MySQL interface for an existing service:
3. After entering the password, you will get prompted the MySQL connection string for this service
![Connection screen - MySQL Enabled](./images/mysql5.png)
+## Creating multiple MySQL users in ClickHouse Cloud
+
+By default, there is a built-in `mysql4<subdomain>` user, which uses the same password as the `default` one. The `<subdomain>` part is the first segment of your ClickHouse Cloud hostname. This format is necessary to work with the tools that implement secure connection, but don't provide [SNI information in their TLS handshake](https://www.cloudflare.com/learning/ssl/what-is-sni), which makes it impossible to do the internal routing without an extra hint in the username (MySQL console client is one of such tools).
+
+Because of this, we _highly recommend_ following the `mysql4<subdomain>_<username>` format when creating a new user intended to be used with the MySQL interface, where `<subdomain>` is a hint to identify your Cloud service, and `<username>` is an arbitrary suffix of your choice.
+
+:::tip
+For a ClickHouse Cloud hostname like `foobar.us-east1.aws.clickhouse.cloud`, the `<subdomain>` part equals `foobar`, and a custom MySQL username could look like `mysql4foobar_team1`.
+:::
+
+You can create extra users to use with the MySQL interface if, for example, you need to apply extra settings.
+
+1. Optional - create a [settings profile](https://clickhouse.com/docs/en/sql-reference/statements/create/settings-profile) to apply for your custom user. For example, `my_custom_profile` with an extra setting which will be applied by default when we connect with the user we create later:
+
+ ```sql
+ CREATE SETTINGS PROFILE my_custom_profile SETTINGS prefer_column_name_to_alias=1;
+ ```
+
+ `prefer_column_name_to_alias` is used just as an example, you can use other settings there.
+2. [Create a user](https://clickhouse.com/docs/en/sql-reference/statements/create/user) using the following format: `mysql4<subdomain>_<username>` ([see above](#creating-multiple-mysql-users-in-clickhouse-cloud)). The password must be in double SHA1 format. For example:
+
+ ```sql
+ CREATE USER mysql4foobar_team1 IDENTIFIED WITH double_sha1_password BY 'YourPassword42$';
+ ```
+
+ or if you want to use a custom profile for this user:
+
+ ```sql
+ CREATE USER mysql4foobar_team1 IDENTIFIED WITH double_sha1_password BY 'YourPassword42$' SETTINGS PROFILE 'my_custom_profile';
+ ```
+
+ where `my_custom_profile` is the name of the profile you created earlier.
+3. [Grant](https://clickhouse.com/docs/en/sql-reference/statements/grant) the new user the necessary permissions to interact with the desired tables or databases. For example, if you want to grant access to `system.query_log` only:
+
+ ```sql
+ GRANT SELECT ON system.query_log TO mysql4foobar_team1;
+ ```
+
+4. Use the created user to connect to your ClickHouse Cloud service with the MySQL interface.
+
+### Troubleshooting multiple MySQL users in ClickHouse Cloud
+
+If you created a new MySQL user, and you see the following error while connecting via MySQL CLI client:
+
+```
+ERROR 2013 (HY000): Lost connection to MySQL server at 'reading authorization packet', system error: 54
+```
+
+In this case, ensure that the username follows the `mysql4<subdomain>_<username>` format, as described ([above](#creating-multiple-mysql-users-in-clickhouse-cloud)).
+
## Enabling the MySQL Interface On Self-managed ClickHouse
Add the [mysql_port](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-mysql_port) setting to your server's configuration file. For example, you could define the port in a new XML file in your `config.d/` [folder](../operations/configuration-files):
diff --git a/docs/en/operations/allocation-profiling.md b/docs/en/operations/allocation-profiling.md
index 64b4106a7e1..574e1ae2ff3 100644
--- a/docs/en/operations/allocation-profiling.md
+++ b/docs/en/operations/allocation-profiling.md
@@ -59,10 +59,10 @@ For that, we need to use `jemalloc`'s tool called [jeprof](https://github.com/je
If that’s the case, we recommend installing an [alternative implementation](https://github.com/gimli-rs/addr2line) of the tool.
```
-git clone https://github.com/gimli-rs/addr2line
+git clone https://github.com/gimli-rs/addr2line.git --depth=1 --branch=0.23.0
cd addr2line
-cargo b --examples -r
-cp ./target/release/examples/addr2line path/to/current/addr2line
+cargo build --features bin --release
+cp ./target/release/addr2line path/to/current/addr2line
```
:::
diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md
index f50dae0f1a2..db8157592db 100644
--- a/docs/en/operations/server-configuration-parameters/settings.md
+++ b/docs/en/operations/server-configuration-parameters/settings.md
@@ -591,6 +591,22 @@ Default value: 100000
400
```
+## max\_table\_num\_to\_throw {#max-table-num-to-throw}
+If the number of tables is greater than this value, the server will throw an exception. 0 means no limitation. Views, remote tables, dictionaries, and system tables are not counted. Only tables in the Atomic/Ordinary/Replicated/Lazy database engines are counted. Default value: 0
+
+**Example**
+```xml
+<max_table_num_to_throw>400</max_table_num_to_throw>
+```
+
+## max\_database\_num\_to\_throw {#max-database-num-to-throw}
+If the number of databases is greater than this value, the server will throw an exception. 0 means no limitation.
+Default value: 0
+
+**Example**
+```xml
+<max_database_num_to_throw>400</max_database_num_to_throw>
+```
## max_temporary_data_on_disk_size
@@ -938,6 +954,38 @@ Or it can be set in hex:
Everything mentioned above can be applied for `aes_256_gcm_siv` (but the key must be 32 bytes long).
+## error_log {#error_log}
+
+It is disabled by default.
+
+**Enabling**
+
+To manually turn on error history collection [`system.error_log`](../../operations/system-tables/error_log.md), create `/etc/clickhouse-server/config.d/error_log.xml` with the following content:
+
+``` xml
+<clickhouse>
+    <error_log>
+        <database>system</database>
+        <table>error_log</table>
+        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
+        <collect_interval_milliseconds>1000</collect_interval_milliseconds>
+        <max_size_rows>1048576</max_size_rows>
+        <reserved_size_rows>8192</reserved_size_rows>
+        <buffer_size_rows_flush_threshold>524288</buffer_size_rows_flush_threshold>
+        <flush_on_crash>false</flush_on_crash>
+    </error_log>
+</clickhouse>
+```
+
+**Disabling**
+
+To disable `error_log` setting, you should create the following file `/etc/clickhouse-server/config.d/disable_error_log.xml` with the following content:
+
+``` xml
+<clickhouse>
+    <error_log remove="1" />
+</clickhouse>
+```
## custom_settings_prefixes {#custom_settings_prefixes}
@@ -1415,6 +1463,9 @@ Keys:
- `size` – Size of the file. Applies to `log` and `errorlog`. Once the file reaches `size`, ClickHouse archives and renames it, and creates a new log file in its place.
- `count` – The number of archived log files that ClickHouse stores.
- `console` – Send `log` and `errorlog` to the console instead of file. To enable, set to `1` or `true`.
+- `console_log_level` – Logging level for console. Default to `level`.
+- `use_syslog` - Log to syslog as well.
+- `syslog_level` - Logging level for logging to syslog.
- `stream_compress` – Compress `log` and `errorlog` with `lz4` stream compression. To enable, set to `1` or `true`.
- `formatting` – Specify log format to be printed in console log (currently only `json` supported).
@@ -1901,7 +1952,7 @@ For more information, see the MergeTreeSettings.h header file.
## metric_log {#metric_log}
-It is enabled by default. If it`s not, you can do this manually.
+It is disabled by default.
**Enabling**
@@ -3084,3 +3135,21 @@ This setting is only necessary for the migration period and will become obsolete
Type: Bool
Default: 1
+
+## merge_workload {#merge_workload}
+
+Used to regulate how resources are utilized and shared between merges and other workloads. Specified value is used as `workload` setting value for all background merges. Can be overridden by a merge tree setting.
+
+Default value: "default"
+
+**See Also**
+- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
+
+## mutation_workload {#mutation_workload}
+
+Used to regulate how resources are utilized and shared between mutations and other workloads. Specified value is used as `workload` setting value for all background mutations. Can be overridden by a merge tree setting.
+
+Default value: "default"
+
+**See Also**
+- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md
index b45dc290797..9879ee35612 100644
--- a/docs/en/operations/settings/merge-tree-settings.md
+++ b/docs/en/operations/settings/merge-tree-settings.md
@@ -974,6 +974,24 @@ Default value: false
- [exclude_deleted_rows_for_part_size_in_merge](#exclude_deleted_rows_for_part_size_in_merge) setting
+## merge_workload
+
+Used to regulate how resources are utilized and shared between merges and other workloads. Specified value is used as `workload` setting value for background merges of this table. If not specified (empty string), then server setting `merge_workload` is used instead.
+
+Default value: an empty string
+
+**See Also**
+- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
+
+## mutation_workload
+
+Used to regulate how resources are utilized and shared between mutations and other workloads. Specified value is used as `workload` setting value for background mutations of this table. If not specified (empty string), then server setting `mutation_workload` is used instead.
+
+Default value: an empty string
+
+**See Also**
+- [Workload Scheduling](/docs/en/operations/workload-scheduling.md)
+
### optimize_row_order
Controls if the row order should be optimized during inserts to improve the compressability of the newly inserted table part.
diff --git a/docs/en/operations/settings/settings-formats.md b/docs/en/operations/settings/settings-formats.md
index 670c9c6cbf1..530023df5b7 100644
--- a/docs/en/operations/settings/settings-formats.md
+++ b/docs/en/operations/settings/settings-formats.md
@@ -1428,6 +1428,13 @@ Average block bytes output by parquet reader. Lowering the configuration in the
Default value: `65409 * 256 = 16744704`
+### output_format_parquet_write_page_index {#input_format_parquet_max_block_size}
+
+Adds a possibility to write the page index into parquet files. To enable this, set `output_format_parquet_use_custom_encoder` to `false` and
+`output_format_parquet_write_page_index` to `true`.
+
+Enabled by default.
+
## Hive format settings {#hive-format-settings}
### input_format_hive_text_fields_delimiter {#input_format_hive_text_fields_delimiter}
diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md
index 79d0ca4f151..3d6d776f4da 100644
--- a/docs/en/operations/settings/settings.md
+++ b/docs/en/operations/settings/settings.md
@@ -1592,19 +1592,19 @@ Default value: `default`.
## parallel_replicas_custom_key_range_lower {#parallel_replicas_custom_key_range_lower}
-Allows the filter type `range` to split the work evenly between replicas based on the custom range `[parallel_replicas_custom_key_range_lower, INT_MAX]`.
+Allows the filter type `range` to split the work evenly between replicas based on the custom range `[parallel_replicas_custom_key_range_lower, INT_MAX]`.
-When used in conjuction with [parallel_replicas_custom_key_range_upper](#parallel_replicas_custom_key_range_upper), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.
+When used in conjunction with [parallel_replicas_custom_key_range_upper](#parallel_replicas_custom_key_range_upper), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.
-Note: This setting will not cause any additional data to be filtered during query processing, rather it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing.
+Note: This setting will not cause any additional data to be filtered during query processing, rather it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing.
## parallel_replicas_custom_key_range_upper {#parallel_replicas_custom_key_range_upper}
Allows the filter type `range` to split the work evenly between replicas based on the custom range `[0, parallel_replicas_custom_key_range_upper]`. A value of 0 disables the upper bound, setting it the max value of the custom key expression.
-When used in conjuction with [parallel_replicas_custom_key_range_lower](#parallel_replicas_custom_key_range_lower), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.
+When used in conjunction with [parallel_replicas_custom_key_range_lower](#parallel_replicas_custom_key_range_lower), it lets the filter evenly split the work over replicas for the range `[parallel_replicas_custom_key_range_lower, parallel_replicas_custom_key_range_upper]`.
-Note: This setting will not cause any additional data to be filtered during query processing, rather it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing.
+Note: This setting will not cause any additional data to be filtered during query processing, rather it changes the points at which the range filter breaks up the range `[0, INT_MAX]` for parallel processing.
## allow_experimental_parallel_reading_from_replicas
@@ -3188,7 +3188,7 @@ Default value: `0`.
## lightweight_deletes_sync {#lightweight_deletes_sync}
-The same as 'mutation_sync', but controls only execution of lightweight deletes.
+The same as 'mutation_sync', but controls only execution of lightweight deletes.
Possible values:
@@ -5150,7 +5150,7 @@ Allows using statistic to optimize the order of [prewhere conditions](../../sql-
## analyze_index_with_space_filling_curves
-If a table has a space-filling curve in its index, e.g. `ORDER BY mortonEncode(x, y)`, and the query has conditions on its arguments, e.g. `x >= 10 AND x <= 20 AND y >= 20 AND y <= 30`, use the space-filling curve for index analysis.
+If a table has a space-filling curve in its index, e.g. `ORDER BY mortonEncode(x, y)` or `ORDER BY hilbertEncode(x, y)`, and the query has conditions on its arguments, e.g. `x >= 10 AND x <= 20 AND y >= 20 AND y <= 30`, use the space-filling curve for index analysis.
## query_plan_enable_optimizations {#query_plan_enable_optimizations}
@@ -5418,6 +5418,15 @@ When set to `false` than all attempts are made with identical timeouts.
Default value: `true`.
+## allow_deprecated_snowflake_conversion_functions {#allow_deprecated_snowflake_conversion_functions}
+
+Functions `snowflakeToDateTime`, `snowflakeToDateTime64`, `dateTimeToSnowflake`, and `dateTime64ToSnowflake` are deprecated and disabled by default.
+Please use functions `snowflakeIDToDateTime`, `snowflakeIDToDateTime64`, `dateTimeToSnowflakeID`, and `dateTime64ToSnowflakeID` instead.
+
+To re-enable the deprecated functions (e.g., during a transition period), please set this setting to `true`.
+
+Default value: `false`
+
## allow_experimental_variant_type {#allow_experimental_variant_type}
Allows creation of experimental [Variant](../../sql-reference/data-types/variant.md).
diff --git a/docs/en/operations/system-tables/error_log.md b/docs/en/operations/system-tables/error_log.md
new file mode 100644
index 00000000000..15edef58662
--- /dev/null
+++ b/docs/en/operations/system-tables/error_log.md
@@ -0,0 +1,39 @@
+---
+slug: /en/operations/system-tables/error_log
+---
+# error_log
+
+Contains history of error values from table `system.errors`, periodically flushed to disk.
+
+Columns:
+- `hostname` ([LowCardinality(String)](../../sql-reference/data-types/string.md)) — Hostname of the server executing the query.
+- `event_date` ([Date](../../sql-reference/data-types/date.md)) — Event date.
+- `event_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Event time.
+- `code` ([Int32](../../sql-reference/data-types/int-uint.md)) — Code number of the error.
+- `error` ([LowCardinality(String)](../../sql-reference/data-types/string.md)) - Name of the error.
+- `value` ([UInt64](../../sql-reference/data-types/int-uint.md)) — The number of times this error happened.
+- `remote` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Remote exception (i.e. received during one of the distributed queries).
+
+**Example**
+
+``` sql
+SELECT * FROM system.error_log LIMIT 1 FORMAT Vertical;
+```
+
+``` text
+Row 1:
+──────
+hostname: clickhouse.eu-central1.internal
+event_date: 2024-06-18
+event_time: 2024-06-18 07:32:39
+code: 999
+error: KEEPER_EXCEPTION
+value: 2
+remote: 0
+```
+
+**See also**
+
+- [error_log setting](../../operations/server-configuration-parameters/settings.md#error_log) — Enabling and disabling the setting.
+- [system.errors](../../operations/system-tables/errors.md) — Contains error codes with the number of times they have been triggered.
+- [Monitoring](../../operations/monitoring.md) — Base concepts of ClickHouse monitoring.
diff --git a/docs/en/operations/system-tables/query_log.md b/docs/en/operations/system-tables/query_log.md
index 75b855966a3..47094eec3f0 100644
--- a/docs/en/operations/system-tables/query_log.md
+++ b/docs/en/operations/system-tables/query_log.md
@@ -113,6 +113,8 @@ Columns:
- `used_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `functions`, which were used during query execution.
- `used_storages` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `storages`, which were used during query execution.
- `used_table_functions` ([Array(String)](../../sql-reference/data-types/array.md)) — Canonical names of `table functions`, which were used during query execution.
+- `used_privileges` ([Array(String)](../../sql-reference/data-types/array.md)) - Privileges which were successfully checked during query execution.
+- `missing_privileges` ([Array(String)](../../sql-reference/data-types/array.md)) - Privileges that are missing during query execution.
- `query_cache_usage` ([Enum8](../../sql-reference/data-types/enum.md)) — Usage of the [query cache](../query-cache.md) during query execution. Values:
- `'Unknown'` = Status unknown.
- `'None'` = The query result was neither written into nor read from the query cache.
@@ -194,6 +196,8 @@ used_formats: []
used_functions: []
used_storages: []
used_table_functions: []
+used_privileges: []
+missing_privileges: []
query_cache_usage: None
```
diff --git a/docs/en/operations/tips.md b/docs/en/operations/tips.md
index ed22679a3e6..df041f5885e 100644
--- a/docs/en/operations/tips.md
+++ b/docs/en/operations/tips.md
@@ -36,9 +36,24 @@ $ echo 0 | sudo tee /proc/sys/vm/overcommit_memory
Use `perf top` to watch the time spent in the kernel for memory management.
Permanent huge pages also do not need to be allocated.
-:::warning
-If your system has less than 16 GB of RAM, you may experience various memory exceptions because default settings do not match this amount of memory. The recommended amount of RAM is 32 GB or more. You can use ClickHouse in a system with a small amount of RAM, even with 2 GB of RAM, but it requires additional tuning and can ingest at a low rate.
-:::
+### Using less than 16GB of RAM
+
+The recommended amount of RAM is 32 GB or more.
+
+If your system has less than 16 GB of RAM, you may experience various memory exceptions because default settings do not match this amount of memory. You can use ClickHouse in a system with a small amount of RAM (as low as 2 GB), but these setups require additional tuning and can only ingest at a low rate.
+
+When using ClickHouse with less than 16GB of RAM, we recommend the following:
+
+- Lower the size of the mark cache in the `config.xml`. It can be set as low as 500 MB, but it cannot be set to zero.
+- Lower the number of query processing threads down to `1`.
+- Lower the `max_block_size` to `8192`. Values as low as `1024` can still be practical.
+- Lower `max_download_threads` to `1`.
+- Set `input_format_parallel_parsing` and `output_format_parallel_formatting` to `0`.
+
+Additional notes:
+- To flush the memory cached by the memory allocator, you can run the `SYSTEM JEMALLOC PURGE`
+command.
+- We do not recommend using S3 or Kafka integrations on low-memory machines because they require significant memory for buffers.
## Storage Subsystem {#storage-subsystem}
diff --git a/docs/en/operations/workload-scheduling.md b/docs/en/operations/workload-scheduling.md
index 24149099892..08629492ec6 100644
--- a/docs/en/operations/workload-scheduling.md
+++ b/docs/en/operations/workload-scheduling.md
@@ -47,6 +47,8 @@ Example:
Queries can be marked with setting `workload` to distinguish different workloads. If `workload` is not set, than value "default" is used. Note that you are able to specify the other value using settings profiles. Setting constraints can be used to make `workload` constant if you want all queries from the user to be marked with fixed value of `workload` setting.
+It is possible to assign a `workload` setting for background activities. Merges and mutations are using `merge_workload` and `mutation_workload` server settings correspondingly. These values can also be overridden for specific tables using `merge_workload` and `mutation_workload` merge tree settings.
+
Let's consider an example of a system with two different workloads: "production" and "development".
```sql
@@ -151,6 +153,9 @@ Example:
```
-
## See also
- [system.scheduler](/docs/en/operations/system-tables/scheduler.md)
+ - [merge_workload](/docs/en/operations/settings/merge-tree-settings.md#merge_workload) merge tree setting
+ - [merge_workload](/docs/en/operations/server-configuration-parameters/settings.md#merge_workload) global server setting
+ - [mutation_workload](/docs/en/operations/settings/merge-tree-settings.md#mutation_workload) merge tree setting
+ - [mutation_workload](/docs/en/operations/server-configuration-parameters/settings.md#mutation_workload) global server setting
diff --git a/docs/en/sql-reference/aggregate-functions/reference/analysis_of_variance.md b/docs/en/sql-reference/aggregate-functions/reference/analysis_of_variance.md
index d9b44b3ff07..56e54d3faf9 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/analysis_of_variance.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/analysis_of_variance.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/analysis_of_variance
-sidebar_position: 6
+sidebar_position: 101
---
# analysisOfVariance
diff --git a/docs/en/sql-reference/aggregate-functions/reference/any.md b/docs/en/sql-reference/aggregate-functions/reference/any.md
index f1b5a6683e5..cdff7dde4a9 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/any.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/any.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/any
-sidebar_position: 6
+sidebar_position: 102
---
# any
diff --git a/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md b/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md
index 9fbc21910f8..9c6e6b5fead 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/anyheavy.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/anyheavy
-sidebar_position: 103
+sidebar_position: 104
---
# anyHeavy
diff --git a/docs/en/sql-reference/aggregate-functions/reference/anylast.md b/docs/en/sql-reference/aggregate-functions/reference/anylast.md
index 8fcee2cf8e6..e43bc07fbdc 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/anylast.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/anylast.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/anylast
-sidebar_position: 104
+sidebar_position: 105
---
# anyLast
diff --git a/docs/en/sql-reference/aggregate-functions/reference/anylast_respect_nulls.md b/docs/en/sql-reference/aggregate-functions/reference/anylast_respect_nulls.md
index b6d0806f35d..8f093cfdb61 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/anylast_respect_nulls.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/anylast_respect_nulls.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/anylast_respect_nulls
-sidebar_position: 104
+sidebar_position: 106
---
# anyLast_respect_nulls
diff --git a/docs/en/sql-reference/aggregate-functions/reference/approxtopk.md b/docs/en/sql-reference/aggregate-functions/reference/approxtopk.md
index 2bb43a9f665..ea2083ebd04 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/approxtopk.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/approxtopk.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/approxtopk
-sidebar_position: 212
+sidebar_position: 107
---
# approx_top_k
diff --git a/docs/en/sql-reference/aggregate-functions/reference/approxtopsum.md b/docs/en/sql-reference/aggregate-functions/reference/approxtopsum.md
index aa884b26d8e..639142331f0 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/approxtopsum.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/approxtopsum.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/approxtopsum
-sidebar_position: 212
+sidebar_position: 108
---
# approx_top_sum
diff --git a/docs/en/sql-reference/aggregate-functions/reference/argmax.md b/docs/en/sql-reference/aggregate-functions/reference/argmax.md
index 2274dd4a5dc..8c6b2b532e8 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/argmax.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/argmax.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/argmax
-sidebar_position: 106
+sidebar_position: 109
---
# argMax
diff --git a/docs/en/sql-reference/aggregate-functions/reference/argmin.md b/docs/en/sql-reference/aggregate-functions/reference/argmin.md
index 297744fb1db..0ab21fe2b52 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/argmin.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/argmin.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/argmin
-sidebar_position: 105
+sidebar_position: 110
---
# argMin
diff --git a/docs/en/sql-reference/aggregate-functions/reference/arrayconcatagg.md b/docs/en/sql-reference/aggregate-functions/reference/arrayconcatagg.md
index 3c71129bdb5..c0ac0db33f3 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/arrayconcatagg.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/arrayconcatagg.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/array_concat_agg
-sidebar_position: 110
+sidebar_position: 111
---
# array_concat_agg
diff --git a/docs/en/sql-reference/aggregate-functions/reference/avg.md b/docs/en/sql-reference/aggregate-functions/reference/avg.md
index 5463d8a1874..7789c30bfe0 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/avg.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/avg.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/avg
-sidebar_position: 5
+sidebar_position: 112
---
# avg
diff --git a/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md b/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md
index 99d3bac763d..304d0407d98 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/avgweighted.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/avgweighted
-sidebar_position: 107
+sidebar_position: 113
---
# avgWeighted
diff --git a/docs/en/sql-reference/aggregate-functions/reference/boundrat.md b/docs/en/sql-reference/aggregate-functions/reference/boundrat.md
index f3907af8030..d253a250600 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/boundrat.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/boundrat.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/boundingRatio
-sidebar_position: 2
+sidebar_position: 114
title: boundingRatio
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md b/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md
index 57edb47950a..7983c3f2e60 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/categoricalinformationvalue.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/categoricalinformationvalue
-sidebar_position: 250
+sidebar_position: 115
title: categoricalInformationValue
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/contingency.md b/docs/en/sql-reference/aggregate-functions/reference/contingency.md
index 902c1f4af80..a49ff22febc 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/contingency.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/contingency.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/contingency
-sidebar_position: 350
+sidebar_position: 116
---
# contingency
diff --git a/docs/en/sql-reference/aggregate-functions/reference/corr.md b/docs/en/sql-reference/aggregate-functions/reference/corr.md
index 5681c942169..c43b4d3b25a 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/corr.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/corr.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/corr
-sidebar_position: 107
+sidebar_position: 117
---
# corr
diff --git a/docs/en/sql-reference/aggregate-functions/reference/corrmatrix.md b/docs/en/sql-reference/aggregate-functions/reference/corrmatrix.md
index 718477b28dd..96978863646 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/corrmatrix.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/corrmatrix.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/corrmatrix
-sidebar_position: 108
+sidebar_position: 118
---
# corrMatrix
diff --git a/docs/en/sql-reference/aggregate-functions/reference/corrstable.md b/docs/en/sql-reference/aggregate-functions/reference/corrstable.md
index b35442a32b6..979cf244245 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/corrstable.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/corrstable.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/corrstable
-sidebar_position: 107
+sidebar_position: 119
---
# corrStable
diff --git a/docs/en/sql-reference/aggregate-functions/reference/count.md b/docs/en/sql-reference/aggregate-functions/reference/count.md
index ca4067c8d8c..e6f2cdd6aa9 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/count.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/count.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/count
-sidebar_position: 1
+sidebar_position: 120
---
# count
diff --git a/docs/en/sql-reference/aggregate-functions/reference/covarpop.md b/docs/en/sql-reference/aggregate-functions/reference/covarpop.md
index 78b9f4cffea..7231f92b8fa 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/covarpop.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/covarpop.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/covarpop
-sidebar_position: 37
+sidebar_position: 121
---
# covarPop
diff --git a/docs/en/sql-reference/aggregate-functions/reference/covarpopmatrix.md b/docs/en/sql-reference/aggregate-functions/reference/covarpopmatrix.md
index d7400599a49..c8811b3811e 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/covarpopmatrix.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/covarpopmatrix.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/covarpopmatrix
-sidebar_position: 36
+sidebar_position: 122
---
# covarPopMatrix
diff --git a/docs/en/sql-reference/aggregate-functions/reference/covarpopstable.md b/docs/en/sql-reference/aggregate-functions/reference/covarpopstable.md
index 68e78fc3bd8..48e5368faac 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/covarpopstable.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/covarpopstable.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/covarpopstable
-sidebar_position: 36
+sidebar_position: 123
---
# covarPopStable
diff --git a/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md b/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md
index 7d5d5d13f35..92fe213b407 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/covarsamp.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/covarsamp
-sidebar_position: 37
+sidebar_position: 124
---
# covarSamp
diff --git a/docs/en/sql-reference/aggregate-functions/reference/covarsampmatrix.md b/docs/en/sql-reference/aggregate-functions/reference/covarsampmatrix.md
index b71d753f0be..1585c4a9970 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/covarsampmatrix.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/covarsampmatrix.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/covarsampmatrix
-sidebar_position: 38
+sidebar_position: 125
---
# covarSampMatrix
diff --git a/docs/en/sql-reference/aggregate-functions/reference/covarsampstable.md b/docs/en/sql-reference/aggregate-functions/reference/covarsampstable.md
index 3e6867b96d6..6764877768e 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/covarsampstable.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/covarsampstable.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/covarsampstable
-sidebar_position: 37
+sidebar_position: 126
---
# covarSampStable
diff --git a/docs/en/sql-reference/aggregate-functions/reference/cramersv.md b/docs/en/sql-reference/aggregate-functions/reference/cramersv.md
index 2424ff95237..db0e1c5eb4c 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/cramersv.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/cramersv.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/cramersv
-sidebar_position: 351
+sidebar_position: 127
---
# cramersV
diff --git a/docs/en/sql-reference/aggregate-functions/reference/cramersvbiascorrected.md b/docs/en/sql-reference/aggregate-functions/reference/cramersvbiascorrected.md
index 939c04e3fdc..2ff7ce489d3 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/cramersvbiascorrected.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/cramersvbiascorrected.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/cramersvbiascorrected
-sidebar_position: 352
+sidebar_position: 128
---
# cramersVBiasCorrected
diff --git a/docs/en/sql-reference/aggregate-functions/reference/deltasum.md b/docs/en/sql-reference/aggregate-functions/reference/deltasum.md
index 37d9d08cbdb..650135ecfeb 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/deltasum.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/deltasum.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/deltasum
-sidebar_position: 141
+sidebar_position: 129
---
# deltaSum
diff --git a/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md b/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md
index c51d86389b0..ec5cfa5fecc 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/deltasumtimestamp.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/deltasumtimestamp
-sidebar_position: 141
+sidebar_position: 130
title: deltaSumTimestamp
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/entropy.md b/docs/en/sql-reference/aggregate-functions/reference/entropy.md
index fc8d627ecab..7970cdd268b 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/entropy.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/entropy.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/entropy
-sidebar_position: 302
+sidebar_position: 131
---
# entropy
diff --git a/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md b/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md
index 75041ace7a3..3086a48f819 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/exponentialmovingaverage.md
@@ -1,7 +1,7 @@
---
-slug: /en/sql-reference/aggregate-functions/reference/exponentialmovingaverage
-sidebar_position: 108
-sidebar_title: exponentialMovingAverage
+slug: /en/sql-reference/aggregate-functions/reference/exponentialMovingAverage
+sidebar_position: 132
+title: exponentialMovingAverage
---
## exponentialMovingAverage
@@ -96,56 +96,56 @@ Result:
``` text
┌─value─┬─time─┬─round(exp_smooth, 3)─┬─bar────────────────────────────────────────┐
-│ 1 │ 0 │ 0.067 │ ███▎ │
+│ 1 │ 0 │ 0.067 │ ███▎ │
│ 0 │ 1 │ 0.062 │ ███ │
-│ 0 │ 2 │ 0.058 │ ██▊ │
-│ 0 │ 3 │ 0.054 │ ██▋ │
+│ 0 │ 2 │ 0.058 │ ██▊ │
+│ 0 │ 3 │ 0.054 │ ██▋ │
│ 0 │ 4 │ 0.051 │ ██▌ │
-│ 0 │ 5 │ 0.047 │ ██▎ │
-│ 0 │ 6 │ 0.044 │ ██▏ │
+│ 0 │ 5 │ 0.047 │ ██▎ │
+│ 0 │ 6 │ 0.044 │ ██▏ │
│ 0 │ 7 │ 0.041 │ ██ │
-│ 0 │ 8 │ 0.038 │ █▊ │
-│ 0 │ 9 │ 0.036 │ █▋ │
-│ 0 │ 10 │ 0.033 │ █▋ │
+│ 0 │ 8 │ 0.038 │ █▊ │
+│ 0 │ 9 │ 0.036 │ █▋ │
+│ 0 │ 10 │ 0.033 │ █▋ │
│ 0 │ 11 │ 0.031 │ █▌ │
-│ 0 │ 12 │ 0.029 │ █▍ │
-│ 0 │ 13 │ 0.027 │ █▎ │
-│ 0 │ 14 │ 0.025 │ █▎ │
-│ 0 │ 15 │ 0.024 │ █▏ │
+│ 0 │ 12 │ 0.029 │ █▍ │
+│ 0 │ 13 │ 0.027 │ █▎ │
+│ 0 │ 14 │ 0.025 │ █▎ │
+│ 0 │ 15 │ 0.024 │ █▏ │
│ 0 │ 16 │ 0.022 │ █ │
│ 0 │ 17 │ 0.021 │ █ │
-│ 0 │ 18 │ 0.019 │ ▊ │
-│ 0 │ 19 │ 0.018 │ ▊ │
-│ 0 │ 20 │ 0.017 │ ▋ │
-│ 0 │ 21 │ 0.016 │ ▋ │
-│ 0 │ 22 │ 0.015 │ ▋ │
-│ 0 │ 23 │ 0.014 │ ▋ │
-│ 0 │ 24 │ 0.013 │ ▋ │
-│ 1 │ 25 │ 0.079 │ ███▊ │
+│ 0 │ 18 │ 0.019 │ ▊ │
+│ 0 │ 19 │ 0.018 │ ▊ │
+│ 0 │ 20 │ 0.017 │ ▋ │
+│ 0 │ 21 │ 0.016 │ ▋ │
+│ 0 │ 22 │ 0.015 │ ▋ │
+│ 0 │ 23 │ 0.014 │ ▋ │
+│ 0 │ 24 │ 0.013 │ ▋ │
+│ 1 │ 25 │ 0.079 │ ███▊ │
│ 1 │ 26 │ 0.14 │ ███████ │
-│ 1 │ 27 │ 0.198 │ █████████▊ │
+│ 1 │ 27 │ 0.198 │ █████████▊ │
│ 1 │ 28 │ 0.252 │ ████████████▌ │
│ 1 │ 29 │ 0.302 │ ███████████████ │
-│ 1 │ 30 │ 0.349 │ █████████████████▍ │
+│ 1 │ 30 │ 0.349 │ █████████████████▍ │
│ 1 │ 31 │ 0.392 │ ███████████████████▌ │
-│ 1 │ 32 │ 0.433 │ █████████████████████▋ │
+│ 1 │ 32 │ 0.433 │ █████████████████████▋ │
│ 1 │ 33 │ 0.471 │ ███████████████████████▌ │
-│ 1 │ 34 │ 0.506 │ █████████████████████████▎ │
-│ 1 │ 35 │ 0.539 │ ██████████████████████████▊ │
+│ 1 │ 34 │ 0.506 │ █████████████████████████▎ │
+│ 1 │ 35 │ 0.539 │ ██████████████████████████▊ │
│ 1 │ 36 │ 0.57 │ ████████████████████████████▌ │
-│ 1 │ 37 │ 0.599 │ █████████████████████████████▊ │
-│ 1 │ 38 │ 0.626 │ ███████████████████████████████▎ │
+│ 1 │ 37 │ 0.599 │ █████████████████████████████▊ │
+│ 1 │ 38 │ 0.626 │ ███████████████████████████████▎ │
│ 1 │ 39 │ 0.651 │ ████████████████████████████████▌ │
-│ 1 │ 40 │ 0.674 │ █████████████████████████████████▋ │
-│ 1 │ 41 │ 0.696 │ ██████████████████████████████████▋ │
-│ 1 │ 42 │ 0.716 │ ███████████████████████████████████▋ │
-│ 1 │ 43 │ 0.735 │ ████████████████████████████████████▋ │
-│ 1 │ 44 │ 0.753 │ █████████████████████████████████████▋ │
-│ 1 │ 45 │ 0.77 │ ██████████████████████████████████████▍ │
-│ 1 │ 46 │ 0.785 │ ███████████████████████████████████████▎ │
-│ 1 │ 47 │ 0.8 │ ███████████████████████████████████████▊ │
-│ 1 │ 48 │ 0.813 │ ████████████████████████████████████████▋ │
-│ 1 │ 49 │ 0.825 │ █████████████████████████████████████████▎│
+│ 1 │ 40 │ 0.674 │ █████████████████████████████████▋ │
+│ 1 │ 41 │ 0.696 │ ██████████████████████████████████▋ │
+│ 1 │ 42 │ 0.716 │ ███████████████████████████████████▋ │
+│ 1 │ 43 │ 0.735 │ ████████████████████████████████████▋ │
+│ 1 │ 44 │ 0.753 │ █████████████████████████████████████▋ │
+│ 1 │ 45 │ 0.77 │ ██████████████████████████████████████▍ │
+│ 1 │ 46 │ 0.785 │ ███████████████████████████████████████▎ │
+│ 1 │ 47 │ 0.8 │ ███████████████████████████████████████▊ │
+│ 1 │ 48 │ 0.813 │ ████████████████████████████████████████▋ │
+│ 1 │ 49 │ 0.825 │ █████████████████████████████████████████▎ │
└───────┴──────┴──────────────────────┴────────────────────────────────────────────┘
```
diff --git a/docs/en/sql-reference/aggregate-functions/reference/exponentialtimedecayedavg.md b/docs/en/sql-reference/aggregate-functions/reference/exponentialtimedecayedavg.md
new file mode 100644
index 00000000000..c729552749a
--- /dev/null
+++ b/docs/en/sql-reference/aggregate-functions/reference/exponentialtimedecayedavg.md
@@ -0,0 +1,105 @@
+---
+slug: /en/sql-reference/aggregate-functions/reference/exponentialTimeDecayedAvg
+sidebar_position: 133
+title: exponentialTimeDecayedAvg
+---
+
+## exponentialTimeDecayedAvg
+
+Returns the exponentially smoothed weighted moving average of values of a time series at point `t` in time.
+
+**Syntax**
+
+```sql
+exponentialTimeDecayedAvg(x)(v, t)
+```
+
+**Arguments**
+
+- `v` — Value. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
+- `t` — Time. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md), [DateTime](../../data-types/datetime.md), [DateTime64](../../data-types/datetime64.md).
+
+**Parameters**
+
+- `x` — Half-life period. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
+
+**Returned values**
+
+- Returns an exponentially smoothed weighted moving average at index `t` in time. [Float64](../../data-types/float.md).
+
+**Examples**
+
+Query:
+
+```sql
+SELECT
+ value,
+ time,
+ round(exp_smooth, 3),
+ bar(exp_smooth, 0, 5, 50) AS bar
+FROM
+ (
+ SELECT
+ (number = 0) OR (number >= 25) AS value,
+ number AS time,
+ exponentialTimeDecayedAvg(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth
+ FROM numbers(50)
+ );
+```
+
+Result:
+
+```response
+ ┌─value─┬─time─┬─round(exp_smooth, 3)─┬─bar────────┐
+ 1. │     1 │    0 │                    1 │ ██████████ │
+ 2. │     0 │    1 │                0.475 │ ████▊      │
+ 3. │     0 │    2 │                0.301 │ ███        │
+ 4. │     0 │    3 │                0.214 │ ██▏        │
+ 5. │     0 │    4 │                0.162 │ █▌         │
+ 6. │     0 │    5 │                0.128 │ █▎         │
+ 7. │     0 │    6 │                0.104 │ █          │
+ 8. │     0 │    7 │                0.086 │ ▊          │
+ 9. │     0 │    8 │                0.072 │ ▋          │
+10. │     0 │    9 │                0.061 │ ▌          │
+11. │     0 │   10 │                0.052 │ ▌          │
+12. │     0 │   11 │                0.045 │ ▍          │
+13. │     0 │   12 │                0.039 │ ▍          │
+14. │     0 │   13 │                0.034 │ ▎          │
+15. │     0 │   14 │                 0.03 │ ▎          │
+16. │     0 │   15 │                0.027 │ ▎          │
+17. │     0 │   16 │                0.024 │ ▏          │
+18. │     0 │   17 │                0.021 │ ▏          │
+19. │     0 │   18 │                0.018 │ ▏          │
+20. │     0 │   19 │                0.016 │ ▏          │
+21. │     0 │   20 │                0.015 │ ▏          │
+22. │     0 │   21 │                0.013 │ ▏          │
+23. │     0 │   22 │                0.012 │            │
+24. │     0 │   23 │                 0.01 │            │
+25. │     0 │   24 │                0.009 │            │
+26. │     1 │   25 │                0.111 │ █          │
+27. │     1 │   26 │                0.202 │ ██         │
+28. │     1 │   27 │                0.283 │ ██▊        │
+29. │     1 │   28 │                0.355 │ ███▌       │
+30. │     1 │   29 │                 0.42 │ ████▏      │
+31. │     1 │   30 │                0.477 │ ████▊      │
+32. │     1 │   31 │                0.529 │ █████▎     │
+33. │     1 │   32 │                0.576 │ █████▊     │
+34. │     1 │   33 │                0.618 │ ██████▏    │
+35. │     1 │   34 │                0.655 │ ██████▌    │
+36. │     1 │   35 │                0.689 │ ██████▉    │
+37. │     1 │   36 │                0.719 │ ███████▏   │
+38. │     1 │   37 │                0.747 │ ███████▍   │
+39. │     1 │   38 │                0.771 │ ███████▋   │
+40. │     1 │   39 │                0.793 │ ███████▉   │
+41. │     1 │   40 │                0.813 │ ████████▏  │
+42. │     1 │   41 │                0.831 │ ████████▎  │
+43. │     1 │   42 │                0.848 │ ████████▍  │
+44. │     1 │   43 │                0.862 │ ████████▌  │
+45. │     1 │   44 │                0.876 │ ████████▊  │
+46. │     1 │   45 │                0.888 │ ████████▉  │
+47. │     1 │   46 │                0.898 │ ████████▉  │
+48. │     1 │   47 │                0.908 │ █████████  │
+49. │     1 │   48 │                0.917 │ █████████▏ │
+50. │     1 │   49 │                0.925 │ █████████▏ │
+ └───────┴──────┴──────────────────────┴────────────┘
+```
\ No newline at end of file
diff --git a/docs/en/sql-reference/aggregate-functions/reference/exponentialtimedecayedcount.md b/docs/en/sql-reference/aggregate-functions/reference/exponentialtimedecayedcount.md
new file mode 100644
index 00000000000..b73d6c2503d
--- /dev/null
+++ b/docs/en/sql-reference/aggregate-functions/reference/exponentialtimedecayedcount.md
@@ -0,0 +1,104 @@
+---
+slug: /en/sql-reference/aggregate-functions/reference/exponentialTimeDecayedCount
+sidebar_position: 134
+title: exponentialTimeDecayedCount
+---
+
+## exponentialTimeDecayedCount
+
+Returns the cumulative exponential decay over a time series at the index `t` in time.
+
+**Syntax**
+
+```sql
+exponentialTimeDecayedCount(x)(t)
+```
+
+**Arguments**
+
+- `t` — Time. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md), [DateTime](../../data-types/datetime.md), [DateTime64](../../data-types/datetime64.md).
+
+**Parameters**
+
+- `x` — Half-life period. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
+
+**Returned values**
+
+- Returns the cumulative exponential decay at the given point in time. [Float64](../../data-types/float.md).
+
+**Example**
+
+Query:
+
+```sql
+SELECT
+ value,
+ time,
+ round(exp_smooth, 3),
+ bar(exp_smooth, 0, 20, 50) AS bar
+FROM
+(
+ SELECT
+ (number % 5) = 0 AS value,
+ number AS time,
+ exponentialTimeDecayedCount(10)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth
+ FROM numbers(50)
+);
+```
+
+Result:
+
+```response
+ ┌─value─┬─time─┬─round(exp_smooth, 3)─┬─bar────────────────────────┐
+ 1. │ 1 │ 0 │ 1 │ ██▌ │
+ 2. │ 0 │ 1 │ 1.905 │ ████▊ │
+ 3. │ 0 │ 2 │ 2.724 │ ██████▊ │
+ 4. │ 0 │ 3 │ 3.464 │ ████████▋ │
+ 5. │ 0 │ 4 │ 4.135 │ ██████████▎ │
+ 6. │ 1 │ 5 │ 4.741 │ ███████████▊ │
+ 7. │ 0 │ 6 │ 5.29 │ █████████████▏ │
+ 8. │ 0 │ 7 │ 5.787 │ ██████████████▍ │
+ 9. │ 0 │ 8 │ 6.236 │ ███████████████▌ │
+10. │ 0 │ 9 │ 6.643 │ ████████████████▌ │
+11. │ 1 │ 10 │ 7.01 │ █████████████████▌ │
+12. │ 0 │ 11 │ 7.343 │ ██████████████████▎ │
+13. │ 0 │ 12 │ 7.644 │ ███████████████████ │
+14. │ 0 │ 13 │ 7.917 │ ███████████████████▊ │
+15. │ 0 │ 14 │ 8.164 │ ████████████████████▍ │
+16. │ 1 │ 15 │ 8.387 │ ████████████████████▉ │
+17. │ 0 │ 16 │ 8.589 │ █████████████████████▍ │
+18. │ 0 │ 17 │ 8.771 │ █████████████████████▉ │
+19. │ 0 │ 18 │ 8.937 │ ██████████████████████▎ │
+20. │ 0 │ 19 │ 9.086 │ ██████████████████████▋ │
+21. │ 1 │ 20 │ 9.222 │ ███████████████████████ │
+22. │ 0 │ 21 │ 9.344 │ ███████████████████████▎ │
+23. │ 0 │ 22 │ 9.455 │ ███████████████████████▋ │
+24. │ 0 │ 23 │ 9.555 │ ███████████████████████▉ │
+25. │ 0 │ 24 │ 9.646 │ ████████████████████████ │
+26. │ 1 │ 25 │ 9.728 │ ████████████████████████▎ │
+27. │ 0 │ 26 │ 9.802 │ ████████████████████████▌ │
+28. │ 0 │ 27 │ 9.869 │ ████████████████████████▋ │
+29. │ 0 │ 28 │ 9.93 │ ████████████████████████▊ │
+30. │ 0 │ 29 │ 9.985 │ ████████████████████████▉ │
+31. │ 1 │ 30 │ 10.035 │ █████████████████████████ │
+32. │ 0 │ 31 │ 10.08 │ █████████████████████████▏ │
+33. │ 0 │ 32 │ 10.121 │ █████████████████████████▎ │
+34. │ 0 │ 33 │ 10.158 │ █████████████████████████▍ │
+35. │ 0 │ 34 │ 10.191 │ █████████████████████████▍ │
+36. │ 1 │ 35 │ 10.221 │ █████████████████████████▌ │
+37. │ 0 │ 36 │ 10.249 │ █████████████████████████▌ │
+38. │ 0 │ 37 │ 10.273 │ █████████████████████████▋ │
+39. │ 0 │ 38 │ 10.296 │ █████████████████████████▋ │
+40. │ 0 │ 39 │ 10.316 │ █████████████████████████▊ │
+41. │ 1 │ 40 │ 10.334 │ █████████████████████████▊ │
+42. │ 0 │ 41 │ 10.351 │ █████████████████████████▉ │
+43. │ 0 │ 42 │ 10.366 │ █████████████████████████▉ │
+44. │ 0 │ 43 │ 10.379 │ █████████████████████████▉ │
+45. │ 0 │ 44 │ 10.392 │ █████████████████████████▉ │
+46. │ 1 │ 45 │ 10.403 │ ██████████████████████████ │
+47. │ 0 │ 46 │ 10.413 │ ██████████████████████████ │
+48. │ 0 │ 47 │ 10.422 │ ██████████████████████████ │
+49. │ 0 │ 48 │ 10.43 │ ██████████████████████████ │
+50. │ 0 │ 49 │ 10.438 │ ██████████████████████████ │
+ └───────┴──────┴──────────────────────┴────────────────────────────┘
+```
diff --git a/docs/en/sql-reference/aggregate-functions/reference/exponentialtimedecayedmax.md b/docs/en/sql-reference/aggregate-functions/reference/exponentialtimedecayedmax.md
new file mode 100644
index 00000000000..06dc5313904
--- /dev/null
+++ b/docs/en/sql-reference/aggregate-functions/reference/exponentialtimedecayedmax.md
@@ -0,0 +1,105 @@
+---
+slug: /en/sql-reference/aggregate-functions/reference/exponentialTimeDecayedMax
+sidebar_position: 135
+title: exponentialTimeDecayedMax
+---
+
+## exponentialTimeDecayedMax
+
+Returns the maximum of the computed exponentially smoothed moving average at index `t` in time with that at `t-1`.
+
+**Syntax**
+
+```sql
+exponentialTimeDecayedMax(x)(value, timeunit)
+```
+
+**Arguments**
+
+- `value` — Value. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
+- `timeunit` — Timeunit. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md), [DateTime](../../data-types/datetime.md), [DateTime64](../../data-types/datetime64.md).
+
+**Parameters**
+
+- `x` — Half-life period. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
+
+**Returned values**
+
+- Returns the maximum of the exponentially smoothed weighted moving average at `t` and `t-1`. [Float64](../../data-types/float.md).
+
+**Example**
+
+Query:
+
+```sql
+SELECT
+ value,
+ time,
+ round(exp_smooth, 3),
+ bar(exp_smooth, 0, 5, 50) AS bar
+FROM
+ (
+ SELECT
+ (number = 0) OR (number >= 25) AS value,
+ number AS time,
+ exponentialTimeDecayedMax(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth
+ FROM numbers(50)
+ );
+```
+
+Result:
+
+```response
+ ┌─value─┬─time─┬─round(exp_smooth, 3)─┬─bar────────┐
+ 1. │ 1 │ 0 │ 1 │ ██████████ │
+ 2. │ 0 │ 1 │ 0.905 │ █████████ │
+ 3. │ 0 │ 2 │ 0.819 │ ████████▏ │
+ 4. │ 0 │ 3 │ 0.741 │ ███████▍ │
+ 5. │ 0 │ 4 │ 0.67 │ ██████▋ │
+ 6. │ 0 │ 5 │ 0.607 │ ██████ │
+ 7. │ 0 │ 6 │ 0.549 │ █████▍ │
+ 8. │ 0 │ 7 │ 0.497 │ ████▉ │
+ 9. │ 0 │ 8 │ 0.449 │ ████▍ │
+10. │ 0 │ 9 │ 0.407 │ ████ │
+11. │ 0 │ 10 │ 0.368 │ ███▋ │
+12. │ 0 │ 11 │ 0.333 │ ███▎ │
+13. │ 0 │ 12 │ 0.301 │ ███ │
+14. │ 0 │ 13 │ 0.273 │ ██▋ │
+15. │ 0 │ 14 │ 0.247 │ ██▍ │
+16. │ 0 │ 15 │ 0.223 │ ██▏ │
+17. │ 0 │ 16 │ 0.202 │ ██ │
+18. │ 0 │ 17 │ 0.183 │ █▊ │
+19. │ 0 │ 18 │ 0.165 │ █▋ │
+20. │ 0 │ 19 │ 0.15 │ █▍ │
+21. │ 0 │ 20 │ 0.135 │ █▎ │
+22. │ 0 │ 21 │ 0.122 │ █▏ │
+23. │ 0 │ 22 │ 0.111 │ █ │
+24. │ 0 │ 23 │ 0.1 │ █ │
+25. │ 0 │ 24 │ 0.091 │ ▉ │
+26. │ 1 │ 25 │ 1 │ ██████████ │
+27. │ 1 │ 26 │ 1 │ ██████████ │
+28. │ 1 │ 27 │ 1 │ ██████████ │
+29. │ 1 │ 28 │ 1 │ ██████████ │
+30. │ 1 │ 29 │ 1 │ ██████████ │
+31. │ 1 │ 30 │ 1 │ ██████████ │
+32. │ 1 │ 31 │ 1 │ ██████████ │
+33. │ 1 │ 32 │ 1 │ ██████████ │
+34. │ 1 │ 33 │ 1 │ ██████████ │
+35. │ 1 │ 34 │ 1 │ ██████████ │
+36. │ 1 │ 35 │ 1 │ ██████████ │
+37. │ 1 │ 36 │ 1 │ ██████████ │
+38. │ 1 │ 37 │ 1 │ ██████████ │
+39. │ 1 │ 38 │ 1 │ ██████████ │
+40. │ 1 │ 39 │ 1 │ ██████████ │
+41. │ 1 │ 40 │ 1 │ ██████████ │
+42. │ 1 │ 41 │ 1 │ ██████████ │
+43. │ 1 │ 42 │ 1 │ ██████████ │
+44. │ 1 │ 43 │ 1 │ ██████████ │
+45. │ 1 │ 44 │ 1 │ ██████████ │
+46. │ 1 │ 45 │ 1 │ ██████████ │
+47. │ 1 │ 46 │ 1 │ ██████████ │
+48. │ 1 │ 47 │ 1 │ ██████████ │
+49. │ 1 │ 48 │ 1 │ ██████████ │
+50. │ 1 │ 49 │ 1 │ ██████████ │
+ └───────┴──────┴──────────────────────┴────────────┘
+```
\ No newline at end of file
diff --git a/docs/en/sql-reference/aggregate-functions/reference/exponentialtimedecayedsum.md b/docs/en/sql-reference/aggregate-functions/reference/exponentialtimedecayedsum.md
new file mode 100644
index 00000000000..617cd265dac
--- /dev/null
+++ b/docs/en/sql-reference/aggregate-functions/reference/exponentialtimedecayedsum.md
@@ -0,0 +1,105 @@
+---
+slug: /en/sql-reference/aggregate-functions/reference/exponentialTimeDecayedSum
+sidebar_position: 136
+title: exponentialTimeDecayedSum
+---
+
+## exponentialTimeDecayedSum
+
+Returns the sum of exponentially smoothed moving average values of a time series at the index `t` in time.
+
+**Syntax**
+
+```sql
+exponentialTimeDecayedSum(x)(v, t)
+```
+
+**Arguments**
+
+- `v` — Value. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
+- `t` — Time. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md), [DateTime](../../data-types/datetime.md), [DateTime64](../../data-types/datetime64.md).
+
+**Parameters**
+
+- `x` — Half-life period. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md).
+
+**Returned values**
+
+- Returns the sum of exponentially smoothed moving average values at the given point in time. [Float64](../../data-types/float.md).
+
+**Example**
+
+Query:
+
+```sql
+SELECT
+ value,
+ time,
+ round(exp_smooth, 3),
+ bar(exp_smooth, 0, 10, 50) AS bar
+FROM
+ (
+ SELECT
+ (number = 0) OR (number >= 25) AS value,
+ number AS time,
+ exponentialTimeDecayedSum(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth
+ FROM numbers(50)
+ );
+```
+
+Result:
+
+```response
+ ┌─value─┬─time─┬─round(exp_smooth, 3)─┬─bar───────────────────────────────────────────────┐
+ 1. │ 1 │ 0 │ 1 │ █████ │
+ 2. │ 0 │ 1 │ 0.905 │ ████▌ │
+ 3. │ 0 │ 2 │ 0.819 │ ████ │
+ 4. │ 0 │ 3 │ 0.741 │ ███▋ │
+ 5. │ 0 │ 4 │ 0.67 │ ███▎ │
+ 6. │ 0 │ 5 │ 0.607 │ ███ │
+ 7. │ 0 │ 6 │ 0.549 │ ██▋ │
+ 8. │ 0 │ 7 │ 0.497 │ ██▍ │
+ 9. │ 0 │ 8 │ 0.449 │ ██▏ │
+10. │ 0 │ 9 │ 0.407 │ ██ │
+11. │ 0 │ 10 │ 0.368 │ █▊ │
+12. │ 0 │ 11 │ 0.333 │ █▋ │
+13. │ 0 │ 12 │ 0.301 │ █▌ │
+14. │ 0 │ 13 │ 0.273 │ █▎ │
+15. │ 0 │ 14 │ 0.247 │ █▏ │
+16. │ 0 │ 15 │ 0.223 │ █ │
+17. │ 0 │ 16 │ 0.202 │ █ │
+18. │ 0 │ 17 │ 0.183 │ ▉ │
+19. │ 0 │ 18 │ 0.165 │ ▊ │
+20. │ 0 │ 19 │ 0.15 │ ▋ │
+21. │ 0 │ 20 │ 0.135 │ ▋ │
+22. │ 0 │ 21 │ 0.122 │ ▌ │
+23. │ 0 │ 22 │ 0.111 │ ▌ │
+24. │ 0 │ 23 │ 0.1 │ ▌ │
+25. │ 0 │ 24 │ 0.091 │ ▍ │
+26. │ 1 │ 25 │ 1.082 │ █████▍ │
+27. │ 1 │ 26 │ 1.979 │ █████████▉ │
+28. │ 1 │ 27 │ 2.791 │ █████████████▉ │
+29. │ 1 │ 28 │ 3.525 │ █████████████████▋ │
+30. │ 1 │ 29 │ 4.19 │ ████████████████████▉ │
+31. │ 1 │ 30 │ 4.791 │ ███████████████████████▉ │
+32. │ 1 │ 31 │ 5.335 │ ██████████████████████████▋ │
+33. │ 1 │ 32 │ 5.827 │ █████████████████████████████▏ │
+34. │ 1 │ 33 │ 6.273 │ ███████████████████████████████▎ │
+35. │ 1 │ 34 │ 6.676 │ █████████████████████████████████▍ │
+36. │ 1 │ 35 │ 7.041 │ ███████████████████████████████████▏ │
+37. │ 1 │ 36 │ 7.371 │ ████████████████████████████████████▊ │
+38. │ 1 │ 37 │ 7.669 │ ██████████████████████████████████████▎ │
+39. │ 1 │ 38 │ 7.939 │ ███████████████████████████████████████▋ │
+40. │ 1 │ 39 │ 8.184 │ ████████████████████████████████████████▉ │
+41. │ 1 │ 40 │ 8.405 │ ██████████████████████████████████████████ │
+42. │ 1 │ 41 │ 8.605 │ ███████████████████████████████████████████ │
+43. │ 1 │ 42 │ 8.786 │ ███████████████████████████████████████████▉ │
+44. │ 1 │ 43 │ 8.95 │ ████████████████████████████████████████████▊ │
+45. │ 1 │ 44 │ 9.098 │ █████████████████████████████████████████████▍ │
+46. │ 1 │ 45 │ 9.233 │ ██████████████████████████████████████████████▏ │
+47. │ 1 │ 46 │ 9.354 │ ██████████████████████████████████████████████▊ │
+48. │ 1 │ 47 │ 9.464 │ ███████████████████████████████████████████████▎ │
+49. │ 1 │ 48 │ 9.563 │ ███████████████████████████████████████████████▊ │
+50. │ 1 │ 49 │ 9.653 │ ████████████████████████████████████████████████▎ │
+ └───────┴──────┴──────────────────────┴───────────────────────────────────────────────────┘
+```
\ No newline at end of file
diff --git a/docs/en/sql-reference/aggregate-functions/reference/first_value.md b/docs/en/sql-reference/aggregate-functions/reference/first_value.md
index 0c26b66c64a..2cd0e1fa16f 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/first_value.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/first_value.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/first_value
-sidebar_position: 7
+sidebar_position: 137
---
# first_value
diff --git a/docs/en/sql-reference/aggregate-functions/reference/flame_graph.md b/docs/en/sql-reference/aggregate-functions/reference/flame_graph.md
index ae17153085c..4abb3e03226 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/flame_graph.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/flame_graph.md
@@ -1,6 +1,6 @@
---
-slug: /en/sql-reference/aggregate-functions/reference/flamegraph
-sidebar_position: 110
+slug: /en/sql-reference/aggregate-functions/reference/flame_graph
+sidebar_position: 138
---
# flameGraph
diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparray.md b/docs/en/sql-reference/aggregate-functions/reference/grouparray.md
index a38e35a72ad..1a87e3aeba9 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/grouparray.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/grouparray.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/grouparray
-sidebar_position: 110
+sidebar_position: 139
---
# groupArray
diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md b/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md
index d745e8a0e7a..c6b23c2f808 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/grouparrayinsertat.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/grouparrayinsertat
-sidebar_position: 112
+sidebar_position: 140
---
# groupArrayInsertAt
diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparrayintersect.md b/docs/en/sql-reference/aggregate-functions/reference/grouparrayintersect.md
index 5cac88be073..a370f595923 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/grouparrayintersect.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/grouparrayintersect.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/grouparrayintersect
-sidebar_position: 115
+sidebar_position: 141
---
# groupArrayIntersect
diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparraylast.md b/docs/en/sql-reference/aggregate-functions/reference/grouparraylast.md
index 9b48ee54ecd..ff62dcdde9b 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/grouparraylast.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/grouparraylast.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/grouparraylast
-sidebar_position: 110
+sidebar_position: 142
---
# groupArrayLast
diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md b/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md
index 32c0608afeb..6b6c4830535 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingavg.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/grouparraymovingavg
-sidebar_position: 114
+sidebar_position: 143
---
# groupArrayMovingAvg
diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md b/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md
index 6f2a60dd080..d1fa6fce9b0 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/grouparraymovingsum.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/grouparraymovingsum
-sidebar_position: 113
+sidebar_position: 144
---
# groupArrayMovingSum
diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparraysample.md b/docs/en/sql-reference/aggregate-functions/reference/grouparraysample.md
index 393087161df..38ddae48ee7 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/grouparraysample.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/grouparraysample.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/grouparraysample
-sidebar_position: 114
+sidebar_position: 145
---
# groupArraySample
diff --git a/docs/en/sql-reference/aggregate-functions/reference/grouparraysorted.md b/docs/en/sql-reference/aggregate-functions/reference/grouparraysorted.md
index 9bee0c29e7a..22a150bb8fb 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/grouparraysorted.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/grouparraysorted.md
@@ -1,6 +1,7 @@
- ---
- toc_priority: 112
- ---
+---
+slug: /en/sql-reference/aggregate-functions/reference/grouparraysorted
+sidebar_position: 146
+---
# groupArraySorted {#groupArraySorted}
diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md
index 3d833555a43..eee383d84e9 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitand.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupbitand
-sidebar_position: 125
+sidebar_position: 147
---
# groupBitAnd
diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md
index 02b9e0e8821..23b686e29b2 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmap.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupbitmap
-sidebar_position: 128
+sidebar_position: 148
---
# groupBitmap
diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md
index 1e649645e75..77bbf7d3d2c 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapand.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupbitmapand
-sidebar_position: 129
+sidebar_position: 149
title: groupBitmapAnd
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md
index c88c80ceff2..7bb3dc689e8 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapor.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupbitmapor
-sidebar_position: 130
+sidebar_position: 150
title: groupBitmapOr
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md
index aa24b3d2128..3212e94a47b 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitmapxor.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupbitmapxor
-sidebar_position: 131
+sidebar_position: 151
title: groupBitmapXor
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md
index 138ee998405..802b839d56e 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitor.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupbitor
-sidebar_position: 126
+sidebar_position: 152
---
# groupBitOr
diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md b/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md
index 168335a010c..94891891d64 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/groupbitxor.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupbitxor
-sidebar_position: 127
+sidebar_position: 153
---
# groupBitXor
diff --git a/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md b/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md
index fe5f714c307..0462f4a4ab2 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/groupuniqarray.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/groupuniqarray
-sidebar_position: 111
+sidebar_position: 154
---
# groupUniqArray
diff --git a/docs/en/sql-reference/aggregate-functions/reference/intervalLengthSum.md b/docs/en/sql-reference/aggregate-functions/reference/intervalLengthSum.md
index 5990345b765..66e23a716ba 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/intervalLengthSum.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/intervalLengthSum.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/intervalLengthSum
-sidebar_position: 146
+sidebar_position: 155
sidebar_label: intervalLengthSum
title: intervalLengthSum
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest.md b/docs/en/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest.md
index d159eec7ce6..33afcdfbf38 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest
-sidebar_position: 300
+sidebar_position: 156
sidebar_label: kolmogorovSmirnovTest
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md b/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md
index e1a29973fcf..c543831addc 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/kurtpop.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/kurtpop
-sidebar_position: 153
+sidebar_position: 157
---
# kurtPop
diff --git a/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md b/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md
index 911c2bfbe74..57e80729454 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/kurtsamp.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/kurtsamp
-sidebar_position: 154
+sidebar_position: 158
---
# kurtSamp
diff --git a/docs/en/sql-reference/aggregate-functions/reference/largestTriangleThreeBuckets.md b/docs/en/sql-reference/aggregate-functions/reference/largestTriangleThreeBuckets.md
index 4f73aadb8da..673f3cb69c7 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/largestTriangleThreeBuckets.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/largestTriangleThreeBuckets.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/largestTriangleThreeBuckets
-sidebar_position: 312
+sidebar_position: 159
sidebar_label: largestTriangleThreeBuckets
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/last_value.md b/docs/en/sql-reference/aggregate-functions/reference/last_value.md
index 21a86a5f130..b2aa5c86d81 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/last_value.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/last_value.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/last_value
-sidebar_position: 8
+sidebar_position: 160
---
# last_value
diff --git a/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md b/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md
index af744f445d9..17f6afecde2 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/mannwhitneyutest.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/mannwhitneyutest
-sidebar_position: 310
+sidebar_position: 161
sidebar_label: mannWhitneyUTest
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/max.md b/docs/en/sql-reference/aggregate-functions/reference/max.md
index 4bb2145d683..12c8800ef7f 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/max.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/max.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/max
-sidebar_position: 3
+sidebar_position: 162
title: max
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/maxintersections.md b/docs/en/sql-reference/aggregate-functions/reference/maxintersections.md
index db99b900a3e..c65e31114ff 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/maxintersections.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/maxintersections.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/maxintersections
-sidebar_position: 360
+sidebar_position: 163
title: maxIntersections
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/maxintersectionsposition.md b/docs/en/sql-reference/aggregate-functions/reference/maxintersectionsposition.md
index 7dd63f09316..d5c2b0bd3c2 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/maxintersectionsposition.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/maxintersectionsposition.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/maxintersectionsposition
-sidebar_position: 361
+sidebar_position: 164
title: maxIntersectionsPosition
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/maxmap.md b/docs/en/sql-reference/aggregate-functions/reference/maxmap.md
index 4d8c67e1b90..c9c6913249c 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/maxmap.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/maxmap.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/maxmap
-sidebar_position: 143
+sidebar_position: 165
---
# maxMap
diff --git a/docs/en/sql-reference/aggregate-functions/reference/meanztest.md b/docs/en/sql-reference/aggregate-functions/reference/meanztest.md
index 1cf2bebf26f..19afb5ae742 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/meanztest.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/meanztest.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/meanztest
-sidebar_position: 303
+sidebar_position: 166
sidebar_label: meanZTest
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/median.md b/docs/en/sql-reference/aggregate-functions/reference/median.md
index 2a166c83dad..dcf174254ac 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/median.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/median.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/median
-sidebar_position: 212
+sidebar_position: 167
---
# median
diff --git a/docs/en/sql-reference/aggregate-functions/reference/min.md b/docs/en/sql-reference/aggregate-functions/reference/min.md
index cca515b76e8..6bfcaf020c8 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/min.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/min.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/min
-sidebar_position: 2
+sidebar_position: 168
title: min
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/minmap.md b/docs/en/sql-reference/aggregate-functions/reference/minmap.md
index 5436e1fc6a6..b1fbb9e49f3 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/minmap.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/minmap.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/minmap
-sidebar_position: 142
+sidebar_position: 169
---
# minMap
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantile.md b/docs/en/sql-reference/aggregate-functions/reference/quantile.md
index 91b6b1b0d80..d5278125cbc 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantile.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantile.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantile
-sidebar_position: 200
+sidebar_position: 170
---
# quantile
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileGK.md b/docs/en/sql-reference/aggregate-functions/reference/quantileGK.md
index 7352781d126..9582f264a6f 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantileGK.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantileGK.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantileGK
-sidebar_position: 204
+sidebar_position: 175
---
# quantileGK
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md
index 4377f2f1b17..4469438db6a 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantilebfloat16.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantilebfloat16
-sidebar_position: 209
+sidebar_position: 171
title: quantileBFloat16
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileddsketch.md b/docs/en/sql-reference/aggregate-functions/reference/quantileddsketch.md
index f9acd2e20cb..fc9db7ef08d 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantileddsketch.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantileddsketch.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantileddsketch
-sidebar_position: 211
+sidebar_position: 171
title: quantileDD
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md b/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md
index 7235c47da70..0ac4b5e3a51 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiledeterministic.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantiledeterministic
-sidebar_position: 206
+sidebar_position: 172
---
# quantileDeterministic
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
index d7d7413c283..46873bcd2b6 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexact.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantileexact
-sidebar_position: 202
+sidebar_position: 173
---
# quantileExact Functions
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md
index 34def8d7411..4ce212888c4 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantileexactweighted.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantileexactweighted
-sidebar_position: 203
+sidebar_position: 174
---
# quantileExactWeighted
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantileinterpolatedweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantileinterpolatedweighted.md
index 41d2627fb7b..9eb4fde6102 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantileinterpolatedweighted.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantileinterpolatedweighted.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantileInterpolatedWeighted
-sidebar_position: 203
+sidebar_position: 176
---
# quantileInterpolatedWeighted
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
index 856d447ac13..e2c3295221d 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiles.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantiles
-sidebar_position: 201
+sidebar_position: 177
---
# quantiles Functions
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md
index 796e87b02d8..ece54ca24ab 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigest.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantiletdigest
-sidebar_position: 207
+sidebar_position: 178
---
# quantileTDigest
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md
index b3e21e0e69e..7f8f7f53a97 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletdigestweighted.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantiletdigestweighted
-sidebar_position: 208
+sidebar_position: 179
---
# quantileTDigestWeighted
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md
index b5b1c8a0c01..78050fe5b5e 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletiming.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantiletiming
-sidebar_position: 204
+sidebar_position: 180
---
# quantileTiming
diff --git a/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md b/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md
index df483aac01e..c5fff0825c3 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/quantiletimingweighted.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/quantiletimingweighted
-sidebar_position: 205
+sidebar_position: 181
---
# quantileTimingWeighted
diff --git a/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md b/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md
index 27f2dd124e4..eb995923d97 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/rankCorr.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/rankCorr
-sidebar_position: 145
+sidebar_position: 182
---
# rankCorr
diff --git a/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md b/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md
index ea3dbff8691..2aebccfdc53 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/simplelinearregression.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/simplelinearregression
-sidebar_position: 220
+sidebar_position: 183
---
# simpleLinearRegression
diff --git a/docs/en/sql-reference/aggregate-functions/reference/singlevalueornull.md b/docs/en/sql-reference/aggregate-functions/reference/singlevalueornull.md
index e39af77059a..21344b58ba6 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/singlevalueornull.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/singlevalueornull.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/singlevalueornull
-sidebar_position: 220
+sidebar_position: 184
---
# singleValueOrNull
diff --git a/docs/en/sql-reference/aggregate-functions/reference/skewpop.md b/docs/en/sql-reference/aggregate-functions/reference/skewpop.md
index 379fdcfa7c2..58ea33edb81 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/skewpop.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/skewpop.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/skewpop
-sidebar_position: 150
+sidebar_position: 185
---
# skewPop
diff --git a/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md b/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md
index 9e64b186db3..9c32a0183ef 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/skewsamp.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/skewsamp
-sidebar_position: 151
+sidebar_position: 186
---
# skewSamp
diff --git a/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md b/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md
index 62edc221858..8791847ead0 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/sparkbar.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sparkbar
-sidebar_position: 311
+sidebar_position: 187
sidebar_label: sparkbar
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md b/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md
index d2406197ecc..e52a442d76a 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/stddevpop.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/stddevpop
-sidebar_position: 30
+sidebar_position: 188
---
# stddevPop
@@ -25,7 +25,7 @@ stddevPop(x)
**Returned value**
-Square root of standard deviation of `x`. [Float64](../../data-types/float.md).
+- Square root of standard deviation of `x`. [Float64](../../data-types/float.md).
**Example**
diff --git a/docs/en/sql-reference/aggregate-functions/reference/stddevpopstable.md b/docs/en/sql-reference/aggregate-functions/reference/stddevpopstable.md
index a8ad5956ae8..2051ce7b125 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/stddevpopstable.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/stddevpopstable.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/stddevpopstable
-sidebar_position: 30
+sidebar_position: 189
---
# stddevPopStable
diff --git a/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md b/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md
index cf8b9b20d63..e2cad40b267 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/stddevsamp.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/stddevsamp
-sidebar_position: 31
+sidebar_position: 190
---
# stddevSamp
diff --git a/docs/en/sql-reference/aggregate-functions/reference/stddevsampstable.md b/docs/en/sql-reference/aggregate-functions/reference/stddevsampstable.md
index 9ae1f5f8411..205e10cced5 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/stddevsampstable.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/stddevsampstable.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/stddevsampstable
-sidebar_position: 31
+sidebar_position: 191
---
# stddevSampStable
diff --git a/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md b/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md
index 7ab9e1d3256..6cc5cbd8fe1 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/stochasticlinearregression.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/stochasticlinearregression
-sidebar_position: 221
+sidebar_position: 192
---
# stochasticLinearRegression {#agg_functions_stochasticlinearregression_parameters}
diff --git a/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md b/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md
index 4bf5529ddcb..dca452a1702 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/stochasticlogisticregression.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/stochasticlogisticregression
-sidebar_position: 222
+sidebar_position: 193
---
# stochasticLogisticRegression
diff --git a/docs/en/sql-reference/aggregate-functions/reference/studentttest.md b/docs/en/sql-reference/aggregate-functions/reference/studentttest.md
index fa320b4e336..1605e8efa13 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/studentttest.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/studentttest.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/studentttest
-sidebar_position: 300
+sidebar_position: 194
sidebar_label: studentTTest
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/sum.md b/docs/en/sql-reference/aggregate-functions/reference/sum.md
index a33a99f63e6..19636f003c7 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/sum.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/sum.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sum
-sidebar_position: 4
+sidebar_position: 195
---
# sum
diff --git a/docs/en/sql-reference/aggregate-functions/reference/sumcount.md b/docs/en/sql-reference/aggregate-functions/reference/sumcount.md
index a59b87022d6..ff4ddcec142 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/sumcount.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/sumcount.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sumcount
-sidebar_position: 144
+sidebar_position: 196
title: sumCount
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/sumkahan.md b/docs/en/sql-reference/aggregate-functions/reference/sumkahan.md
index 1a729b18b42..ed58b3c3369 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/sumkahan.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/sumkahan.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sumkahan
-sidebar_position: 145
+sidebar_position: 197
title: sumKahan
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/summap.md b/docs/en/sql-reference/aggregate-functions/reference/summap.md
index fd3f095511b..4ff937f1e4f 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/summap.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/summap.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/summap
-sidebar_position: 141
+sidebar_position: 198
---
# sumMap
diff --git a/docs/en/sql-reference/aggregate-functions/reference/summapwithoverflow.md b/docs/en/sql-reference/aggregate-functions/reference/summapwithoverflow.md
index 7c0aa31e459..e36818e2ab8 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/summapwithoverflow.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/summapwithoverflow.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/summapwithoverflow
-sidebar_position: 141
+sidebar_position: 199
---
# sumMapWithOverflow
diff --git a/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md b/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md
index a120eafe738..5fe3cb7de8e 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/sumwithoverflow.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/sumwithoverflow
-sidebar_position: 140
+sidebar_position: 200
---
# sumWithOverflow
diff --git a/docs/en/sql-reference/aggregate-functions/reference/theilsu.md b/docs/en/sql-reference/aggregate-functions/reference/theilsu.md
index ef19438a53a..73b063cf965 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/theilsu.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/theilsu.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/theilsu
-sidebar_position: 353
+sidebar_position: 201
---
# theilsU
diff --git a/docs/en/sql-reference/aggregate-functions/reference/topk.md b/docs/en/sql-reference/aggregate-functions/reference/topk.md
index dd4b2251a8a..695e9b1d7d8 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/topk.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/topk.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/topk
-sidebar_position: 108
+sidebar_position: 202
---
# topK
diff --git a/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md b/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md
index d2a469828fc..148a8b6ea18 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/topkweighted.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/topkweighted
-sidebar_position: 109
+sidebar_position: 203
---
# topKWeighted
diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniq.md b/docs/en/sql-reference/aggregate-functions/reference/uniq.md
index b1c8336630b..c1dc6a29e58 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/uniq.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/uniq.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/uniq
-sidebar_position: 190
+sidebar_position: 204
---
# uniq
diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md
index 18f44d2fcc4..70bb4463140 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/uniqcombined
-sidebar_position: 192
+sidebar_position: 205
---
# uniqCombined
diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md
index b6e09bcaae3..014984f6291 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/uniqcombined64.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/uniqcombined64
-sidebar_position: 193
+sidebar_position: 206
---
# uniqCombined64
diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md b/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md
index fd68a464881..da4d4aa9588 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/uniqexact.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/uniqexact
-sidebar_position: 191
+sidebar_position: 207
---
# uniqExact
diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md b/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md
index 8594ebb3782..78d84edf1be 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/uniqhll12.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/uniqhll12
-sidebar_position: 194
+sidebar_position: 208
---
# uniqHLL12
diff --git a/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md b/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md
index 45970f144cb..fbae42117ee 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/uniqthetasketch.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/uniqthetasketch
-sidebar_position: 195
+sidebar_position: 209
title: uniqTheta
---
diff --git a/docs/en/sql-reference/aggregate-functions/reference/varpop.md b/docs/en/sql-reference/aggregate-functions/reference/varpop.md
index 4e010248f6e..182e830f19f 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/varpop.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/varpop.md
@@ -1,33 +1,28 @@
---
title: "varPop"
-slug: "/en/sql-reference/aggregate-functions/reference/varpop"
-sidebar_position: 32
+slug: "/en/sql-reference/aggregate-functions/reference/varPop"
+sidebar_position: 210
---
-This page covers the `varPop` and `varPopStable` functions available in ClickHouse.
-
## varPop
-Calculates the population covariance between two data columns. The population covariance measures the degree to which two variables vary together. Calculates the amount `Σ((x - x̅)^2) / n`, where `n` is the sample size and `x̅`is the average value of `x`.
+Calculates the population variance.
**Syntax**
```sql
-covarPop(x, y)
+varPop(x)
```
+Alias: `VAR_POP`.
+
**Parameters**
-- `x`: The first data column. [Numeric](../../../native-protocol/columns.md)
-- `y`: The second data column. [Numeric](../../../native-protocol/columns.md)
+- `x`: Population of values to find the population variance of. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal*](../../data-types/decimal.md).
**Returned value**
-Returns an integer of type `Float64`.
-
-**Implementation details**
-
-This function uses a numerically unstable algorithm. If you need numerical stability in calculations, use the slower but more stable [`varPopStable`](#varpopstable) function.
+- Returns the population variance of `x`. [`Float64`](../../data-types/float.md).
**Example**
@@ -37,69 +32,21 @@ Query:
DROP TABLE IF EXISTS test_data;
CREATE TABLE test_data
(
- x Int32,
- y Int32
+ x UInt8,
)
ENGINE = Memory;
-INSERT INTO test_data VALUES (1, 2), (2, 3), (3, 5), (4, 6), (5, 8);
+INSERT INTO test_data VALUES (3), (3), (3), (4), (4), (5), (5), (7), (11), (15);
SELECT
- covarPop(x, y) AS covar_pop
+ varPop(x) AS var_pop
FROM test_data;
```
Result:
```response
-3
-```
-
-## varPopStable
-
-Calculates population covariance between two data columns using a stable, numerically accurate method to calculate the variance. This function is designed to provide reliable results even with large datasets or values that might cause numerical instability in other implementations.
-
-**Syntax**
-
-```sql
-covarPopStable(x, y)
-```
-
-**Parameters**
-
-- `x`: The first data column. [String literal](../../syntax#syntax-string-literal)
-- `y`: The second data column. [Expression](../../syntax#syntax-expressions)
-
-**Returned value**
-
-Returns an integer of type `Float64`.
-
-**Implementation details**
-
-Unlike [`varPop`](#varpop), this function uses a stable, numerically accurate algorithm to calculate the population variance to avoid issues like catastrophic cancellation or loss of precision. This function also handles `NaN` and `Inf` values correctly, excluding them from calculations.
-
-**Example**
-
-Query:
-
-```sql
-DROP TABLE IF EXISTS test_data;
-CREATE TABLE test_data
-(
- x Int32,
- y Int32
-)
-ENGINE = Memory;
-
-INSERT INTO test_data VALUES (1, 2), (2, 9), (9, 5), (4, 6), (5, 8);
-
-SELECT
- covarPopStable(x, y) AS covar_pop_stable
-FROM test_data;
-```
-
-Result:
-
-```response
-0.5999999999999999
+┌─var_pop─┐
+│ 14.4 │
+└─────────┘
```
diff --git a/docs/en/sql-reference/aggregate-functions/reference/varpopstable.md b/docs/en/sql-reference/aggregate-functions/reference/varpopstable.md
new file mode 100644
index 00000000000..68037a5a533
--- /dev/null
+++ b/docs/en/sql-reference/aggregate-functions/reference/varpopstable.md
@@ -0,0 +1,52 @@
+---
+title: "varPopStable"
+slug: "/en/sql-reference/aggregate-functions/reference/varpopstable"
+sidebar_position: 211
+---
+
+## varPopStable
+
+Returns the population variance. Unlike [`varPop`](../reference/varpop.md), this function uses a [numerically stable](https://en.wikipedia.org/wiki/Numerical_stability) algorithm. It works slower but provides a lower computational error.
+
+**Syntax**
+
+```sql
+varPopStable(x)
+```
+
+Alias: `VAR_POP_STABLE`.
+
+**Parameters**
+
+- `x`: Population of values to find the population variance of. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal*](../../data-types/decimal.md).
+
+**Returned value**
+
+- Returns the population variance of `x`. [Float64](../../data-types/float.md).
+
+**Example**
+
+Query:
+
+```sql
+DROP TABLE IF EXISTS test_data;
+CREATE TABLE test_data
+(
+ x UInt8,
+)
+ENGINE = Memory;
+
+INSERT INTO test_data VALUES (3),(3),(3),(4),(4),(5),(5),(7),(11),(15);
+
+SELECT
+ varPopStable(x) AS var_pop_stable
+FROM test_data;
+```
+
+Result:
+
+```response
+┌─var_pop_stable─┐
+│ 14.4 │
+└────────────────┘
+```
diff --git a/docs/en/sql-reference/aggregate-functions/reference/varsamp.md b/docs/en/sql-reference/aggregate-functions/reference/varsamp.md
index bd1cfa5742a..87a97c15dd8 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/varsamp.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/varsamp.md
@@ -1,11 +1,9 @@
---
title: "varSamp"
-slug: /en/sql-reference/aggregate-functions/reference/varsamp
-sidebar_position: 33
+slug: /en/sql-reference/aggregate-functions/reference/varSamp
+sidebar_position: 212
---
-This page contains information on the `varSamp` and `varSampStable` ClickHouse functions.
-
## varSamp
Calculate the sample variance of a data set.
@@ -13,24 +11,27 @@ Calculate the sample variance of a data set.
**Syntax**
```sql
-varSamp(expr)
+varSamp(x)
```
+Alias: `VAR_SAMP`.
+
**Parameters**
-- `expr`: An expression representing the data set for which you want to calculate the sample variance. [Expression](../../syntax#syntax-expressions)
+- `x`: The population for which you want to calculate the sample variance. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal*](../../data-types/decimal.md).
**Returned value**
-Returns a Float64 value representing the sample variance of the input data set.
+
+- Returns the sample variance of the input data set `x`. [Float64](../../data-types/float.md).
**Implementation details**
-The `varSamp()` function calculates the sample variance using the following formula:
+The `varSamp` function calculates the sample variance using the following formula:
-```plaintext
-∑(x - mean(x))^2 / (n - 1)
-```
+$$
+\sum\frac{(x - \text{mean}(x))^2}{(n - 1)}
+$$
Where:
@@ -38,91 +39,29 @@ Where:
- `mean(x)` is the arithmetic mean of the data set.
- `n` is the number of data points in the data set.
-The function assumes that the input data set represents a sample from a larger population. If you want to calculate the variance of the entire population (when you have the complete data set), you should use the [`varPop()` function](./varpop#varpop) instead.
-
-This function uses a numerically unstable algorithm. If you need numerical stability in calculations, use the slower but more stable [`varSampStable`](#varsampstable) function.
+The function assumes that the input data set represents a sample from a larger population. If you want to calculate the variance of the entire population (when you have the complete data set), you should use [`varPop`](../reference/varpop.md) instead.
**Example**
Query:
```sql
-CREATE TABLE example_table
+DROP TABLE IF EXISTS test_data;
+CREATE TABLE test_data
(
- id UInt64,
- value Float64
+ x Float64
)
-ENGINE = MergeTree
-ORDER BY id;
+ENGINE = Memory;
-INSERT INTO example_table VALUES (1, 10.5), (2, 12.3), (3, 9.8), (4, 11.2), (5, 10.7);
+INSERT INTO test_data VALUES (10.5), (12.3), (9.8), (11.2), (10.7);
-SELECT varSamp(value) FROM example_table;
+SELECT round(varSamp(x),3) AS var_samp FROM test_data;
```
Response:
```response
-0.8650000000000091
+┌─var_samp─┐
+│ 0.865 │
+└──────────┘
```
-
-## varSampStable
-
-Calculate the sample variance of a data set using a numerically stable algorithm.
-
-**Syntax**
-
-```sql
-varSampStable(expr)
-```
-
-**Parameters**
-
-- `expr`: An expression representing the data set for which you want to calculate the sample variance. [Expression](../../syntax#syntax-expressions)
-
-**Returned value**
-
-The `varSampStable` function returns a Float64 value representing the sample variance of the input data set.
-
-**Implementation details**
-
-The `varSampStable` function calculates the sample variance using the same formula as the [`varSamp`](#varsamp) function:
-
-```plaintext
-∑(x - mean(x))^2 / (n - 1)
-```
-
-Where:
-- `x` is each individual data point in the data set.
-- `mean(x)` is the arithmetic mean of the data set.
-- `n` is the number of data points in the data set.
-
-The difference between `varSampStable` and `varSamp` is that `varSampStable` is designed to provide a more deterministic and stable result when dealing with floating-point arithmetic. It uses an algorithm that minimizes the accumulation of rounding errors, which can be particularly important when dealing with large data sets or data with a wide range of values.
-
-Like `varSamp`, the `varSampStable` function assumes that the input data set represents a sample from a larger population. If you want to calculate the variance of the entire population (when you have the complete data set), you should use the [`varPopStable`](./varpop#varpopstable) function instead.
-
-**Example**
-
-Query:
-
-```sql
-CREATE TABLE example_table
-(
- id UInt64,
- value Float64
-)
-ENGINE = MergeTree
-ORDER BY id;
-
-INSERT INTO example_table VALUES (1, 10.5), (2, 12.3), (3, 9.8), (4, 11.2), (5, 10.7);
-
-SELECT varSampStable(value) FROM example_table;
-```
-
-Response:
-
-```response
-0.865
-```
-
-This query calculates the sample variance of the `value` column in the `example_table` using the `varSampStable()` function. The result shows that the sample variance of the values `[10.5, 12.3, 9.8, 11.2, 10.7]` is approximately 0.865, which may differ slightly from the result of `varSamp` due to the more precise handling of floating-point arithmetic.
diff --git a/docs/en/sql-reference/aggregate-functions/reference/varsampstable.md b/docs/en/sql-reference/aggregate-functions/reference/varsampstable.md
new file mode 100644
index 00000000000..ebe647e1951
--- /dev/null
+++ b/docs/en/sql-reference/aggregate-functions/reference/varsampstable.md
@@ -0,0 +1,63 @@
+---
+title: "varSampStable"
+slug: /en/sql-reference/aggregate-functions/reference/varsampstable
+sidebar_position: 213
+---
+
+## varSampStable
+
+Calculate the sample variance of a data set. Unlike [`varSamp`](../reference/varsamp.md), this function uses a numerically stable algorithm. It works slower but provides a lower computational error.
+
+**Syntax**
+
+```sql
+varSampStable(x)
+```
+
+Alias: `VAR_SAMP_STABLE`
+
+**Parameters**
+
+- `x`: The population for which you want to calculate the sample variance. [(U)Int*](../../data-types/int-uint.md), [Float*](../../data-types/float.md), [Decimal*](../../data-types/decimal.md).
+
+**Returned value**
+
+- Returns the sample variance of the input data set. [Float64](../../data-types/float.md).
+
+**Implementation details**
+
+The `varSampStable` function calculates the sample variance using the same formula as the [`varSamp`](../reference/varsamp.md):
+
+$$
+\sum\frac{(x - \text{mean}(x))^2}{(n - 1)}
+$$
+
+Where:
+- `x` is each individual data point in the data set.
+- `mean(x)` is the arithmetic mean of the data set.
+- `n` is the number of data points in the data set.
+
+**Example**
+
+Query:
+
+```sql
+DROP TABLE IF EXISTS test_data;
+CREATE TABLE test_data
+(
+ x Float64
+)
+ENGINE = Memory;
+
+INSERT INTO test_data VALUES (10.5), (12.3), (9.8), (11.2), (10.7);
+
+SELECT round(varSampStable(x),3) AS var_samp_stable FROM test_data;
+```
+
+Response:
+
+```response
+┌─var_samp_stable─┐
+│ 0.865 │
+└─────────────────┘
+```
diff --git a/docs/en/sql-reference/aggregate-functions/reference/welchttest.md b/docs/en/sql-reference/aggregate-functions/reference/welchttest.md
index 4f1085e65b4..296b70f758e 100644
--- a/docs/en/sql-reference/aggregate-functions/reference/welchttest.md
+++ b/docs/en/sql-reference/aggregate-functions/reference/welchttest.md
@@ -1,6 +1,6 @@
---
slug: /en/sql-reference/aggregate-functions/reference/welchttest
-sidebar_position: 301
+sidebar_position: 214
sidebar_label: welchTTest
---
diff --git a/docs/en/sql-reference/data-types/datetime.md b/docs/en/sql-reference/data-types/datetime.md
index ac9a72c2641..250e766f2b7 100644
--- a/docs/en/sql-reference/data-types/datetime.md
+++ b/docs/en/sql-reference/data-types/datetime.md
@@ -137,7 +137,7 @@ If the time transition (due to daylight saving time or for other reasons) was pe
Non-monotonic calendar dates. For example, in Happy Valley - Goose Bay, the time was transitioned one hour backwards at 00:01:00 7 Nov 2010 (one minute after midnight). So after 6th Nov has ended, people observed a whole one minute of 7th Nov, then time was changed back to 23:01 6th Nov and after another 59 minutes the 7th Nov started again. ClickHouse does not (yet) support this kind of fun. During these days the results of time processing functions may be slightly incorrect.
-Similar issue exists for Casey Antarctic station in year 2010. They changed time three hours back at 5 Mar, 02:00. If you are working in antarctic station, please don't afraid to use ClickHouse. Just make sure you set timezone to UTC or be aware of inaccuracies.
+Similar issue exists for Casey Antarctic station in year 2010. They changed time three hours back at 5 Mar, 02:00. If you are working in antarctic station, please don't be afraid to use ClickHouse. Just make sure you set timezone to UTC or be aware of inaccuracies.
Time shifts for multiple days. Some pacific islands changed their timezone offset from UTC+14 to UTC-12. That's alright but some inaccuracies may present if you do calculations with their timezone for historical time points at the days of conversion.
diff --git a/docs/en/sql-reference/functions/math-functions.md b/docs/en/sql-reference/functions/math-functions.md
index 12098efc635..b9b5c6d7a05 100644
--- a/docs/en/sql-reference/functions/math-functions.md
+++ b/docs/en/sql-reference/functions/math-functions.md
@@ -8,7 +8,7 @@ sidebar_label: Mathematical
## e
-Returns e ([Euler's constant](https://en.wikipedia.org/wiki/Euler%27s_constant)).
+Returns $e$ ([Euler's constant](https://en.wikipedia.org/wiki/Euler%27s_constant)).
**Syntax**
@@ -22,7 +22,7 @@ Type: [Float64](../data-types/float.md).
## pi
-Returns π ([Pi](https://en.wikipedia.org/wiki/Pi)).
+Returns $\pi$ ([Pi](https://en.wikipedia.org/wiki/Pi)).
**Syntax**
@@ -35,7 +35,7 @@ Type: [Float64](../data-types/float.md).
## exp
-Returns e to the power of the given argument.
+Returns $e^{x}$, where x is the given argument to the function.
**Syntax**
@@ -47,6 +47,22 @@ exp(x)
- `x` - [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
+**Example**
+
+Query:
+
+```sql
+SELECT round(exp(-1), 4);
+```
+
+Result:
+
+```response
+┌─round(exp(-1), 4)─┐
+│ 0.3679 │
+└───────────────────┘
+```
+
**Returned value**
Type: [Float*](../data-types/float.md).
@@ -91,7 +107,7 @@ Type: [Float*](../data-types/float.md).
## intExp2
-Like `exp` but returns a UInt64.
+Like [`exp`](#exp) but returns a UInt64.
**Syntax**
@@ -137,7 +153,7 @@ Type: [Float*](../data-types/float.md).
## intExp10
-Like `exp10` but returns a UInt64.
+Like [`exp10`](#exp10) but returns a UInt64.
**Syntax**
@@ -197,7 +213,7 @@ Type: [Float*](../data-types/float.md).
## erf
-If `x` is non-negative, then `erf(x / σ√2)` is the probability that a random variable having a normal distribution with standard deviation `σ` takes the value that is separated from the expected value by more than `x`.
+If `x` is non-negative, then $erf(\frac{x}{\sigma\sqrt{2}})$ is the probability that a random variable having a normal distribution with standard deviation $\sigma$ takes the value that is separated from the expected value by more than `x`.
**Syntax**
@@ -229,7 +245,7 @@ SELECT erf(3 / sqrt(2));
## erfc
-Returns a number close to `1 - erf(x)` without loss of precision for large ‘x’ values.
+Returns a number close to $1-erf(x)$ without loss of precision for large `x` values.
**Syntax**
@@ -403,7 +419,7 @@ Type: [Float*](../data-types/float.md).
## pow
-Returns `x` to the power of `y`.
+Returns $x^y$.
**Syntax**
@@ -434,11 +450,11 @@ cosh(x)
**Arguments**
-- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
+- `x` — The angle, in radians. Values from the interval: $-\infty \lt x \lt +\infty$. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
**Returned value**
-- Values from the interval: `1 <= cosh(x) < +∞`.
+- Values from the interval: $1 \le cosh(x) \lt +\infty$.
Type: [Float64](../data-types/float.md#float32-float64).
@@ -468,11 +484,11 @@ acosh(x)
**Arguments**
-- `x` — Hyperbolic cosine of angle. Values from the interval: `1 <= x < +∞`. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
+- `x` — Hyperbolic cosine of angle. Values from the interval: $1 \le x \lt +\infty$. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
**Returned value**
-- The angle, in radians. Values from the interval: `0 <= acosh(x) < +∞`.
+- The angle, in radians. Values from the interval: $0 \le acosh(x) \lt +\infty$.
Type: [Float64](../data-types/float.md#float32-float64).
@@ -502,11 +518,11 @@ sinh(x)
**Arguments**
-- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
+- `x` — The angle, in radians. Values from the interval: $-\infty \lt x \lt +\infty$. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
**Returned value**
-- Values from the interval: `-∞ < sinh(x) < +∞`.
+- Values from the interval: $-\infty \lt sinh(x) \lt +\infty$.
Type: [Float64](../data-types/float.md#float32-float64).
@@ -536,11 +552,11 @@ asinh(x)
**Arguments**
-- `x` — Hyperbolic sine of angle. Values from the interval: `-∞ < x < +∞`. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
+- `x` — Hyperbolic sine of angle. Values from the interval: $-\infty \lt x \lt +\infty$. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
**Returned value**
-- The angle, in radians. Values from the interval: `-∞ < asinh(x) < +∞`.
+- The angle, in radians. Values from the interval: $-\infty \lt asinh(x) \lt +\infty$.
Type: [Float64](../data-types/float.md#float32-float64).
@@ -569,11 +585,11 @@ tanh(x)
**Arguments**
-- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
+- `x` — The angle, in radians. Values from the interval: $-\infty \lt x \lt +\infty$. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
**Returned value**
-- Values from the interval: `-1 < tanh(x) < 1`.
+- Values from the interval: $-1 \lt tanh(x) \lt 1$.
Type: [Float*](../data-types/float.md#float32-float64).
@@ -601,11 +617,11 @@ atanh(x)
**Arguments**
-- `x` — Hyperbolic tangent of angle. Values from the interval: `–1 < x < 1`. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
+- `x` — Hyperbolic tangent of angle. Values from the interval: $-1 \lt x \lt 1$. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
**Returned value**
-- The angle, in radians. Values from the interval: `-∞ < atanh(x) < +∞`.
+- The angle, in radians. Values from the interval: $-\infty \lt atanh(x) \lt +\infty$.
Type: [Float64](../data-types/float.md#float32-float64).
@@ -640,7 +656,7 @@ atan2(y, x)
**Returned value**
-- The angle `θ` such that `−π < θ ≤ π`, in radians.
+- The angle `θ` such that $-\pi \lt \theta \le \pi$, in radians.
Type: [Float64](../data-types/float.md#float32-float64).
@@ -705,11 +721,11 @@ log1p(x)
**Arguments**
-- `x` — Values from the interval: `-1 < x < +∞`. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
+- `x` — Values from the interval: $-1 \lt x \lt +\infty$. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
**Returned value**
-- Values from the interval: `-∞ < log1p(x) < +∞`.
+- Values from the interval: $-\infty \lt log1p(x) \lt +\infty$.
Type: [Float64](../data-types/float.md#float32-float64).
@@ -739,7 +755,7 @@ sign(x)
**Arguments**
-- `x` — Values from `-∞` to `+∞`. Support all numeric types in ClickHouse.
+- `x` — Values from $-\infty$ to $+\infty$. Supports all numeric types in ClickHouse.
**Returned value**
@@ -804,7 +820,7 @@ sigmoid(x)
**Parameters**
-- `x` — input value. Values from the interval: `-∞ < x < +∞`. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
+- `x` — input value. Values from the interval: $-\infty \lt x \lt +\infty$. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md) or [Decimal*](../data-types/decimal.md).
**Returned value**
diff --git a/docs/en/sql-reference/functions/nlp-functions.md b/docs/en/sql-reference/functions/nlp-functions.md
index 4bfa181a35f..7057ebebfe4 100644
--- a/docs/en/sql-reference/functions/nlp-functions.md
+++ b/docs/en/sql-reference/functions/nlp-functions.md
@@ -6,26 +6,297 @@ sidebar_label: NLP (experimental)
# Natural Language Processing (NLP) Functions
-:::note
+:::warning
This is an experimental feature that is currently in development and is not ready for general use. It will change in unpredictable backwards-incompatible ways in future releases. Set `allow_experimental_nlp_functions = 1` to enable it.
:::
+## detectCharset
+
+The `detectCharset` function detects the character set of the non-UTF8-encoded input string.
+
+*Syntax*
+
+``` sql
+detectCharset('text_to_be_analyzed')
+```
+
+*Arguments*
+
+- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../data-types/string.md#string).
+
+*Returned value*
+
+- A `String` containing the code of the detected character set
+
+*Examples*
+
+Query:
+
+```sql
+SELECT detectCharset('Ich bleibe für ein paar Tage.');
+```
+
+Result:
+
+```response
+┌─detectCharset('Ich bleibe für ein paar Tage.')─┐
+│ WINDOWS-1252 │
+└────────────────────────────────────────────────┘
+```
+
+## detectLanguage
+
+Detects the language of the UTF8-encoded input string. The function uses the [CLD2 library](https://github.com/CLD2Owners/cld2) for detection, and it returns the 2-letter ISO language code.
+
+The `detectLanguage` function works best when providing over 200 characters in the input string.
+
+*Syntax*
+
+``` sql
+detectLanguage('text_to_be_analyzed')
+```
+
+*Arguments*
+
+- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../data-types/string.md#string).
+
+*Returned value*
+
+- The 2-letter ISO code of the detected language
+
+Other possible results:
+
+- `un` = unknown, cannot detect any language.
+- `other` = the detected language does not have a 2-letter code.
+
+*Examples*
+
+Query:
+
+```sql
+SELECT detectLanguage('Je pense que je ne parviendrai jamais à parler français comme un natif. Where there’s a will, there’s a way.');
+```
+
+Result:
+
+```response
+fr
+```
+
+## detectLanguageMixed
+
+Similar to the `detectLanguage` function, but `detectLanguageMixed` returns a `Map` of 2-letter language codes that are mapped to the percentage of the certain language in the text.
+
+
+*Syntax*
+
+``` sql
+detectLanguageMixed('text_to_be_analyzed')
+```
+
+*Arguments*
+
+- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../data-types/string.md#string).
+
+*Returned value*
+
+- `Map(String, Float32)`: The keys are 2-letter ISO codes and the values are a percentage of text found for that language
+
+
+*Examples*
+
+Query:
+
+```sql
+SELECT detectLanguageMixed('二兎を追う者は一兎をも得ず二兎を追う者は一兎をも得ず A vaincre sans peril, on triomphe sans gloire.');
+```
+
+Result:
+
+```response
+┌─detectLanguageMixed()─┐
+│ {'ja':0.62,'fr':0.36} │
+└───────────────────────┘
+```
+
+## detectProgrammingLanguage
+
+Determines the programming language from the source code. Calculates all the unigrams and bigrams of commands in the source code.
+Then using a marked-up dictionary with weights of unigrams and bigrams of commands for various programming languages finds the biggest weight of the programming language and returns it.
+
+*Syntax*
+
+``` sql
+detectProgrammingLanguage('source_code')
+```
+
+*Arguments*
+
+- `source_code` — String representation of the source code to analyze. [String](../data-types/string.md#string).
+
+*Returned value*
+
+- Programming language. [String](../data-types/string.md).
+
+*Examples*
+
+Query:
+
+```sql
+SELECT detectProgrammingLanguage('#include <iostream>');
+```
+
+Result:
+
+```response
+┌─detectProgrammingLanguage('#include <iostream>')─┐
+│ C++                                               │
+└───────────────────────────────────────────────────┘
+```
+
+## detectLanguageUnknown
+
+Similar to the `detectLanguage` function, except the `detectLanguageUnknown` function works with non-UTF8-encoded strings. Prefer this version when your character set is UTF-16 or UTF-32.
+
+
+*Syntax*
+
+``` sql
+detectLanguageUnknown('text_to_be_analyzed')
+```
+
+*Arguments*
+
+- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../data-types/string.md#string).
+
+*Returned value*
+
+- The 2-letter ISO code of the detected language
+
+Other possible results:
+
+- `un` = unknown, cannot detect any language.
+- `other` = the detected language does not have a 2-letter code.
+
+*Examples*
+
+Query:
+
+```sql
+SELECT detectLanguageUnknown('Ich bleibe für ein paar Tage.');
+```
+
+Result:
+
+```response
+┌─detectLanguageUnknown('Ich bleibe für ein paar Tage.')─┐
+│ de │
+└────────────────────────────────────────────────────────┘
+```
+
+## detectTonality
+
+Determines the sentiment of text data. Uses a marked-up sentiment dictionary, in which each word has a tonality ranging from `-12` to `6`.
+For each text, it calculates the average sentiment value of its words and returns it in the range `[-1,1]`.
+
+:::note
+This function is limited in its current form. Currently it makes use of the embedded emotional dictionary at `/contrib/nlp-data/tonality_ru.zst` and only works for the Russian language.
+:::
+
+*Syntax*
+
+``` sql
+detectTonality(text)
+```
+
+*Arguments*
+
+- `text` — The text to be analyzed. [String](../data-types/string.md#string).
+
+*Returned value*
+
+- The average sentiment value of the words in `text`. [Float32](../data-types/float.md).
+
+*Examples*
+
+Query:
+
+```sql
+SELECT detectTonality('Шарик - хороший пёс'), -- Sharik is a good dog
+ detectTonality('Шарик - пёс'), -- Sharik is a dog
+ detectTonality('Шарик - плохой пёс'); -- Sharik is a bad dog
+```
+
+Result:
+
+```response
+┌─detectTonality('Шарик - хороший пёс')─┬─detectTonality('Шарик - пёс')─┬─detectTonality('Шарик - плохой пёс')─┐
+│ 0.44445 │ 0 │ -0.3 │
+└───────────────────────────────────────┴───────────────────────────────┴──────────────────────────────────────┘
+```
+## lemmatize
+
+Performs lemmatization on a given word. Needs dictionaries to operate, which can be obtained [here](https://github.com/vpodpecan/lemmagen3/tree/master/src/lemmagen3/models).
+
+*Syntax*
+
+``` sql
+lemmatize('language', word)
+```
+
+*Arguments*
+
+- `language` — Language which rules will be applied. [String](../data-types/string.md#string).
+- `word` — Word that needs to be lemmatized. Must be lowercase. [String](../data-types/string.md#string).
+
+*Examples*
+
+Query:
+
+``` sql
+SELECT lemmatize('en', 'wolves');
+```
+
+Result:
+
+``` text
+┌─lemmatize("wolves")─┐
+│ "wolf" │
+└─────────────────────┘
+```
+
+*Configuration*
+
+This configuration specifies that the dictionary `en.bin` should be used for lemmatization of English (`en`) words. The `.bin` files can be downloaded from
+[here](https://github.com/vpodpecan/lemmagen3/tree/master/src/lemmagen3/models).
+
+``` xml
+
+
+
+ en
+ en.bin
+
+
+
+```
+
## stem
Performs stemming on a given word.
-### Syntax
+*Syntax*
``` sql
stem('language', word)
```
-### Arguments
+*Arguments*
- `language` — Language which rules will be applied. Use the two letter [ISO 639-1 code](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes).
- `word` — word that needs to be stemmed. Must be in lowercase. [String](../data-types/string.md#string).
-### Examples
+*Examples*
Query:
@@ -40,7 +311,7 @@ Result:
│ ['I','think','it','is','a','bless','in','disguis'] │
└────────────────────────────────────────────────────┘
```
-### Supported languages for stem()
+*Supported languages for stem()*
:::note
The stem() function uses the [Snowball stemming](https://snowballstem.org/) library, see the Snowball website for updated languages etc.
@@ -76,53 +347,6 @@ The stem() function uses the [Snowball stemming](https://snowballstem.org/) libr
- Turkish
- Yiddish
-## lemmatize
-
-Performs lemmatization on a given word. Needs dictionaries to operate, which can be obtained [here](https://github.com/vpodpecan/lemmagen3/tree/master/src/lemmagen3/models).
-
-### Syntax
-
-``` sql
-lemmatize('language', word)
-```
-
-### Arguments
-
-- `language` — Language which rules will be applied. [String](../data-types/string.md#string).
-- `word` — Word that needs to be lemmatized. Must be lowercase. [String](../data-types/string.md#string).
-
-### Examples
-
-Query:
-
-``` sql
-SELECT lemmatize('en', 'wolves');
-```
-
-Result:
-
-``` text
-┌─lemmatize("wolves")─┐
-│ "wolf" │
-└─────────────────────┘
-```
-
-### Configuration
-
-This configuration specifies that the dictionary `en.bin` should be used for lemmatization of English (`en`) words. The `.bin` files can be downloaded from
-[here](https://github.com/vpodpecan/lemmagen3/tree/master/src/lemmagen3/models).
-
-``` xml
-
-
-
- en
- en.bin
-
-
-
-```
-
## synonyms
Finds synonyms to a given word. There are two types of synonym extensions: `plain` and `wordnet`.
@@ -131,18 +355,18 @@ With the `plain` extension type we need to provide a path to a simple text file,
With the `wordnet` extension type we need to provide a path to a directory with WordNet thesaurus in it. Thesaurus must contain a WordNet sense index.
-### Syntax
+*Syntax*
``` sql
synonyms('extension_name', word)
```
-### Arguments
+*Arguments*
- `extension_name` — Name of the extension in which search will be performed. [String](../data-types/string.md#string).
- `word` — Word that will be searched in extension. [String](../data-types/string.md#string).
-### Examples
+*Examples*
Query:
@@ -158,7 +382,7 @@ Result:
└──────────────────────────────────────────┘
```
-### Configuration
+*Configuration*
``` xml
@@ -172,154 +396,4 @@ Result:
en/
-```
-
-## detectLanguage
-
-Detects the language of the UTF8-encoded input string. The function uses the [CLD2 library](https://github.com/CLD2Owners/cld2) for detection, and it returns the 2-letter ISO language code.
-
-The `detectLanguage` function works best when providing over 200 characters in the input string.
-
-### Syntax
-
-``` sql
-detectLanguage('text_to_be_analyzed')
-```
-
-### Arguments
-
-- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../data-types/string.md#string).
-
-### Returned value
-
-- The 2-letter ISO code of the detected language
-
-Other possible results:
-
-- `un` = unknown, can not detect any language.
-- `other` = the detected language does not have 2 letter code.
-
-### Examples
-
-Query:
-
-```sql
-SELECT detectLanguage('Je pense que je ne parviendrai jamais à parler français comme un natif. Where there’s a will, there’s a way.');
-```
-
-Result:
-
-```response
-fr
-```
-
-## detectLanguageMixed
-
-Similar to the `detectLanguage` function, but `detectLanguageMixed` returns a `Map` of 2-letter language codes that are mapped to the percentage of the certain language in the text.
-
-
-### Syntax
-
-``` sql
-detectLanguageMixed('text_to_be_analyzed')
-```
-
-### Arguments
-
-- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../data-types/string.md#string).
-
-### Returned value
-
-- `Map(String, Float32)`: The keys are 2-letter ISO codes and the values are a percentage of text found for that language
-
-
-### Examples
-
-Query:
-
-```sql
-SELECT detectLanguageMixed('二兎を追う者は一兎をも得ず二兎を追う者は一兎をも得ず A vaincre sans peril, on triomphe sans gloire.');
-```
-
-Result:
-
-```response
-┌─detectLanguageMixed()─┐
-│ {'ja':0.62,'fr':0.36 │
-└───────────────────────┘
-```
-
-## detectLanguageUnknown
-
-Similar to the `detectLanguage` function, except the `detectLanguageUnknown` function works with non-UTF8-encoded strings. Prefer this version when your character set is UTF-16 or UTF-32.
-
-
-### Syntax
-
-``` sql
-detectLanguageUnknown('text_to_be_analyzed')
-```
-
-### Arguments
-
-- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../data-types/string.md#string).
-
-### Returned value
-
-- The 2-letter ISO code of the detected language
-
-Other possible results:
-
-- `un` = unknown, can not detect any language.
-- `other` = the detected language does not have 2 letter code.
-
-### Examples
-
-Query:
-
-```sql
-SELECT detectLanguageUnknown('Ich bleibe für ein paar Tage.');
-```
-
-Result:
-
-```response
-┌─detectLanguageUnknown('Ich bleibe für ein paar Tage.')─┐
-│ de │
-└────────────────────────────────────────────────────────┘
-```
-
-## detectCharset
-
-The `detectCharset` function detects the character set of the non-UTF8-encoded input string.
-
-
-### Syntax
-
-``` sql
-detectCharset('text_to_be_analyzed')
-```
-
-### Arguments
-
-- `text_to_be_analyzed` — A collection (or sentences) of strings to analyze. [String](../data-types/string.md#string).
-
-### Returned value
-
-- A `String` containing the code of the detected character set
-
-### Examples
-
-Query:
-
-```sql
-SELECT detectCharset('Ich bleibe für ein paar Tage.');
-```
-
-Result:
-
-```response
-┌─detectCharset('Ich bleibe für ein paar Tage.')─┐
-│ WINDOWS-1252 │
-└────────────────────────────────────────────────┘
-```
+```
\ No newline at end of file
diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md
index e22dd5d827c..58fc1eba02e 100644
--- a/docs/en/sql-reference/functions/other-functions.md
+++ b/docs/en/sql-reference/functions/other-functions.md
@@ -3820,3 +3820,43 @@ Result:
10. │ df │ │
└────┴───────────────────────┘
```
+
+## displayName
+
+Returns the value of `display_name` from [config](../../operations/configuration-files.md/#configuration-files) or server Fully Qualified Domain Name (FQDN) if not set.
+
+**Syntax**
+
+```sql
+displayName()
+```
+
+**Returned value**
+
+- Value of `display_name` from config or server FQDN if not set. [String](../data-types/string.md).
+
+**Example**
+
+The `display_name` can be set in `config.xml`. Taking for example a server with `display_name` configured to 'production':
+
+```xml
+
+production
+```
+
+Query:
+
+```sql
+SELECT displayName();
+```
+
+Result:
+
+```response
+┌─displayName()─┐
+│ production │
+└───────────────┘
+```
+
diff --git a/docs/en/sql-reference/functions/string-functions.md b/docs/en/sql-reference/functions/string-functions.md
index c535b82d710..a258456345e 100644
--- a/docs/en/sql-reference/functions/string-functions.md
+++ b/docs/en/sql-reference/functions/string-functions.md
@@ -2178,6 +2178,32 @@ Result:
Alias: levenshteinDistance
+## editDistanceUTF8
+
+Calculates the [edit distance](https://en.wikipedia.org/wiki/Edit_distance) between two UTF8 strings.
+
+**Syntax**
+
+```sql
+editDistanceUTF8(string1, string2)
+```
+
+**Examples**
+
+``` sql
+SELECT editDistanceUTF8('我是谁', '我是我');
+```
+
+Result:
+
+``` text
+┌─editDistanceUTF8('我是谁', '我是我')──┐
+│ 1 │
+└─────────────────────────────────────┘
+```
+
+Alias: levenshteinDistanceUTF8
+
## damerauLevenshteinDistance
Calculates the [Damerau-Levenshtein distance](https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance) between two byte strings.
diff --git a/docs/en/sql-reference/functions/url-functions.md b/docs/en/sql-reference/functions/url-functions.md
index 8b3e4f44840..76c0141ac8b 100644
--- a/docs/en/sql-reference/functions/url-functions.md
+++ b/docs/en/sql-reference/functions/url-functions.md
@@ -818,6 +818,40 @@ The same as above, but including query string and fragment.
Example: `/top/news.html?page=2#comments`.
+### protocol
+
+Extracts the protocol from a URL.
+
+**Syntax**
+
+```sql
+protocol(url)
+```
+
+**Arguments**
+
+- `url` — URL to extract protocol from. [String](../data-types/string.md).
+
+**Returned value**
+
+- Protocol, or an empty string if it cannot be determined. [String](../data-types/string.md).
+
+**Example**
+
+Query:
+
+```sql
+SELECT protocol('https://clickhouse.com/');
+```
+
+Result:
+
+```response
+┌─protocol('https://clickhouse.com/')─┐
+│ https │
+└─────────────────────────────────────┘
+```
+
### queryString
Returns the query string without the initial question mark, `#` and everything after `#`.
diff --git a/docs/en/sql-reference/functions/uuid-functions.md b/docs/en/sql-reference/functions/uuid-functions.md
index 5f15907d029..e990023efbc 100644
--- a/docs/en/sql-reference/functions/uuid-functions.md
+++ b/docs/en/sql-reference/functions/uuid-functions.md
@@ -543,12 +543,17 @@ serverUUID()
Generates a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID).
-The generated Snowflake ID contains the current Unix timestamp in milliseconds 41 (+ 1 top zero bit) bits, followed by machine id (10 bits), a counter (12 bits) to distinguish IDs within a millisecond.
+The generated Snowflake ID contains the current Unix timestamp in milliseconds (41 + 1 top zero bits), followed by a machine id (10 bits), and a counter (12 bits) to distinguish IDs within a millisecond.
For any given timestamp (unix_ts_ms), the counter starts at 0 and is incremented by 1 for each new Snowflake ID until the timestamp changes.
In case the counter overflows, the timestamp field is incremented by 1 and the counter is reset to 0.
Function `generateSnowflakeID` guarantees that the counter field within a timestamp increments monotonically across all function invocations in concurrently running threads and queries.
+:::note
+The generated Snowflake IDs are based on the UNIX epoch 1970-01-01.
+While no standard or recommendation exists for the epoch of Snowflake IDs, implementations in other systems may use a different epoch, e.g. Twitter/X (2010-11-04) or Mastodon (2015-01-01).
+:::
+
```
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
@@ -605,6 +610,11 @@ SELECT generateSnowflakeID(1), generateSnowflakeID(2);
## snowflakeToDateTime
+:::warning
+This function is deprecated and can only be used if setting [allow_deprecated_snowflake_conversion_functions](../../operations/settings/settings.md#allow_deprecated_snowflake_conversion_functions) is enabled.
+The function will be removed at some point in future.
+:::
+
Extracts the timestamp component of a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) in [DateTime](../data-types/datetime.md) format.
**Syntax**
@@ -641,6 +651,11 @@ Result:
## snowflakeToDateTime64
+:::warning
+This function is deprecated and can only be used if setting [allow_deprecated_snowflake_conversion_functions](../../operations/settings/settings.md#allow_deprecated_snowflake_conversion_functions) is enabled.
+The function will be removed at some point in future.
+:::
+
Extracts the timestamp component of a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) in [DateTime64](../data-types/datetime64.md) format.
**Syntax**
@@ -677,6 +692,11 @@ Result:
## dateTimeToSnowflake
+:::warning
+This function is deprecated and can only be used if setting [allow_deprecated_snowflake_conversion_functions](../../operations/settings/settings.md#allow_deprecated_snowflake_conversion_functions) is enabled.
+The function will be removed at some point in future.
+:::
+
Converts a [DateTime](../data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the giving time.
**Syntax**
@@ -711,6 +731,11 @@ Result:
## dateTime64ToSnowflake
+:::warning
+This function is deprecated and can only be used if setting [allow_deprecated_snowflake_conversion_functions](../../operations/settings/settings.md#allow_deprecated_snowflake_conversion_functions) is enabled.
+The function will be removed at some point in future.
+:::
+
Convert a [DateTime64](../data-types/datetime64.md) to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the giving time.
**Syntax**
@@ -743,6 +768,148 @@ Result:
└─────────────────────────────┘
```
+## snowflakeIDToDateTime
+
+Returns the timestamp component of a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as a value of type [DateTime](../data-types/datetime.md).
+
+**Syntax**
+
+``` sql
+snowflakeIDToDateTime(value[, epoch[, time_zone]])
+```
+
+**Arguments**
+
+- `value` — Snowflake ID. [UInt64](../data-types/int-uint.md).
+- `epoch` - Epoch of the Snowflake ID in milliseconds since 1970-01-01. Defaults to 0 (1970-01-01). For the Twitter/X epoch (2010-11-04), provide 1288834974657. Optional. [UInt*](../data-types/int-uint.md).
+- `time_zone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone). The function interprets `value` according to the timezone. Optional. [String](../data-types/string.md).
+
+**Returned value**
+
+- The timestamp component of `value` as a [DateTime](../data-types/datetime.md) value.
+
+**Example**
+
+Query:
+
+```sql
+SELECT snowflakeIDToDateTime(7204436857747984384) AS res
+```
+
+Result:
+
+```
+┌─────────────────res─┐
+│ 2024-06-06 10:59:58 │
+└─────────────────────┘
+```
+
+## snowflakeIDToDateTime64
+
+Returns the timestamp component of a [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) as a value of type [DateTime64](../data-types/datetime64.md).
+
+**Syntax**
+
+``` sql
+snowflakeIDToDateTime64(value[, epoch[, time_zone]])
+```
+
+**Arguments**
+
+- `value` — Snowflake ID. [UInt64](../data-types/int-uint.md).
+- `epoch` - Epoch of the Snowflake ID in milliseconds since 1970-01-01. Defaults to 0 (1970-01-01). For the Twitter/X epoch (2010-11-04), provide 1288834974657. Optional. [UInt*](../data-types/int-uint.md).
+- `time_zone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md/#server_configuration_parameters-timezone). The function interprets `value` according to the timezone. Optional. [String](../data-types/string.md).
+
+**Returned value**
+
+- The timestamp component of `value` as a [DateTime64](../data-types/datetime64.md) with scale = 3, i.e. millisecond precision.
+
+**Example**
+
+Query:
+
+```sql
+SELECT snowflakeIDToDateTime64(7204436857747984384) AS res
+```
+
+Result:
+
+```
+┌─────────────────res─┐
+│ 2024-06-06 10:59:58 │
+└─────────────────────┘
+```
+
+## dateTimeToSnowflakeID
+
+Converts a [DateTime](../data-types/datetime.md) value to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.
+
+**Syntax**
+
+``` sql
+dateTimeToSnowflakeID(value[, epoch])
+```
+
+**Arguments**
+
+- `value` — Date with time. [DateTime](../data-types/datetime.md).
+- `epoch` - Epoch of the Snowflake ID in milliseconds since 1970-01-01. Defaults to 0 (1970-01-01). For the Twitter/X epoch (2010-11-04), provide 1288834974657. Optional. [UInt*](../data-types/int-uint.md).
+
+**Returned value**
+
+- Input value converted to [UInt64](../data-types/int-uint.md) as the first Snowflake ID at that time.
+
+**Example**
+
+Query:
+
+```sql
+SELECT toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt, dateTimeToSnowflakeID(dt) AS res;
+```
+
+Result:
+
+```
+┌──────────────────dt─┬─────────────────res─┐
+│ 2021-08-15 18:57:56 │ 6832626392367104000 │
+└─────────────────────┴─────────────────────┘
+```
+
+## dateTime64ToSnowflakeID
+
+Converts a [DateTime64](../data-types/datetime64.md) to the first [Snowflake ID](https://en.wikipedia.org/wiki/Snowflake_ID) at the given time.
+
+**Syntax**
+
+``` sql
+dateTime64ToSnowflakeID(value[, epoch])
+```
+
+**Arguments**
+
+- `value` — Date with time. [DateTime64](../data-types/datetime64.md).
+- `epoch` - Epoch of the Snowflake ID in milliseconds since 1970-01-01. Defaults to 0 (1970-01-01). For the Twitter/X epoch (2010-11-04), provide 1288834974657. Optional. [UInt*](../data-types/int-uint.md).
+
+**Returned value**
+
+- Input value converted to [UInt64](../data-types/int-uint.md) as the first Snowflake ID at that time.
+
+**Example**
+
+Query:
+
+```sql
+SELECT toDateTime('2021-08-15 18:57:56.493', 3, 'Asia/Shanghai') AS dt, dateTime64ToSnowflakeID(dt) AS res;
+```
+
+Result:
+
+```
+┌──────────────────────dt─┬─────────────────res─┐
+│ 2021-08-15 18:57:56.493 │ 6832626394434895872 │
+└─────────────────────────┴─────────────────────┘
+```
+
## See also
- [dictGetUUID](../functions/ext-dict-functions.md#ext_dict_functions-other)
diff --git a/docs/en/sql-reference/statements/alter/constraint.md b/docs/en/sql-reference/statements/alter/constraint.md
index 29675f704b5..54c456f9aa2 100644
--- a/docs/en/sql-reference/statements/alter/constraint.md
+++ b/docs/en/sql-reference/statements/alter/constraint.md
@@ -9,8 +9,8 @@ sidebar_label: CONSTRAINT
Constraints could be added or deleted using following syntax:
``` sql
-ALTER TABLE [db].name [ON CLUSTER cluster] ADD CONSTRAINT constraint_name CHECK expression;
-ALTER TABLE [db].name [ON CLUSTER cluster] DROP CONSTRAINT constraint_name;
+ALTER TABLE [db].name [ON CLUSTER cluster] ADD CONSTRAINT [IF NOT EXISTS] constraint_name CHECK expression;
+ALTER TABLE [db].name [ON CLUSTER cluster] DROP CONSTRAINT [IF EXISTS] constraint_name;
```
See more on [constraints](../../../sql-reference/statements/create/table.md#constraints).
diff --git a/docs/en/sql-reference/window-functions/index.md b/docs/en/sql-reference/window-functions/index.md
index 32ebc6d028f..3a8afd10359 100644
--- a/docs/en/sql-reference/window-functions/index.md
+++ b/docs/en/sql-reference/window-functions/index.md
@@ -36,7 +36,7 @@ Finds non-negative derivative for given `metric_column` by `timestamp_column`.
`INTERVAL` can be omitted, default is `INTERVAL 1 SECOND`.
The computed value is the following for each row:
- `0` for 1st row,
-- ${metric_i - metric_{i-1} \over timestamp_i - timestamp_{i-1}} * interval$ for $i_th$ row.
+- ${\text{metric}_i - \text{metric}_{i-1} \over \text{timestamp}_i - \text{timestamp}_{i-1}} * \text{interval}$ for $i_{th}$ row.
## Syntax
diff --git a/docs/ru/development/developer-instruction.md b/docs/ru/development/developer-instruction.md
index 01ff4dd5f28..bf42edf89ff 100644
--- a/docs/ru/development/developer-instruction.md
+++ b/docs/ru/development/developer-instruction.md
@@ -283,7 +283,7 @@ Pull request можно создать, даже если работа над з
Тесты будут запущены, как только сотрудники ClickHouse поставят для pull request тег «Can be tested». Результаты первых проверок (стиль кода) появятся уже через несколько минут. Результаты сборки появятся примерно через пол часа. Результаты основного набора тестов будут доступны в пределах часа.
-Система подготовит сборки ClickHouse специально для вашего pull request. Для их получения, нажмите на ссылку «Details» у проверки «Clickhouse build check». Там вы сможете найти прямые ссылки на собранные .deb пакеты ClickHouse, которые, при желании, вы даже сможете установить на свои продакшен серверы (если не страшно).
+Система подготовит сборки ClickHouse специально для вашего pull request. Для их получения, нажмите на ссылку «Details» у проверки «Builds». Там вы сможете найти прямые ссылки на собранные .deb пакеты ClickHouse, которые, при желании, вы даже сможете установить на свои продакшен серверы (если не страшно).
Вероятнее всего, часть сборок не будет успешной с первого раза. Ведь мы проверяем сборку кода и gcc и clang, а при сборке с помощью clang включаются почти все существующие в природе warnings (всегда с флагом `-Werror`). На той же странице, вы сможете найти логи сборки - вам не обязательно самому собирать ClickHouse всеми возможными способами.
diff --git a/docs/ru/interfaces/cli.md b/docs/ru/interfaces/cli.md
index 4d19cf50ae1..86eeaac2da7 100644
--- a/docs/ru/interfaces/cli.md
+++ b/docs/ru/interfaces/cli.md
@@ -141,6 +141,7 @@ $ clickhouse-client --param_tbl="numbers" --param_db="system" --param_col="numbe
- `--secure` — если указано, будет использован безопасный канал.
- `--history_file` - путь к файлу с историей команд.
- `--param_` — значение параметра для [запроса с параметрами](#cli-queries-with-parameters).
+- `--jwt` – авторизация с использованием JSON Web Token. Доступно только в ClickHouse Cloud.
Вместо параметров `--host`, `--port`, `--user` и `--password` клиент ClickHouse также поддерживает строки подключения (смотри следующий раздел).
diff --git a/docs/ru/sql-reference/statements/alter/constraint.md b/docs/ru/sql-reference/statements/alter/constraint.md
index ad5f23e5fdc..45b0f5f6350 100644
--- a/docs/ru/sql-reference/statements/alter/constraint.md
+++ b/docs/ru/sql-reference/statements/alter/constraint.md
@@ -11,8 +11,8 @@ sidebar_label: "Манипуляции с ограничениями"
Добавить или удалить ограничение можно с помощью запросов
``` sql
-ALTER TABLE [db].name [ON CLUSTER cluster] ADD CONSTRAINT constraint_name CHECK expression;
-ALTER TABLE [db].name [ON CLUSTER cluster] DROP CONSTRAINT constraint_name;
+ALTER TABLE [db].name [ON CLUSTER cluster] ADD CONSTRAINT [IF NOT EXISTS] constraint_name CHECK expression;
+ALTER TABLE [db].name [ON CLUSTER cluster] DROP CONSTRAINT [IF EXISTS] constraint_name;
```
Запросы выполняют добавление или удаление метаданных об ограничениях таблицы `[db].name`, поэтому выполняются мгновенно.
diff --git a/docs/zh/sql-reference/statements/alter/constraint.md b/docs/zh/sql-reference/statements/alter/constraint.md
index 86ffcf09d65..59edcf10645 100644
--- a/docs/zh/sql-reference/statements/alter/constraint.md
+++ b/docs/zh/sql-reference/statements/alter/constraint.md
@@ -9,8 +9,8 @@ sidebar_label: 约束
约束可以使用以下语法添加或删除:
``` sql
-ALTER TABLE [db].name ADD CONSTRAINT constraint_name CHECK expression;
-ALTER TABLE [db].name DROP CONSTRAINT constraint_name;
+ALTER TABLE [db].name [ON CLUSTER cluster] ADD CONSTRAINT [IF NOT EXISTS] constraint_name CHECK expression;
+ALTER TABLE [db].name [ON CLUSTER cluster] DROP CONSTRAINT [IF EXISTS] constraint_name;
```
查看[constraints](../../../sql-reference/statements/create/table.mdx#constraints)。
diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp
index efe23d57478..c4878b18f00 100644
--- a/programs/client/Client.cpp
+++ b/programs/client/Client.cpp
@@ -64,6 +64,7 @@ namespace ErrorCodes
extern const int NETWORK_ERROR;
extern const int AUTHENTICATION_FAILED;
extern const int NO_ELEMENTS_IN_CONFIG;
+ extern const int USER_EXPIRED;
}
@@ -74,6 +75,12 @@ void Client::processError(const String & query) const
fmt::print(stderr, "Received exception from server (version {}):\n{}\n",
server_version,
getExceptionMessage(*server_exception, print_stack_trace, true));
+
+ if (server_exception->code() == ErrorCodes::USER_EXPIRED)
+ {
+ server_exception->rethrow();
+ }
+
if (is_interactive)
{
fmt::print(stderr, "\n");
@@ -944,6 +951,7 @@ void Client::addOptions(OptionsDescription & options_description)
("ssh-key-file", po::value(), "File containing the SSH private key for authenticate with the server.")
("ssh-key-passphrase", po::value(), "Passphrase for the SSH private key specified by --ssh-key-file.")
("quota_key", po::value(), "A string to differentiate quotas when the user have keyed quotas configured on server")
+ ("jwt", po::value(), "Use JWT for authentication")
("max_client_network_bandwidth", po::value(), "the maximum speed of data exchange over the network for the client in bytes per second.")
("compression", po::value(), "enable or disable compression (enabled by default for remote communication and disabled for localhost communication).")
@@ -1102,6 +1110,12 @@ void Client::processOptions(const OptionsDescription & options_description,
config().setBool("no-warnings", true);
if (options.count("fake-drop"))
config().setString("ignore_drop_queries_probability", "1");
+ if (options.count("jwt"))
+ {
+ if (!options["user"].defaulted())
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "User and JWT flags can't be specified together");
+ config().setString("jwt", options["jwt"].as());
+ }
if (options.count("accept-invalid-certificate"))
{
config().setString("openSSL.client.invalidCertificateHandler.name", "AcceptCertificateHandler");
diff --git a/programs/keeper-client/KeeperClient.cpp b/programs/keeper-client/KeeperClient.cpp
index ebec337060c..68adc2c2aac 100644
--- a/programs/keeper-client/KeeperClient.cpp
+++ b/programs/keeper-client/KeeperClient.cpp
@@ -368,7 +368,7 @@ int KeeperClient::main(const std::vector & /* args */)
return 0;
}
- DB::ConfigProcessor config_processor(config().getString("config-file", "config.xml"));
+ ConfigProcessor config_processor(config().getString("config-file", "config.xml"));
/// This will handle a situation when clickhouse is running on the embedded config, but config.d folder is also present.
ConfigProcessor::registerEmbeddedConfig("config.xml", "");
diff --git a/programs/keeper-client/Parser.cpp b/programs/keeper-client/Parser.cpp
index 5b16e6d2c23..51f85cf4a69 100644
--- a/programs/keeper-client/Parser.cpp
+++ b/programs/keeper-client/Parser.cpp
@@ -12,8 +12,7 @@ bool parseKeeperArg(IParser::Pos & pos, Expected & expected, String & result)
if (!parseIdentifierOrStringLiteral(pos, expected, result))
return false;
}
-
- while (pos->type != TokenType::Whitespace && pos->type != TokenType::EndOfStream && pos->type != TokenType::Semicolon)
+ else if (pos->type == TokenType::Number)
{
result.append(pos->begin, pos->end);
++pos;
@@ -40,8 +39,8 @@ bool KeeperParser::parseImpl(Pos & pos, ASTPtr & node, Expected & expected)
for (const auto & pair : KeeperClient::commands)
expected.add(pos, pair.first.data());
- for (const auto & flwc : four_letter_word_commands)
- expected.add(pos, flwc.data());
+ for (const auto & four_letter_word_command : four_letter_word_commands)
+ expected.add(pos, four_letter_word_command.data());
if (pos->type != TokenType::BareWord)
return false;
diff --git a/programs/keeper-client/Parser.h b/programs/keeper-client/Parser.h
index 57ee6ce4a18..503edfa4f73 100644
--- a/programs/keeper-client/Parser.h
+++ b/programs/keeper-client/Parser.h
@@ -11,7 +11,6 @@ namespace DB
{
bool parseKeeperArg(IParser::Pos & pos, Expected & expected, String & result);
-
bool parseKeeperPath(IParser::Pos & pos, Expected & expected, String & path);
diff --git a/programs/keeper/Keeper.cpp b/programs/keeper/Keeper.cpp
index 0d3c1f10894..bb04ff88936 100644
--- a/programs/keeper/Keeper.cpp
+++ b/programs/keeper/Keeper.cpp
@@ -577,8 +577,7 @@ try
#if USE_SSL
CertificateReloader::instance().tryLoad(*config);
#endif
- },
- /* already_loaded = */ false); /// Reload it right now (initial loading)
+ });
SCOPE_EXIT({
LOG_INFO(log, "Shutting down.");
diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp
index 4d5cfb09e6a..cb1c35743b2 100644
--- a/programs/local/LocalServer.cpp
+++ b/programs/local/LocalServer.cpp
@@ -732,11 +732,8 @@ void LocalServer::processConfig()
attachInformationSchema(global_context, *createMemoryDatabaseIfNotExists(global_context, DatabaseCatalog::INFORMATION_SCHEMA_UPPERCASE));
}
- server_display_name = config().getString("display_name", getFQDNOrHostName());
- prompt_by_server_display_name = config().getRawString("prompt_by_server_display_name.default", "{display_name} :) ");
- std::map prompt_substitutions{{"display_name", server_display_name}};
- for (const auto & [key, value] : prompt_substitutions)
- boost::replace_all(prompt_by_server_display_name, "{" + key + "}", value);
+ server_display_name = config().getString("display_name", "");
+ prompt_by_server_display_name = config().getRawString("prompt_by_server_display_name.default", ":) ");
global_context->setQueryKindInitial();
global_context->setQueryKind(query_kind);
diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp
index 6414f7f6ea5..e2554a6ff03 100644
--- a/programs/server/Server.cpp
+++ b/programs/server/Server.cpp
@@ -10,6 +10,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -721,11 +722,6 @@ try
CurrentMetrics::set(CurrentMetrics::Revision, ClickHouseRevision::getVersionRevision());
CurrentMetrics::set(CurrentMetrics::VersionInteger, ClickHouseRevision::getVersionInteger());
- Poco::ThreadPool server_pool(3, server_settings.max_connections);
- std::mutex servers_lock;
- std::vector servers;
- std::vector servers_to_start_before_tables;
-
/** Context contains all that query execution is dependent:
* settings, available functions, data types, aggregate functions, databases, ...
*/
@@ -823,6 +819,18 @@ try
total_memory_tracker.setSampleMaxAllocationSize(server_settings.total_memory_profiler_sample_max_allocation_size);
}
+ Poco::ThreadPool server_pool(
+ /* minCapacity */3,
+ /* maxCapacity */server_settings.max_connections,
+ /* idleTime */60,
+ /* stackSize */POCO_THREAD_STACK_SIZE,
+ server_settings.global_profiler_real_time_period_ns,
+ server_settings.global_profiler_cpu_time_period_ns);
+
+ std::mutex servers_lock;
+ std::vector servers;
+ std::vector servers_to_start_before_tables;
+
/// Wait for all threads to avoid possible use-after-free (for example logging objects can be already destroyed).
SCOPE_EXIT({
Stopwatch watch;
@@ -1372,8 +1380,8 @@ try
global_context->setQueryCache(query_cache_max_size_in_bytes, query_cache_max_entries, query_cache_query_cache_max_entry_size_in_bytes, query_cache_max_entry_size_in_rows);
#if USE_EMBEDDED_COMPILER
- size_t compiled_expression_cache_max_size_in_bytes = config().getUInt64("compiled_expression_cache_size", DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_SIZE);
- size_t compiled_expression_cache_max_elements = config().getUInt64("compiled_expression_cache_elements_size", DEFAULT_COMPILED_EXPRESSION_CACHE_MAX_ENTRIES);
+ size_t compiled_expression_cache_max_size_in_bytes = server_settings.compiled_expression_cache_size;
+ size_t compiled_expression_cache_max_elements = server_settings.compiled_expression_cache_elements_size;
CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_max_size_in_bytes, compiled_expression_cache_max_elements);
#endif
@@ -1399,8 +1407,8 @@ try
tryLogCurrentException(log, "Disabling cgroup memory observer because of an error during initialization");
}
- const std::string cert_path = config().getString("openSSL.server.certificateFile", "");
- const std::string key_path = config().getString("openSSL.server.privateKeyFile", "");
+ std::string cert_path = config().getString("openSSL.server.certificateFile", "");
+ std::string key_path = config().getString("openSSL.server.privateKeyFile", "");
std::vector extra_paths = {include_from_path};
if (!cert_path.empty())
@@ -1408,6 +1416,18 @@ try
if (!key_path.empty())
extra_paths.emplace_back(key_path);
+ Poco::Util::AbstractConfiguration::Keys protocols;
+ config().keys("protocols", protocols);
+ for (const auto & protocol : protocols)
+ {
+ cert_path = config().getString("protocols." + protocol + ".certificateFile", "");
+ key_path = config().getString("protocols." + protocol + ".privateKeyFile", "");
+ if (!cert_path.empty())
+ extra_paths.emplace_back(cert_path);
+ if (!key_path.empty())
+ extra_paths.emplace_back(key_path);
+ }
+
auto main_config_reloader = std::make_unique(
config_path,
extra_paths,
@@ -1520,6 +1540,8 @@ try
global_context->setMaxDictionaryNumToWarn(new_server_settings.max_dictionary_num_to_warn);
global_context->setMaxDatabaseNumToWarn(new_server_settings.max_database_num_to_warn);
global_context->setMaxPartNumToWarn(new_server_settings.max_part_num_to_warn);
+ /// Only for system.server_settings
+ global_context->setConfigReloaderInterval(new_server_settings.config_reload_interval_ms);
SlotCount concurrent_threads_soft_limit = UnlimitedSlots;
if (new_server_settings.concurrent_threads_soft_limit_num > 0 && new_server_settings.concurrent_threads_soft_limit_num < concurrent_threads_soft_limit)
@@ -1609,6 +1631,10 @@ try
0, // We don't need any threads one all the parts will be deleted
new_server_settings.max_parts_cleaning_thread_pool_size);
+
+ global_context->setMergeWorkload(new_server_settings.merge_workload);
+ global_context->setMutationWorkload(new_server_settings.mutation_workload);
+
if (config->has("resources"))
{
global_context->getResourceManager()->updateConfiguration(*config);
@@ -1644,7 +1670,7 @@ try
CompressionCodecEncrypted::Configuration::instance().tryLoad(*config, "encryption_codecs");
#if USE_SSL
- CertificateReloader::instance().tryLoad(*config);
+ CertificateReloader::instance().tryReloadAll(*config);
#endif
NamedCollectionFactory::instance().reloadFromConfig(*config);
@@ -1678,8 +1704,7 @@ try
/// Must be the last.
latest_config = config;
- },
- /* already_loaded = */ false); /// Reload it right now (initial loading)
+ });
const auto listen_hosts = getListenHosts(config());
const auto interserver_listen_hosts = getInterserverListenHosts(config());
diff --git a/programs/server/config.xml b/programs/server/config.xml
index b7a4b8dd0e9..94825a55f67 100644
--- a/programs/server/config.xml
+++ b/programs/server/config.xml
@@ -29,7 +29,14 @@
-->
1000M
10
+
+
+
+
+
+
+
-
+ true
@@ -408,13 +415,11 @@
- 5368709120
+ You should not lower this value. -->
+
-
- 5368709120
+
+
- 1000
+
- 134217728
+
- 10000
+
+
+
+
/var/lib/clickhouse/caches/
@@ -1155,6 +1170,18 @@
false
+
+
+ system
+
+ 7500
+ 1048576
+ 8192
+ 524288
+ 1000
+ false
+
+
+
+
+
-
-
- 1073741824
- 1024
- 1048576
- 30000000
-
-
backups
diff --git a/programs/server/config.yaml.example b/programs/server/config.yaml.example
index 9fc188e97aa..5d5499f876c 100644
--- a/programs/server/config.yaml.example
+++ b/programs/server/config.yaml.example
@@ -260,7 +260,10 @@ uncompressed_cache_size: 8589934592
# Approximate size of mark cache, used in tables of MergeTree family.
# In bytes. Cache is single for server. Memory is allocated only on demand.
# You should not lower this value.
-mark_cache_size: 5368709120
+# mark_cache_size: 5368709120
+
+# For marks of secondary indices.
+# index_mark_cache_size: 5368709120
# If you enable the `min_bytes_to_use_mmap_io` setting,
# the data in MergeTree tables can be read with mmap to avoid copying from kernel to userspace.
@@ -277,13 +280,20 @@ mark_cache_size: 5368709120
# in query or server memory usage - because this memory can be discarded similar to OS page cache.
# The cache is dropped (the files are closed) automatically on removal of old parts in MergeTree,
# also it can be dropped manually by the SYSTEM DROP MMAP CACHE query.
-mmap_cache_size: 1000
+# mmap_cache_size: 1024
# Cache size in bytes for compiled expressions.
-compiled_expression_cache_size: 134217728
+# compiled_expression_cache_size: 134217728
# Cache size in elements for compiled expressions.
-compiled_expression_cache_elements_size: 10000
+# compiled_expression_cache_elements_size: 10000
+
+# Configuration for the query cache
+# query_cache:
+# max_size_in_bytes: 1073741824
+# max_entries: 1024
+# max_entry_size_in_bytes: 1048576
+# max_entry_size_in_rows: 30000000
# Path to data directory, with trailing slash.
path: /var/lib/clickhouse/
@@ -726,6 +736,13 @@ metric_log:
flush_interval_milliseconds: 7500
collect_interval_milliseconds: 1000
+# Error log contains rows with current values of errors collected with "collect_interval_milliseconds" interval.
+error_log:
+ database: system
+ table: error_log
+ flush_interval_milliseconds: 7500
+ collect_interval_milliseconds: 1000
+
# Asynchronous metric log contains values of metrics from
# system.asynchronous_metrics.
asynchronous_metric_log:
diff --git a/src/Access/AccessControl.cpp b/src/Access/AccessControl.cpp
index c3bb42160ad..353358fac65 100644
--- a/src/Access/AccessControl.cpp
+++ b/src/Access/AccessControl.cpp
@@ -261,7 +261,24 @@ AccessControl::AccessControl()
}
-AccessControl::~AccessControl() = default;
+AccessControl::~AccessControl()
+{
+ try
+ {
+ AccessControl::shutdown();
+ }
+ catch (...)
+ {
+ tryLogCurrentException(__PRETTY_FUNCTION__);
+ }
+}
+
+
+void AccessControl::shutdown()
+{
+ MultipleAccessStorage::shutdown();
+ removeAllStorages();
+}
void AccessControl::setUpFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_,
diff --git a/src/Access/AccessControl.h b/src/Access/AccessControl.h
index d1537219a06..bfaf256ad48 100644
--- a/src/Access/AccessControl.h
+++ b/src/Access/AccessControl.h
@@ -53,6 +53,9 @@ public:
AccessControl();
~AccessControl() override;
+ /// Shutdown the access control and stops all background activity.
+ void shutdown() override;
+
/// Initializes access storage (user directories).
void setUpFromMainConfig(const Poco::Util::AbstractConfiguration & config_, const String & config_path_,
const zkutil::GetZooKeeper & get_zookeeper_function_);
diff --git a/src/Access/Authentication.cpp b/src/Access/Authentication.cpp
index bf1fe3feec3..f8df56516ec 100644
--- a/src/Access/Authentication.cpp
+++ b/src/Access/Authentication.cpp
@@ -108,6 +108,9 @@ bool Authentication::areCredentialsValid(
case AuthenticationType::HTTP:
throw Authentication::Require("ClickHouse Basic Authentication");
+ case AuthenticationType::JWT:
+ throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "JWT is available only in ClickHouse Cloud");
+
case AuthenticationType::KERBEROS:
return external_authenticators.checkKerberosCredentials(auth_data.getKerberosRealm(), *gss_acceptor_context);
@@ -149,6 +152,9 @@ bool Authentication::areCredentialsValid(
case AuthenticationType::SSL_CERTIFICATE:
throw Authentication::Require("ClickHouse X.509 Authentication");
+ case AuthenticationType::JWT:
+ throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "JWT is available only in ClickHouse Cloud");
+
case AuthenticationType::SSH_KEY:
#if USE_SSH
throw Authentication::Require("SSH Keys Authentication");
@@ -193,6 +199,9 @@ bool Authentication::areCredentialsValid(
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSH is disabled, because ClickHouse is built without libssh");
#endif
+ case AuthenticationType::JWT:
+ throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "JWT is available only in ClickHouse Cloud");
+
case AuthenticationType::BCRYPT_PASSWORD:
return checkPasswordBcrypt(basic_credentials->getPassword(), auth_data.getPasswordHashBinary());
@@ -222,6 +231,9 @@ bool Authentication::areCredentialsValid(
case AuthenticationType::HTTP:
throw Authentication::Require("ClickHouse Basic Authentication");
+ case AuthenticationType::JWT:
+ throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "JWT is available only in ClickHouse Cloud");
+
case AuthenticationType::KERBEROS:
throw Authentication::Require(auth_data.getKerberosRealm());
@@ -254,6 +266,9 @@ bool Authentication::areCredentialsValid(
case AuthenticationType::HTTP:
throw Authentication::Require("ClickHouse Basic Authentication");
+ case AuthenticationType::JWT:
+ throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "JWT is available only in ClickHouse Cloud");
+
case AuthenticationType::KERBEROS:
throw Authentication::Require(auth_data.getKerberosRealm());
diff --git a/src/Access/AuthenticationData.cpp b/src/Access/AuthenticationData.cpp
index 70355fadfbd..e9bc111e18a 100644
--- a/src/Access/AuthenticationData.cpp
+++ b/src/Access/AuthenticationData.cpp
@@ -135,6 +135,7 @@ void AuthenticationData::setPassword(const String & password_)
case AuthenticationType::BCRYPT_PASSWORD:
case AuthenticationType::NO_PASSWORD:
case AuthenticationType::LDAP:
+ case AuthenticationType::JWT:
case AuthenticationType::KERBEROS:
case AuthenticationType::SSL_CERTIFICATE:
case AuthenticationType::SSH_KEY:
@@ -251,6 +252,7 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
case AuthenticationType::NO_PASSWORD:
case AuthenticationType::LDAP:
+ case AuthenticationType::JWT:
case AuthenticationType::KERBEROS:
case AuthenticationType::SSL_CERTIFICATE:
case AuthenticationType::SSH_KEY:
@@ -322,6 +324,10 @@ std::shared_ptr<ASTAuthenticationData> AuthenticationData::toAST() const
node->children.push_back(std::make_shared(getLDAPServerName()));
break;
}
+ case AuthenticationType::JWT:
+ {
+ throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "JWT is available only in ClickHouse Cloud");
+ }
case AuthenticationType::KERBEROS:
{
const auto & realm = getKerberosRealm();
diff --git a/src/Access/CachedAccessChecking.cpp b/src/Access/CachedAccessChecking.cpp
index aa8ef6073d3..0d629e7b77a 100644
--- a/src/Access/CachedAccessChecking.cpp
+++ b/src/Access/CachedAccessChecking.cpp
@@ -4,12 +4,12 @@
namespace DB
{
-CachedAccessChecking::CachedAccessChecking(const std::shared_ptr<const ContextAccess> & access_, AccessFlags access_flags_)
+CachedAccessChecking::CachedAccessChecking(const std::shared_ptr<const ContextAccessWrapper> & access_, AccessFlags access_flags_)
: CachedAccessChecking(access_, AccessRightsElement{access_flags_})
{
}
-CachedAccessChecking::CachedAccessChecking(const std::shared_ptr<const ContextAccess> & access_, const AccessRightsElement & element_)
+CachedAccessChecking::CachedAccessChecking(const std::shared_ptr<const ContextAccessWrapper> & access_, const AccessRightsElement & element_)
: access(access_), element(element_)
{
}
diff --git a/src/Access/CachedAccessChecking.h b/src/Access/CachedAccessChecking.h
index e87c28dd823..aaeea6ceddc 100644
--- a/src/Access/CachedAccessChecking.h
+++ b/src/Access/CachedAccessChecking.h
@@ -1,6 +1,7 @@
#pragma once
#include
+#include
#include
@@ -13,14 +14,14 @@ class ContextAccess;
class CachedAccessChecking
{
public:
- CachedAccessChecking(const std::shared_ptr<const ContextAccess> & access_, AccessFlags access_flags_);
- CachedAccessChecking(const std::shared_ptr<const ContextAccess> & access_, const AccessRightsElement & element_);
+ CachedAccessChecking(const std::shared_ptr<const ContextAccessWrapper> & access_, AccessFlags access_flags_);
+ CachedAccessChecking(const std::shared_ptr<const ContextAccessWrapper> & access_, const AccessRightsElement & element_);
~CachedAccessChecking();
bool checkAccess(bool throw_if_denied = true);
private:
- const std::shared_ptr<const ContextAccess> access;
+ const std::shared_ptr<const ContextAccessWrapper> access;
const AccessRightsElement element;
bool checked = false;
bool result = false;
diff --git a/src/Access/Common/AuthenticationType.cpp b/src/Access/Common/AuthenticationType.cpp
index 2cc126ad9b7..427765b8a79 100644
--- a/src/Access/Common/AuthenticationType.cpp
+++ b/src/Access/Common/AuthenticationType.cpp
@@ -72,6 +72,11 @@ const AuthenticationTypeInfo & AuthenticationTypeInfo::get(AuthenticationType ty
static const auto info = make_info(Keyword::HTTP);
return info;
}
+ case AuthenticationType::JWT:
+ {
+ static const auto info = make_info(Keyword::JWT);
+ return info;
+ }
case AuthenticationType::MAX:
break;
}
diff --git a/src/Access/Common/AuthenticationType.h b/src/Access/Common/AuthenticationType.h
index a68549aff4c..16f4388bbff 100644
--- a/src/Access/Common/AuthenticationType.h
+++ b/src/Access/Common/AuthenticationType.h
@@ -41,6 +41,9 @@ enum class AuthenticationType : uint8_t
/// Authentication through HTTP protocol
HTTP,
+ /// JSON Web Token
+ JWT,
+
MAX,
};
diff --git a/src/Access/ContextAccess.cpp b/src/Access/ContextAccess.cpp
index 28a825de6cf..a2807ecc5ea 100644
--- a/src/Access/ContextAccess.cpp
+++ b/src/Access/ContextAccess.cpp
@@ -20,6 +20,7 @@
#include
#include
#include
+#include
namespace DB
@@ -271,7 +272,7 @@ namespace
std::shared_ptr<const ContextAccess> ContextAccess::fromContext(const ContextPtr & context)
{
- return context->getAccess();
+ return ContextAccessWrapper::fromContext(context)->getAccess();
}
@@ -560,7 +561,7 @@ std::shared_ptr ContextAccess::getAccessRightsWithImplicit()
template <bool throw_if_denied, bool grant_option, typename... Args>
-bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... args) const
+bool ContextAccess::checkAccessImplHelper(const ContextPtr & context, AccessFlags flags, const Args &... args) const
{
if (user_was_dropped)
{
@@ -573,8 +574,10 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
if (params.full_access)
return true;
- auto access_granted = []
+ auto access_granted = [&]
{
+ if constexpr (throw_if_denied)
+ context->addQueryPrivilegesInfo(AccessRightsElement{flags, args...}.toStringWithoutOptions(), true);
return true;
};
@@ -583,7 +586,10 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
FmtArgs && ...fmt_args [[maybe_unused]])
{
if constexpr (throw_if_denied)
+ {
+ context->addQueryPrivilegesInfo(AccessRightsElement{flags, args...}.toStringWithoutOptions(), false);
throw Exception(error_code, std::move(fmt_string), getUserName(), std::forward(fmt_args)...);
+ }
return false;
};
@@ -686,102 +692,102 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
}
template
-bool ContextAccess::checkAccessImpl(const AccessFlags & flags) const
+bool ContextAccess::checkAccessImpl(const ContextPtr & context, const AccessFlags & flags) const
{
- return checkAccessImplHelper(flags);
+ return checkAccessImplHelper(context, flags);
}
template
-bool ContextAccess::checkAccessImpl(const AccessFlags & flags, std::string_view database, const Args &... args) const
+bool ContextAccess::checkAccessImpl(const ContextPtr & context, const AccessFlags & flags, std::string_view database, const Args &... args) const
{
- return checkAccessImplHelper(flags, database.empty() ? params.current_database : database, args...);
+ return checkAccessImplHelper(context, flags, database.empty() ? params.current_database : database, args...);
}
template
-bool ContextAccess::checkAccessImplHelper(const AccessRightsElement & element) const
+bool ContextAccess::checkAccessImplHelper(const ContextPtr & context, const AccessRightsElement & element) const
{
assert(!element.grant_option || grant_option);
if (element.isGlobalWithParameter())
{
if (element.any_parameter)
- return checkAccessImpl(element.access_flags);
+ return checkAccessImpl(context, element.access_flags);
else
- return checkAccessImpl(element.access_flags, element.parameter);
+ return checkAccessImpl(context, element.access_flags, element.parameter);
}
else if (element.any_database)
- return checkAccessImpl(element.access_flags);
+ return checkAccessImpl(context, element.access_flags);
else if (element.any_table)
- return checkAccessImpl(element.access_flags, element.database);
+ return checkAccessImpl(context, element.access_flags, element.database);
else if (element.any_column)
- return checkAccessImpl(element.access_flags, element.database, element.table);
+ return checkAccessImpl(context, element.access_flags, element.database, element.table);
else
- return checkAccessImpl(element.access_flags, element.database, element.table, element.columns);
+ return checkAccessImpl(context, element.access_flags, element.database, element.table, element.columns);
}
template
-bool ContextAccess::checkAccessImpl(const AccessRightsElement & element) const
+bool ContextAccess::checkAccessImpl(const ContextPtr & context, const AccessRightsElement & element) const
{
if constexpr (grant_option)
{
- return checkAccessImplHelper(element);
+ return checkAccessImplHelper(context, element);
}
else
{
if (element.grant_option)
- return checkAccessImplHelper(element);
+ return checkAccessImplHelper(context, element);
else
- return checkAccessImplHelper(element);
+ return checkAccessImplHelper(context, element);
}
}
template
-bool ContextAccess::checkAccessImpl(const AccessRightsElements & elements) const
+bool ContextAccess::checkAccessImpl(const ContextPtr & context, const AccessRightsElements & elements) const
{
for (const auto & element : elements)
- if (!checkAccessImpl(element))
+ if (!checkAccessImpl(context, element))
return false;
return true;
}
-bool ContextAccess::isGranted(const AccessFlags & flags) const { return checkAccessImpl(flags); }
-bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database) const { return checkAccessImpl(flags, database); }
-bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table) const { return checkAccessImpl(flags, database, table); }
-bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return checkAccessImpl(flags, database, table, column); }
-bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { return checkAccessImpl(flags, database, table, columns); }
-bool ContextAccess::isGranted(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return checkAccessImpl(flags, database, table, columns); }
-bool ContextAccess::isGranted(const AccessRightsElement & element) const { return checkAccessImpl(element); }
-bool ContextAccess::isGranted(const AccessRightsElements & elements) const { return checkAccessImpl(elements); }
+bool ContextAccess::isGranted(const ContextPtr & context, const AccessFlags & flags) const { return checkAccessImpl(context, flags); }
+bool ContextAccess::isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database) const { return checkAccessImpl(context, flags, database); }
+bool ContextAccess::isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table) const { return checkAccessImpl(context, flags, database, table); }
+bool ContextAccess::isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return checkAccessImpl(context, flags, database, table, column); }
+bool ContextAccess::isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { return checkAccessImpl(context, flags, database, table, columns); }
+bool ContextAccess::isGranted(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return checkAccessImpl(context, flags, database, table, columns); }
+bool ContextAccess::isGranted(const ContextPtr & context, const AccessRightsElement & element) const { return checkAccessImpl(context, element); }
+bool ContextAccess::isGranted(const ContextPtr & context, const AccessRightsElements & elements) const { return checkAccessImpl(context, elements); }
-bool ContextAccess::hasGrantOption(const AccessFlags & flags) const { return checkAccessImpl(flags); }
-bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database) const { return checkAccessImpl(flags, database); }
-bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const { return checkAccessImpl(flags, database, table); }
-bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return checkAccessImpl(flags, database, table, column); }
-bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { return checkAccessImpl(flags, database, table, columns); }
-bool ContextAccess::hasGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return checkAccessImpl(flags, database, table, columns); }
-bool ContextAccess::hasGrantOption(const AccessRightsElement & element) const { return checkAccessImpl(element); }
-bool ContextAccess::hasGrantOption(const AccessRightsElements & elements) const { return checkAccessImpl(elements); }
+bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessFlags & flags) const { return checkAccessImpl(context, flags); }
+bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database) const { return checkAccessImpl(context, flags, database); }
+bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table) const { return checkAccessImpl(context, flags, database, table); }
+bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { return checkAccessImpl(context, flags, database, table, column); }
+bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { return checkAccessImpl(context, flags, database, table, columns); }
+bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { return checkAccessImpl(context, flags, database, table, columns); }
+bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessRightsElement & element) const { return checkAccessImpl(context, element); }
+bool ContextAccess::hasGrantOption(const ContextPtr & context, const AccessRightsElements & elements) const { return checkAccessImpl(context, elements); }
-void ContextAccess::checkAccess(const AccessFlags & flags) const { checkAccessImpl(flags); }
-void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database) const { checkAccessImpl(flags, database); }
-void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table) const { checkAccessImpl(flags, database, table); }
-void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { checkAccessImpl(flags, database, table, column); }
-void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { checkAccessImpl(flags, database, table, columns); }
-void ContextAccess::checkAccess(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { checkAccessImpl(flags, database, table, columns); }
-void ContextAccess::checkAccess(const AccessRightsElement & element) const { checkAccessImpl(element); }
-void ContextAccess::checkAccess(const AccessRightsElements & elements) const { checkAccessImpl(elements); }
+void ContextAccess::checkAccess(const ContextPtr & context, const AccessFlags & flags) const { checkAccessImpl(context, flags); }
+void ContextAccess::checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database) const { checkAccessImpl(context, flags, database); }
+void ContextAccess::checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table) const { checkAccessImpl(context, flags, database, table); }
+void ContextAccess::checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { checkAccessImpl(context, flags, database, table, column); }
+void ContextAccess::checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { checkAccessImpl(context, flags, database, table, columns); }
+void ContextAccess::checkAccess(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { checkAccessImpl(context, flags, database, table, columns); }
+void ContextAccess::checkAccess(const ContextPtr & context, const AccessRightsElement & element) const { checkAccessImpl(context, element); }
+void ContextAccess::checkAccess(const ContextPtr & context, const AccessRightsElements & elements) const { checkAccessImpl(context, elements); }
-void ContextAccess::checkGrantOption(const AccessFlags & flags) const { checkAccessImpl(flags); }
-void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database) const { checkAccessImpl(flags, database); }
-void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table) const { checkAccessImpl(flags, database, table); }
-void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { checkAccessImpl(flags, database, table, column); }
-void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { checkAccessImpl(flags, database, table, columns); }
-void ContextAccess::checkGrantOption(const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { checkAccessImpl(flags, database, table, columns); }
-void ContextAccess::checkGrantOption(const AccessRightsElement & element) const { checkAccessImpl(element); }
-void ContextAccess::checkGrantOption(const AccessRightsElements & elements) const { checkAccessImpl(elements); }
+void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessFlags & flags) const { checkAccessImpl(context, flags); }
+void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database) const { checkAccessImpl(context, flags, database); }
+void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table) const { checkAccessImpl(context, flags, database, table); }
+void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, std::string_view column) const { checkAccessImpl(context, flags, database, table, column); }
+void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const std::vector & columns) const { checkAccessImpl(context, flags, database, table, columns); }
+void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessFlags & flags, std::string_view database, std::string_view table, const Strings & columns) const { checkAccessImpl(context, flags, database, table, columns); }
+void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessRightsElement & element) const { checkAccessImpl(context, element); }
+void ContextAccess::checkGrantOption(const ContextPtr & context, const AccessRightsElements & elements) const { checkAccessImpl(context, elements); }
template
-bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const GetNameFunction & get_name_function) const
+bool ContextAccess::checkAdminOptionImplHelper(const ContextPtr & context, const Container & role_ids, const GetNameFunction & get_name_function) const
{
auto show_error = [](int error_code [[maybe_unused]],
FormatStringHelper fmt_string [[maybe_unused]],
@@ -804,7 +810,7 @@ bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const
if (!std::size(role_ids))
return true;
- if (isGranted(AccessType::ROLE_ADMIN))
+ if (isGranted(context, AccessType::ROLE_ADMIN))
return true;
auto info = getRolesInfo();
@@ -840,54 +846,54 @@ bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const
}
template
-bool ContextAccess::checkAdminOptionImpl(const UUID & role_id) const
+bool ContextAccess::checkAdminOptionImpl(const ContextPtr & context, const UUID & role_id) const
{
- return checkAdminOptionImplHelper(to_array(role_id), [this](const UUID & id, size_t) { return access_control->tryReadName(id); });
+ return checkAdminOptionImplHelper(context, to_array(role_id), [this](const UUID & id, size_t) { return access_control->tryReadName(id); });
}
template
-bool ContextAccess::checkAdminOptionImpl(const UUID & role_id, const String & role_name) const
+bool ContextAccess::checkAdminOptionImpl(const ContextPtr & context, const UUID & role_id, const String & role_name) const
{
- return checkAdminOptionImplHelper(to_array(role_id), [&role_name](const UUID &, size_t) { return std::optional{role_name}; });
+ return checkAdminOptionImplHelper(context, to_array(role_id), [&role_name](const UUID &, size_t) { return std::optional{role_name}; });
}
template
-bool ContextAccess::checkAdminOptionImpl(const UUID & role_id, const std::unordered_map & names_of_roles) const
+bool ContextAccess::checkAdminOptionImpl(const ContextPtr & context, const UUID & role_id, const std::unordered_map & names_of_roles) const
{
- return checkAdminOptionImplHelper(to_array(role_id), [&names_of_roles](const UUID & id, size_t) { auto it = names_of_roles.find(id); return (it != names_of_roles.end()) ? it->second : std::optional{}; });
+ return checkAdminOptionImplHelper(context, to_array(role_id), [&names_of_roles](const UUID & id, size_t) { auto it = names_of_roles.find(id); return (it != names_of_roles.end()) ? it->second : std::optional{}; });
}
template
-bool ContextAccess::checkAdminOptionImpl(const std::vector & role_ids) const
+bool ContextAccess::checkAdminOptionImpl(const ContextPtr & context, const std::vector & role_ids) const
{
- return checkAdminOptionImplHelper(role_ids, [this](const UUID & id, size_t) { return access_control->tryReadName(id); });
+ return checkAdminOptionImplHelper(context, role_ids, [this](const UUID & id, size_t) { return access_control->tryReadName(id); });
}
template
-bool ContextAccess::checkAdminOptionImpl(const std::vector & role_ids, const Strings & names_of_roles) const
+bool ContextAccess::checkAdminOptionImpl(const ContextPtr & context, const std::vector & role_ids, const Strings & names_of_roles) const
{
- return checkAdminOptionImplHelper(role_ids, [&names_of_roles](const UUID &, size_t i) { return std::optional{names_of_roles[i]}; });
+ return checkAdminOptionImplHelper(context, role_ids, [&names_of_roles](const UUID &, size_t i) { return std::optional{names_of_roles[i]}; });
}
template <bool throw_if_denied>
-bool ContextAccess::checkAdminOptionImpl(const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const
+bool ContextAccess::checkAdminOptionImpl(const ContextPtr & context, const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const
{
- return checkAdminOptionImplHelper<throw_if_denied>(role_ids, [&names_of_roles](const UUID & id, size_t) { auto it = names_of_roles.find(id); return (it != names_of_roles.end()) ? it->second : std::optional<String>{}; });
+ return checkAdminOptionImplHelper<throw_if_denied>(context, role_ids, [&names_of_roles](const UUID & id, size_t) { auto it = names_of_roles.find(id); return (it != names_of_roles.end()) ? it->second : std::optional<String>{}; });
}
-bool ContextAccess::hasAdminOption(const UUID & role_id) const { return checkAdminOptionImpl<false>(role_id); }
-bool ContextAccess::hasAdminOption(const UUID & role_id, const String & role_name) const { return checkAdminOptionImpl<false>(role_id, role_name); }
-bool ContextAccess::hasAdminOption(const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const { return checkAdminOptionImpl<false>(role_id, names_of_roles); }
-bool ContextAccess::hasAdminOption(const std::vector<UUID> & role_ids) const { return checkAdminOptionImpl<false>(role_ids); }
-bool ContextAccess::hasAdminOption(const std::vector<UUID> & role_ids, const Strings & names_of_roles) const { return checkAdminOptionImpl<false>(role_ids, names_of_roles); }
-bool ContextAccess::hasAdminOption(const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const { return checkAdminOptionImpl<false>(role_ids, names_of_roles); }
+bool ContextAccess::hasAdminOption(const ContextPtr & context, const UUID & role_id) const { return checkAdminOptionImpl<false>(context, role_id); }
+bool ContextAccess::hasAdminOption(const ContextPtr & context, const UUID & role_id, const String & role_name) const { return checkAdminOptionImpl<false>(context, role_id, role_name); }
+bool ContextAccess::hasAdminOption(const ContextPtr & context, const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const { return checkAdminOptionImpl<false>(context, role_id, names_of_roles); }
+bool ContextAccess::hasAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids) const { return checkAdminOptionImpl<false>(context, role_ids); }
+bool ContextAccess::hasAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids, const Strings & names_of_roles) const { return checkAdminOptionImpl<false>(context, role_ids, names_of_roles); }
+bool ContextAccess::hasAdminOption(const ContextPtr & context, const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const { return checkAdminOptionImpl<false>(context, role_ids, names_of_roles); }
-void ContextAccess::checkAdminOption(const UUID & role_id) const { checkAdminOptionImpl<true>(role_id); }
-void ContextAccess::checkAdminOption(const UUID & role_id, const String & role_name) const { checkAdminOptionImpl<true>(role_id, role_name); }
-void ContextAccess::checkAdminOption(const UUID & role_id, const std::unordered_map<UUID, String> & names_of_roles) const { checkAdminOptionImpl<true>(role_id, names_of_roles); }
-void ContextAccess::checkAdminOption(const std::vector<UUID> & role_ids) const { checkAdminOptionImpl<true>(role_ids); }
-void ContextAccess::checkAdminOption(const std::vector<UUID> & role_ids, const Strings & names_of_roles) const { checkAdminOptionImpl<true>(role_ids, names_of_roles); }
-void ContextAccess::checkAdminOption(const std::vector<UUID> & role_ids, const std::unordered_map<UUID, String> & names_of_roles) const { checkAdminOptionImpl<true>(role_ids, names_of_roles); }
+void ContextAccess::checkAdminOption(const ContextPtr & context, const UUID & role_id) const { checkAdminOptionImpl<true>(context, role_id); }
+void ContextAccess::checkAdminOption(const ContextPtr & context, const UUID & role_id, const String & role_name) const { checkAdminOptionImpl<true>(context, role_id, role_name); }
+void ContextAccess::checkAdminOption(const ContextPtr & context, const UUID & role_id, const std::unordered_map