Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 01:25:21 +00:00)
Merge branch 'master' of https://github.com/ClickHouse/ClickHouse into HDFS_PowerPC

Commit 9f43446959
3 changed lines: .github/workflows/release.yml (vendored)
@@ -14,7 +14,8 @@ jobs:
     steps:
       - name: Deploy packages and assets
         run: |
-          curl '${{ secrets.PACKAGES_RELEASE_URL }}/release/${{ github.ref }}?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true' -d ''
+          GITHUB_TAG="${GITHUB_REF#refs/tags/}"
+          curl '${{ secrets.PACKAGES_RELEASE_URL }}/release/'"${GITHUB_TAG}"'?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true' -d ''
 ############################################################################################
 ##################################### Docker images #######################################
 ############################################################################################
16 changed lines: SECURITY.md
@@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s

 | Version | Supported |
 |:-|:-|
+| 23.1 | ✔️ |
 | 22.12 | ✔️ |
 | 22.11 | ✔️ |
-| 22.10 | ✔️ |
+| 22.10 | ❌ |
 | 22.9 | ❌ |
 | 22.8 | ✔️ |
 | 22.7 | ❌ |
@@ -25,18 +26,7 @@ The following versions of ClickHouse server are currently being supported with s
 | 22.3 | ✔️ |
 | 22.2 | ❌ |
 | 22.1 | ❌ |
-| 21.12 | ❌ |
-| 21.11 | ❌ |
-| 21.10 | ❌ |
-| 21.9 | ❌ |
-| 21.8 | ❌ |
-| 21.7 | ❌ |
-| 21.6 | ❌ |
-| 21.5 | ❌ |
-| 21.4 | ❌ |
-| 21.3 | ❌ |
-| 21.2 | ❌ |
-| 21.1 | ❌ |
+| 21.* | ❌ |
 | 20.* | ❌ |
 | 19.* | ❌ |
 | 18.* | ❌ |
@@ -2,11 +2,11 @@

 # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54470)
-SET(VERSION_MAJOR 22)
-SET(VERSION_MINOR 13)
+SET(VERSION_REVISION 54471)
+SET(VERSION_MAJOR 23)
+SET(VERSION_MINOR 2)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 688e488e930c83eefeac4f87c4cc029cc5b231e3)
-SET(VERSION_DESCRIBE v22.13.1.1-testing)
-SET(VERSION_STRING 22.13.1.1)
+SET(VERSION_GITHASH dcaac47702510cc87ddf266bc524f6b7ce0a8e6e)
+SET(VERSION_DESCRIBE v23.2.1.1-testing)
+SET(VERSION_STRING 23.2.1.1)
 # end of autochange
2 changed lines: contrib/NuRaft (vendored)

@@ -1 +1 @@
-Subproject commit afc36dfa9b0beb45bc4cd935060631cc80ba04a5
+Subproject commit 545b8c810a956b2efdc116e86be219af7e83d68a
2 changed lines: contrib/poco (vendored)

@@ -1 +1 @@
-Subproject commit 4b1c8dd9913d2a16db62df0e509fa598da5c8219
+Subproject commit 7fefdf30244a9bf8eb58562a9b2a51cc59a8877a
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \

 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="22.12.3.5"
+ARG VERSION="23.1.1.3077"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="22.12.3.5"
+ARG VERSION="23.1.1.3077"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # set non-empty deb_location_url url to create a docker image
@@ -234,6 +234,7 @@ function run_tests
         --check-zookeeper-session
         --order random
         --print-time
+        --report-logs-stats
         --jobs "${NPROC}"
     )
     time clickhouse-test "${test_opts[@]}" -- "$FASTTEST_FOCUS" 2>&1 \
@@ -146,6 +146,12 @@ def prepare_for_hung_check(drop_databases):
             "KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'"
         )
     )
+    # Long query from 02136_kill_scalar_queries
+    call_with_retry(
+        make_query_command(
+            "KILL QUERY WHERE query LIKE 'SELECT (SELECT number FROM system.numbers WHERE number = 1000000000000)%'"
+        )
+    )

     if drop_databases:
         for i in range(5):
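For context on the hunk above: the hung check terminates leftover test queries by matching their text. A minimal ClickHouse SQL sketch of the same idea; the query pattern is taken from the diff, while the `system.processes` lookup and the `SYNC` modifier are illustrative additions:

```sql
-- Inspect what is still running (illustrative helper query):
SELECT query_id, elapsed, query
FROM system.processes
ORDER BY elapsed DESC;

-- Kill every query whose text matches a pattern, as the hung check does:
KILL QUERY
WHERE query LIKE 'SELECT (SELECT number FROM system.numbers WHERE number = 1000000000000)%'
SYNC;
```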
21 added lines: docs/changelogs/v22.10.7.13-stable.md (new file)
@@ -0,0 +1,21 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.10.7.13-stable (d261d9036cc) FIXME as compared to v22.10.6.3-stable (645a66d221f)

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#44998](https://github.com/ClickHouse/ClickHouse/issues/44998): Another fix for `Cannot read all data` error which could happen while reading `LowCardinality` dictionary from remote fs. Fixes [#44709](https://github.com/ClickHouse/ClickHouse/issues/44709). [#44875](https://github.com/ClickHouse/ClickHouse/pull/44875) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#45551](https://github.com/ClickHouse/ClickHouse/issues/45551): Fix `SELECT ... FROM system.dictionaries` exception when there is a dictionary with a bad structure (e.g. incorrect type in xml config). [#45399](https://github.com/ClickHouse/ClickHouse/pull/45399) ([Aleksei Filatov](https://github.com/aalexfvk)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Automatically merge green backport PRs and green approved PRs [#41110](https://github.com/ClickHouse/ClickHouse/pull/41110) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Improve release scripts [#45074](https://github.com/ClickHouse/ClickHouse/pull/45074) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix wrong approved_at, simplify conditions [#45302](https://github.com/ClickHouse/ClickHouse/pull/45302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Get rid of artifactory in favor of r2 + ch-repos-manager [#45421](https://github.com/ClickHouse/ClickHouse/pull/45421) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
592 added lines: docs/changelogs/v23.1.1.3077-stable.md (new file)
@@ -0,0 +1,592 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.1.1.3077-stable (dcaac477025) FIXME as compared to v22.12.1.1752-stable (688e488e930)

#### Backward Incompatible Change
* Remove query `SYSTEM RESTART DISK`. [#44647](https://github.com/ClickHouse/ClickHouse/pull/44647) ([alesapin](https://github.com/alesapin)).
* Disallow Gorilla compression on columns of non-Float32 or non-Float64 type. [#45252](https://github.com/ClickHouse/ClickHouse/pull/45252) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove PREALLOCATE for HASHED/SPARSE_HASHED dictionaries. [#45388](https://github.com/ClickHouse/ClickHouse/pull/45388) ([Azat Khuzhin](https://github.com/azat)).
* Parallel quorum inserts might work incorrectly with `*MergeTree` tables created with deprecated syntax. Therefore, parallel quorum inserts support is completely disabled for such tables. It does not affect tables created with the new syntax. [#45430](https://github.com/ClickHouse/ClickHouse/pull/45430) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### New Feature
* Add `quantileInterpolatedWeighted`/`quantilesInterpolatedWeighted` functions. [#38252](https://github.com/ClickHouse/ClickHouse/pull/38252) ([Bharat Nallan](https://github.com/bharatnc)).
* Add an experimental inverted index as a new secondary index type for efficient text search. [#38667](https://github.com/ClickHouse/ClickHouse/pull/38667) ([larryluogit](https://github.com/larryluogit)).
* Add column `ptr` to `system.trace_log` for `trace_type = 'MemorySample'`. This column contains an address of allocation. Added function `flameGraph` which can build a flamegraph containing allocated and not-released memory. Reworking of [#38391](https://github.com/ClickHouse/ClickHouse/issues/38391). [#38953](https://github.com/ClickHouse/ClickHouse/pull/38953) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Dictionary source for extracting keys by traversing a tree of regular expressions. [#40878](https://github.com/ClickHouse/ClickHouse/pull/40878) ([Vage Ogannisian](https://github.com/nooblose)).
* Added parametrized view functionality: it is now possible to specify query parameters for the View table engine (see the sketch after this list). Resolves [#40907](https://github.com/ClickHouse/ClickHouse/issues/40907). [#41687](https://github.com/ClickHouse/ClickHouse/pull/41687) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Added an extendable and configurable scheduling subsystem for IO requests (not yet integrated with the IO code itself). [#41840](https://github.com/ClickHouse/ClickHouse/pull/41840) ([Sergei Trifonov](https://github.com/serxa)).
* Added `SYSTEM DROP DATABASE REPLICA` that removes metadata of a dead replica of a `Replicated` database. Resolves [#41794](https://github.com/ClickHouse/ClickHouse/issues/41794). [#42807](https://github.com/ClickHouse/ClickHouse/pull/42807) ([Alexander Tokmakov](https://github.com/tavplubix)).
* `ARRAY JOIN` now supports the `Map` type, similar to the `explode` function in Spark. [#43239](https://github.com/ClickHouse/ClickHouse/pull/43239) ([李扬](https://github.com/taiyang-li)).
* Support SQL standard binary and hex string literals. [#43785](https://github.com/ClickHouse/ClickHouse/pull/43785) ([Mo Xuan](https://github.com/mo-avatar)).
* Add experimental query result cache. [#43797](https://github.com/ClickHouse/ClickHouse/pull/43797) ([Robert Schulze](https://github.com/rschu1ze)).
* Format datetime in Joda-Time style. Refer to https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html. [#43818](https://github.com/ClickHouse/ClickHouse/pull/43818) ([李扬](https://github.com/taiyang-li)).
* Support the regexp dictionary (to be merged with [#40878](https://github.com/ClickHouse/ClickHouse/issues/40878)). [#43858](https://github.com/ClickHouse/ClickHouse/pull/43858) ([Han Fei](https://github.com/hanfei1991)).
* Implemented a fractional second formatter (`%f`) for formatDateTime. [#44060](https://github.com/ClickHouse/ClickHouse/pull/44060) ([ltrk2](https://github.com/ltrk2)).
* Added the `age` function to calculate the difference between two dates or dates with time values, expressed as a number of full units. Closes [#41115](https://github.com/ClickHouse/ClickHouse/issues/41115). [#44421](https://github.com/ClickHouse/ClickHouse/pull/44421) ([Robert Schulze](https://github.com/rschu1ze)).
* Implemented a fractional second formatter (%f) for formatDateTime. This is a slightly modified PR [#44060](https://github.com/ClickHouse/ClickHouse/issues/44060) by @ltrk2. [#44497](https://github.com/ClickHouse/ClickHouse/pull/44497) ([Alexander Gololobov](https://github.com/davenger)).
* Add a null source for dictionaries. Closes [#44240](https://github.com/ClickHouse/ClickHouse/issues/44240). [#44502](https://github.com/ClickHouse/ClickHouse/pull/44502) ([mayamika](https://github.com/mayamika)).
* We can use `s3_storage_class` to set a different storage tier, e.g. ``` <disks> <s3> <type>s3</type> <endpoint>xxx</endpoint> <access_key_id>xxx</access_key_id> <secret_access_key>xxx</secret_access_key> <s3_storage_class>STANDARD/INTELLIGENT_TIERING</s3_storage_class> </s3> </disks> ``` Closes [#44443](https://github.com/ClickHouse/ClickHouse/issues/44443). [#44707](https://github.com/ClickHouse/ClickHouse/pull/44707) ([chen](https://github.com/xiedeyantu)).
* Try to detect a header with column names (and maybe types) for CSV/TSV/CustomSeparated input formats. Add settings `input_format_tsv/csv/custom_detect_header` that enable this behaviour (enabled by default). Closes [#44640](https://github.com/ClickHouse/ClickHouse/issues/44640). [#44953](https://github.com/ClickHouse/ClickHouse/pull/44953) ([Kruglov Pavel](https://github.com/Avogar)).
* Insert default values in case of missing elements in a JSON object while parsing a named tuple. Add setting `input_format_json_defaults_for_missing_elements_in_named_tuple` that controls this behaviour. Closes [#45142](https://github.com/ClickHouse/ClickHouse/issues/45142)#issuecomment-1380153217. [#45231](https://github.com/ClickHouse/ClickHouse/pull/45231) ([Kruglov Pavel](https://github.com/Avogar)).
* Add total memory and used memory metrics with respect to cgroup in AsyncMetrics (https://github.com/ClickHouse/ClickHouse/issues/37983). [#45301](https://github.com/ClickHouse/ClickHouse/pull/45301) ([sichenzhao](https://github.com/sichenzhao)).
* Introduce non-throwing variants of hasToken and hasTokenCaseInsensitive. [#45341](https://github.com/ClickHouse/ClickHouse/pull/45341) ([ltrk2](https://github.com/ltrk2)).

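A few of the SQL-facing features above, sketched as hedged examples; the table name, parameter name, and literal values are illustrative, not taken from this diff:

```sql
-- Parametrized view: the view takes a typed query parameter.
CREATE VIEW recent_hits AS
    SELECT * FROM hits WHERE EventDate >= {min_date:Date};
SELECT count() FROM recent_hits(min_date = '2023-01-01');

-- age(): difference between two dates expressed in full units.
SELECT age('month', toDate('2022-01-15'), toDate('2023-01-14'));  -- 11, one day short of 12 full months

-- formatDateTime with the new fractional-second formatter %f.
SELECT formatDateTime(now64(3), '%H:%M:%S.%f');
```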
#### Performance Improvement
* Added sharding support in HashedDictionary to allow parallel load (almost linear scaling based on the number of shards). [#40003](https://github.com/ClickHouse/ClickHouse/pull/40003) ([Azat Khuzhin](https://github.com/azat)).
* Do not load inactive parts at startup of `MergeTree` tables. [#42181](https://github.com/ClickHouse/ClickHouse/pull/42181) ([Anton Popov](https://github.com/CurtizJ)).
* Speed up query parsing. [#42284](https://github.com/ClickHouse/ClickHouse/pull/42284) ([Raúl Marín](https://github.com/Algunenano)).
* Always replace the OR chain `expr = x1 OR ... OR expr = xN` with `expr IN (x1, ..., xN)` if `expr` is a `LowCardinality` column (see the sketch after this list). The setting `optimize_min_equality_disjunction_chain_length` is ignored in this case. [#42889](https://github.com/ClickHouse/ClickHouse/pull/42889) ([Guo Wangyang](https://github.com/guowangy)).
* In the original implementation, the memory of ThreadGroupStatus::finished_threads_counters_memory was released by moving it to a temporary std::vector, which soon expires and gets destructed. This method is viable, but not straightforward enough. To enhance code readability, this commit releases the memory in the vector by first resizing it to 0 and then shrinking the capacity accordingly. [#43586](https://github.com/ClickHouse/ClickHouse/pull/43586) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* As a follow-up of [#42214](https://github.com/ClickHouse/ClickHouse/issues/42214), this PR tries to optimize the column-wise ternary logic evaluation by achieving auto-vectorization. In the performance test of this [microbenchmark](https://github.com/ZhiguoZh/ClickHouse/blob/20221123-ternary-logic-opt-example/src/Functions/examples/associative_applier_perf.cpp), we've observed a peak **performance gain** of **21x** on the ICX device (Intel Xeon Platinum 8380 CPU). [#43669](https://github.com/ClickHouse/ClickHouse/pull/43669) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Improved latency of reading from storage `S3` and table function `s3` with a large number of small files. Now settings `remote_filesystem_read_method` and `remote_filesystem_read_prefetch` take effect while reading from storage `S3`. [#43726](https://github.com/ClickHouse/ClickHouse/pull/43726) ([Anton Popov](https://github.com/CurtizJ)).
* Avoid acquiring read locks in system.tables if possible. [#43840](https://github.com/ClickHouse/ClickHouse/pull/43840) ([Raúl Marín](https://github.com/Algunenano)).
* The performance experiments of SSB (Star Schema Benchmark) on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) show that this change effectively decreases the lock contention for ThreadPoolImpl::mutex by **75%**, increasing CPU utilization and improving the overall performance by **2.4%**. [#44308](https://github.com/ClickHouse/ClickHouse/pull/44308) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Now the optimisation is applied only if the cached HT size is sufficiently large (thresholds were determined empirically and hardcoded). [#44455](https://github.com/ClickHouse/ClickHouse/pull/44455) ([Nikita Taranov](https://github.com/nickitat)).
* Previously, the whole struct field was loaded even when only one field of the struct was needed. [#44484](https://github.com/ClickHouse/ClickHouse/pull/44484) ([lgbo](https://github.com/lgbo-ustc)).
* Small performance improvement for asynchronous reading from remote fs. [#44868](https://github.com/ClickHouse/ClickHouse/pull/44868) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Switched to a faster shared (RW) mutex implementation. Performance may be improved in queries with a lot of thread synchronization or for data structures experiencing heavy contention. [#45007](https://github.com/ClickHouse/ClickHouse/pull/45007) ([Sergei Trifonov](https://github.com/serxa)).
* Add fast path for: - col like '%%' - col like '%' - col not like '%' - col not like '%' - match(col, '.*'). [#45244](https://github.com/ClickHouse/ClickHouse/pull/45244) ([李扬](https://github.com/taiyang-li)).
* todo. [#45289](https://github.com/ClickHouse/ClickHouse/pull/45289) ([Nikita Taranov](https://github.com/nickitat)).

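The OR-chain rewrite above is easy to picture; a hedged sketch with illustrative table and column names:

```sql
-- With a LowCardinality(String) column `s`, an equality OR chain such as
SELECT count() FROM events WHERE s = 'a' OR s = 'b' OR s = 'c';
-- is now always rewritten internally to the equivalent
SELECT count() FROM events WHERE s IN ('a', 'b', 'c');
-- regardless of optimize_min_equality_disjunction_chain_length.
```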
#### Improvement
* Refactor and improve streaming engines Kafka/RabbitMQ/NATS and add support for all formats; also refactor formats a bit: - Fix producing messages in row-based formats with suffixes/prefixes. Now every message is formatted completely with all delimiters and can be parsed back using the input format. - Support block-based formats like Native, Parquet, ORC, etc. Every block is formatted as a separate message. The number of rows in one message depends on the block size, so you can control it via the setting `max_block_size`. - Add new engine settings `kafka_max_rows_per_message/rabbitmq_max_rows_per_message/nats_max_rows_per_message`. They control the number of rows formatted in one message in row-based formats. Default value: 1. - Fix high memory consumption in the NATS table engine. - Support arbitrary binary data in the NATS producer (previously it worked only with strings containing \0 at the end). - Add missing Kafka/RabbitMQ/NATS engine settings to the documentation. - Refactor producing and consuming in Kafka/RabbitMQ/NATS, separating it from WriteBuffer/ReadBuffer semantics. - Refactor output formats: remove callbacks on each row used in Kafka/RabbitMQ/NATS (now we don't use callbacks there), allow using IRowOutputFormat directly, clarify row-end and row-between delimiters, make it possible to reset the output format to start formatting again. - Add a proper implementation of the formatRow function (a bonus after the formats refactoring). [#42777](https://github.com/ClickHouse/ClickHouse/pull/42777) ([Kruglov Pavel](https://github.com/Avogar)).
* Support `optimize_or_like_chain` in the new infrastructure. Part of [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#42797](https://github.com/ClickHouse/ClickHouse/pull/42797) ([Dmitry Novik](https://github.com/novikd)).
* Improve the Asterisk and ColumnMatcher parsers. Part of [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#42884](https://github.com/ClickHouse/ClickHouse/pull/42884) ([Nikolay Degterinsky](https://github.com/evillique)).
* Implement `optimize_redundant_functions_in_order_by` on top of QueryTree. Part of [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#42970](https://github.com/ClickHouse/ClickHouse/pull/42970) ([Dmitry Novik](https://github.com/novikd)).
* Support `optimize_group_by_function_keys` in the new analyzer architecture. Also, add support for optimizing GROUPING SETS keys. Part of [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#43261](https://github.com/ClickHouse/ClickHouse/pull/43261) ([Dmitry Novik](https://github.com/novikd)).
* Improve reading CSV fields in CustomSeparated/Template format. Closes [#42352](https://github.com/ClickHouse/ClickHouse/issues/42352) Closes [#39620](https://github.com/ClickHouse/ClickHouse/issues/39620). [#43332](https://github.com/ClickHouse/ClickHouse/pull/43332) ([Kruglov Pavel](https://github.com/Avogar)).
* Support reading/writing `Nested` tables as `List` of `Struct` in CapnProto format. Read/write `Decimal32/64` as `Int32/64`. Closes [#43319](https://github.com/ClickHouse/ClickHouse/issues/43319). [#43379](https://github.com/ClickHouse/ClickHouse/pull/43379) ([Kruglov Pavel](https://github.com/Avogar)).
* Unify query elapsed time measurements. [#43455](https://github.com/ClickHouse/ClickHouse/pull/43455) ([Raúl Marín](https://github.com/Algunenano)).
* Support a scalar subqueries cache. Implementation: added a map with the hash of the node (without alias) and the evaluated value to Context. Testing: added a test case with the new analyser in 02174_cte_scalar_cache.sql. [#43640](https://github.com/ClickHouse/ClickHouse/pull/43640) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Improve automatic usage of structure from the insertion table in table functions file/hdfs/s3 when virtual columns are present in the select query; it fixes the possible errors `Block structure mismatch` and `number of columns mismatch`. [#43695](https://github.com/ClickHouse/ClickHouse/pull/43695) ([Kruglov Pavel](https://github.com/Avogar)).
* Add support for signed arguments in range(). Fixes [#43333](https://github.com/ClickHouse/ClickHouse/issues/43333). [#43733](https://github.com/ClickHouse/ClickHouse/pull/43733) ([sanyu](https://github.com/wineternity)).
* Remove redundant sorting, for example, sorting related ORDER BY clauses in subqueries. Implemented on top of the query plan. It does a similar optimization as `optimize_duplicate_order_by_and_distinct` regarding `ORDER BY` clauses, but is more generic, since it's applied to any redundant sorting steps (not only caused by an ORDER BY clause) and to subqueries of any depth. Related to [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#43905](https://github.com/ClickHouse/ClickHouse/pull/43905) ([Igor Nikonov](https://github.com/devcrafter)).
* Added mmap support for StorageFile, which should improve the performance of clickhouse-local. [#43927](https://github.com/ClickHouse/ClickHouse/pull/43927) ([pufit](https://github.com/pufit)).
* Add the ability to disable deduplication for BACKUP (for backups without deduplication, ATTACH can be used instead of a full RESTORE); for example, `BACKUP foo TO S3(...) SETTINGS deduplicate_files=0` (default `deduplicate_files=1`). [#43947](https://github.com/ClickHouse/ClickHouse/pull/43947) ([Azat Khuzhin](https://github.com/azat)).
* Make the `system.replicas` table fetch replica statuses in parallel. Closes [#43918](https://github.com/ClickHouse/ClickHouse/issues/43918). [#43998](https://github.com/ClickHouse/ClickHouse/pull/43998) ([Nikolay Degterinsky](https://github.com/evillique)).
* Refactor and improve schema inference for text formats. Add a new setting `schema_inference_make_columns_nullable` that controls making result types `Nullable` (enabled by default). [#44019](https://github.com/ClickHouse/ClickHouse/pull/44019) ([Kruglov Pavel](https://github.com/Avogar)).
* Better support for PROXYv1. [#44135](https://github.com/ClickHouse/ClickHouse/pull/44135) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Add information about the latest part check by the cleanup thread into the `system.parts` table. [#44244](https://github.com/ClickHouse/ClickHouse/pull/44244) ([Dmitry Novik](https://github.com/novikd)).
* Disable functions in readonly mode for inserts. [#44290](https://github.com/ClickHouse/ClickHouse/pull/44290) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Add a setting `simultaneous_parts_removal_limit` to allow limiting the number of parts being processed by one iteration of CleanupThread. [#44461](https://github.com/ClickHouse/ClickHouse/pull/44461) ([Dmitry Novik](https://github.com/novikd)).
* If the user only needs virtual columns, we don't need to initialize ReadBufferFromS3. May be helpful for [#44246](https://github.com/ClickHouse/ClickHouse/issues/44246). [#44493](https://github.com/ClickHouse/ClickHouse/pull/44493) ([chen](https://github.com/xiedeyantu)).
* Prevent duplicate column name hints. Closes [#44130](https://github.com/ClickHouse/ClickHouse/issues/44130). [#44519](https://github.com/ClickHouse/ClickHouse/pull/44519) ([Joanna Hulboj](https://github.com/jh0x)).
* Allow macro substitution in the endpoint of disks. Resolves [#40951](https://github.com/ClickHouse/ClickHouse/issues/40951). [#44533](https://github.com/ClickHouse/ClickHouse/pull/44533) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Added a `message_format_string` column to `system.text_log`. The column contains the pattern that was used to format the message (see the sketch after this list). [#44543](https://github.com/ClickHouse/ClickHouse/pull/44543) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Improve schema inference when `input_format_json_read_object_as_string` is enabled. [#44546](https://github.com/ClickHouse/ClickHouse/pull/44546) ([Kruglov Pavel](https://github.com/Avogar)).
* Add a user-level setting `database_replicated_allow_replicated_engine_arguments` which allows banning the creation of `ReplicatedMergeTree` tables with arguments in `DatabaseReplicated`. [#44566](https://github.com/ClickHouse/ClickHouse/pull/44566) ([alesapin](https://github.com/alesapin)).
* Prevent users from mistakenly specifying a zero (invalid) value for `index_granularity`. This closes [#44536](https://github.com/ClickHouse/ClickHouse/issues/44536). [#44578](https://github.com/ClickHouse/ClickHouse/pull/44578) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added the possibility to set the path to a service keytab file in the `keytab` parameter in the `kerberos` section of config.xml. [#44594](https://github.com/ClickHouse/ClickHouse/pull/44594) ([Roman Vasin](https://github.com/rvasin)).
* Use the already written part of the query for fuzzy search (pass it to skim). [#44600](https://github.com/ClickHouse/ClickHouse/pull/44600) ([Azat Khuzhin](https://github.com/azat)).
* Enable input_format_json_read_objects_as_strings by default to be able to read nested JSON objects while the JSON Object type is experimental. [#44657](https://github.com/ClickHouse/ClickHouse/pull/44657) ([Kruglov Pavel](https://github.com/Avogar)).
* When users perform duplicate async inserts, deduplicate in memory before querying Keeper. [#44682](https://github.com/ClickHouse/ClickHouse/pull/44682) ([Han Fei](https://github.com/hanfei1991)).
* Input/output Avro bool type as ClickHouse bool type. [#44684](https://github.com/ClickHouse/ClickHouse/pull/44684) ([Kruglov Pavel](https://github.com/Avogar)).
* Don't parse beyond the quotes when reading UUIDs. [#44686](https://github.com/ClickHouse/ClickHouse/pull/44686) ([Raúl Marín](https://github.com/Algunenano)).
* Infer UInt64 in case of Int64 overflow and fix some transforms in schema inference. [#44696](https://github.com/ClickHouse/ClickHouse/pull/44696) ([Kruglov Pavel](https://github.com/Avogar)).
* Previously, dependency resolving inside DatabaseReplicated was done in a hacky way; now it is done properly using an explicit graph. [#44697](https://github.com/ClickHouse/ClickHouse/pull/44697) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Support the Bool type in Arrow/Parquet/ORC. Closes [#43970](https://github.com/ClickHouse/ClickHouse/issues/43970). [#44698](https://github.com/ClickHouse/ClickHouse/pull/44698) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix `output_format_pretty_row_numbers` not preserving the counter across blocks. Closes [#44815](https://github.com/ClickHouse/ClickHouse/issues/44815). [#44832](https://github.com/ClickHouse/ClickHouse/pull/44832) ([flynn](https://github.com/ucasfl)).
* Extend the function "toDayOfWeek" with a mode argument describing a) whether the week starts on Monday or Sunday and b) whether counting starts at 0 or 1. [#44860](https://github.com/ClickHouse/ClickHouse/pull/44860) ([李扬](https://github.com/taiyang-li)).
* Don't report errors in system.errors due to parts being merged concurrently with the background cleanup process. [#44874](https://github.com/ClickHouse/ClickHouse/pull/44874) ([Raúl Marín](https://github.com/Algunenano)).
* Optimize and fix metrics for Distributed async INSERT. [#44922](https://github.com/ClickHouse/ClickHouse/pull/44922) ([Azat Khuzhin](https://github.com/azat)).
* Added settings to disallow concurrent backups and restores. Resolves [#43891](https://github.com/ClickHouse/ClickHouse/issues/43891). Implementation: added server-level settings to disallow concurrent backups and restores, which are read and set when BackupWorker is created in Context; the settings are set to true by default; before starting a backup or restore, added a check to see if any other backups/restores are running (for internal requests it checks if the request is from the same node using backup_uuid). [#45072](https://github.com/ClickHouse/ClickHouse/pull/45072) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Add a cache for async block IDs. This reduces the number of ZooKeeper requests when async insert deduplication is enabled. [#45106](https://github.com/ClickHouse/ClickHouse/pull/45106) ([Han Fei](https://github.com/hanfei1991)).
* CRC32 changes to address the WeakHash collision issue on PowerPC. [#45144](https://github.com/ClickHouse/ClickHouse/pull/45144) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)).
* Optimize memory consumption during backup to S3: files will now be copied to S3 directly without using `WriteBufferFromS3` (which could use a lot of memory). [#45188](https://github.com/ClickHouse/ClickHouse/pull/45188) ([Vitaly Baranov](https://github.com/vitlibar)).
* Use the structure from the insertion table in generateRandom without arguments. [#45239](https://github.com/ClickHouse/ClickHouse/pull/45239) ([Kruglov Pavel](https://github.com/Avogar)).
* Use a `GetObjectAttributes` request instead of a `HeadObject` request to get the size of an object in AWS S3. This change fixes, for example, handling endpoints without an explicit region. [#45288](https://github.com/ClickHouse/ClickHouse/pull/45288) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add a `<storage_policy>` config parameter for system logs. [#45320](https://github.com/ClickHouse/ClickHouse/pull/45320) ([Stig Bakken](https://github.com/stigsb)).
* Remove redundant sorting, for example, sorting related ORDER BY clauses in subqueries. Implemented on top of the query plan. It does a similar optimization as `optimize_duplicate_order_by_and_distinct` regarding `ORDER BY` clauses, but is more generic, since it's applied to any redundant sorting steps (not only caused by an ORDER BY clause) and to subqueries of any depth. Related to [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#45420](https://github.com/ClickHouse/ClickHouse/pull/45420) ([Igor Nikonov](https://github.com/devcrafter)).
* Allow implicit conversion of floats stored in string fields of JSON to integers in `JSONExtract` functions. E.g. `JSONExtract('{"a": "1000.111"}', 'a', 'UInt64')` -> `1000`; previously it returned 0 (see the sketch after this list). [#45432](https://github.com/ClickHouse/ClickHouse/pull/45432) ([Anton Popov](https://github.com/CurtizJ)).
* Added fields `supports_parallel_parsing` and `supports_parallel_formatting` to the table `system.formats` for better introspection. [#45499](https://github.com/ClickHouse/ClickHouse/pull/45499) ([Anton Popov](https://github.com/CurtizJ)).
* Attempt to improve fsync latency (by syncing all files at once during fetches and small files after mutations) and one tiny fix for fsync_part_directory. [#45537](https://github.com/ClickHouse/ClickHouse/pull/45537) ([Azat Khuzhin](https://github.com/azat)).

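Two of the improvements above in hedged SQL form; the `JSONExtract` call is taken verbatim from the entry, while the `system.text_log` query is an illustrative use of the new column:

```sql
-- Floats stored in JSON string fields now convert implicitly to integers:
SELECT JSONExtract('{"a": "1000.111"}', 'a', 'UInt64');  -- 1000 (previously 0)

-- The new message_format_string column groups log messages by their pattern:
SELECT message_format_string, count() AS c
FROM system.text_log
GROUP BY message_format_string
ORDER BY c DESC
LIMIT 10;
```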
#### Bug Fix
* Fix HTTP requests without a path for AWS. After updating the AWS SDK, the SDK no longer adds a slash to request paths, so we need to do it in our PocoHTTPClient to keep HTTP requests correct. [#45238](https://github.com/ClickHouse/ClickHouse/pull/45238) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix backup if mutations get killed during the backup process. [#45351](https://github.com/ClickHouse/ClickHouse/pull/45351) ([Vitaly Baranov](https://github.com/vitlibar)).

#### Build/Testing/Packaging Improvement
* Built-in skim for fuzzy search in clickhouse client/local history. [#44239](https://github.com/ClickHouse/ClickHouse/pull/44239) ([Azat Khuzhin](https://github.com/azat)).
* A memory limit for the server is now set in AST fuzz tests to avoid OOMs. [#44282](https://github.com/ClickHouse/ClickHouse/pull/44282) ([Nikita Taranov](https://github.com/nickitat)).
* In rare cases, we didn't rebuild binaries because another task with a similar prefix succeeded; e.g. `binary_darwin` didn't restart because `binary_darwin_aarch64` had succeeded. [#44311](https://github.com/ClickHouse/ClickHouse/pull/44311) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* The "universal.sh" now fetches an SSE2 build on systems which don't have SSE4.2. [#44366](https://github.com/ClickHouse/ClickHouse/pull/44366) ([Robert Schulze](https://github.com/rschu1ze)).
* Retry the integration tests on compressing errors. [#44529](https://github.com/ClickHouse/ClickHouse/pull/44529) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* 1. Added pytest-random by default in the integration tests runner. 2. Disabled TSAN checks for tests with gRPC (like https://s3.amazonaws.com/clickhouse-test-reports/42807/e9d7407a58f6e3f7d88c0c534685704f23560704/integration_tests__tsan__[4/6].html ). 3. Clean up tables after tests in ODBC. [#44711](https://github.com/ClickHouse/ClickHouse/pull/44711) ([Ilya Yatsishin](https://github.com/qoega)).
* We removed support for shared linking because of Rust. Actually, Rust is only an excuse for this removal, and we wanted to remove it nevertheless. [#44828](https://github.com/ClickHouse/ClickHouse/pull/44828) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Checks will try to download images before running integration tests. If an image, proxy, or anything else is broken in the infrastructure, it will not make tests flaky. Images will be cached locally, and download time will not be added to random tests. Compose images are now changed to be used without the correct environment from helpers/cluster.py. [#44848](https://github.com/ClickHouse/ClickHouse/pull/44848) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix zookeeper downloading, update the version, and optimize the image size. [#44853](https://github.com/ClickHouse/ClickHouse/pull/44853) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* The performance tests were silently broken because `Errors` wasn't detected in the status message. [#44867](https://github.com/ClickHouse/ClickHouse/pull/44867) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove the dependency on the `adduser` tool from the packages, because we don't use it. This fixes [#44934](https://github.com/ClickHouse/ClickHouse/issues/44934). [#45011](https://github.com/ClickHouse/ClickHouse/pull/45011) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The SQLite library is updated to the latest version. It is used for the SQLite database and table integration engines. Also fixed a false-positive TSan report. This closes [#45027](https://github.com/ClickHouse/ClickHouse/issues/45027). [#45031](https://github.com/ClickHouse/ClickHouse/pull/45031) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix report sending in the case when FastTest failed. [#45588](https://github.com/ClickHouse/ClickHouse/pull/45588) ([Dmitry Novik](https://github.com/novikd)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
||||
|
||||
* #40651 [#41404](https://github.com/ClickHouse/ClickHouse/issues/41404). [#42126](https://github.com/ClickHouse/ClickHouse/pull/42126) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Fix possible use-of-unitialized value after executing expressions after sorting. Closes [#43386](https://github.com/ClickHouse/ClickHouse/issues/43386) CC: @nickitat. [#43635](https://github.com/ClickHouse/ClickHouse/pull/43635) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Better handling of NULL in aggregate combinators, fix possible segfault/logical error while using optimization `optimize_rewrite_sum_if_to_count_if`. Closes [#43758](https://github.com/ClickHouse/ClickHouse/issues/43758). [#43813](https://github.com/ClickHouse/ClickHouse/pull/43813) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix CREATE USER/ROLE query settings constraints. [#43993](https://github.com/ClickHouse/ClickHouse/pull/43993) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* * Fix wrong behavior of `JOIN ON t1.x = t2.x AND 1 = 1`, forbid such queries. [#44016](https://github.com/ClickHouse/ClickHouse/pull/44016) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fixed bug with non-parsable default value for EPHEMERAL column in table metadata. [#44026](https://github.com/ClickHouse/ClickHouse/pull/44026) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix parsing of bad version from compatibility setting. [#44224](https://github.com/ClickHouse/ClickHouse/pull/44224) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Bring interval subtraction from datetime in line with addition. [#44241](https://github.com/ClickHouse/ClickHouse/pull/44241) ([ltrk2](https://github.com/ltrk2)).
|
||||
* Fix double-free in HashTable::clearAndShrink() with zero elements in it. [#44256](https://github.com/ClickHouse/ClickHouse/pull/44256) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Remove limits on maximum size of the result for view. [#44261](https://github.com/ClickHouse/ClickHouse/pull/44261) ([lizhuoyu5](https://github.com/lzydmxy)).
|
||||
* Fix possible logical error in cache if `do_not_evict_index_and_mrk_files=1`. Closes [#42142](https://github.com/ClickHouse/ClickHouse/issues/42142). [#44268](https://github.com/ClickHouse/ClickHouse/pull/44268) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix possible too early cache write interruption in write-through cache (caching could be stopped due to false assumption when it shouldn't have). [#44289](https://github.com/ClickHouse/ClickHouse/pull/44289) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix possible crash in case function `IN` with constant arguments was used as a constant argument together with `LowCardinality`. Fixes [#44221](https://github.com/ClickHouse/ClickHouse/issues/44221). [#44346](https://github.com/ClickHouse/ClickHouse/pull/44346) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix support for complex parameters (like arrays) of parametric aggregate functions. This closes [#30975](https://github.com/ClickHouse/ClickHouse/issues/30975). The aggregate function `sumMapFiltered` was unusable in distributed queries before this change. [#44358](https://github.com/ClickHouse/ClickHouse/pull/44358) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* * Fix possible nullptr deference in JoinSwitcher with `allow_experimental_analyzer`. [#44371](https://github.com/ClickHouse/ClickHouse/pull/44371) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix reading ObjectId in BSON schema inference. [#44382](https://github.com/ClickHouse/ClickHouse/pull/44382) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix race which can lead to premature temp parts removal before merge finished in ReplicatedMergeTree. This issue could lead to errors like `No such file or directory: xxx`. Fixes [#43983](https://github.com/ClickHouse/ClickHouse/issues/43983). [#44383](https://github.com/ClickHouse/ClickHouse/pull/44383) ([alesapin](https://github.com/alesapin)).
|
||||
* Some invalid `SYSTEM ... ON CLUSTER` queries worked in an unexpected way if a cluster name was not specified. It's fixed, now invalid queries throw `SYNTAX_ERROR` as they should. Fixes [#44264](https://github.com/ClickHouse/ClickHouse/issues/44264). [#44387](https://github.com/ClickHouse/ClickHouse/pull/44387) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Fix reading Map type in ORC format. [#44400](https://github.com/ClickHouse/ClickHouse/pull/44400) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix reading columns that are not presented in input data in Parquet/ORC formats. Previously it could lead to error `INCORRECT_NUMBER_OF_COLUMNS`. Closes [#44333](https://github.com/ClickHouse/ClickHouse/issues/44333). [#44405](https://github.com/ClickHouse/ClickHouse/pull/44405) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Previously bar() function used the same '▋' (U+258B "Left five eighths block") character to display both 5/8 and 6/8 bars. This change corrects this behavior by using '▊' (U+258A "Left three quarters block") for displaying 6/8 bar. [#44410](https://github.com/ClickHouse/ClickHouse/pull/44410) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Placing profile settings after profile settings constraints in the configuration file made constraints ineffective. [#44411](https://github.com/ClickHouse/ClickHouse/pull/44411) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||
* Fix `SYNTAX_ERROR` while running `EXPLAIN AST INSERT` queries with data. Closes [#44207](https://github.com/ClickHouse/ClickHouse/issues/44207). [#44413](https://github.com/ClickHouse/ClickHouse/pull/44413) ([save-my-heart](https://github.com/save-my-heart)).
|
||||
* Fix reading bool value with CRLF in CSV format. Closes [#44401](https://github.com/ClickHouse/ClickHouse/issues/44401). [#44442](https://github.com/ClickHouse/ClickHouse/pull/44442) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Don't execute and/or/if/multiIf on LowCardinality dictionary, so the result type cannot be LowCardinality. It could lead to error `Illegal column ColumnLowCardinality` in some cases. Fixes [#43603](https://github.com/ClickHouse/ClickHouse/issues/43603). [#44469](https://github.com/ClickHouse/ClickHouse/pull/44469) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix mutations with setting `max_streams_for_merge_tree_reading`. [#44472](https://github.com/ClickHouse/ClickHouse/pull/44472) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix potential null pointer dereference with GROUPING SETS in ASTSelectQuery::formatImpl ([#43049](https://github.com/ClickHouse/ClickHouse/issues/43049)). [#44479](https://github.com/ClickHouse/ClickHouse/pull/44479) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Validate types in table function arguments, CAST function arguments, JSONAsObject schema inference according to settings. [#44501](https://github.com/ClickHouse/ClickHouse/pull/44501) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* - Fix IN function with LC and const column, close [#44503](https://github.com/ClickHouse/ClickHouse/issues/44503). [#44506](https://github.com/ClickHouse/ClickHouse/pull/44506) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* Fixed a bug in normalization of a `DEFAULT` expression in `CREATE TABLE` statement. The second argument of function `in` (or the right argument of operator `IN`) might be replaced with the result of its evaluation during CREATE query execution. Fixes [#44496](https://github.com/ClickHouse/ClickHouse/issues/44496). [#44547](https://github.com/ClickHouse/ClickHouse/pull/44547) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Projections do not work in presence of WITH ROLLUP, WITH CUBE and WITH TOTALS. In previous versions, a query produced an exception instead of skipping the usage of projections. This closes [#44614](https://github.com/ClickHouse/ClickHouse/issues/44614). This closes [#42772](https://github.com/ClickHouse/ClickHouse/issues/42772). [#44615](https://github.com/ClickHouse/ClickHouse/pull/44615) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* * Fix bug in experimental analyzer and `aggregate_functions_null_for_empty = 1`. Close [#44644](https://github.com/ClickHouse/ClickHouse/issues/44644). [#44648](https://github.com/ClickHouse/ClickHouse/pull/44648) ([Vladimir C](https://github.com/vdimir)).
|
||||
* async blocks are not cleaned because the function `get all blocks sorted by time` didn't get async blocks. [#44651](https://github.com/ClickHouse/ClickHouse/pull/44651) ([Han Fei](https://github.com/hanfei1991)).
|
||||
* Fix `LOGICAL_ERROR` `The top step of the right pipeline should be ExpressionStep` for JOIN with subquery, UNION, and TOTALS. Fixes [#43687](https://github.com/ClickHouse/ClickHouse/issues/43687). [#44673](https://github.com/ClickHouse/ClickHouse/pull/44673) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Avoid std::out_of_range exception in StorageExecutable. [#44681](https://github.com/ClickHouse/ClickHouse/pull/44681) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Do not apply `optimize_syntax_fuse_functions` to quantiles on AST, close [#44712](https://github.com/ClickHouse/ClickHouse/issues/44712). [#44713](https://github.com/ClickHouse/ClickHouse/pull/44713) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix bug with wrong type in Merge table and PREWHERE, close [#43324](https://github.com/ClickHouse/ClickHouse/issues/43324). [#44716](https://github.com/ClickHouse/ClickHouse/pull/44716) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix possible crash during shutdown (while destroying TraceCollector). Fixes [#44757](https://github.com/ClickHouse/ClickHouse/issues/44757). [#44758](https://github.com/ClickHouse/ClickHouse/pull/44758) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix a possible crash in distributed query processing. The crash could happen if a query with totals or extremes returned an empty result and there are mismatched types in the Distrubuted and the local tables. Fixes [#44738](https://github.com/ClickHouse/ClickHouse/issues/44738). [#44760](https://github.com/ClickHouse/ClickHouse/pull/44760) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix fsync for fetches (`min_compressed_bytes_to_fsync_after_fetch`)/small files (ttl.txt, columns.txt) in mutations (`min_rows_to_fsync_after_merge`/`min_compressed_bytes_to_fsync_after_merge`). [#44781](https://github.com/ClickHouse/ClickHouse/pull/44781) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* A rare race condition was possible when querying the `system.parts` or `system.parts_columns` tables in the presence of parts being moved between disks. Introduced in [#41145](https://github.com/ClickHouse/ClickHouse/issues/41145). [#44809](https://github.com/ClickHouse/ClickHouse/pull/44809) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix the error `Context has expired` which could appear with enabled projections optimization. Can be reproduced for queries with specific functions, like `dictHas/dictGet` which use context in runtime. Fixes [#44844](https://github.com/ClickHouse/ClickHouse/issues/44844). [#44850](https://github.com/ClickHouse/ClickHouse/pull/44850) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Another fix for `Cannot read all data` error which could happen while reading `LowCardinality` dictionary from remote fs. Fixes [#44709](https://github.com/ClickHouse/ClickHouse/issues/44709). [#44875](https://github.com/ClickHouse/ClickHouse/pull/44875) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* - Ignore hwmon sensors on label read issues. [#44895](https://github.com/ClickHouse/ClickHouse/pull/44895) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Use `max_delay_to_insert` value in case calculated time to delay INSERT exceeds the setting value. Related to [#44902](https://github.com/ClickHouse/ClickHouse/issues/44902). [#44916](https://github.com/ClickHouse/ClickHouse/pull/44916) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Fix error `Different order of columns in UNION subquery` for queries with `UNION`. Fixes [#44866](https://github.com/ClickHouse/ClickHouse/issues/44866). [#44920](https://github.com/ClickHouse/ClickHouse/pull/44920) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Delay for INSERT can be calculated incorrectly, which can lead to always using `max_delay_to_insert` setting as delay instead of a correct value. Using simple formula `max_delay_to_insert * (parts_over_threshold/max_allowed_parts_over_threshold)` i.e. delay grows proportionally to parts over threshold. Closes [#44902](https://github.com/ClickHouse/ClickHouse/issues/44902). [#44954](https://github.com/ClickHouse/ClickHouse/pull/44954) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* fix alter table ttl error when wide part has light weight delete mask. [#44959](https://github.com/ClickHouse/ClickHouse/pull/44959) ([Mingliang Pan](https://github.com/liangliangpan)).
|
||||
* Follow-up fix for Replace domain IP types (IPv4, IPv6) with native [#43221](https://github.com/ClickHouse/ClickHouse/issues/43221). [#45024](https://github.com/ClickHouse/ClickHouse/pull/45024) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Follow-up fix for Replace domain IP types (IPv4, IPv6) with native https://github.com/ClickHouse/ClickHouse/pull/43221. [#45043](https://github.com/ClickHouse/ClickHouse/pull/45043) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* A buffer overflow was possible in the parser. Found by fuzzer. [#45047](https://github.com/ClickHouse/ClickHouse/pull/45047) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix possible cannot-read-all-data error in storage FileLog. Closes [#45051](https://github.com/ClickHouse/ClickHouse/issues/45051), [#38257](https://github.com/ClickHouse/ClickHouse/issues/38257). [#45057](https://github.com/ClickHouse/ClickHouse/pull/45057) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Memory efficient aggregation (setting `distributed_aggregation_memory_efficient`) is disabled when grouping sets are present in the query. [#45058](https://github.com/ClickHouse/ClickHouse/pull/45058) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Fix `RANGE_HASHED` dictionary to count range columns as part of primary key during updates when `update_field` is specified. Closes [#44588](https://github.com/ClickHouse/ClickHouse/issues/44588). [#45061](https://github.com/ClickHouse/ClickHouse/pull/45061) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Fix error `Cannot capture column` for `LowCardinality` captured argument of nested labmda. Fixes [#45028](https://github.com/ClickHouse/ClickHouse/issues/45028). [#45065](https://github.com/ClickHouse/ClickHouse/pull/45065) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix the wrong query result of `additional_table_filters` (additional filter was not applied) in case if minmax/count projection is used. [#45133](https://github.com/ClickHouse/ClickHouse/pull/45133) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* - Fixed bug in `histogram` function accepting negative values. [#45147](https://github.com/ClickHouse/ClickHouse/pull/45147) ([simpleton](https://github.com/rgzntrade)).
|
||||
* Follow-up fix for Replace domain IP types (IPv4, IPv6) with native https://github.com/ClickHouse/ClickHouse/pull/43221. [#45150](https://github.com/ClickHouse/ClickHouse/pull/45150) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix wrong column nullability in StoreageJoin, close [#44940](https://github.com/ClickHouse/ClickHouse/issues/44940). [#45184](https://github.com/ClickHouse/ClickHouse/pull/45184) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix `background_fetches_pool_size` settings reload (increase at runtime). [#45189](https://github.com/ClickHouse/ClickHouse/pull/45189) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Correctly process `SELECT` queries on KV engines (e.g. KeeperMap, EmbeddedRocksDB) using `IN` on the key with subquery producing different type. [#45215](https://github.com/ClickHouse/ClickHouse/pull/45215) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix logical error in SEMI JOIN & join_use_nulls in some cases, close [#45163](https://github.com/ClickHouse/ClickHouse/issues/45163), close [#45209](https://github.com/ClickHouse/ClickHouse/issues/45209). [#45230](https://github.com/ClickHouse/ClickHouse/pull/45230) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix heap-use-after-free in reading from s3. [#45253](https://github.com/ClickHouse/ClickHouse/pull/45253) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix bug when the Avro Union type is ['null', Nested type], closes [#45275](https://github.com/ClickHouse/ClickHouse/issues/45275). Fix bug that incorrectly infer `bytes` type to `Float`. [#45276](https://github.com/ClickHouse/ClickHouse/pull/45276) ([flynn](https://github.com/ucasfl)).
|
||||
* Throw a correct exception when explicit PREWHERE cannot be used with table using storage engine `Merge`. [#45319](https://github.com/ClickHouse/ClickHouse/pull/45319) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Under WSL1 Ubuntu self-extracting clickhouse fails to decompress due to inconsistency - /proc/self/maps reporting 32bit file's inode, while stat reporting 64bit inode. [#45339](https://github.com/ClickHouse/ClickHouse/pull/45339) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix race in Distributed table startup (that could lead to processing file of async INSERT multiple times). [#45360](https://github.com/ClickHouse/ClickHouse/pull/45360) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix possible crash while reading from storage `S3` and table function `s3` in case when `ListObject` request has failed. [#45371](https://github.com/ClickHouse/ClickHouse/pull/45371) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* * Fixed some bugs in JOINS with WHERE by disabling "move to prewhere" optimization for it, close [#44062](https://github.com/ClickHouse/ClickHouse/issues/44062). [#45391](https://github.com/ClickHouse/ClickHouse/pull/45391) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix `SELECT ... FROM system.dictionaries` exception when there is a dictionary with a bad structure (e.g. incorrect type in xml config). [#45399](https://github.com/ClickHouse/ClickHouse/pull/45399) ([Aleksei Filatov](https://github.com/aalexfvk)).
|
||||
* Fix s3Cluster schema inference when structure from insertion table is used in `INSERT INTO ... SELECT * FROM s3Cluster` queries. [#45422](https://github.com/ClickHouse/ClickHouse/pull/45422) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix bug in JSON/BSONEachRow parsing with HTTP that could lead to using default values for some columns instead of values from data. [#45424](https://github.com/ClickHouse/ClickHouse/pull/45424) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fixed bug (Code: 632. DB::Exception: Unexpected data ... after parsed IPv6 value ...) with typed parsing of IP types from text source. [#45425](https://github.com/ClickHouse/ClickHouse/pull/45425) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* close [#45297](https://github.com/ClickHouse/ClickHouse/issues/45297) Add check for empty regular expressions. [#45428](https://github.com/ClickHouse/ClickHouse/pull/45428) ([Han Fei](https://github.com/hanfei1991)).
|
||||
* Fix possible (likely distributed) query hung. [#45448](https://github.com/ClickHouse/ClickHouse/pull/45448) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix disabled two-level aggregation from HTTP. [#45450](https://github.com/ClickHouse/ClickHouse/pull/45450) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix possible deadlock with `allow_asynchronous_read_from_io_pool_for_merge_tree` enabled in case of exception from `ThreadPool::schedule`. [#45481](https://github.com/ClickHouse/ClickHouse/pull/45481) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible in-use table after DETACH. [#45493](https://github.com/ClickHouse/ClickHouse/pull/45493) ([Azat Khuzhin](https://github.com/azat)).
* Fix rare abort in case when query is canceled and parallel parsing was used during its execution. [#45498](https://github.com/ClickHouse/ClickHouse/pull/45498) ([Anton Popov](https://github.com/CurtizJ)).
* Fix a race between Distributed table creation and INSERT into it (could lead to CANNOT_LINK during INSERT into the table). [#45502](https://github.com/ClickHouse/ClickHouse/pull/45502) ([Azat Khuzhin](https://github.com/azat)).
* Add proper default (SLRU) to cache policy getter. Closes [#45514](https://github.com/ClickHouse/ClickHouse/issues/45514). [#45524](https://github.com/ClickHouse/ClickHouse/pull/45524) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove AST-based optimization `optimize_fuse_sum_count_avg`, close [#45439](https://github.com/ClickHouse/ClickHouse/issues/45439). [#45558](https://github.com/ClickHouse/ClickHouse/pull/45558) ([Vladimir C](https://github.com/vdimir)).
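
A minimal sketch of the query shape covered by the s3Cluster schema-inference fix above; the table `events`, the cluster name, and the bucket URL are hypothetical:

```sql
-- Per the entry above, the structure of the insertion table is used
-- for schema inference instead of re-inferring it from the S3 files.
CREATE TABLE events (id UInt64, payload String) ENGINE = MergeTree ORDER BY id;

INSERT INTO events
SELECT *
FROM s3Cluster(
    'default',                                        -- cluster name (assumption)
    'https://my-bucket.s3.amazonaws.com/data/*.tsv',  -- bucket URL (assumption)
    'TSV');
```
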
#### Bug-fix
* Disallow arrayJoin in mutations, closes [#42637](https://github.com/ClickHouse/ClickHouse/issues/42637). Implementation: added a new parameter disallow_arrayjoin to ActionsVisitor::Data, which is set by MutationsIterator when it appends an expression; ActionsVisitor checks disallow_arrayjoin and throws an error when arrayJoin is used in a mutation (see the sketch after this list). Testing: added test 02504_disallow_arrayjoin_in_mutations.sql. [#44447](https://github.com/ClickHouse/ClickHouse/pull/44447) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix for qualified asterisks with an alias table name and a column transformer, resolves [#44736](https://github.com/ClickHouse/ClickHouse/issues/44736). [#44755](https://github.com/ClickHouse/ClickHouse/pull/44755) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Updated the backup/restore status when concurrent backups & restores are not allowed, resolves [#45486](https://github.com/ClickHouse/ClickHouse/issues/45486). Implementation: moved the concurrent backup/restore check inside the try-catch block which sets the status, so that other nodes in the cluster are aware of failures; renamed backup_uuid to restore_uuid in RestoreSettings. [#45497](https://github.com/ClickHouse/ClickHouse/pull/45497) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
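
A minimal sketch of the mutation shape that is now rejected, using a hypothetical table `t`:

```sql
CREATE TABLE t (id UInt64, v UInt64) ENGINE = MergeTree ORDER BY id;

-- After this change the server throws an error instead of accepting
-- arrayJoin inside a mutation expression.
ALTER TABLE t UPDATE v = arrayJoin([1, 2, 3]) WHERE id = 1;
```
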
#### Build Improvement
* crc32 fix for s390x. [#43706](https://github.com/ClickHouse/ClickHouse/pull/43706) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* Fixed endian issues in transform function for s390x. [#45522](https://github.com/ClickHouse/ClickHouse/pull/45522) ([Harry Lee](https://github.com/HarryLeeIBM)).
#### Feature
* Record server startup time in ProfileEvents, resolves [#43188](https://github.com/ClickHouse/ClickHouse/issues/43188). Implementation: added ProfileEvents::ServerStartupMilliseconds, recording the time from the start of main until the server begins listening on sockets (see the sketch after this list). Testing: added test 02532_profileevents_server_startup_time.sql. [#45250](https://github.com/ClickHouse/ClickHouse/pull/45250) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
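
A quick way to read the new counter, assuming it is exposed through `system.events` like other ProfileEvents:

```sql
SELECT event, value, description
FROM system.events
WHERE event = 'ServerStartupMilliseconds';
```
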
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "If user only need virtual columns, we don't need to initialize ReadBufferFromS3"'. [#44939](https://github.com/ClickHouse/ClickHouse/pull/44939) ([Anton Popov](https://github.com/CurtizJ)).
* NO CL ENTRY: 'Revert "Custom reading for mutation"'. [#45121](https://github.com/ClickHouse/ClickHouse/pull/45121) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Revert "Custom reading for mutation""'. [#45122](https://github.com/ClickHouse/ClickHouse/pull/45122) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* NO CL ENTRY: 'Revert "update function DAYOFWEEK and add new function WEEKDAY for mysql/spark compatiability"'. [#45221](https://github.com/ClickHouse/ClickHouse/pull/45221) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Validate function arguments in query tree"'. [#45299](https://github.com/ClickHouse/ClickHouse/pull/45299) ([Maksim Kita](https://github.com/kitaisreal)).
* NO CL ENTRY: 'Revert "Revert "Validate function arguments in query tree""'. [#45300](https://github.com/ClickHouse/ClickHouse/pull/45300) ([Maksim Kita](https://github.com/kitaisreal)).
* NO CL ENTRY: 'Revert "Support optimize_or_like_chain in QueryTreePassManager"'. [#45406](https://github.com/ClickHouse/ClickHouse/pull/45406) ([Anton Popov](https://github.com/CurtizJ)).
* NO CL ENTRY: 'Resubmit Support optimize_or_like_chain in QueryTreePassManager'. [#45410](https://github.com/ClickHouse/ClickHouse/pull/45410) ([Dmitry Novik](https://github.com/novikd)).
* NO CL ENTRY: 'Revert "Remove redundant sorting"'. [#45414](https://github.com/ClickHouse/ClickHouse/pull/45414) ([Igor Nikonov](https://github.com/devcrafter)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Automatically merge green backport PRs and green approved PRs [#41110](https://github.com/ClickHouse/ClickHouse/pull/41110) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix assertion in async read buffer from remote [#41231](https://github.com/ClickHouse/ClickHouse/pull/41231) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add retries on ConnectionError [#42991](https://github.com/ClickHouse/ClickHouse/pull/42991) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Update aws-c* submodules [#43020](https://github.com/ClickHouse/ClickHouse/pull/43020) ([Vitaly Baranov](https://github.com/vitlibar)).
* Replace domain IP types (IPv4, IPv6) with native [#43221](https://github.com/ClickHouse/ClickHouse/pull/43221) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix aggregate functions optimisation in AggregateFunctionsArithmeticOperationsPass [#43372](https://github.com/ClickHouse/ClickHouse/pull/43372) ([Dmitry Novik](https://github.com/novikd)).
* Improve pytest --pdb experience by preserving dockerd on SIGINT [#43392](https://github.com/ClickHouse/ClickHouse/pull/43392) ([Azat Khuzhin](https://github.com/azat)).
* RFC: tests: add stacktraces for hung queries [#43396](https://github.com/ClickHouse/ClickHouse/pull/43396) ([Azat Khuzhin](https://github.com/azat)).
* Followup fixes for systemd notification ([#43400](https://github.com/ClickHouse/ClickHouse/issues/43400)) [#43597](https://github.com/ClickHouse/ClickHouse/pull/43597) ([Alexander Gololobov](https://github.com/davenger)).
* Refactor FunctionNode [#43761](https://github.com/ClickHouse/ClickHouse/pull/43761) ([Dmitry Novik](https://github.com/novikd)).
* Some cleanup: grace hash join [#43851](https://github.com/ClickHouse/ClickHouse/pull/43851) ([Igor Nikonov](https://github.com/devcrafter)).
* Temporary files evict fs cache - 2nd approach [#43972](https://github.com/ClickHouse/ClickHouse/pull/43972) ([Vladimir C](https://github.com/vdimir)).
* Randomize setting `enable_memory_bound_merging_of_aggregation_results` in tests [#43986](https://github.com/ClickHouse/ClickHouse/pull/43986) ([Nikita Taranov](https://github.com/nickitat)).
* Analyzer aggregate functions passes small fixes [#44013](https://github.com/ClickHouse/ClickHouse/pull/44013) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix wrong char in command [#44018](https://github.com/ClickHouse/ClickHouse/pull/44018) ([alesapin](https://github.com/alesapin)).
* Analyzer support Set index [#44097](https://github.com/ClickHouse/ClickHouse/pull/44097) ([Maksim Kita](https://github.com/kitaisreal)).
* Provide monotonicity info for `toUnixTimestamp64*` [#44116](https://github.com/ClickHouse/ClickHouse/pull/44116) ([Nikita Taranov](https://github.com/nickitat)).
* Avoid loading toolchain files multiple times [#44122](https://github.com/ClickHouse/ClickHouse/pull/44122) ([Azat Khuzhin](https://github.com/azat)).
* tests: exclude flaky columns from SHOW CLUSTERS test [#44123](https://github.com/ClickHouse/ClickHouse/pull/44123) ([Azat Khuzhin](https://github.com/azat)).
* Bump libdivide (to gain some new optimizations) [#44132](https://github.com/ClickHouse/ClickHouse/pull/44132) ([Azat Khuzhin](https://github.com/azat)).
* Make atomic counter relaxed in blockNumber() [#44193](https://github.com/ClickHouse/ClickHouse/pull/44193) ([Igor Nikonov](https://github.com/devcrafter)).
* Try fix flaky 01072_window_view_multiple_columns_groupby [#44195](https://github.com/ClickHouse/ClickHouse/pull/44195) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Apply new code of named collections (from [#43147](https://github.com/ClickHouse/ClickHouse/issues/43147)) to external table engines part 1 [#44204](https://github.com/ClickHouse/ClickHouse/pull/44204) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add some settings under `compatibility` [#44209](https://github.com/ClickHouse/ClickHouse/pull/44209) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Recommend Slack over Telegram in the "Question" issue template [#44222](https://github.com/ClickHouse/ClickHouse/pull/44222) ([Ivan Blinkov](https://github.com/blinkov)).
* Forbid paths in timezone names [#44225](https://github.com/ClickHouse/ClickHouse/pull/44225) ([Kruglov Pavel](https://github.com/Avogar)).
* Analyzer storage view crash fix [#44230](https://github.com/ClickHouse/ClickHouse/pull/44230) ([Maksim Kita](https://github.com/kitaisreal)).
* Add ThreadsInOvercommitTracker metric [#44233](https://github.com/ClickHouse/ClickHouse/pull/44233) ([Dmitry Novik](https://github.com/novikd)).
* Analyzer expired Context crash fix [#44234](https://github.com/ClickHouse/ClickHouse/pull/44234) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix grace join memory consumption, pt1 [#44238](https://github.com/ClickHouse/ClickHouse/pull/44238) ([Vladimir C](https://github.com/vdimir)).
* Fixed use-after-free of BLAKE3 error message [#44242](https://github.com/ClickHouse/ClickHouse/pull/44242) ([Joanna Hulboj](https://github.com/jh0x)).
* Fix deadlock in StorageSystemDatabases [#44272](https://github.com/ClickHouse/ClickHouse/pull/44272) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Get rid of global Git object [#44273](https://github.com/ClickHouse/ClickHouse/pull/44273) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update version after release [#44275](https://github.com/ClickHouse/ClickHouse/pull/44275) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update version_date.tsv and changelogs after v22.12.1.1752-stable [#44281](https://github.com/ClickHouse/ClickHouse/pull/44281) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Do not hold data parts during insert [#44299](https://github.com/ClickHouse/ClickHouse/pull/44299) ([Anton Popov](https://github.com/CurtizJ)).
* Another fix `test_server_reload` [#44306](https://github.com/ClickHouse/ClickHouse/pull/44306) ([Antonio Andelic](https://github.com/antonio2368)).
* Update version_date.tsv and changelogs after v22.9.7.34-stable [#44309](https://github.com/ClickHouse/ClickHouse/pull/44309) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* tests/perf: fix dependency check during DROP [#44312](https://github.com/ClickHouse/ClickHouse/pull/44312) ([Azat Khuzhin](https://github.com/azat)).
* (unused openssl integration, not for production) a follow-up [#44325](https://github.com/ClickHouse/ClickHouse/pull/44325) ([Boris Kuschel](https://github.com/bkuschel)).
* Replace old named collections code with new (from [#43147](https://github.com/ClickHouse/ClickHouse/issues/43147)) part 2 [#44327](https://github.com/ClickHouse/ClickHouse/pull/44327) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Disable "git-import" test in debug mode [#44328](https://github.com/ClickHouse/ClickHouse/pull/44328) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Check s3 part upload settings [#44335](https://github.com/ClickHouse/ClickHouse/pull/44335) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix typo [#44337](https://github.com/ClickHouse/ClickHouse/pull/44337) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for PowerBI [#44338](https://github.com/ClickHouse/ClickHouse/pull/44338) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#36038](https://github.com/ClickHouse/ClickHouse/issues/36038) [#44339](https://github.com/ClickHouse/ClickHouse/pull/44339) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#29386](https://github.com/ClickHouse/ClickHouse/issues/29386) [#44340](https://github.com/ClickHouse/ClickHouse/pull/44340) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#22929](https://github.com/ClickHouse/ClickHouse/issues/22929) [#44341](https://github.com/ClickHouse/ClickHouse/pull/44341) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#29883](https://github.com/ClickHouse/ClickHouse/issues/29883) [#44342](https://github.com/ClickHouse/ClickHouse/pull/44342) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix Docker [#44343](https://github.com/ClickHouse/ClickHouse/pull/44343) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test "02481_async_insert_dedup.python" [#44349](https://github.com/ClickHouse/ClickHouse/pull/44349) ([Han Fei](https://github.com/hanfei1991)).
* Add a test for [#22160](https://github.com/ClickHouse/ClickHouse/issues/22160) [#44355](https://github.com/ClickHouse/ClickHouse/pull/44355) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#34708](https://github.com/ClickHouse/ClickHouse/issues/34708) [#44356](https://github.com/ClickHouse/ClickHouse/pull/44356) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#30679](https://github.com/ClickHouse/ClickHouse/issues/30679) [#44357](https://github.com/ClickHouse/ClickHouse/pull/44357) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#34669](https://github.com/ClickHouse/ClickHouse/issues/34669) [#44359](https://github.com/ClickHouse/ClickHouse/pull/44359) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#34724](https://github.com/ClickHouse/ClickHouse/issues/34724) [#44360](https://github.com/ClickHouse/ClickHouse/pull/44360) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Try restarting ZK cluster on failed connection in `test_keeper_zookeeper_converted` [#44363](https://github.com/ClickHouse/ClickHouse/pull/44363) ([Antonio Andelic](https://github.com/antonio2368)).
* Disable grace_hash in test 00172_parallel_join [#44367](https://github.com/ClickHouse/ClickHouse/pull/44367) ([Vladimir C](https://github.com/vdimir)).
* Add check for submodules sanity [#44386](https://github.com/ClickHouse/ClickHouse/pull/44386) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Lock table for share during startup for database ordinary [#44393](https://github.com/ClickHouse/ClickHouse/pull/44393) ([alesapin](https://github.com/alesapin)).
* Implement a custom central checkout action [#44399](https://github.com/ClickHouse/ClickHouse/pull/44399) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Try fix some tests [#44406](https://github.com/ClickHouse/ClickHouse/pull/44406) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Better ParserAllCollectionsOfLiterals [#44408](https://github.com/ClickHouse/ClickHouse/pull/44408) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix bug with merge/mutate pool size increase [#44436](https://github.com/ClickHouse/ClickHouse/pull/44436) ([alesapin](https://github.com/alesapin)).
* Update 01072_window_view_multiple_columns_groupby.sh [#44438](https://github.com/ClickHouse/ClickHouse/pull/44438) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Disable buggy tsan assertion for integration test [#44444](https://github.com/ClickHouse/ClickHouse/pull/44444) ([alesapin](https://github.com/alesapin)).
* Respect setting settings.schema_inference_make_columns_nullable in Parquet/ORC/Arrow formats [#44446](https://github.com/ClickHouse/ClickHouse/pull/44446) ([Kruglov Pavel](https://github.com/Avogar)).
* Add tests as examples of the date(time)-vs-string comparison errors that we should eliminate [#44462](https://github.com/ClickHouse/ClickHouse/pull/44462) ([Ilya Yatsishin](https://github.com/qoega)).
* Parallel parts cleanup with zero copy replication [#44466](https://github.com/ClickHouse/ClickHouse/pull/44466) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix incorrect usages of `getPartName()` [#44468](https://github.com/ClickHouse/ClickHouse/pull/44468) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flaky test `roaring_memory_tracking` [#44470](https://github.com/ClickHouse/ClickHouse/pull/44470) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Clarify query_id in test 01092_memory_profiler [#44483](https://github.com/ClickHouse/ClickHouse/pull/44483) ([Vladimir C](https://github.com/vdimir)).
* Default value for optional in SortNode::updateTreeHashImpl [#44491](https://github.com/ClickHouse/ClickHouse/pull/44491) ([Vladimir C](https://github.com/vdimir)).
* Do not try to remove WAL/move broken parts for static storage [#44495](https://github.com/ClickHouse/ClickHouse/pull/44495) ([Azat Khuzhin](https://github.com/azat)).
* Removed parent pid check that breaks in containers [#44499](https://github.com/ClickHouse/ClickHouse/pull/44499) ([Alexander Gololobov](https://github.com/davenger)).
* Analyzer duplicate alias crash fix [#44508](https://github.com/ClickHouse/ClickHouse/pull/44508) ([Maksim Kita](https://github.com/kitaisreal)).
* Minor code polishing [#44513](https://github.com/ClickHouse/ClickHouse/pull/44513) ([alesapin](https://github.com/alesapin)).
* Better error message if named collection does not exist [#44517](https://github.com/ClickHouse/ClickHouse/pull/44517) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add the lambda to collect data for workflow_jobs [#44520](https://github.com/ClickHouse/ClickHouse/pull/44520) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Introduce groupArrayLast() (useful to store the last X values); see the sketch at the end of this list. [#44521](https://github.com/ClickHouse/ClickHouse/pull/44521) ([Azat Khuzhin](https://github.com/azat)).
* Infer numbers starting from zero as strings in TSV [#44522](https://github.com/ClickHouse/ClickHouse/pull/44522) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix wrong condition for enabling async reading from MergeTree. [#44530](https://github.com/ClickHouse/ClickHouse/pull/44530) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* tests: capture dmesg in integration tests [#44535](https://github.com/ClickHouse/ClickHouse/pull/44535) ([Azat Khuzhin](https://github.com/azat)).
* Analyzer support distributed queries processing [#44540](https://github.com/ClickHouse/ClickHouse/pull/44540) ([Maksim Kita](https://github.com/kitaisreal)).
* Followup [#43761](https://github.com/ClickHouse/ClickHouse/issues/43761) [#44541](https://github.com/ClickHouse/ClickHouse/pull/44541) ([Dmitry Novik](https://github.com/novikd)).
* Drop unused columns after join on/using [#44545](https://github.com/ClickHouse/ClickHouse/pull/44545) ([Vladimir C](https://github.com/vdimir)).
* Improve inferring arrays with nulls in JSON formats [#44550](https://github.com/ClickHouse/ClickHouse/pull/44550) ([Kruglov Pavel](https://github.com/Avogar)).
* Make BC check optional (if env var set) [#44564](https://github.com/ClickHouse/ClickHouse/pull/44564) ([alesapin](https://github.com/alesapin)).
* Fix extremely slow stack traces in debug build [#44569](https://github.com/ClickHouse/ClickHouse/pull/44569) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better command line argument name in `clickhouse-benchmark` [#44570](https://github.com/ClickHouse/ClickHouse/pull/44570) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix HDFS test [#44572](https://github.com/ClickHouse/ClickHouse/pull/44572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test_distributed_queries_stress [#44573](https://github.com/ClickHouse/ClickHouse/pull/44573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Switch "contrib/sysroot" back to master. [#44574](https://github.com/ClickHouse/ClickHouse/pull/44574) ([Vitaly Baranov](https://github.com/vitlibar)).
* Non-significant changes [#44575](https://github.com/ClickHouse/ClickHouse/pull/44575) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fuzzer HTML: fix trash [#44580](https://github.com/ClickHouse/ClickHouse/pull/44580) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better diagnostics on server stop for the stress test [#44593](https://github.com/ClickHouse/ClickHouse/pull/44593) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The position of the log message about the server environment was wrong [#44595](https://github.com/ClickHouse/ClickHouse/pull/44595) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad punctuation in log [#44596](https://github.com/ClickHouse/ClickHouse/pull/44596) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix misleading log message [#44598](https://github.com/ClickHouse/ClickHouse/pull/44598) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad log message about MergeTree metadata cache. [#44599](https://github.com/ClickHouse/ClickHouse/pull/44599) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Slightly cleanup interactive line reader code [#44601](https://github.com/ClickHouse/ClickHouse/pull/44601) ([Azat Khuzhin](https://github.com/azat)).
* Rename `runlog.log` to `run.log` in tests [#44603](https://github.com/ClickHouse/ClickHouse/pull/44603) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix hung query in stress test [#44604](https://github.com/ClickHouse/ClickHouse/pull/44604) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve variable name [#44605](https://github.com/ClickHouse/ClickHouse/pull/44605) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Faster server startup after stress test [#44606](https://github.com/ClickHouse/ClickHouse/pull/44606) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix log messages in Coordination [#44607](https://github.com/ClickHouse/ClickHouse/pull/44607) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable Analyzer in fuzz and stress tests [#44609](https://github.com/ClickHouse/ClickHouse/pull/44609) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better log message [#44610](https://github.com/ClickHouse/ClickHouse/pull/44610) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Maybe fix a bogus MSan error [#44611](https://github.com/ClickHouse/ClickHouse/pull/44611) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix "too large allocation" message from MSan [#44613](https://github.com/ClickHouse/ClickHouse/pull/44613) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not fail the AST fuzzer if sanitizer is out of memory [#44616](https://github.com/ClickHouse/ClickHouse/pull/44616) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test `01111_create_drop_replicated_db_stress` [#44617](https://github.com/ClickHouse/ClickHouse/pull/44617) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* tests/integration: suppress exceptions during logging (due to pytest) [#44618](https://github.com/ClickHouse/ClickHouse/pull/44618) ([Azat Khuzhin](https://github.com/azat)).
* Fix rust modules rebuild (previously ignores changes in cargo config.toml) [#44623](https://github.com/ClickHouse/ClickHouse/pull/44623) ([Azat Khuzhin](https://github.com/azat)).
* Sometimes spot instances fail more than 20 times in a row [#44626](https://github.com/ClickHouse/ClickHouse/pull/44626) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix restart after quorum insert [#44628](https://github.com/ClickHouse/ClickHouse/pull/44628) ([alesapin](https://github.com/alesapin)).
* Revert "Merge pull request [#38953](https://github.com/ClickHouse/ClickHouse/issues/38953) from ClickHouse/add-allocation-ptr-to-trace-log [#44629](https://github.com/ClickHouse/ClickHouse/pull/44629) ([Raúl Marín](https://github.com/Algunenano)).
* Fix lambdas parsing [#44639](https://github.com/ClickHouse/ClickHouse/pull/44639) ([Nikolay Degterinsky](https://github.com/evillique)).
* Function viewExplain accept SELECT and settings [#44641](https://github.com/ClickHouse/ClickHouse/pull/44641) ([Vladimir C](https://github.com/vdimir)).
* Fix test `02015_async_inserts_2` [#44642](https://github.com/ClickHouse/ClickHouse/pull/44642) ([Anton Popov](https://github.com/CurtizJ)).
* Fix flaky test `test_keeper_multinode_simple` [#44645](https://github.com/ClickHouse/ClickHouse/pull/44645) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add +x flag for run-fuzzer.sh [#44649](https://github.com/ClickHouse/ClickHouse/pull/44649) ([alesapin](https://github.com/alesapin)).
* Custom reading for mutation [#44653](https://github.com/ClickHouse/ClickHouse/pull/44653) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix flaky test test_backup_restore_on_cluster [#44660](https://github.com/ClickHouse/ClickHouse/pull/44660) ([Vitaly Baranov](https://github.com/vitlibar)).
* tests/integration: add missing kazoo client termination [#44666](https://github.com/ClickHouse/ClickHouse/pull/44666) ([Azat Khuzhin](https://github.com/azat)).
* Move dmesg dumping out from runner to ci-runner.py [#44667](https://github.com/ClickHouse/ClickHouse/pull/44667) ([Azat Khuzhin](https://github.com/azat)).
* Remove questdb (it makes little sense, and the test was flaky) [#44669](https://github.com/ClickHouse/ClickHouse/pull/44669) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix minor typo: replace validate_bugix_check with validate_bugfix_check [#44672](https://github.com/ClickHouse/ClickHouse/pull/44672) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Fix parsing of ANY operator [#44678](https://github.com/ClickHouse/ClickHouse/pull/44678) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix test `01130_in_memory_parts` [#44683](https://github.com/ClickHouse/ClickHouse/pull/44683) ([Anton Popov](https://github.com/CurtizJ)).
* Remove old code [#44685](https://github.com/ClickHouse/ClickHouse/pull/44685) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test git-import [#44687](https://github.com/ClickHouse/ClickHouse/pull/44687) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve odbc test [#44688](https://github.com/ClickHouse/ClickHouse/pull/44688) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add retries to HTTP requests in ClickHouse test [#44689](https://github.com/ClickHouse/ClickHouse/pull/44689) ([alesapin](https://github.com/alesapin)).
* Fix flaky tests [#44690](https://github.com/ClickHouse/ClickHouse/pull/44690) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix flaky test "01502_long_log_tinylog_deadlock_race" [#44693](https://github.com/ClickHouse/ClickHouse/pull/44693) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve handling of old parts [#44694](https://github.com/ClickHouse/ClickHouse/pull/44694) ([Raúl Marín](https://github.com/Algunenano)).
* Update entrypoint.sh [#44699](https://github.com/ClickHouse/ClickHouse/pull/44699) ([Denny Crane](https://github.com/den-crane)).
* tests: more fixes for test_keeper_auth [#44702](https://github.com/ClickHouse/ClickHouse/pull/44702) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash on delete from materialized view [#44705](https://github.com/ClickHouse/ClickHouse/pull/44705) ([Alexander Gololobov](https://github.com/davenger)).
* Fix flaky filelog tests with database ordinary [#44706](https://github.com/ClickHouse/ClickHouse/pull/44706) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Make lightweight deletes always synchronous [#44718](https://github.com/ClickHouse/ClickHouse/pull/44718) ([Alexander Gololobov](https://github.com/davenger)).
* Fix deadlock in attach thread [#44719](https://github.com/ClickHouse/ClickHouse/pull/44719) ([alesapin](https://github.com/alesapin)).
* A few improvements to AST Fuzzer [#44720](https://github.com/ClickHouse/ClickHouse/pull/44720) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test [#44721](https://github.com/ClickHouse/ClickHouse/pull/44721) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Rename log in stress test [#44722](https://github.com/ClickHouse/ClickHouse/pull/44722) ([alesapin](https://github.com/alesapin)).
* Debug deadlock in stress test [#44723](https://github.com/ClickHouse/ClickHouse/pull/44723) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test "02102_row_binary_with_names_and_types.sh" [#44724](https://github.com/ClickHouse/ClickHouse/pull/44724) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Slightly better some tests [#44725](https://github.com/ClickHouse/ClickHouse/pull/44725) ([alesapin](https://github.com/alesapin)).
* Fix cases when clickhouse-server takes long time to start in functional tests with MSan [#44726](https://github.com/ClickHouse/ClickHouse/pull/44726) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Perf test: Log the time spent waiting for file sync [#44737](https://github.com/ClickHouse/ClickHouse/pull/44737) ([Raúl Marín](https://github.com/Algunenano)).
* Fix flaky test 02448_clone_replica_lost_part [#44759](https://github.com/ClickHouse/ClickHouse/pull/44759) ([alesapin](https://github.com/alesapin)).
* Build rust modules from the binary directory [#44762](https://github.com/ClickHouse/ClickHouse/pull/44762) ([Azat Khuzhin](https://github.com/azat)).
* Remove database ordinary from stress test [#44763](https://github.com/ClickHouse/ClickHouse/pull/44763) ([alesapin](https://github.com/alesapin)).
* Fix flaky test 02479_mysql_connect_to_self [#44768](https://github.com/ClickHouse/ClickHouse/pull/44768) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Print fatal messages in Fuzzer [#44769](https://github.com/ClickHouse/ClickHouse/pull/44769) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix incorrect docs [#44795](https://github.com/ClickHouse/ClickHouse/pull/44795) ([Kruglov Pavel](https://github.com/Avogar)).
* Added table name to error message [#44806](https://github.com/ClickHouse/ClickHouse/pull/44806) ([Alexander Gololobov](https://github.com/davenger)).
* Retry packages download if GitHub returned HTTP 500. [#44807](https://github.com/ClickHouse/ClickHouse/pull/44807) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Slightly better docs [#44808](https://github.com/ClickHouse/ClickHouse/pull/44808) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix total trash in stress test [#44810](https://github.com/ClickHouse/ClickHouse/pull/44810) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix ASan builds for glibc 2.36+ [#44811](https://github.com/ClickHouse/ClickHouse/pull/44811) ([Azat Khuzhin](https://github.com/azat)).
* Remove the remainings of TestFlows [#44812](https://github.com/ClickHouse/ClickHouse/pull/44812) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `grep` [#44813](https://github.com/ClickHouse/ClickHouse/pull/44813) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad cast in monotonicity analysis [#44818](https://github.com/ClickHouse/ClickHouse/pull/44818) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Modern tools, part 1 [#44819](https://github.com/ClickHouse/ClickHouse/pull/44819) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Modern tools in CI, part 2. [#44820](https://github.com/ClickHouse/ClickHouse/pull/44820) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix data race in DDLWorker [#44821](https://github.com/ClickHouse/ClickHouse/pull/44821) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix tests for bridges [#44822](https://github.com/ClickHouse/ClickHouse/pull/44822) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test_multiple_disks::test_jbod_overflow [#44823](https://github.com/ClickHouse/ClickHouse/pull/44823) ([Azat Khuzhin](https://github.com/azat)).
* Less OOM in stress test [#44824](https://github.com/ClickHouse/ClickHouse/pull/44824) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix misleading integration tests reports for parametrized tests [#44825](https://github.com/ClickHouse/ClickHouse/pull/44825) ([Azat Khuzhin](https://github.com/azat)).
* Fix two typos [#44826](https://github.com/ClickHouse/ClickHouse/pull/44826) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Adjust CSS [#44829](https://github.com/ClickHouse/ClickHouse/pull/44829) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix fuzzer report [#44830](https://github.com/ClickHouse/ClickHouse/pull/44830) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* check-style: check base for std::cerr/cout too [#44833](https://github.com/ClickHouse/ClickHouse/pull/44833) ([Azat Khuzhin](https://github.com/azat)).
* Try fixing `test_keeper_snapshot_small_distance` with ZK restart [#44834](https://github.com/ClickHouse/ClickHouse/pull/44834) ([Antonio Andelic](https://github.com/antonio2368)).
* Exclude cargo shared libraries from the artifacts [#44836](https://github.com/ClickHouse/ClickHouse/pull/44836) ([Azat Khuzhin](https://github.com/azat)).
* Add a tiny but important logging [#44837](https://github.com/ClickHouse/ClickHouse/pull/44837) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Escape submodules in style-check [#44838](https://github.com/ClickHouse/ClickHouse/pull/44838) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Move `test_dies_with_parent` to another module [#44839](https://github.com/ClickHouse/ClickHouse/pull/44839) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Remove unneeded softlink to official dev docs [#44841](https://github.com/ClickHouse/ClickHouse/pull/44841) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix data race in StorageS3 [#44842](https://github.com/ClickHouse/ClickHouse/pull/44842) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix rare race which can lead to queue hang [#44847](https://github.com/ClickHouse/ClickHouse/pull/44847) ([alesapin](https://github.com/alesapin)).
* No more retries in integration tests [#44851](https://github.com/ClickHouse/ClickHouse/pull/44851) ([Ilya Yatsishin](https://github.com/qoega)).
* Document usage of check_cxx_source_compiles instead of check_cxx_source_runs [#44854](https://github.com/ClickHouse/ClickHouse/pull/44854) ([Robert Schulze](https://github.com/rschu1ze)).
* More cases of OOM in Fuzzer [#44855](https://github.com/ClickHouse/ClickHouse/pull/44855) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix: sorted DISTINCT with empty string [#44856](https://github.com/ClickHouse/ClickHouse/pull/44856) ([Igor Nikonov](https://github.com/devcrafter)).
* Try to fix MSan build [#44857](https://github.com/ClickHouse/ClickHouse/pull/44857) ([Nikolay Degterinsky](https://github.com/evillique)).
* Cleanup setup_minio.sh [#44858](https://github.com/ClickHouse/ClickHouse/pull/44858) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Wait for ZK process to stop in tests using snapshot [#44859](https://github.com/ClickHouse/ClickHouse/pull/44859) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix flaky test and several typos [#44870](https://github.com/ClickHouse/ClickHouse/pull/44870) ([alesapin](https://github.com/alesapin)).
* Upload status files to S3 report for bugfix check [#44871](https://github.com/ClickHouse/ClickHouse/pull/44871) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix flaky test `02503_insert_storage_snapshot` [#44873](https://github.com/ClickHouse/ClickHouse/pull/44873) ([alesapin](https://github.com/alesapin)).
* Revert some changes from [#42777](https://github.com/ClickHouse/ClickHouse/issues/42777) to fix performance tests [#44876](https://github.com/ClickHouse/ClickHouse/pull/44876) ([Kruglov Pavel](https://github.com/Avogar)).
* Rewrite test_postgres_protocol test [#44880](https://github.com/ClickHouse/ClickHouse/pull/44880) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix ConcurrentBoundedQueue::emplace() return value in case of finished queue [#44881](https://github.com/ClickHouse/ClickHouse/pull/44881) ([Azat Khuzhin](https://github.com/azat)).
* Validate function arguments in query tree [#44882](https://github.com/ClickHouse/ClickHouse/pull/44882) ([Dmitry Novik](https://github.com/novikd)).
* Rework CI reports to have a class and clarify the logic [#44883](https://github.com/ClickHouse/ClickHouse/pull/44883) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix typo [#44886](https://github.com/ClickHouse/ClickHouse/pull/44886) ([Enrique Herreros](https://github.com/eherrerosj)).
* Store ZK generated data in `test_keeper_snapshot_small_distance` [#44888](https://github.com/ClickHouse/ClickHouse/pull/44888) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix "AttributeError: 'BuildResult' object has no attribute 'libraries'" in BuilderReport and BuilderSpecialReport [#44890](https://github.com/ClickHouse/ClickHouse/pull/44890) ([Robert Schulze](https://github.com/rschu1ze)).
* Convert integration test_dictionaries_update_field to a stateless [#44891](https://github.com/ClickHouse/ClickHouse/pull/44891) ([Azat Khuzhin](https://github.com/azat)).
* Upgrade googletest to latest HEAD [#44894](https://github.com/ClickHouse/ClickHouse/pull/44894) ([Robert Schulze](https://github.com/rschu1ze)).
* Try fix rabbitmq potential leak [#44897](https://github.com/ClickHouse/ClickHouse/pull/44897) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Try to fix flaky `test_storage_kafka::test_kafka_produce_key_timestamp` [#44898](https://github.com/ClickHouse/ClickHouse/pull/44898) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix flaky `test_concurrent_queries_restriction_by_query_kind` [#44903](https://github.com/ClickHouse/ClickHouse/pull/44903) ([Antonio Andelic](https://github.com/antonio2368)).
* Avoid Keeper crash on shutdown (fix `test_keeper_snapshot_on_exit`) [#44908](https://github.com/ClickHouse/ClickHouse/pull/44908) ([Antonio Andelic](https://github.com/antonio2368)).
* Do not merge over a gap with outdated undeleted parts [#44909](https://github.com/ClickHouse/ClickHouse/pull/44909) ([Sema Checherinda](https://github.com/CheSema)).
* Fix logging message in MergeTreeDataMergerMutator (about merged parts) [#44917](https://github.com/ClickHouse/ClickHouse/pull/44917) ([Azat Khuzhin](https://github.com/azat)).
* Fix flaky test `test_lost_part` [#44921](https://github.com/ClickHouse/ClickHouse/pull/44921) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add fast and cancellable shared_mutex alternatives [#44924](https://github.com/ClickHouse/ClickHouse/pull/44924) ([Sergei Trifonov](https://github.com/serxa)).
* Fix deadlock in Keeper's changelog [#44937](https://github.com/ClickHouse/ClickHouse/pull/44937) ([Antonio Andelic](https://github.com/antonio2368)).
* Stop merges to avoid a race between merge and freeze. [#44938](https://github.com/ClickHouse/ClickHouse/pull/44938) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix memory leak in Aws::InitAPI [#44942](https://github.com/ClickHouse/ClickHouse/pull/44942) ([Vitaly Baranov](https://github.com/vitlibar)).
* Change error code on invalid background_pool_size config [#44947](https://github.com/ClickHouse/ClickHouse/pull/44947) ([Raúl Marín](https://github.com/Algunenano)).
* Fix exception fix in TraceCollector dtor [#44948](https://github.com/ClickHouse/ClickHouse/pull/44948) ([Robert Schulze](https://github.com/rschu1ze)).
* Parallel distributed insert select with s3Cluster [3] [#44955](https://github.com/ClickHouse/ClickHouse/pull/44955) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Do not check read result consistency when unwinding [#44956](https://github.com/ClickHouse/ClickHouse/pull/44956) ([Alexander Gololobov](https://github.com/davenger)).
* Up the log level of tables dependencies graphs [#44957](https://github.com/ClickHouse/ClickHouse/pull/44957) ([Vitaly Baranov](https://github.com/vitlibar)).
* Hipster's HTML [#44961](https://github.com/ClickHouse/ClickHouse/pull/44961) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Docs: Mention non-standard DOTALL behavior of ClickHouse's match() [#44977](https://github.com/ClickHouse/ClickHouse/pull/44977) ([Robert Schulze](https://github.com/rschu1ze)).
* tests: fix test_replicated_users flakiness [#44978](https://github.com/ClickHouse/ClickHouse/pull/44978) ([Azat Khuzhin](https://github.com/azat)).
* Check what if disable some checks in storage Merge. [#44983](https://github.com/ClickHouse/ClickHouse/pull/44983) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix check for not existing input in ActionsDAG [#44987](https://github.com/ClickHouse/ClickHouse/pull/44987) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Update version_date.tsv and changelogs after v22.12.2.25-stable [#44988](https://github.com/ClickHouse/ClickHouse/pull/44988) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix test test_grpc_protocol/test.py::test_progress [#44996](https://github.com/ClickHouse/ClickHouse/pull/44996) ([Vitaly Baranov](https://github.com/vitlibar)).
* Improve S3 EC2 metadata tests [#45001](https://github.com/ClickHouse/ClickHouse/pull/45001) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix minmax_count_projection with _partition_value [#45003](https://github.com/ClickHouse/ClickHouse/pull/45003) ([Amos Bird](https://github.com/amosbird)).
* Fix strange trash in Fuzzer [#45006](https://github.com/ClickHouse/ClickHouse/pull/45006) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add `dmesg.log` to Fuzzer [#45008](https://github.com/ClickHouse/ClickHouse/pull/45008) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `01961_roaring_memory_tracking` test, again [#45009](https://github.com/ClickHouse/ClickHouse/pull/45009) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Recognize more ok cases for Fuzzer [#45012](https://github.com/ClickHouse/ClickHouse/pull/45012) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Supposedly fix the "Download script failed" error [#45013](https://github.com/ClickHouse/ClickHouse/pull/45013) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add snapshot creation retry in Keeper tests using ZooKeeper [#45016](https://github.com/ClickHouse/ClickHouse/pull/45016) ([Antonio Andelic](https://github.com/antonio2368)).
* test for [#20098](https://github.com/ClickHouse/ClickHouse/issues/20098) [#45017](https://github.com/ClickHouse/ClickHouse/pull/45017) ([Denny Crane](https://github.com/den-crane)).
* test for [#26473](https://github.com/ClickHouse/ClickHouse/issues/26473) [#45018](https://github.com/ClickHouse/ClickHouse/pull/45018) ([Denny Crane](https://github.com/den-crane)).
* Remove the remainings of Testflows (2). [#45021](https://github.com/ClickHouse/ClickHouse/pull/45021) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Enable the check that was commented [#45022](https://github.com/ClickHouse/ClickHouse/pull/45022) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix false positive in Fuzzer [#45025](https://github.com/ClickHouse/ClickHouse/pull/45025) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix false positive in Fuzzer, alternative variant [#45026](https://github.com/ClickHouse/ClickHouse/pull/45026) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix function `range` (the bug was unreleased) [#45030](https://github.com/ClickHouse/ClickHouse/pull/45030) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix OOM in Fuzzer [#45032](https://github.com/ClickHouse/ClickHouse/pull/45032) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Less OOM in Stress test [#45033](https://github.com/ClickHouse/ClickHouse/pull/45033) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#31361](https://github.com/ClickHouse/ClickHouse/issues/31361) [#45034](https://github.com/ClickHouse/ClickHouse/pull/45034) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#38729](https://github.com/ClickHouse/ClickHouse/issues/38729) [#45035](https://github.com/ClickHouse/ClickHouse/pull/45035) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix typos [#45036](https://github.com/ClickHouse/ClickHouse/pull/45036) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* I didn't understand the logic of this test, @azat [#45037](https://github.com/ClickHouse/ClickHouse/pull/45037) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Small fixes for Coordination unit tests [#45039](https://github.com/ClickHouse/ClickHouse/pull/45039) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix flaky test (hilarious) [#45042](https://github.com/ClickHouse/ClickHouse/pull/45042) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Non significant changes [#45046](https://github.com/ClickHouse/ClickHouse/pull/45046) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Don't fix parallel formatting [#45050](https://github.com/ClickHouse/ClickHouse/pull/45050) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix (benign) data race in clickhouse-client [#45053](https://github.com/ClickHouse/ClickHouse/pull/45053) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Analyzer aggregation without column fix [#45055](https://github.com/ClickHouse/ClickHouse/pull/45055) ([Maksim Kita](https://github.com/kitaisreal)).
* Analyzer ARRAY JOIN crash fix [#45059](https://github.com/ClickHouse/ClickHouse/pull/45059) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix data race in openSQLiteDB [#45062](https://github.com/ClickHouse/ClickHouse/pull/45062) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Analyzer function IN crash fix [#45064](https://github.com/ClickHouse/ClickHouse/pull/45064) ([Maksim Kita](https://github.com/kitaisreal)).
* JIT compilation float to bool conversion fix [#45067](https://github.com/ClickHouse/ClickHouse/pull/45067) ([Maksim Kita](https://github.com/kitaisreal)).
* Update version_date.tsv and changelogs after v22.11.3.47-stable [#45069](https://github.com/ClickHouse/ClickHouse/pull/45069) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.10.5.54-stable [#45071](https://github.com/ClickHouse/ClickHouse/pull/45071) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.3.16.1190-lts [#45073](https://github.com/ClickHouse/ClickHouse/pull/45073) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Improve release scripts [#45074](https://github.com/ClickHouse/ClickHouse/pull/45074) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Change the color of links in dark reports a little bit [#45077](https://github.com/ClickHouse/ClickHouse/pull/45077) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix Fuzzer script [#45082](https://github.com/ClickHouse/ClickHouse/pull/45082) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Try fixing KeeperMap tests [#45094](https://github.com/ClickHouse/ClickHouse/pull/45094) ([Antonio Andelic](https://github.com/antonio2368)).
* Update version_date.tsv and changelogs after v22.8.12.45-lts [#45098](https://github.com/ClickHouse/ClickHouse/pull/45098) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Try to fix flaky test_create_user_and_login/test.py::test_login_as_dropped_user_xml [#45099](https://github.com/ClickHouse/ClickHouse/pull/45099) ([Ilya Yatsishin](https://github.com/qoega)).
* Update version_date.tsv and changelogs after v22.10.6.3-stable [#45107](https://github.com/ClickHouse/ClickHouse/pull/45107) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Docs: Make heading consistent with other headings in System Table docs [#45109](https://github.com/ClickHouse/ClickHouse/pull/45109) ([Robert Schulze](https://github.com/rschu1ze)).
* Update version_date.tsv and changelogs after v22.11.4.3-stable [#45110](https://github.com/ClickHouse/ClickHouse/pull/45110) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.12.3.5-stable [#45113](https://github.com/ClickHouse/ClickHouse/pull/45113) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Docs: Rewrite awkwardly phrased sentence about flush interval [#45114](https://github.com/ClickHouse/ClickHouse/pull/45114) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix data race in s3Cluster. [#45123](https://github.com/ClickHouse/ClickHouse/pull/45123) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Pull SQLancer image before check run [#45125](https://github.com/ClickHouse/ClickHouse/pull/45125) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix flaky azure test [#45134](https://github.com/ClickHouse/ClickHouse/pull/45134) ([alesapin](https://github.com/alesapin)).
* Minor cleanup in stress/run.sh [#45136](https://github.com/ClickHouse/ClickHouse/pull/45136) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Performance report: "Partial queries" --> "Backward-incompatible queries" [#45152](https://github.com/ClickHouse/ClickHouse/pull/45152) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix flaky test_tcp_handler_interserver_listen_host [#45156](https://github.com/ClickHouse/ClickHouse/pull/45156) ([Ilya Yatsishin](https://github.com/qoega)).
* Clean trash from changelog for v22.3.16.1190-lts [#45159](https://github.com/ClickHouse/ClickHouse/pull/45159) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Disable `test_storage_rabbitmq` [#45161](https://github.com/ClickHouse/ClickHouse/pull/45161) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_ttl_move_memory_usage as too flaky. [#45162](https://github.com/ClickHouse/ClickHouse/pull/45162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* More logging to facilitate debugging of flaky test_ttl_replicated [#45165](https://github.com/ClickHouse/ClickHouse/pull/45165) ([Alexander Gololobov](https://github.com/davenger)).
* Try to fix flaky test_ttl_move_memory_usage [#45168](https://github.com/ClickHouse/ClickHouse/pull/45168) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flaky test test_multiple_disks/test.py::test_rename [#45180](https://github.com/ClickHouse/ClickHouse/pull/45180) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Calculate only required columns in system.detached_parts [#45181](https://github.com/ClickHouse/ClickHouse/pull/45181) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Restart NightlyBuilds if the runner died [#45187](https://github.com/ClickHouse/ClickHouse/pull/45187) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix part ID generation for IP types for backward compatibility [#45191](https://github.com/ClickHouse/ClickHouse/pull/45191) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix integration test test_replicated_users::test_rename_replicated [#45192](https://github.com/ClickHouse/ClickHouse/pull/45192) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add CACHE_INVALIDATOR for sqlancer builds [#45201](https://github.com/ClickHouse/ClickHouse/pull/45201) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix possible stack-use-after-return in LimitReadBuffer [#45203](https://github.com/ClickHouse/ClickHouse/pull/45203) ([Kruglov Pavel](https://github.com/Avogar)).
* Disable check to make test_overcommit_tracker not flaky [#45206](https://github.com/ClickHouse/ClickHouse/pull/45206) ([Dmitry Novik](https://github.com/novikd)).
* Fix flaky test `01961_roaring_memory_tracking` (3) [#45208](https://github.com/ClickHouse/ClickHouse/pull/45208) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove trash from stress test [#45211](https://github.com/ClickHouse/ClickHouse/pull/45211) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove unused function [#45212](https://github.com/ClickHouse/ClickHouse/pull/45212) ([flynn](https://github.com/ucasfl)).
* Fix flaky `test_keeper_three_nodes_two_alive` [#45213](https://github.com/ClickHouse/ClickHouse/pull/45213) ([Antonio Andelic](https://github.com/antonio2368)).
* Fuzz PREWHERE clause [#45222](https://github.com/ClickHouse/ClickHouse/pull/45222) ([Alexander Gololobov](https://github.com/davenger)).
* Added a test for merge join key condition with big int & decimal [#45228](https://github.com/ClickHouse/ClickHouse/pull/45228) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix rare logical error: `Too large alignment` [#45229](https://github.com/ClickHouse/ClickHouse/pull/45229) ([Anton Popov](https://github.com/CurtizJ)).
* Update version_date.tsv and changelogs after v22.3.17.13-lts [#45234](https://github.com/ClickHouse/ClickHouse/pull/45234) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* More verbose logs about replication log entries [#45235](https://github.com/ClickHouse/ClickHouse/pull/45235) ([Alexander Tokmakov](https://github.com/tavplubix)).
* One more attempt to fix race in TCPHandler [#45240](https://github.com/ClickHouse/ClickHouse/pull/45240) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Update clickhouse-test [#45251](https://github.com/ClickHouse/ClickHouse/pull/45251) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Planner small fixes [#45254](https://github.com/ClickHouse/ClickHouse/pull/45254) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix log level "Test" for send_logs_level in client [#45273](https://github.com/ClickHouse/ClickHouse/pull/45273) ([Azat Khuzhin](https://github.com/azat)).
* tests: fix clickhouse binaries detection [#45283](https://github.com/ClickHouse/ClickHouse/pull/45283) ([Azat Khuzhin](https://github.com/azat)).
* tests/ci: encode HTML entities in the reports [#45284](https://github.com/ClickHouse/ClickHouse/pull/45284) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Disable `02151_hash_table_sizes_stats_distributed` under TSAN [#45287](https://github.com/ClickHouse/ClickHouse/pull/45287) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Fix wrong approved_at, simplify conditions [#45302](https://github.com/ClickHouse/ClickHouse/pull/45302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Disable 02028_create_select_settings with Ordinary [#45307](https://github.com/ClickHouse/ClickHouse/pull/45307) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Save message format strings for DB::Exception [#45342](https://github.com/ClickHouse/ClickHouse/pull/45342) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Slightly better output for glibc check [#45353](https://github.com/ClickHouse/ClickHouse/pull/45353) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Add checks for compilation of regexps [#45356](https://github.com/ClickHouse/ClickHouse/pull/45356) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Analyzer compound identifier typo correction fix [#45357](https://github.com/ClickHouse/ClickHouse/pull/45357) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Bump to newer version of debug-action [#45359](https://github.com/ClickHouse/ClickHouse/pull/45359) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||
* Improve failed kafka startup logging [#45369](https://github.com/ClickHouse/ClickHouse/pull/45369) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||
* Fix flaky ttl test [#45370](https://github.com/ClickHouse/ClickHouse/pull/45370) ([alesapin](https://github.com/alesapin)).
|
||||
* Add detailed profile events for throttling [#45373](https://github.com/ClickHouse/ClickHouse/pull/45373) ([Sergei Trifonov](https://github.com/serxa)).
|
||||
* Update .gitignore [#45378](https://github.com/ClickHouse/ClickHouse/pull/45378) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Make test simpler to see errors [#45402](https://github.com/ClickHouse/ClickHouse/pull/45402) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||
* Reduce an amount of trash in `tests_system_merges` [#45403](https://github.com/ClickHouse/ClickHouse/pull/45403) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Fix reading from encrypted disk with passed file size [#45418](https://github.com/ClickHouse/ClickHouse/pull/45418) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Add delete by ttl for zookeeper_log [#45419](https://github.com/ClickHouse/ClickHouse/pull/45419) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Get rid of artifactory in favor of r2 + ch-repos-manager [#45421](https://github.com/ClickHouse/ClickHouse/pull/45421) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Minor improvements around reading from remote [#45442](https://github.com/ClickHouse/ClickHouse/pull/45442) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Docs: Beautify section on secondary index types [#45444](https://github.com/ClickHouse/ClickHouse/pull/45444) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Fix Buffer's offsets mismatch logical error in stress test [#45446](https://github.com/ClickHouse/ClickHouse/pull/45446) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Better formatting for exception messages [#45449](https://github.com/ClickHouse/ClickHouse/pull/45449) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Add default GRANULARITY argument for secondary indexes [#45451](https://github.com/ClickHouse/ClickHouse/pull/45451) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Cleanup of inverted index [#45460](https://github.com/ClickHouse/ClickHouse/pull/45460) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* CherryPick: Fix a wrong staring search date [#45466](https://github.com/ClickHouse/ClickHouse/pull/45466) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Fix typos [#45470](https://github.com/ClickHouse/ClickHouse/pull/45470) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Fix possible aborts in arrow lib [#45478](https://github.com/ClickHouse/ClickHouse/pull/45478) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Add more retries to AST Fuzzer [#45479](https://github.com/ClickHouse/ClickHouse/pull/45479) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Fix schema inference from insertion table in hdfsCluster [#45483](https://github.com/ClickHouse/ClickHouse/pull/45483) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Remove unnecessary getTotalRowCount function calls [#45485](https://github.com/ClickHouse/ClickHouse/pull/45485) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Use new copy s3 functions in S3ObjectStorage [#45487](https://github.com/ClickHouse/ClickHouse/pull/45487) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Forward declaration of ConcurrentBoundedQueue in ThreadStatus [#45489](https://github.com/ClickHouse/ClickHouse/pull/45489) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Revert "Merge pull request [#44922](https://github.com/ClickHouse/ClickHouse/issues/44922) from azat/dist/async-INSERT-metrics" [#45492](https://github.com/ClickHouse/ClickHouse/pull/45492) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Docs: Fix weird formatting [#45495](https://github.com/ClickHouse/ClickHouse/pull/45495) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Docs: Fix link to writing guide [#45496](https://github.com/ClickHouse/ClickHouse/pull/45496) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Improve logging for TeePopen.timeout exceeded [#45504](https://github.com/ClickHouse/ClickHouse/pull/45504) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Fix MSan build once again (too heavy translation units) [#45512](https://github.com/ClickHouse/ClickHouse/pull/45512) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Additional check in MergeTreeReadPool [#45515](https://github.com/ClickHouse/ClickHouse/pull/45515) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Update test_system_merges/test.py [#45516](https://github.com/ClickHouse/ClickHouse/pull/45516) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Revert "Merge pull request [#45493](https://github.com/ClickHouse/ClickHouse/issues/45493) from azat/fix-detach" [#45545](https://github.com/ClickHouse/ClickHouse/pull/45545) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Update stress [#45546](https://github.com/ClickHouse/ClickHouse/pull/45546) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Ignore utf errors in clickhouse-test reportLogStats [#45556](https://github.com/ClickHouse/ClickHouse/pull/45556) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Resubmit "Fix possible in-use table after DETACH" [#45566](https://github.com/ClickHouse/ClickHouse/pull/45566) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Typo: "Granulesis" --> "Granules" [#45598](https://github.com/ClickHouse/ClickHouse/pull/45598) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Fix version in autogenerated_versions.txt [#45624](https://github.com/ClickHouse/ClickHouse/pull/45624) ([Dmitry Novik](https://github.com/novikd)).
|
||||
|
@ -21,6 +21,13 @@ ENGINE = HDFS(URI, format)
`SELECT` queries, the format must be supported for input, and to perform
`INSERT` queries – for output. The available formats are listed in the
[Formats](../../../interfaces/formats.md#formats) section.
- [PARTITION BY expr]

### PARTITION BY

`PARTITION BY` — Optional. In most cases you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).

For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.

**Example:**
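A hedged sketch of what such a definition might look like with monthly partitioning (hypothetical table, column names, and URI; it assumes the `{_partition_id}` path substitution works for HDFS as it does for the other file-like engines):

``` sql
CREATE TABLE hdfs_engine_table (name String, value UInt32, day Date)
    ENGINE = HDFS('hdfs://hdfs1:9000/data_{_partition_id}', 'TSV')
    PARTITION BY toYYYYMM(day)
```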
@ -13,6 +13,7 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec
``` sql
CREATE TABLE s3_engine_table (name String, value UInt32)
    ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
    [PARTITION BY expr]
    [SETTINGS ...]
```

@ -23,6 +24,12 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will autodetect compression by file extension.

### PARTITION BY

`PARTITION BY` — Optional. In most cases you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).

For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.

**Example**

``` sql
@ -1,15 +1,22 @@
# Inverted indexes [experimental] {#table_engines-ANNIndex}
---
slug: /en/engines/table-engines/mergetree-family/invertedindexes
sidebar_label: Inverted Indexes
description: Quickly find search terms in text.
keywords: [full-text search, text search]
---

Inverted indexes are an experimental type of [secondary indexes](mergetree.md#available-types-of-indices) which provide fast text search
capabilities for [String](../../../sql-reference/data-types/string.md) or [FixedString](../../../sql-reference/data-types/fixedstring.md)
columns. The main idea of an inverted indexes is to store a mapping from "terms" to the rows which contains these terms. "Terms" are
tokenized cells of the string column. For example, string cell "I will be a little late" is by default tokenized into six terms "I", "will",
"be", "a", "little" and "late". Another kind of tokenizer are n-grams. For example, the result of 3-gram tokenization will be 21 terms "I w",
# Inverted indexes [experimental]

Inverted indexes are an experimental type of [secondary indexes](/docs/en/engines/table-engines/mergetree-family/mergetree.md/#available-types-of-indices) which provide fast text search
capabilities for [String](/docs/en/sql-reference/data-types/string.md) or [FixedString](/docs/en/sql-reference/data-types/fixedstring.md)
columns. The main idea of an inverted index is to store a mapping from "terms" to the rows which contain these terms. "Terms" are
tokenized cells of the string column. For example, the string cell "I will be a little late" is by default tokenized into six terms "I", "will",
"be", "a", "little" and "late". Another kind of tokenizer is n-grams. For example, the result of 3-gram tokenization will be 21 terms "I w",
" wi", "wil", "ill", "ll ", "l b", " be" etc. The more fine-granular the input strings are tokenized, the bigger but also the more
useful the resulting inverted index will be.

:::warning
Inverted indexes are experimental and should not be used in production environment yet. They may change in future in backwards-incompatible
Inverted indexes are experimental and should not be used in production environments yet. They may change in the future in backward-incompatible
ways, for example with respect to their DDL/DQL syntax or performance/compression characteristics.
:::

@ -24,7 +31,14 @@ SET allow_experimental_inverted_index = true;
An inverted index can be defined on a string column using the following syntax

``` sql
CREATE TABLE tab (key UInt64, str String, INDEX inv_idx(s) TYPE inverted(N) GRANULARITY 1) Engine=MergeTree ORDER BY (k);
CREATE TABLE tab
(
    `key` UInt64,
    `str` String,
    INDEX inv_idx(str) TYPE inverted(0) GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY key
```

where `N` specifies the tokenizer:
@ -32,7 +46,7 @@ where `N` specifies the tokenizer:
- `inverted(0)` (or shorter: `inverted()`) set the tokenizer to "tokens", i.e. split strings along spaces,
- `inverted(N)` with `N` between 2 and 8 sets the tokenizer to "ngrams(N)"

Being a type of skipping indexes, inverted indexes can be dropped or added to a column after table creation:
Being a type of skipping index, inverted indexes can be dropped or added to a column after table creation:

``` sql
ALTER TABLE tbl DROP INDEX inv_idx;
@ -54,13 +68,13 @@ SELECT * from tab WHERE multiSearchAll(s, [‘Hello’, ‘World’]);
The inverted index also works on columns of type `Array(String)`, `Array(FixedString)`, `Map(String)` and `Map(String)`.

Like for other secondary indices, each column part has its own inverted index. Furthermore, each inverted index is internally divided into
"segments". The existence and size of the segments is generally transparent to users but the segment size determines the memory consumption
"segments". The existence and size of the segments are generally transparent to users but the segment size determines the memory consumption
during index construction (e.g. when two parts are merged). Configuration parameter "max_digestion_size_per_segment" (default: 256 MB)
controls the amount of data read consumed from the underlying column before a new segment is created. Incrementing the parameter raises the
intermediate memory consumption for index constuction but also improves lookup performance since fewer segments need to be checked on
intermediate memory consumption for index construction but also improves lookup performance since fewer segments need to be checked on
average to evaluate a query.

Unlike other secondary indices, inverted indexes (for now) map to row numbers (row ids) instead of granule ids. The reason for this design
is performance. In practice, users often search for multiple terms at once. For example, filter predicate `WHERE s LIKE '%little%' OR s LIKE
'%big%'` can be evaluated directly using an inverted index by forming the union of the rowid lists for terms "little" and "big". This also
means that parameter `GRANULARITY` supplied to index creation has no meaning (it may be removed from the syntax in future).
'%big%'` can be evaluated directly using an inverted index by forming the union of the row id lists for terms "little" and "big". This also
means that the parameter `GRANULARITY` supplied to index creation has no meaning (it may be removed from the syntax in the future).
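To make this concrete, a small sketch of a text-search query that such an index can accelerate (assuming the `tab` table created above; `hasToken` is one of the token-matching predicates these indexes support):

``` sql
SELECT count() FROM tab WHERE hasToken(str, 'little');
```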
@ -77,7 +77,7 @@ Use the `ORDER BY tuple()` syntax, if you do not need sorting. See [Selecting th

#### PARTITION BY

`PARTITION BY` — The [partitioning key](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md). Optional. In most cases you don't need partition key, and in most other cases you don't need partition key more granular than by months. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead make client identifier or name the first column in the ORDER BY expression).
`PARTITION BY` — The [partitioning key](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md). Optional. In most cases, you don't need a partition key, and if you do need to partition, generally you do not need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).

For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
@ -86,3 +86,9 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64
- `SELECT ... SAMPLE`
- Indices
- Replication

## PARTITION BY

`PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).

For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
@ -96,3 +96,9 @@ SELECT * FROM url_engine_table
- `ALTER` and `SELECT...SAMPLE` operations.
- Indexes.
- Replication.

## PARTITION BY

`PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).

For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
259
docs/en/getting-started/example-datasets/laion.md
Normal file
@ -0,0 +1,259 @@
# Laion-400M dataset

The dataset contains 400 million images with English text. For more information follow this [link](https://laion.ai/blog/laion-400-open-dataset/). Laion provides even larger datasets (e.g. [5 billion](https://laion.ai/blog/laion-5b/)). Working with them will be similar.

The dataset has prepared embeddings for texts and images. This will be used to demonstrate [Approximate nearest neighbor search indexes](../../engines/table-engines/mergetree-family/annindexes.md).

## Prepare data

Embeddings are stored in `.npy` files, so we have to read them with Python and merge them with the other data.

Download the data and process it with a simple `download.sh` script:

```bash
wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/img_emb/img_emb_${1}.npy
wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/metadata/metadata_${1}.parquet
wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/text_emb/text_emb_${1}.npy
python3 process.py ${1}
```

Where `process.py`:
```python
import pandas as pd
import numpy as np
import os
import sys

str_i = str(sys.argv[1])
npy_file = "img_emb_" + str_i + '.npy'
metadata_file = "metadata_" + str_i + '.parquet'
text_npy = "text_emb_" + str_i + '.npy'

# load all files
im_emb = np.load(npy_file)
text_emb = np.load(text_npy)
data = pd.read_parquet(metadata_file)

# combine them
data = pd.concat([data, pd.DataFrame({"image_embedding" : [*im_emb]}), pd.DataFrame({"text_embedding" : [*text_emb]})], axis=1, copy=False)

# you can save more columns
data = data[['url', 'caption', 'similarity', "image_embedding", "text_embedding"]]

# transform np.arrays to lists
data['image_embedding'] = data['image_embedding'].apply(lambda x: list(x))
data['text_embedding'] = data['text_embedding'].apply(lambda x: list(x))

# this small hack is needed because the caption sometimes contains all kinds of quotes
data['caption'] = data['caption'].apply(lambda x: x.replace("'", " ").replace('"', " "))

# save data to file
data.to_csv(str_i + '.csv', header=False)

# previous files can be removed
os.system(f"rm {npy_file} {metadata_file} {text_npy}")
```
You can download the data with

```bash
seq 0 409 | xargs -P100 -I{} bash -c './download.sh {}'
```

The dataset is divided into 409 files. If you want to work only with a certain part of the dataset, just change the limits.

## Create table for laion

Without indexes, the table can be created by:

```sql
CREATE TABLE laion_dataset
(
    `id` Int64,
    `url` String,
    `caption` String,
    `similarity` Float32,
    `image_embedding` Array(Float32),
    `text_embedding` Array(Float32)
)
ENGINE = MergeTree
ORDER BY id
SETTINGS index_granularity = 8192
```

Fill the table with data:

```sql
INSERT INTO laion_dataset FROM INFILE '{path_to_csv_files}/*.csv'
```

## Check data in table without indexes

Let's check how the following query works on a part of the dataset (8 million records):

```sql
select url, caption from test_laion where similarity > 0.2 order by L2Distance(image_embedding, {target:Array(Float32)}) limit 30
```

Since the embeddings for images and texts may not match, let's also require a certain threshold of matching accuracy to get images that are more likely to satisfy our queries. The query uses the client parameter `target`, which is an array of 512 elements. See later in this article for a convenient way of obtaining such vectors. I used a random picture of a cat from the Internet as a target vector.

**The result**

```
┌─url───────────────────────────────────────────────────────────────────────────────────────────────────────────┬─caption────────────────────────────────────────────────────────────────┐
│ https://s3.amazonaws.com/filestore.rescuegroups.org/6685/pictures/animals/13884/13884995/63318230_463x463.jpg │ Adoptable Female Domestic Short Hair │
│ https://s3.amazonaws.com/pet-uploads.adoptapet.com/8/b/6/239905226.jpg │ Adopt A Pet :: Marzipan - New York, NY │
│ http://d1n3ar4lqtlydb.cloudfront.net/9/2/4/248407625.jpg │ Adopt A Pet :: Butterscotch - New Castle, DE │
│ https://s3.amazonaws.com/pet-uploads.adoptapet.com/e/e/c/245615237.jpg │ Adopt A Pet :: Tiggy - Chicago, IL │
│ http://pawsofcoronado.org/wp-content/uploads/2012/12/rsz_pumpkin.jpg │ Pumpkin an orange tabby kitten for adoption │
│ https://s3.amazonaws.com/pet-uploads.adoptapet.com/7/8/3/188700997.jpg │ Adopt A Pet :: Brian the Brad Pitt of cats - Frankfort, IL │
│ https://s3.amazonaws.com/pet-uploads.adoptapet.com/8/b/d/191533561.jpg │ Domestic Shorthair Cat for adoption in Mesa, Arizona - Charlie │
│ https://s3.amazonaws.com/pet-uploads.adoptapet.com/0/1/2/221698235.jpg │ Domestic Shorthair Cat for adoption in Marietta, Ohio - Daisy (Spayed) │
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────┴────────────────────────────────────────────────────────────────────────┘

8 rows in set. Elapsed: 6.432 sec. Processed 19.65 million rows, 43.96 GB (3.06 million rows/s., 6.84 GB/s.)
```
## Add indexes

Create a new table or follow instructions from the [alter documentation](../../sql-reference/statements/alter/skipping-index.md).

```sql
CREATE TABLE laion_dataset
(
    `id` Int64,
    `url` String,
    `caption` String,
    `similarity` Float32,
    `image_embedding` Array(Float32),
    `text_embedding` Array(Float32),
    INDEX annoy_image image_embedding TYPE annoy(1000) GRANULARITY 1000,
    INDEX annoy_text text_embedding TYPE annoy(1000) GRANULARITY 1000
)
ENGINE = MergeTree
ORDER BY id
SETTINGS index_granularity = 8192
```

When created, the index will be built by L2Distance. You can read more about the parameters in the [annoy documentation](../../engines/table-engines/mergetree-family/annindexes.md#annoy-annoy). It makes sense to build indexes for a large number of granules. If you need good speed, then GRANULARITY should be several times larger than the expected number of results in the search.

Now let's check again with the same query:

```sql
select url, caption from test_indexes_laion where similarity > 0.2 order by L2Distance(image_embedding, {target:Array(Float32)}) limit 8
```

**Result**

```
┌─url──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬─caption──────────────────────────────────────────────────────────────┐
│ http://tse1.mm.bing.net/th?id=OIP.R1CUoYp_4hbeFSHBaaB5-gHaFj │ bed bugs and pets can cats carry bed bugs pets adviser │
│ http://pet-uploads.adoptapet.com/1/9/c/1963194.jpg?336w │ Domestic Longhair Cat for adoption in Quincy, Massachusetts - Ashley │
│ https://thumbs.dreamstime.com/t/cat-bed-12591021.jpg │ Cat on bed Stock Image │
│ https://us.123rf.com/450wm/penta/penta1105/penta110500004/9658511-portrait-of-british-short-hair-kitten-lieing-at-sofa-on-sun.jpg │ Portrait of british short hair kitten lieing at sofa on sun. │
│ https://www.easypetmd.com/sites/default/files/Wirehaired%20Vizsla%20(2).jpg │ Vizsla (Wirehaired) image 3 │
│ https://images.ctfassets.net/yixw23k2v6vo/0000000200009b8800000000/7950f4e1c1db335ef91bb2bc34428de9/dog-cat-flickr-Impatience_1.jpg?w=600&h=400&fm=jpg&fit=thumb&q=65&fl=progressive │ dog and cat image │
│ https://i1.wallbox.ru/wallpapers/small/201523/eaa582ee76a31fd.jpg │ cats, kittens, faces, tonkinese │
│ https://www.baxterboo.com/images/breeds/medium/cairn-terrier.jpg │ Cairn Terrier Photo │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────────────────────────────────────────────────────┘

8 rows in set. Elapsed: 0.641 sec. Processed 22.06 thousand rows, 49.36 MB (91.53 thousand rows/s., 204.81 MB/s.)
```

The speed has increased significantly. But now, the results sometimes differ from what you are looking for. This is due to the approximation of the search and the quality of the constructed embedding. Note that the example was given for picture embeddings, but there are also text embeddings in the dataset, which can also be used for searching.
## Scripts for embeddings

Usually, we do not want to get embeddings from existing data, but to get them for new data and look for similar ones in the old data. We can use [UDF](../../sql-reference/functions/index.md#sql-user-defined-functions) for this purpose. They will allow you to set the `target` vector without leaving the client. All of the following scripts will be written for the `ViT-B/32` model, as it was used for this dataset. You can use any model, but it is necessary to build embeddings in the dataset and for new objects using the same model.

### Text embeddings

`encode_text.py`:
```python
#!/usr/bin/python3
import clip
import torch
import numpy as np
import sys

if __name__ == '__main__':
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, preprocess = clip.load("ViT-B/32", device=device)
    for text in sys.stdin:
        inputs = clip.tokenize(text)
        with torch.no_grad():
            text_features = model.encode_text(inputs)[0].tolist()
            # write the embedding back to ClickHouse (this line was missing)
            print(text_features)
        sys.stdout.flush()
```

`encode_text_function.xml`:
```xml
<functions>
    <function>
        <type>executable</type>
        <name>encode_text</name>
        <return_type>Array(Float32)</return_type>
        <argument>
            <type>String</type>
            <name>text</name>
        </argument>
        <format>TabSeparated</format>
        <command>encode_text.py</command>
        <command_read_timeout>1000000</command_read_timeout>
    </function>
</functions>
```

Now we can simply use:

```sql
SELECT encode_text('cat');
```

The first use will be slow because the model needs to be loaded. But repeated queries will be fast. Then we copy the results to `SET param_target=...` and can easily write queries.
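A sketch of that workflow (assuming the `laion_dataset` table from earlier; the array literal is abbreviated here and must be the full 512-element output of `encode_text`):

```sql
-- paste the array returned by encode_text('cat') into the parameter
SET param_target = [0.017, -0.031, ...];

SELECT url, caption
FROM laion_dataset
ORDER BY L2Distance(text_embedding, {target:Array(Float32)})
LIMIT 10;
```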
### Image embeddings

For pictures the process is similar, but you send the path to a picture instead of its text (if necessary, you could implement downloading the picture with processing, but it would take longer):

`encode_picture.py`
```python
#!/usr/bin/python3
import clip
import torch
import numpy as np
from PIL import Image
import sys

if __name__ == '__main__':
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, preprocess = clip.load("ViT-B/32", device=device)
    for text in sys.stdin:
        image = preprocess(Image.open(text.strip())).unsqueeze(0).to(device)
        with torch.no_grad():
            image_features = model.encode_image(image)[0].tolist()
            print(image_features)
        sys.stdout.flush()
```

`encode_picture_function.xml`
```xml
<functions>
    <function>
        <type>executable_pool</type>
        <name>encode_picture</name>
        <return_type>Array(Float32)</return_type>
        <argument>
            <type>String</type>
            <name>path</name>
        </argument>
        <format>TabSeparated</format>
        <command>encode_picture.py</command>
        <command_read_timeout>1000000</command_read_timeout>
    </function>
</functions>
```

The query:
```sql
SELECT encode_picture('some/path/to/your/picture');
```
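Alternatively, the UDF can be invoked inline, so that encoding and searching happen in one query (a sketch, assuming the `laion_dataset` table and the `encode_picture` UDF defined above):

```sql
SELECT url, caption
FROM laion_dataset
ORDER BY L2Distance(image_embedding, encode_picture('some/path/to/your/picture'))
LIMIT 10;
```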
@ -22,6 +22,7 @@ functions in ClickHouse. The sample datasets include:
- The [Cell Towers dataset](../getting-started/example-datasets/cell-towers.md) imports a CSV into ClickHouse
- The [NYPD Complaint Data](../getting-started/example-datasets/nypd_complaint_data.md) demonstrates how to use data inference to simplify creating tables
- The ["What's on the Menu?" dataset](../getting-started/example-datasets/menus.md) has an example of denormalizing data
- The [Laion dataset](../getting-started/example-datasets/laion.md) has an example of [Approximate nearest neighbor search indexes](../engines/table-engines/mergetree-family/annindexes.md) usage
- [Getting Data Into ClickHouse - Part 1](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1) provides examples of defining a schema and loading a small Hacker News dataset
- [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3) has examples of loading data from s3
- [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse) shows how to generate random data if none of the above fit your needs.
@ -83,6 +83,7 @@ The supported formats are:
| [RawBLOB](#rawblob) | ✔ | ✔ |
| [MsgPack](#msgpack) | ✔ | ✔ |
| [MySQLDump](#mysqldump) | ✔ | ✗ |
| [Markdown](#markdown) | ✗ | ✔ |

You can control some format processing parameters with the ClickHouse settings. For more information read the [Settings](/docs/en/operations/settings/settings-formats.md) section.
@ -2347,3 +2348,26 @@ FROM file(dump.sql, MySQLDump)
│ 3 │
└───┘
```

## Markdown {#markdown}

You can export results using [Markdown](https://en.wikipedia.org/wiki/Markdown) format to generate output ready to be pasted into your `.md` files:

```sql
SELECT
    number,
    number * 2
FROM numbers(5)
FORMAT Markdown
```
```results
| number | multiply(number, 2) |
|-:|-:|
| 0 | 0 |
| 1 | 2 |
| 2 | 4 |
| 3 | 6 |
| 4 | 8 |
```

The Markdown table will be generated automatically and can be used on Markdown-enabled platforms, like GitHub. This format is used only for output.
@ -106,21 +106,20 @@ Possible values:
Default value: 1.

The delay (in milliseconds) for `INSERT` is calculated by the formula:

```code
max_k = parts_to_throw_insert - parts_to_delay_insert
k = 1 + parts_count_in_partition - parts_to_delay_insert
delay_milliseconds = pow(max_delay_to_insert * 1000, k / max_k)
```
For example, if a partition has 299 active parts and parts_to_throw_insert = 300, parts_to_delay_insert = 150, max_delay_to_insert = 1, `INSERT` is delayed for `pow( 1 * 1000, (1 + 299 - 150) / (300 - 150) ) = 1000` milliseconds.

Starting from version 23.1, the formula has been changed to:
```code
allowed_parts_over_threshold = parts_to_throw_insert - parts_to_delay_insert + 1
allowed_parts_over_threshold = parts_to_throw_insert - parts_to_delay_insert
parts_over_threshold = parts_count_in_partition - parts_to_delay_insert + 1
delay_milliseconds = max(min_delay_to_insert_ms, (max_delay_to_insert * 1000) * parts_over_threshold / allowed_parts_over_threshold)
```

For example if a partition has 299 active parts and parts_to_throw_insert = 300, parts_to_delay_insert = 150, max_delay_to_insert = 1, `INSERT` is delayed for `pow( 1 * 1000, (1 + 299 - 150) / (300 - 150) ) = 1000` milliseconds.
For example, if a partition has 224 active parts and parts_to_throw_insert = 300, parts_to_delay_insert = 150, max_delay_to_insert = 1, min_delay_to_insert_ms = 10, `INSERT` is delayed for `max( 10, 1 * 1000 * (224 - 150 + 1) / (300 - 150) ) = 500` milliseconds.
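As a quick sanity check of the 23.1 arithmetic above, the same expression can be evaluated directly in ClickHouse (a sketch; `greatest` stands in for the `max` of the pseudocode):

```sql
SELECT greatest(10, 1 * 1000 * (224 - 150 + 1) / (300 - 150)) AS delay_milliseconds
-- returns 500, matching the worked example above
```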
## max_parts_in_total {#max-parts-in-total}
@ -1915,6 +1915,21 @@ Possible values:

Default value: 0.

## optimize_skip_merged_partitions {#optimize-skip-merged-partitions}

Enables or disables optimization for the [OPTIMIZE TABLE ... FINAL](../../sql-reference/statements/optimize.md) query if there is only one part with level > 0 and it does not have an expired TTL.

- `OPTIMIZE TABLE ... FINAL SETTINGS optimize_skip_merged_partitions=1`

By default, the `OPTIMIZE TABLE ... FINAL` query rewrites the part even if there is only a single one.

Possible values:

- 1 - Enable optimization.
- 0 - Disable optimization.

Default value: 0.

## optimize_functions_to_subcolumns {#optimize-functions-to-subcolumns}

Enables or disables optimization by transforming some functions to reading subcolumns. This reduces the amount of data to read.
@ -9,6 +9,7 @@ Columns:

- `metric` ([String](../../sql-reference/data-types/string.md)) — Metric name.
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
- `description` ([String](../../sql-reference/data-types/string.md)) — Metric description.

**Example**

@ -17,18 +18,18 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
```

``` text
┌─metric──────────────────────────────────┬──────value─┐
│ jemalloc.background_thread.run_interval │ 0 │
│ jemalloc.background_thread.num_runs │ 0 │
│ jemalloc.background_thread.num_threads │ 0 │
│ jemalloc.retained │ 422551552 │
│ jemalloc.mapped │ 1682989056 │
│ jemalloc.resident │ 1656446976 │
│ jemalloc.metadata_thp │ 0 │
│ jemalloc.metadata │ 10226856 │
│ UncompressedCacheCells │ 0 │
│ MarkCacheFiles │ 0 │
└─────────────────────────────────────────┴────────────┘
┌─metric──────────────────────────────────┬──────value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ AsynchronousMetricsCalculationTimeSpent │ 0.00179053 │ Time in seconds spent for calculation of asynchronous metrics (this is the overhead of asynchronous metrics). │
│ NumberOfDetachedByUserParts │ 0 │ The total number of parts detached from MergeTree tables by users with the `ALTER TABLE DETACH` query (as opposed to unexpected, broken or ignored parts). The server does not care about detached parts and they can be removed. │
│ NumberOfDetachedParts │ 0 │ The total number of parts detached from MergeTree tables. A part can be detached by a user with the `ALTER TABLE DETACH` query or by the server itself if the part is broken, unexpected or unneeded. The server does not care about detached parts and they can be removed. │
│ TotalRowsOfMergeTreeTables │ 2781309 │ Total amount of rows (records) stored in all tables of MergeTree family. │
│ TotalBytesOfMergeTreeTables │ 7741926 │ Total amount of bytes (compressed, including data and indices) stored in all tables of MergeTree family. │
│ NumberOfTables │ 93 │ Total number of tables summed across the databases on the server, excluding the databases that cannot contain MergeTree tables. The excluded database engines are those who generate the set of tables on the fly, like `Lazy`, `MySQL`, `PostgreSQL`, `SQLite`. │
│ NumberOfDatabases │ 6 │ Total number of databases on the server. │
│ MaxPartCountForPartition │ 6 │ Maximum number of parts per partition across all partitions of all tables of MergeTree family. Values larger than 300 indicate misconfiguration, overload, or massive data loading. │
│ ReplicasSumMergesInQueue │ 0 │ Sum of merge operations in the queue (still to be applied) across Replicated tables. │
│ ReplicasSumInsertsInQueue │ 0 │ Sum of INSERT operations in the queue (still to be replicated) across Replicated tables. │
└─────────────────────────────────────────┴────────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```

**See Also**
@ -290,15 +290,11 @@ This storage method works the same way as hashed and allows using date/time (arb
Example: The table contains discounts for each advertiser in the format:

``` text
+---------|-------------|-------------|------+
| advertiser id | discount start date | discount end date | amount |
+===============+=====================+===================+========+
| 123 | 2015-01-01 | 2015-01-15 | 0.15 |
+---------|-------------|-------------|------+
| 123 | 2015-01-16 | 2015-01-31 | 0.25 |
+---------|-------------|-------------|------+
| 456 | 2015-01-01 | 2015-01-15 | 0.05 |
+---------|-------------|-------------|------+
┌─advertiser_id─┬─discount_start_date─┬─discount_end_date─┬─amount─┐
│ 123 │ 2015-01-16 │ 2015-01-31 │ 0.25 │
│ 123 │ 2015-01-01 │ 2015-01-15 │ 0.15 │
│ 456 │ 2015-01-01 │ 2015-01-15 │ 0.05 │
└───────────────┴─────────────────────┴───────────────────┴────────┘
```

To use a sample for date ranges, define the `range_min` and `range_max` elements in the [structure](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md). These elements must contain elements `name` and `type` (if `type` is not specified, the default type will be used - Date). `type` can be any numeric type (Date / DateTime / UInt64 / Int32 / others).
@ -307,19 +303,25 @@ To use a sample for date ranges, define the `range_min` and `range_max` elements
Values of `range_min` and `range_max` should fit in `Int64` type.
:::

Example:

``` xml
<layout>
    <range_hashed>
        <!-- Strategy for overlapping ranges (min/max). Default: min (return a matching range with the min(range_min -> range_max) value) -->
        <range_lookup_strategy>min</range_lookup_strategy>
    </range_hashed>
</layout>
<structure>
    <id>
        <name>Id</name>
        <name>advertiser_id</name>
    </id>
    <range_min>
        <name>first</name>
        <name>discount_start_date</name>
        <type>Date</type>
    </range_min>
    <range_max>
        <name>last</name>
        <name>discount_end_date</name>
        <type>Date</type>
    </range_max>
...
@ -328,17 +330,17 @@ Example:
or

``` sql
CREATE DICTIONARY somedict (
    id UInt64,
    first Date,
    last Date,
    advertiser_id UInt64
CREATE DICTIONARY discounts_dict (
    advertiser_id UInt64,
    discount_start_date Date,
    discount_end_date Date,
    amount Float64
)
PRIMARY KEY id
SOURCE(CLICKHOUSE(TABLE 'date_table'))
SOURCE(CLICKHOUSE(TABLE 'discounts'))
LIFETIME(MIN 1 MAX 1000)
LAYOUT(RANGE_HASHED())
RANGE(MIN first MAX last)
LAYOUT(RANGE_HASHED(range_lookup_strategy 'max'))
RANGE(MIN discount_start_date MAX discount_end_date)
```

To work with these dictionaries, you need to pass an additional argument to the `dictGet` function, for which a range is selected:
@ -349,16 +351,17 @@ dictGet('dict_name', 'attr_name', id, date)
Query example:

``` sql
SELECT dictGet('somedict', 'advertiser_id', 1, '2022-10-20 23:20:10.000'::DateTime64::UInt64);
SELECT dictGet('discounts_dict', 'amount', 1, '2022-10-20'::Date);
```

This function returns the value for the specified `id`s and the date range that includes the passed date.

Details of the algorithm:

- If the `id` is not found or a range is not found for the `id`, it returns the default value for the dictionary.
- If there are overlapping ranges, it returns value for any (random) range.
- If the range delimiter is `NULL` or an invalid date (such as 1900-01-01), the range is open. The range can be open on both sides.
- If the `id` is not found or a range is not found for the `id`, it returns the default value of the attribute's type.
- If there are overlapping ranges and `range_lookup_strategy=min`, it returns a matching range with the minimal `range_min`; if several ranges are found, it returns the range with the minimal `range_max`; if again several ranges are found (several ranges have the same `range_min` and `range_max`), it returns a random one of them.
- If there are overlapping ranges and `range_lookup_strategy=max`, it returns a matching range with the maximal `range_min`; if several ranges are found, it returns the range with the maximal `range_max`; if again several ranges are found (several ranges have the same `range_min` and `range_max`), it returns a random one of them.
- If the `range_max` is `NULL`, the range is open. `NULL` is treated as the maximal possible value. For the `range_min`, `1970-01-01` or `0` (-MAX_INT) can be used as the open value.

Configuration example:
@ -407,6 +410,108 @@ PRIMARY KEY Abcdef
RANGE(MIN StartTimeStamp MAX EndTimeStamp)
```

Configuration example with overlapping ranges and open ranges:

```sql
CREATE TABLE discounts
(
    advertiser_id UInt64,
    discount_start_date Date,
    discount_end_date Nullable(Date),
    amount Float64
)
ENGINE = Memory;

INSERT INTO discounts VALUES (1, '2015-01-01', Null, 0.1);
INSERT INTO discounts VALUES (1, '2015-01-15', Null, 0.2);
INSERT INTO discounts VALUES (2, '2015-01-01', '2015-01-15', 0.3);
INSERT INTO discounts VALUES (2, '2015-01-04', '2015-01-10', 0.4);
INSERT INTO discounts VALUES (3, '1970-01-01', '2015-01-15', 0.5);
INSERT INTO discounts VALUES (3, '1970-01-01', '2015-01-10', 0.6);

SELECT * FROM discounts ORDER BY advertiser_id, discount_start_date;
┌─advertiser_id─┬─discount_start_date─┬─discount_end_date─┬─amount─┐
│ 1 │ 2015-01-01 │ ᴺᵁᴸᴸ │ 0.1 │
│ 1 │ 2015-01-15 │ ᴺᵁᴸᴸ │ 0.2 │
│ 2 │ 2015-01-01 │ 2015-01-15 │ 0.3 │
│ 2 │ 2015-01-04 │ 2015-01-10 │ 0.4 │
│ 3 │ 1970-01-01 │ 2015-01-15 │ 0.5 │
│ 3 │ 1970-01-01 │ 2015-01-10 │ 0.6 │
└───────────────┴─────────────────────┴───────────────────┴────────┘

-- RANGE_LOOKUP_STRATEGY 'max'

CREATE DICTIONARY discounts_dict
(
    advertiser_id UInt64,
    discount_start_date Date,
    discount_end_date Nullable(Date),
    amount Float64
)
PRIMARY KEY advertiser_id
SOURCE(CLICKHOUSE(TABLE discounts))
LIFETIME(MIN 600 MAX 900)
LAYOUT(RANGE_HASHED(RANGE_LOOKUP_STRATEGY 'max'))
RANGE(MIN discount_start_date MAX discount_end_date);

select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-14')) res;
┌─res─┐
│ 0.1 │ -- the only one range is matching: 2015-01-01 - Null
└─────┘

select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res;
┌─res─┐
│ 0.2 │ -- two ranges are matching, range_min 2015-01-15 (0.2) is bigger than 2015-01-01 (0.1)
└─────┘

select dictGet('discounts_dict', 'amount', 2, toDate('2015-01-06')) res;
┌─res─┐
│ 0.4 │ -- two ranges are matching, range_min 2015-01-04 (0.4) is bigger than 2015-01-01 (0.3)
└─────┘

select dictGet('discounts_dict', 'amount', 3, toDate('2015-01-01')) res;
┌─res─┐
│ 0.5 │ -- two ranges are matching, range_min are equal, 2015-01-15 (0.5) is bigger than 2015-01-10 (0.6)
└─────┘

DROP DICTIONARY discounts_dict;

-- RANGE_LOOKUP_STRATEGY 'min'

CREATE DICTIONARY discounts_dict
(
    advertiser_id UInt64,
    discount_start_date Date,
    discount_end_date Nullable(Date),
    amount Float64
)
PRIMARY KEY advertiser_id
SOURCE(CLICKHOUSE(TABLE discounts))
LIFETIME(MIN 600 MAX 900)
LAYOUT(RANGE_HASHED(RANGE_LOOKUP_STRATEGY 'min'))
RANGE(MIN discount_start_date MAX discount_end_date);

select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-14')) res;
┌─res─┐
│ 0.1 │ -- the only one range is matching: 2015-01-01 - Null
└─────┘

select dictGet('discounts_dict', 'amount', 1, toDate('2015-01-16')) res;
┌─res─┐
│ 0.1 │ -- two ranges are matching, range_min 2015-01-01 (0.1) is less than 2015-01-15 (0.2)
└─────┘

select dictGet('discounts_dict', 'amount', 2, toDate('2015-01-06')) res;
┌─res─┐
│ 0.3 │ -- two ranges are matching, range_min 2015-01-01 (0.3) is less than 2015-01-04 (0.4)
└─────┘

select dictGet('discounts_dict', 'amount', 3, toDate('2015-01-01')) res;
┌─res─┐
│ 0.6 │ -- two ranges are matching, range_min are equal, 2015-01-10 (0.6) is less than 2015-01-15 (0.5)
└─────┘
```

### complex_key_range_hashed

The dictionary is stored in memory in the form of a hash table with an ordered array of ranges and their corresponding values (see [range_hashed](#range-hashed)). This type of storage is for use with composite [keys](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-structure.md).
@ -209,10 +209,25 @@ Aliases: `DAYOFMONTH`, `DAY`.

## toDayOfWeek

Converts a date or date with time to a UInt8 number containing the number of the day of the week (Monday is 1, and Sunday is 7).
Converts a date or date with time to a UInt8 number containing the number of the day of the week.

The two-argument form of `toDayOfWeek()` enables you to specify whether the week starts on Monday or Sunday, and whether the return value should be in the range from 0 to 6 or 1 to 7. If the mode argument is omitted, the default mode is 0. The time zone of the date can be specified as the third argument.

| Mode | First day of week | Range                                          |
|------|-------------------|------------------------------------------------|
| 0    | Monday            | 1-7: Monday = 1, Tuesday = 2, ..., Sunday = 7  |
| 1    | Monday            | 0-6: Monday = 0, Tuesday = 1, ..., Sunday = 6  |
| 2    | Sunday            | 0-6: Sunday = 0, Monday = 1, ..., Saturday = 6 |
| 3    | Sunday            | 1-7: Sunday = 1, Monday = 2, ..., Saturday = 7 |

Alias: `DAYOFWEEK`.

**Syntax**

``` sql
toDayOfWeek(t[, mode[, timezone]])
```
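For illustration, a sketch of how the mode argument changes the result (this assumes `2023-04-21`, which falls on a Friday):

``` sql
SELECT
    toDayOfWeek(toDate('2023-04-21')) AS mode0,    -- 5: Monday = 1 ... Sunday = 7
    toDayOfWeek(toDate('2023-04-21'), 1) AS mode1, -- 4: Monday = 0 ... Sunday = 6
    toDayOfWeek(toDate('2023-04-21'), 2) AS mode2, -- 5: Sunday = 0 ... Saturday = 6
    toDayOfWeek(toDate('2023-04-21'), 3) AS mode3  -- 6: Sunday = 1 ... Saturday = 7
```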
## toHour

Converts a date with time to a UInt8 number containing the number of the hour in 24-hour time (0-23).
@ -316,11 +331,17 @@ If `toLastDayOfMonth` is called with an argument of type `Date` greater then 214
Rounds down a date, or date with time, to the nearest Monday.
Returns the date.

## toStartOfWeek(t\[,mode\])
## toStartOfWeek

Rounds down a date, or date with time, to the nearest Sunday or Monday by mode.
Rounds a date or date with time down to the nearest Sunday or Monday.
Returns the date.
The mode argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used.
The mode argument works exactly like the mode argument in function `toWeek()`. If no mode is specified, mode is assumed as 0.

**Syntax**

``` sql
toStartOfWeek(t[, mode[, timezone]])
```
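A short sketch of the difference between the two most common modes (again assuming `2023-04-21` is a Friday):

``` sql
SELECT
    toStartOfWeek(toDate('2023-04-21')) AS week_from_sunday,   -- 2023-04-16 (mode 0: week starts on Sunday)
    toStartOfWeek(toDate('2023-04-21'), 1) AS week_from_monday -- 2023-04-17 (mode 1: week starts on Monday)
```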
## toStartOfDay
@ -455,10 +476,12 @@ Converts a date, or date with time, to a UInt16 number containing the ISO Year n

Converts a date, or date with time, to a UInt8 number containing the ISO Week number.

## toWeek(date\[,mode\])
## toWeek

This function returns the week number for date or datetime. The two-argument form of `toWeek()` enables you to specify whether the week starts on Sunday or Monday and whether the return value should be in the range from 0 to 53 or from 1 to 53. If the mode argument is omitted, the default mode is 0.

`toISOWeek()` is a compatibility function that is equivalent to `toWeek(date,3)`.

This function returns the week number for date or datetime. The two-argument form of toWeek() enables you to specify whether the week starts on Sunday or Monday and whether the return value should be in the range from 0 to 53 or from 1 to 53. If the mode argument is omitted, the default mode is 0.
`toISOWeek()`is a compatibility function that is equivalent to `toWeek(date,3)`.
The following table describes how the mode argument works.

| Mode | First day of week | Range | Week 1 is the first week … |
@ -482,13 +505,15 @@ For mode values with a meaning of “with 4 or more days this year,” weeks are

For mode values with a meaning of “contains January 1”, the week that contains January 1 is week 1. It does not matter how many days of the new year the week contains, even if it contains only one day.

**Syntax**

``` sql
toWeek(date, [, mode][, Timezone])
toWeek(t[, mode[, time_zone]])
```

**Arguments**

- `date` – Date or DateTime.
- `t` – Date or DateTime.
- `mode` – Optional parameter, Range of values is \[0,9\], default is 0.
- `Timezone` – Optional parameter, it behaves like any other conversion function.

@ -504,13 +529,19 @@ SELECT toDate('2016-12-27') AS date, toWeek(date) AS week0, toWeek(date,1) AS we
└────────────┴───────┴───────┴───────┘
```
## toYearWeek(date\[,mode\])
## toYearWeek

Returns year and week for a date. The year in the result may be different from the year in the date argument for the first and the last week of the year.

The mode argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used.
The mode argument works exactly like the mode argument to `toWeek()`. For the single-argument syntax, a mode value of 0 is used.

`toISOYear()`is a compatibility function that is equivalent to `intDiv(toYearWeek(date,3),100)`.
`toISOYear()` is a compatibility function that is equivalent to `intDiv(toYearWeek(date,3),100)`.

**Syntax**

``` sql
toYearWeek(t[, mode[, timezone]])
```

**Example**
@ -23,7 +23,7 @@ When `OPTIMIZE` is used with the [ReplicatedMergeTree](../../engines/table-engin

- If `OPTIMIZE` does not perform a merge for any reason, it does not notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting.
- If you specify a `PARTITION`, only the specified partition is optimized. [How to set partition expression](alter/partition.md#how-to-set-partition-expression).
- If you specify `FINAL`, optimization is performed even when all the data is already in one part. Also merge is forced even if concurrent merges are performed.
- If you specify `FINAL`, optimization is performed even when all the data is already in one part. You can control this behaviour with [optimize_skip_merged_partitions](../../operations/settings/settings.md#optimize-skip-merged-partitions). Also, the merge is forced even if concurrent merges are performed.
- If you specify `DEDUPLICATE`, then completely identical rows (unless by-clause is specified) will be deduplicated (all columns are compared), it makes sense only for the MergeTree engine.

You can specify how long (in seconds) to wait for inactive replicas to execute `OPTIMIZE` queries by the [replication_wait_for_inactive_replica_timeout](../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.
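A hedged illustration of combining these options (hypothetical table name):

``` sql
-- force a merge, but skip partitions that already consist of a single merged part
OPTIMIZE TABLE example_table FINAL SETTINGS optimize_skip_merged_partitions = 1;
```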
74
docs/en/sql-reference/table-functions/mongodb.md
Normal file
@ -0,0 +1,74 @@
---
slug: /en/sql-reference/table-functions/mongodb
sidebar_position: 42
sidebar_label: mongodb
---

# mongodb

Allows `SELECT` queries to be performed on data that is stored on a remote MongoDB server.

**Syntax**

``` sql
mongodb(host:port, database, collection, user, password, structure [, options])
```

**Arguments**

- `host:port` — MongoDB server address.
- `database` — Remote database name.
- `collection` — Remote collection name.
- `user` — MongoDB user.
- `password` — User password.
- `structure` — The schema for the ClickHouse table returned from this function.
- `options` — MongoDB connection string options (optional parameter).

**Returned Value**

A table object with the same columns as the original MongoDB table.

**Examples**

Suppose we have a collection named `my_collection` defined in a MongoDB database named `test`, and we insert a couple of documents:

```sql
db.createUser({user:"test_user",pwd:"password",roles:[{role:"readWrite",db:"test"}]})

db.createCollection("my_collection")

db.my_collection.insertOne(
    { log_type: "event", host: "120.5.33.9", command: "check-cpu-usage -w 75 -c 90" }
)

db.my_collection.insertOne(
    { log_type: "event", host: "120.5.33.4", command: "system-check"}
)
```

Let's query the collection using the `mongodb` table function:

```sql
SELECT * FROM mongodb(
    '127.0.0.1:27017',
    'test',
    'my_collection',
    'test_user',
    'password',
    'log_type String, host String, command String',
    'connectTimeoutMS=10000'
)
```
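
Assuming both documents were inserted as shown, the query would return two rows, roughly as follows (illustrative output only; column widths approximate):

```text
┌─log_type─┬─host───────┬─command─────────────────────┐
│ event    │ 120.5.33.9 │ check-cpu-usage -w 75 -c 90 │
│ event    │ 120.5.33.4 │ system-check                │
└──────────┴────────────┴─────────────────────────────┘
```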

**See Also**

- [The `MongoDB` table engine](../../engines/table-engines/integrations/mongodb.md)
- [Using MongoDB as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources/#mongodb)

@ -1997,6 +1997,21 @@ SELECT * FROM test_table

Default value: 0.

## optimize_skip_merged_partitions {#optimize-skip-merged-partitions}

Enables or disables an optimization for the [OPTIMIZE TABLE ... FINAL](../../sql-reference/statements/optimize.md) query when there is only one part with level > 0 and its TTL has not expired.

- `OPTIMIZE TABLE ... FINAL SETTINGS optimize_skip_merged_partitions=1`

By default, `OPTIMIZE TABLE ... FINAL` rewrites the part even if there is only one.

Possible values:

- 1 - Enabled
- 0 - Disabled

Default value: 0.

## optimize_functions_to_subcolumns {#optimize-functions-to-subcolumns}

Enables or disables an optimization that converts some functions to reading subcolumns, thus reducing the amount of data to read.
@ -24,7 +24,7 @@ OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION I

- By default, if the `OPTIMIZE` query fails to perform a merge, ClickHouse does not notify the client. To enable notifications, use the [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) setting.
- If you specify `PARTITION`, the optimization is performed only for the specified partition. [How to set the partition expression](alter/index.md#alter-how-to-specify-part-expr).
- If you specify `FINAL`, the optimization is performed even when all the data is already in one part. In addition, the merge is forced even if concurrent merges are in progress.
- If you specify `FINAL`, the optimization is performed even when all the data is already in one part. This can be controlled with the [optimize_skip_merged_partitions](../../operations/settings/settings.md#optimize-skip-merged-partitions) setting. In addition, the merge is forced even if concurrent merges are in progress.
- If you specify `DEDUPLICATE`, completely identical rows are collapsed (values in all columns are compared); this makes sense only for the MergeTree engine.

You can specify how long (in seconds) to wait for inactive replicas to execute `OPTIMIZE` queries with the [replication_wait_for_inactive_replica_timeout](../../operations/settings/settings.md#replication-wait-for-inactive-replica-timeout) setting.

@ -196,4 +196,5 @@ SELECT * FROM example;
┌─primary_key─┬─secondary_key─┬─value─┬─partition_key─┐
│           1 │             1 │     2 │             3 │
└─────────────┴───────────────┴───────┴───────────────┘
```
```

@ -277,7 +277,7 @@ private:
}

if (queries.empty())
throw Exception("Empty list of queries.", ErrorCodes::EMPTY_DATA_PASSED);
throw Exception(ErrorCodes::EMPTY_DATA_PASSED, "Empty list of queries.");
}
else
{

@ -719,7 +719,7 @@ bool Client::processWithFuzzing(const String & full_query)
// uniformity.
// Surprisingly, this is a client exception, because we get the
// server exception w/o throwing (see onReceiveException()).
client_exception = std::make_unique<Exception>(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
client_exception = std::make_unique<Exception>(getCurrentExceptionMessageAndPattern(print_stack_trace), getCurrentExceptionCode());
have_error = true;
}

@ -854,7 +854,7 @@ bool Client::processWithFuzzing(const String & full_query)
}
catch (...)
{
client_exception = std::make_unique<Exception>(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
client_exception = std::make_unique<Exception>(getCurrentExceptionMessageAndPattern(print_stack_trace), getCurrentExceptionCode());
have_error = true;
}

@ -165,9 +165,8 @@ int mainEntryClickHouseFormat(int argc, char ** argv)
/// should throw exception early and make exception message more readable.
if (const auto * insert_query = res->as<ASTInsertQuery>(); insert_query && insert_query->data)
{
throw Exception(
"Can't format ASTInsertQuery with data, since data will be lost",
DB::ErrorCodes::INVALID_FORMAT_INSERT_QUERY_WITH_DATA);
throw Exception(DB::ErrorCodes::INVALID_FORMAT_INSERT_QUERY_WITH_DATA,
"Can't format ASTInsertQuery with data, since data will be lost");
}
if (!quiet)
{

@ -196,7 +196,7 @@ void Keeper::createServer(const std::string & listen_host, const char * port_nam
}
else
{
throw Exception{message, ErrorCodes::NETWORK_ERROR};
throw Exception::createDeprecated(message, ErrorCodes::NETWORK_ERROR);
}
}
}

@ -375,7 +375,7 @@ try
if (effective_user_id == 0)
{
message += " Run under 'sudo -u " + data_owner + "'.";
throw Exception(message, ErrorCodes::MISMATCHING_USERS_FOR_PROCESS_AND_DATA);
throw Exception::createDeprecated(message, ErrorCodes::MISMATCHING_USERS_FOR_PROCESS_AND_DATA);
}
else
{

@ -243,7 +243,6 @@ ColumnFloat64::MutablePtr CatBoostLibraryHandler::evalImpl(
const ColumnRawPtrs & columns,
bool cat_features_are_strings) const
{
std::string error_msg = "Error occurred while applying CatBoost model: ";
size_t column_size = columns.front()->size();

auto result = ColumnFloat64::create(column_size * tree_count);

@ -265,7 +264,8 @@ ColumnFloat64::MutablePtr CatBoostLibraryHandler::evalImpl(
result_buf, column_size * tree_count))
{
throw Exception(error_msg + api.GetErrorString(), ErrorCodes::CANNOT_APPLY_CATBOOST_MODEL);
throw Exception(ErrorCodes::CANNOT_APPLY_CATBOOST_MODEL,
"Error occurred while applying CatBoost model: {}", api.GetErrorString());
}
return result;
}

@ -288,7 +288,8 @@ ColumnFloat64::MutablePtr CatBoostLibraryHandler::evalImpl(
cat_features_buf, cat_features_count,
result_buf, column_size * tree_count))
{
throw Exception(error_msg + api.GetErrorString(), ErrorCodes::CANNOT_APPLY_CATBOOST_MODEL);
throw Exception(ErrorCodes::CANNOT_APPLY_CATBOOST_MODEL,
"Error occurred while applying CatBoost model: {}", api.GetErrorString());
}
}
else

@ -304,7 +305,8 @@ ColumnFloat64::MutablePtr CatBoostLibraryHandler::evalImpl(
cat_features_buf, cat_features_count,
result_buf, column_size * tree_count))
{
throw Exception(error_msg + api.GetErrorString(), ErrorCodes::CANNOT_APPLY_CATBOOST_MODEL);
throw Exception(ErrorCodes::CANNOT_APPLY_CATBOOST_MODEL,
"Error occurred while applying CatBoost model: {}", api.GetErrorString());
}
}

@ -416,7 +416,7 @@ void Server::createServer(
}
else
{
throw Exception{message, ErrorCodes::NETWORK_ERROR};
throw Exception::createDeprecated(message, ErrorCodes::NETWORK_ERROR);
}
}
}

@ -946,7 +946,7 @@ try
if (effective_user_id == 0)
{
message += " Run under 'sudo -u " + data_owner + "'.";
throw Exception(message, ErrorCodes::MISMATCHING_USERS_FOR_PROCESS_AND_DATA);
throw Exception::createDeprecated(message, ErrorCodes::MISMATCHING_USERS_FOR_PROCESS_AND_DATA);
}
else
{

@ -334,6 +334,19 @@

<max_thread_pool_size>10000</max_thread_pool_size>

<!-- Configure other thread pools: -->
<!--
<background_buffer_flush_schedule_pool_size>16</background_buffer_flush_schedule_pool_size>
<background_pool_size>16</background_pool_size>
<background_merges_mutations_concurrency_ratio>2</background_merges_mutations_concurrency_ratio>
<background_move_pool_size>8</background_move_pool_size>
<background_fetches_pool_size>8</background_fetches_pool_size>
<background_common_pool_size>8</background_common_pool_size>
<background_schedule_pool_size>128</background_schedule_pool_size>
<background_message_broker_schedule_pool_size>16</background_message_broker_schedule_pool_size>
<background_distributed_schedule_pool_size>16</background_distributed_schedule_pool_size>
-->

<!-- Number of workers to recycle connections in background (see also drain_timeout).
If the pool is full, connection will be drained synchronously. -->
<!-- <max_threads_for_connection_collector>10</max_threads_for_connection_collector> -->

@ -79,9 +79,7 @@ AuthenticationData::Digest AuthenticationData::Util::encodeSHA256(std::string_vi
::DB::encodeSHA256(text, hash.data());
return hash;
#else
throw DB::Exception(
"SHA256 passwords support is disabled, because ClickHouse was built without SSL library",
DB::ErrorCodes::SUPPORT_IS_DISABLED);
throw DB::Exception(DB::ErrorCodes::SUPPORT_IS_DISABLED, "SHA256 passwords support is disabled, because ClickHouse was built without SSL library");
#endif
}

@ -484,13 +484,15 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
return true;
};

auto access_denied = [&](const String & error_msg, int error_code [[maybe_unused]])
auto access_denied = [&]<typename... FmtArgs>(int error_code [[maybe_unused]],
FormatStringHelper<String, FmtArgs...> fmt_string [[maybe_unused]],
FmtArgs && ...fmt_args [[maybe_unused]])
{
if (trace_log)
LOG_TRACE(trace_log, "Access denied: {}{}", (AccessRightsElement{flags, args...}.toStringWithoutOptions()),
(grant_option ? " WITH GRANT OPTION" : ""));
if constexpr (throw_if_denied)
throw Exception(getUserName() + ": " + error_msg, error_code);
throw Exception(error_code, std::move(fmt_string), getUserName(), std::forward<FmtArgs>(fmt_args)...);
return false;
};

@ -519,18 +521,16 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
{
if (grant_option && acs->isGranted(flags, args...))
{
return access_denied(
"Not enough privileges. "
return access_denied(ErrorCodes::ACCESS_DENIED,
"{}: Not enough privileges. "
"The required privileges have been granted, but without grant option. "
"To execute this query it's necessary to have grant "
+ AccessRightsElement{flags, args...}.toStringWithoutOptions() + " WITH GRANT OPTION",
ErrorCodes::ACCESS_DENIED);
"To execute this query it's necessary to have grant {} WITH GRANT OPTION",
AccessRightsElement{flags, args...}.toStringWithoutOptions());
}

return access_denied(
"Not enough privileges. To execute this query it's necessary to have grant "
+ AccessRightsElement{flags, args...}.toStringWithoutOptions() + (grant_option ? " WITH GRANT OPTION" : ""),
ErrorCodes::ACCESS_DENIED);
return access_denied(ErrorCodes::ACCESS_DENIED,
"{}: Not enough privileges. To execute this query it's necessary to have grant {}",
AccessRightsElement{flags, args...}.toStringWithoutOptions() + (grant_option ? " WITH GRANT OPTION" : ""));
}

struct PrecalculatedFlags

@ -557,32 +557,34 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
if (params.readonly)
{
if constexpr (grant_option)
return access_denied("Cannot change grants in readonly mode.", ErrorCodes::READONLY);
return access_denied(ErrorCodes::READONLY, "{}: Cannot change grants in readonly mode.");
if ((flags & precalc.not_readonly_flags) ||
((params.readonly == 1) && (flags & precalc.not_readonly_1_flags)))
{
if (params.interface == ClientInfo::Interface::HTTP && params.http_method == ClientInfo::HTTPMethod::GET)
{
return access_denied(
"Cannot execute query in readonly mode. "
"For queries over HTTP, method GET implies readonly. You should use method POST for modifying queries",
ErrorCodes::READONLY);
return access_denied(ErrorCodes::READONLY,
"{}: Cannot execute query in readonly mode. "
"For queries over HTTP, method GET implies readonly. "
"You should use method POST for modifying queries");
}
else
return access_denied("Cannot execute query in readonly mode", ErrorCodes::READONLY);
return access_denied(ErrorCodes::READONLY, "{}: Cannot execute query in readonly mode");
}
}

if (!params.allow_ddl && !grant_option)
{
if (flags & precalc.ddl_flags)
return access_denied("Cannot execute query. DDL queries are prohibited for the user", ErrorCodes::QUERY_IS_PROHIBITED);
return access_denied(ErrorCodes::QUERY_IS_PROHIBITED,
"Cannot execute query. DDL queries are prohibited for the user {}");
}

if (!params.allow_introspection && !grant_option)
{
if (flags & precalc.introspection_flags)
return access_denied("Introspection functions are disabled, because setting 'allow_introspection_functions' is set to 0", ErrorCodes::FUNCTION_NOT_ALLOWED);
return access_denied(ErrorCodes::FUNCTION_NOT_ALLOWED, "{}: Introspection functions are disabled, "
"because setting 'allow_introspection_functions' is set to 0");
}

return access_granted();

@ -679,11 +681,13 @@ void ContextAccess::checkGrantOption(const AccessRightsElements & elements) cons
template <bool throw_if_denied, typename Container, typename GetNameFunction>
bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const GetNameFunction & get_name_function) const
{
auto show_error = [this](const String & msg, int error_code [[maybe_unused]])
auto show_error = []<typename... FmtArgs>(int error_code [[maybe_unused]],
FormatStringHelper<FmtArgs...> fmt_string [[maybe_unused]],
FmtArgs && ...fmt_args [[maybe_unused]])
{
UNUSED(this);
if constexpr (throw_if_denied)
throw Exception(getUserName() + ": " + msg, error_code);
throw Exception(error_code, std::move(fmt_string), std::forward<FmtArgs>(fmt_args)...);
return false;
};

if (is_full_access)

@ -691,7 +695,7 @@ bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const

if (user_was_dropped)
{
show_error("User has been dropped", ErrorCodes::UNKNOWN_USER);
show_error(ErrorCodes::UNKNOWN_USER, "User has been dropped");
return false;
}

@ -716,14 +720,15 @@ bool ContextAccess::checkAdminOptionImplHelper(const Container & role_ids, const
role_name = "ID {" + toString(role_id) + "}";

if (info->enabled_roles.count(role_id))
show_error("Not enough privileges. "
"Role " + backQuote(*role_name) + " is granted, but without ADMIN option. "
"To execute this query it's necessary to have the role " + backQuoteIfNeed(*role_name) + " granted with ADMIN option.",
ErrorCodes::ACCESS_DENIED);
show_error(ErrorCodes::ACCESS_DENIED,
"Not enough privileges. "
"Role {} is granted, but without ADMIN option. "
"To execute this query it's necessary to have the role {} granted with ADMIN option.",
backQuote(*role_name), backQuoteIfNeed(*role_name));
else
show_error("Not enough privileges. "
"To execute this query it's necessary to have the role " + backQuoteIfNeed(*role_name) + " granted with ADMIN option.",
ErrorCodes::ACCESS_DENIED);
show_error(ErrorCodes::ACCESS_DENIED, "Not enough privileges. "
"To execute this query it's necessary to have the role {} granted with ADMIN option.",
backQuoteIfNeed(*role_name));
}

return false;

@ -81,7 +81,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
{
ret = krb5_cc_resolve(k5.ctx, cache_name.c_str(), &k5.out_cc);
if (ret)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in resolving cache{}", fmtError(ret));
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in resolving cache: {}", fmtError(ret));
LOG_TRACE(log,"Resolved cache");
}
else

@ -89,7 +89,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Resolve the default cache and get its type and default principal (if it is initialized).
ret = krb5_cc_default(k5.ctx, &defcache);
if (ret)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while getting default cache{}", fmtError(ret));
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while getting default cache: {}", fmtError(ret));
LOG_TRACE(log,"Resolved default cache");
deftype = krb5_cc_get_type(k5.ctx, defcache);
if (krb5_cc_get_principal(k5.ctx, defcache, &defcache_princ) != 0)

@ -99,7 +99,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Use the specified principal name.
ret = krb5_parse_name_flags(k5.ctx, principal.c_str(), 0, &k5.me);
if (ret)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when parsing principal name {}", principal + fmtError(ret));
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when parsing principal name ({}): {}", principal, fmtError(ret));

// Cache related commands
if (k5.out_cc == nullptr && krb5_cc_support_switch(k5.ctx, deftype))

@ -107,7 +107,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Use an existing cache for the client principal if we can.
ret = krb5_cc_cache_match(k5.ctx, k5.me, &k5.out_cc);
if (ret && ret != KRB5_CC_NOTFOUND)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while searching for cache for {}", principal + fmtError(ret));
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while searching for cache for ({}): {}", principal, fmtError(ret));
if (0 == ret)
{
LOG_TRACE(log,"Using default cache: {}", krb5_cc_get_name(k5.ctx, k5.out_cc));

@ -118,7 +118,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Create a new cache to avoid overwriting the initialized default cache.
ret = krb5_cc_new_unique(k5.ctx, deftype, nullptr, &k5.out_cc);
if (ret)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while generating new cache{}", fmtError(ret));
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while generating new cache: {}", fmtError(ret));
LOG_TRACE(log,"Using default cache: {}", krb5_cc_get_name(k5.ctx, k5.out_cc));
k5.switch_to_cache = 1;
}

@ -134,24 +134,24 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co

ret = krb5_unparse_name(k5.ctx, k5.me, &k5.name);
if (ret)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when unparsing name{}", fmtError(ret));
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when unparsing name: {}", fmtError(ret));
LOG_TRACE(log,"Using principal: {}", k5.name);

// Allocate a new initial credential options structure.
ret = krb5_get_init_creds_opt_alloc(k5.ctx, &options);
if (ret)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in options allocation{}", fmtError(ret));
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in options allocation: {}", fmtError(ret));

// Resolve keytab
ret = krb5_kt_resolve(k5.ctx, keytab_file.c_str(), &keytab);
if (ret)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in resolving keytab {}{}", keytab_file, fmtError(ret));
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in resolving keytab ({}): {}", keytab_file, fmtError(ret));
LOG_TRACE(log,"Using keytab: {}", keytab_file);

// Set an output credential cache in initial credential options.
ret = krb5_get_init_creds_opt_set_out_ccache(k5.ctx, options, k5.out_cc);
if (ret)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in setting output credential cache{}", fmtError(ret));
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in setting output credential cache: {}", fmtError(ret));

// Action: init or renew
LOG_TRACE(log,"Trying to renew credentials");

@ -165,7 +165,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Request KDC for an initial credentials using keytab.
ret = krb5_get_init_creds_keytab(k5.ctx, &my_creds, k5.me, keytab, 0, nullptr, options);
if (ret)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in getting initial credentials{}", fmtError(ret));
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in getting initial credentials: {}", fmtError(ret));
else
LOG_TRACE(log,"Got initial credentials");
}

@ -175,7 +175,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Initialize a credential cache. Destroy any existing contents of cache and initialize it for the default principal.
ret = krb5_cc_initialize(k5.ctx, k5.out_cc, k5.me);
if (ret)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when initializing cache{}", fmtError(ret));
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when initializing cache: {}", fmtError(ret));
LOG_TRACE(log,"Initialized cache");
// Store credentials in a credential cache.
ret = krb5_cc_store_cred(k5.ctx, k5.out_cc, &my_creds);

@ -189,7 +189,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Make a credential cache the primary cache for its collection.
ret = krb5_cc_switch(k5.ctx, k5.out_cc);
if (ret)
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while switching to new cache{}", fmtError(ret));
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while switching to new cache: {}", fmtError(ret));
}

LOG_TRACE(log,"Authenticated to Kerberos v5");

@ -205,7 +205,7 @@ void LDAPClient::handleError(int result_code, String text)
}
}

throw Exception(text, ErrorCodes::LDAP_ERROR);
throw Exception::createDeprecated(text, ErrorCodes::LDAP_ERROR);
}
}

@ -569,7 +569,7 @@ LDAPClient::SearchResults LDAPClient::search(const SearchParams & search_params)
message += matched_msg;
}

throw Exception(message, ErrorCodes::LDAP_ERROR);
throw Exception::createDeprecated(message, ErrorCodes::LDAP_ERROR);
}

break;

@ -266,7 +266,7 @@ bool SettingsConstraints::Checker::check(SettingChange & change, const Field & n
if (!explain.empty())
{
if (reaction == THROW_ON_VIOLATION)
throw Exception(explain, code);
throw Exception::createDeprecated(explain, code);
else
return false;
}

@ -106,7 +106,7 @@ public:
default:
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Map key type " + key_type->getName() + " is not supported by combinator " + getName());
"Map key type {} is not supported by combinator {}", key_type->getName(), getName());
}
}
else

@ -66,13 +66,13 @@ public:
, kind(kind_)
{
if (!isNativeNumber(arguments[0]))
throw Exception{getName() + ": first argument must be represented by integer", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "{}: first argument must be represented by integer", getName());

if (!isNativeNumber(arguments[1]))
throw Exception{getName() + ": second argument must be represented by integer", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "{}: second argument must be represented by integer", getName());

if (!arguments[0]->equals(*arguments[1]))
throw Exception{getName() + ": arguments must have the same type", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT};
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "{}: arguments must have the same type", getName());
}

String getName() const override

@ -88,9 +88,9 @@ createAggregateFunctionSequenceNode(const std::string & name, const DataTypes &
name, toString(min_required_args + 1));

if (argument_types.size() > max_events_size + min_required_args)
throw Exception(fmt::format(
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Aggregate function '{}' requires at most {} (timestamp, value_column, ...{} events) arguments.",
name, max_events_size + min_required_args, max_events_size), ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
name, max_events_size + min_required_args, max_events_size);

if (const auto * cond_arg = argument_types[2].get(); cond_arg && !isUInt8(cond_arg))
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of third argument of aggregate function {}, "

@ -100,9 +100,8 @@ createAggregateFunctionSequenceNode(const std::string & name, const DataTypes &
{
const auto * cond_arg = argument_types[i].get();
if (!isUInt8(cond_arg))
throw Exception(fmt::format(
"Illegal type '{}' of {} argument of aggregate function '{}', must be UInt8", cond_arg->getName(), i + 1, name),
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Illegal type '{}' of {} argument of aggregate function '{}', must be UInt8", cond_arg->getName(), i + 1, name);
}

if (WhichDataType(argument_types[1].get()).idx != TypeIndex::String)

@ -235,7 +235,7 @@ private:
if (skip_degree_ == skip_degree)
return;
if (skip_degree_ > detail::MAX_SKIP_DEGREE)
throw DB::Exception{"skip_degree exceeds maximum value", DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED};
throw DB::Exception(DB::ErrorCodes::MEMORY_LIMIT_EXCEEDED, "skip_degree exceeds maximum value");
skip_degree = skip_degree_;
if (skip_degree == detail::MAX_SKIP_DEGREE)
skip_mask = static_cast<UInt32>(-1);

@ -71,7 +71,15 @@ void ColumnNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & state, size_t
bool ColumnNode::isEqualImpl(const IQueryTreeNode & rhs) const
{
const auto & rhs_typed = assert_cast<const ColumnNode &>(rhs);
return column == rhs_typed.column;

auto source = getColumnSourceOrNull();
auto rhs_source = rhs_typed.getColumnSourceOrNull();
if (source && !rhs_source)
return false;
if (!source && rhs_source)
return false;

return column == rhs_typed.column && (!source || source->isEqual(*rhs_source));
}

void ColumnNode::updateTreeHashImpl(HashState & hash_state) const

@ -1,14 +1,15 @@
#pragma once

#include <optional>
#include <utility>
#include <Common/SettingsChanges.h>
#include <base/scope_guard.h>

#include <Common/Exception.h>
#include <Core/Settings.h>

#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/QueryNode.h>
#include <Analyzer/UnionNode.h>

#include <Interpreters/Context.h>

namespace DB
{

@ -89,4 +90,134 @@ private:
template <typename Derived>
using ConstInDepthQueryTreeVisitor = InDepthQueryTreeVisitor<Derived, true /*const_visitor*/>;

/** Same as InDepthQueryTreeVisitor and additionally keeps track of the current scope context.
* This can be useful if your visitor has special logic that depends on the current scope context.
*/
template <typename Derived, bool const_visitor = false>
class InDepthQueryTreeVisitorWithContext
{
public:
using VisitQueryTreeNodeType = std::conditional_t<const_visitor, const QueryTreeNodePtr, QueryTreeNodePtr>;

explicit InDepthQueryTreeVisitorWithContext(ContextPtr context)
: current_context(std::move(context))
{}

/// Return true if visitor should traverse tree top to bottom, false otherwise
bool shouldTraverseTopToBottom() const
{
return true;
}

/// Return true if visitor should visit child, false otherwise
bool needChildVisit(VisitQueryTreeNodeType & parent [[maybe_unused]], VisitQueryTreeNodeType & child [[maybe_unused]])
{
return true;
}

const ContextPtr & getContext() const
{
return current_context;
}

const Settings & getSettings() const
{
return current_context->getSettingsRef();
}

void visit(VisitQueryTreeNodeType & query_tree_node)
{
auto current_scope_context_ptr = current_context;
SCOPE_EXIT(
current_context = std::move(current_scope_context_ptr);
);

if (auto * query_node = query_tree_node->template as<QueryNode>())
current_context = query_node->getContext();
else if (auto * union_node = query_tree_node->template as<UnionNode>())
current_context = union_node->getContext();

bool traverse_top_to_bottom = getDerived().shouldTraverseTopToBottom();
if (!traverse_top_to_bottom)
visitChildren(query_tree_node);

getDerived().visitImpl(query_tree_node);

if (traverse_top_to_bottom)
visitChildren(query_tree_node);
}
private:
Derived & getDerived()
{
return *static_cast<Derived *>(this);
}

const Derived & getDerived() const
{
return *static_cast<Derived *>(this);
}

void visitChildren(VisitQueryTreeNodeType & expression)
{
for (auto & child : expression->getChildren())
{
if (!child)
continue;

bool need_visit_child = getDerived().needChildVisit(expression, child);

if (need_visit_child)
visit(child);
}
}

ContextPtr current_context;
};

template <typename Derived>
using ConstInDepthQueryTreeVisitorWithContext = InDepthQueryTreeVisitorWithContext<Derived, true /*const_visitor*/>;

/** Visitor that uses another visitor to visit a node only if the condition for visiting the node is true.
* For example, your visitor needs to visit only query tree nodes or union nodes.
*
* Condition interface:
* struct Condition
* {
*     bool operator()(VisitQueryTreeNodeType & node)
*     {
*         return shouldNestedVisitorVisitNode(node);
*     }
* }
*/
template <typename Visitor, typename Condition, bool const_visitor = false>
class InDepthQueryTreeConditionalVisitor : public InDepthQueryTreeVisitor<InDepthQueryTreeConditionalVisitor<Visitor, Condition, const_visitor>, const_visitor>
{
public:
using Base = InDepthQueryTreeVisitor<InDepthQueryTreeConditionalVisitor<Visitor, Condition, const_visitor>, const_visitor>;
using VisitQueryTreeNodeType = typename Base::VisitQueryTreeNodeType;

explicit InDepthQueryTreeConditionalVisitor(Visitor & visitor_, Condition & condition_)
: visitor(visitor_)
, condition(condition_)
{
}

bool shouldTraverseTopToBottom() const
{
return visitor.shouldTraverseTopToBottom();
}

void visitImpl(VisitQueryTreeNodeType & query_tree_node)
{
if (condition(query_tree_node))
visitor.visit(query_tree_node);
}

Visitor & visitor;
Condition & condition;
};

template <typename Visitor, typename Condition>
using ConstInDepthQueryTreeConditionalVisitor = InDepthQueryTreeConditionalVisitor<Visitor, Condition, true /*const_visitor*/>;

}

@ -45,12 +45,11 @@ Field zeroField(const Field & value)
* TODO: Support `groupBitAnd`, `groupBitOr`, `groupBitXor` functions.
* TODO: Support rewrite `f((2 * n) * n)` into '2 * f(n * n)'.
*/
class AggregateFunctionsArithmericOperationsVisitor : public InDepthQueryTreeVisitor<AggregateFunctionsArithmericOperationsVisitor>
class AggregateFunctionsArithmericOperationsVisitor : public InDepthQueryTreeVisitorWithContext<AggregateFunctionsArithmericOperationsVisitor>
{
public:
explicit AggregateFunctionsArithmericOperationsVisitor(ContextPtr context_)
: context(std::move(context_))
{}
using Base = InDepthQueryTreeVisitorWithContext<AggregateFunctionsArithmericOperationsVisitor>;
using Base::Base;

/// Traverse tree bottom to top
static bool shouldTraverseTopToBottom()

@ -60,6 +59,9 @@ public:

void visitImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_arithmetic_operations_in_aggregate_functions)
return;

auto * aggregate_function_node = node->as<FunctionNode>();
if (!aggregate_function_node || !aggregate_function_node->isAggregateFunction())
return;

@ -175,7 +177,7 @@ private:

inline void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const
{
auto function = FunctionFactory::instance().get(function_name, context);
auto function = FunctionFactory::instance().get(function_name, getContext());
function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
}

@ -191,8 +193,6 @@ private:

function_node.resolveAsAggregateFunction(std::move(aggregate_function));
}

ContextPtr context;
};

}

@ -1,17 +1,23 @@
#include <Analyzer/Passes/ConvertOrLikeChainPass.h>

#include <memory>
#include <unordered_map>
#include <vector>
#include <Analyzer/Passes/ConvertOrLikeChainPass.h>

#include <Core/Field.h>

#include <DataTypes/DataTypesNumber.h>

#include <Functions/FunctionFactory.h>
#include <Functions/likePatternToRegexp.h>

#include <Interpreters/Context.h>

#include <Analyzer/ConstantNode.h>
#include <Analyzer/UnionNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/HashUtils.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Core/Field.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/likePatternToRegexp.h>
#include <Interpreters/Context.h>

namespace DB
{

@ -19,36 +25,28 @@ namespace DB
namespace
{

class ConvertOrLikeChainVisitor : public InDepthQueryTreeVisitor<ConvertOrLikeChainVisitor>
class ConvertOrLikeChainVisitor : public InDepthQueryTreeVisitorWithContext<ConvertOrLikeChainVisitor>
{
using FunctionNodes = std::vector<std::shared_ptr<FunctionNode>>;

const FunctionOverloadResolverPtr match_function_ref;
const FunctionOverloadResolverPtr or_function_resolver;
public:
using Base = InDepthQueryTreeVisitorWithContext<ConvertOrLikeChainVisitor>;
using Base::Base;

explicit ConvertOrLikeChainVisitor(ContextPtr context)
: InDepthQueryTreeVisitor<ConvertOrLikeChainVisitor>()
, match_function_ref(FunctionFactory::instance().get("multiMatchAny", context))
, or_function_resolver(FunctionFactory::instance().get("or", context))
explicit ConvertOrLikeChainVisitor(FunctionOverloadResolverPtr or_function_resolver_,
FunctionOverloadResolverPtr match_function_resolver_,
ContextPtr context)
: Base(std::move(context))
, or_function_resolver(std::move(or_function_resolver_))
, match_function_resolver(std::move(match_function_resolver_))
{}

static bool needChildVisit(VisitQueryTreeNodeType & parent, VisitQueryTreeNodeType &)
bool needChildVisit(VisitQueryTreeNodeType &, VisitQueryTreeNodeType &)
{
ContextPtr context;
if (auto * query = parent->as<QueryNode>())
context = query->getContext();
else if (auto * union_node = parent->as<UnionNode>())
context = union_node->getContext();
if (context)
{
const auto & settings = context->getSettingsRef();
return settings.optimize_or_like_chain
&& settings.allow_hyperscan
&& settings.max_hyperscan_regexp_length == 0
&& settings.max_hyperscan_regexp_total_length == 0;
}
return true;
const auto & settings = getSettings();

return settings.optimize_or_like_chain
&& settings.allow_hyperscan
&& settings.max_hyperscan_regexp_length == 0
&& settings.max_hyperscan_regexp_total_length == 0;
}

void visitImpl(QueryTreeNodePtr & node)

@ -61,27 +59,28 @@ public:

QueryTreeNodePtrWithHashMap<Array> node_to_patterns;
FunctionNodes match_functions;
for (auto & arg : function_node->getArguments())
{
unique_elems.push_back(arg);

auto * arg_func = arg->as<FunctionNode>();
if (!arg_func)
for (auto & argument : function_node->getArguments())
{
unique_elems.push_back(argument);

auto * argument_function = argument->as<FunctionNode>();
if (!argument_function)
continue;

const bool is_like = arg_func->getFunctionName() == "like";
const bool is_ilike = arg_func->getFunctionName() == "ilike";
const bool is_like = argument_function->getFunctionName() == "like";
const bool is_ilike = argument_function->getFunctionName() == "ilike";

/// Not {i}like -> bail out.
if (!is_like && !is_ilike)
continue;

const auto & like_arguments = arg_func->getArguments().getNodes();
const auto & like_arguments = argument_function->getArguments().getNodes();
if (like_arguments.size() != 2)
continue;

auto identifier = like_arguments[0];
auto * pattern = like_arguments[1]->as<ConstantNode>();
const auto & like_first_argument = like_arguments[0];
const auto * pattern = like_arguments[1]->as<ConstantNode>();
if (!pattern || !isString(pattern->getResultType()))
continue;

@ -91,17 +90,20 @@ public:
regexp = "(?i)" + regexp;

unique_elems.pop_back();
auto it = node_to_patterns.find(identifier);

auto it = node_to_patterns.find(like_first_argument);
if (it == node_to_patterns.end())
{
it = node_to_patterns.insert({identifier, Array{}}).first;
it = node_to_patterns.insert({like_first_argument, Array{}}).first;

/// The second argument will be added when all patterns are known.
auto match_function = std::make_shared<FunctionNode>("multiMatchAny");
match_function->getArguments().getNodes().push_back(identifier);

match_function->getArguments().getNodes().push_back(like_first_argument);
match_functions.push_back(match_function);

unique_elems.push_back(std::move(match_function));
}

it->second.push_back(regexp);
}

@ -111,23 +113,29 @@ public:
auto & arguments = match_function->getArguments().getNodes();
auto & patterns = node_to_patterns.at(arguments[0]);
arguments.push_back(std::make_shared<ConstantNode>(Field{std::move(patterns)}));
match_function->resolveAsFunction(match_function_ref);
match_function->resolveAsFunction(match_function_resolver);
}

/// OR must have at least two arguments.
if (unique_elems.size() == 1)
unique_elems.push_back(std::make_shared<ConstantNode>(false));
unique_elems.push_back(std::make_shared<ConstantNode>(static_cast<UInt8>(0)));

function_node->getArguments().getNodes() = std::move(unique_elems);
function_node->resolveAsFunction(or_function_resolver);
}
private:
using FunctionNodes = std::vector<std::shared_ptr<FunctionNode>>;
const FunctionOverloadResolverPtr or_function_resolver;
const FunctionOverloadResolverPtr match_function_resolver;
};

}

void ConvertOrLikeChainPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
ConvertOrLikeChainVisitor visitor(context);
auto or_function_resolver = FunctionFactory::instance().get("or", context);
auto match_function_resolver = FunctionFactory::instance().get("multiMatchAny", context);
ConvertOrLikeChainVisitor visitor(std::move(or_function_resolver), std::move(match_function_resolver), std::move(context));
visitor.visit(query_tree_node);
}

@ -16,11 +16,17 @@ namespace DB
namespace
{

class CountDistinctVisitor : public InDepthQueryTreeVisitor<CountDistinctVisitor>
class CountDistinctVisitor : public InDepthQueryTreeVisitorWithContext<CountDistinctVisitor>
{
public:
static void visitImpl(QueryTreeNodePtr & node)
using Base = InDepthQueryTreeVisitorWithContext<CountDistinctVisitor>;
using Base::Base;

void visitImpl(QueryTreeNodePtr & node)
{
if (!getSettings().count_distinct_optimization)
return;

auto * query_node = node->as<QueryNode>();

/// Check that query has only SELECT clause

@ -78,9 +84,9 @@ public:

}

void CountDistinctPass::run(QueryTreeNodePtr query_tree_node, ContextPtr)
void CountDistinctPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
CountDistinctVisitor visitor;
CountDistinctVisitor visitor(std::move(context));
visitor.visit(query_tree_node);
}

@ -16,12 +16,11 @@ namespace DB
namespace
{

class CustomizeFunctionsVisitor : public InDepthQueryTreeVisitor<CustomizeFunctionsVisitor>
class CustomizeFunctionsVisitor : public InDepthQueryTreeVisitorWithContext<CustomizeFunctionsVisitor>
{
public:
explicit CustomizeFunctionsVisitor(ContextPtr & context_)
: context(context_)
{}
using Base = InDepthQueryTreeVisitorWithContext<CustomizeFunctionsVisitor>;
using Base::Base;

void visitImpl(QueryTreeNodePtr & node) const
{

@ -29,7 +28,7 @@ public:
if (!function_node)
return;

const auto & settings = context->getSettingsRef();
const auto & settings = getSettings();

/// After successful function replacement function name and function name lowercase must be recalculated
auto function_name = function_node->getFunctionName();

@ -154,19 +153,16 @@ public:

inline void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const
{
auto function = FunctionFactory::instance().get(function_name, context);
auto function = FunctionFactory::instance().get(function_name, getContext());
function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
}

private:
ContextPtr & context;
};

}

void CustomizeFunctionsPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
CustomizeFunctionsVisitor visitor(context);
CustomizeFunctionsVisitor visitor(std::move(context));
visitor.visit(query_tree_node);
}

@ -22,15 +22,17 @@ namespace DB
namespace
{

class FunctionToSubcolumnsVisitor : public InDepthQueryTreeVisitor<FunctionToSubcolumnsVisitor>
class FunctionToSubcolumnsVisitor : public InDepthQueryTreeVisitorWithContext<FunctionToSubcolumnsVisitor>
{
public:
explicit FunctionToSubcolumnsVisitor(ContextPtr & context_)
: context(context_)
{}
using Base = InDepthQueryTreeVisitorWithContext<FunctionToSubcolumnsVisitor>;
using Base::Base;

void visitImpl(QueryTreeNodePtr & node) const
{
if (!getSettings().optimize_functions_to_subcolumns)
return;

auto * function_node = node->as<FunctionNode>();
if (!function_node)
return;

@ -192,11 +194,9 @@ public:
private:
inline void resolveOrdinaryFunctionNode(FunctionNode & function_node, const String & function_name) const
{
auto function = FunctionFactory::instance().get(function_name, context);
auto function = FunctionFactory::instance().get(function_name, getContext());
function_node.resolveAsFunction(function->build(function_node.getArgumentColumns()));
}

ContextPtr & context;
};

}

@ -5,7 +5,7 @@
namespace DB
{

/** Transform functions to subcolumns.
/** Transform functions to subcolumns. Enabled using setting optimize_functions_to_subcolumns.
* It can help to reduce amount of read data.
*
* Example: SELECT tupleElement(column, subcolumn) FROM test_table;

@ -26,16 +26,22 @@ namespace ErrorCodes
namespace
{

class FuseFunctionsVisitor : public InDepthQueryTreeVisitor<FuseFunctionsVisitor>
class FuseFunctionsVisitor : public InDepthQueryTreeVisitorWithContext<FuseFunctionsVisitor>
{
public:
using Base = InDepthQueryTreeVisitorWithContext<FuseFunctionsVisitor>;
using Base::Base;

explicit FuseFunctionsVisitor(const std::unordered_set<String> names_to_collect_)
: names_to_collect(names_to_collect_)
explicit FuseFunctionsVisitor(const std::unordered_set<String> names_to_collect_, ContextPtr context)
: Base(std::move(context))
, names_to_collect(names_to_collect_)
{}

void visitImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_syntax_fuse_functions)
return;

auto * function_node = node->as<FunctionNode>();
if (!function_node || !function_node->isAggregateFunction() || !names_to_collect.contains(function_node->getFunctionName()))
return;

@ -201,7 +207,7 @@ FunctionNodePtr createFusedQuantilesNode(std::vector<QueryTreeNodePtr *> & nodes

void tryFuseSumCountAvg(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
FuseFunctionsVisitor visitor({"sum", "count", "avg"});
FuseFunctionsVisitor visitor({"sum", "count", "avg"}, context);
visitor.visit(query_tree_node);

for (auto & [argument, nodes] : visitor.argument_to_functions_mapping)

@ -220,7 +226,7 @@ void tryFuseSumCountAvg(QueryTreeNodePtr query_tree_node, ContextPtr context)

void tryFuseQuantiles(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
FuseFunctionsVisitor visitor_quantile({"quantile"});
FuseFunctionsVisitor visitor_quantile({"quantile"}, context);
visitor_quantile.visit(query_tree_node);

for (auto & [argument, nodes_set] : visitor_quantile.argument_to_functions_mapping)

@ -12,15 +12,22 @@ namespace DB
namespace
{

class IfChainToMultiIfPassVisitor : public InDepthQueryTreeVisitor<IfChainToMultiIfPassVisitor>
class IfChainToMultiIfPassVisitor : public InDepthQueryTreeVisitorWithContext<IfChainToMultiIfPassVisitor>
{
public:
explicit IfChainToMultiIfPassVisitor(FunctionOverloadResolverPtr multi_if_function_ptr_)
: multi_if_function_ptr(std::move(multi_if_function_ptr_))
using Base = InDepthQueryTreeVisitorWithContext<IfChainToMultiIfPassVisitor>;
using Base::Base;

explicit IfChainToMultiIfPassVisitor(FunctionOverloadResolverPtr multi_if_function_ptr_, ContextPtr context)
: Base(std::move(context))
, multi_if_function_ptr(std::move(multi_if_function_ptr_))
{}

void visitImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_if_chain_to_multiif)
return;

auto * function_node = node->as<FunctionNode>();
if (!function_node || function_node->getFunctionName() != "if" || function_node->getArguments().getNodes().size() != 3)
return;

@ -68,7 +75,8 @@ private:

void IfChainToMultiIfPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
IfChainToMultiIfPassVisitor visitor(FunctionFactory::instance().get("multiIf", context));
auto multi_if_function_ptr = FunctionFactory::instance().get("multiIf", context);
IfChainToMultiIfPassVisitor visitor(std::move(multi_if_function_ptr), std::move(context));
visitor.visit(query_tree_node);
}

@ -107,21 +107,24 @@ void wrapIntoToString(FunctionNode & function_node, QueryTreeNodePtr arg, Contex
assert(isString(function_node.getResultType()));
}

class ConvertStringsToEnumVisitor : public InDepthQueryTreeVisitor<ConvertStringsToEnumVisitor>
class ConvertStringsToEnumVisitor : public InDepthQueryTreeVisitorWithContext<ConvertStringsToEnumVisitor>
{
public:
explicit ConvertStringsToEnumVisitor(ContextPtr context_)
: context(std::move(context_))
{
}
using Base = InDepthQueryTreeVisitorWithContext<ConvertStringsToEnumVisitor>;
using Base::Base;

void visitImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_if_transform_strings_to_enum)
return;

auto * function_node = node->as<FunctionNode>();

if (!function_node)
return;

const auto & context = getContext();

/// to preserve return type (String) of the current function_node, we wrap the newly
/// generated function nodes into toString

@ -198,16 +201,13 @@ public:
return;
}
}

private:
ContextPtr context;
};

}

void IfTransformStringsToEnumPass::run(QueryTreeNodePtr query, ContextPtr context)
{
ConvertStringsToEnumVisitor visitor(context);
ConvertStringsToEnumVisitor visitor(std::move(context));
visitor.visit(query);
}

@ -10,15 +10,22 @@ namespace DB
namespace
{

class MultiIfToIfVisitor : public InDepthQueryTreeVisitor<MultiIfToIfVisitor>
class MultiIfToIfVisitor : public InDepthQueryTreeVisitorWithContext<MultiIfToIfVisitor>
{
public:
explicit MultiIfToIfVisitor(FunctionOverloadResolverPtr if_function_ptr_)
: if_function_ptr(if_function_ptr_)
using Base = InDepthQueryTreeVisitorWithContext<MultiIfToIfVisitor>;
using Base::Base;

explicit MultiIfToIfVisitor(FunctionOverloadResolverPtr if_function_ptr_, ContextPtr context)
: Base(std::move(context))
, if_function_ptr(std::move(if_function_ptr_))
{}

void visitImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_multiif_to_if)
return;

auto * function_node = node->as<FunctionNode>();
if (!function_node || function_node->getFunctionName() != "multiIf")
return;

@ -38,7 +45,8 @@ private:

void MultiIfToIfPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
MultiIfToIfVisitor visitor(FunctionFactory::instance().get("if", context));
auto if_function_ptr = FunctionFactory::instance().get("if", context);
MultiIfToIfVisitor visitor(std::move(if_function_ptr), std::move(context));
visitor.visit(query_tree_node);
}

@ -14,12 +14,17 @@ namespace DB
namespace
{

class NormalizeCountVariantsVisitor : public InDepthQueryTreeVisitor<NormalizeCountVariantsVisitor>
class NormalizeCountVariantsVisitor : public InDepthQueryTreeVisitorWithContext<NormalizeCountVariantsVisitor>
{
public:
explicit NormalizeCountVariantsVisitor(ContextPtr context_) : context(std::move(context_)) {}
using Base = InDepthQueryTreeVisitorWithContext<NormalizeCountVariantsVisitor>;
using Base::Base;

void visitImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_normalize_count_variants)
return;

auto * function_node = node->as<FunctionNode>();
if (!function_node || !function_node->isAggregateFunction() || (function_node->getFunctionName() != "count" && function_node->getFunctionName() != "sum"))
return;

@ -42,15 +47,13 @@ public:
else if (function_node->getFunctionName() == "sum" &&
first_argument_constant_literal.getType() == Field::Types::UInt64 &&
first_argument_constant_literal.get<UInt64>() == 1 &&
!context->getSettingsRef().aggregate_functions_null_for_empty)
!getSettings().aggregate_functions_null_for_empty)
{
resolveAsCountAggregateFunction(*function_node);
function_node->getArguments().getNodes().clear();
}
}
private:
ContextPtr context;

static inline void resolveAsCountAggregateFunction(FunctionNode & function_node)
{
AggregateFunctionProperties properties;

@ -1,26 +1,33 @@
#include <Analyzer/Passes/OptimizeGroupByFunctionKeysPass.h>

#include <algorithm>
#include <queue>

#include <Analyzer/FunctionNode.h>
#include <Analyzer/HashUtils.h>
#include <Analyzer/IQueryTreeNode.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/QueryNode.h>

#include <algorithm>
#include <queue>

namespace DB
{

class OptimizeGroupByFunctionKeysVisitor : public InDepthQueryTreeVisitor<OptimizeGroupByFunctionKeysVisitor>
class OptimizeGroupByFunctionKeysVisitor : public InDepthQueryTreeVisitorWithContext<OptimizeGroupByFunctionKeysVisitor>
{
public:
using Base = InDepthQueryTreeVisitorWithContext<OptimizeGroupByFunctionKeysVisitor>;
using Base::Base;

static bool needChildVisit(QueryTreeNodePtr & /*parent*/, QueryTreeNodePtr & child)
{
return !child->as<FunctionNode>();
}

static void visitImpl(QueryTreeNodePtr & node)
void visitImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_group_by_function_keys)
return;

auto * query = node->as<QueryNode>();
if (!query)
return;
@ -41,6 +48,11 @@ public:
optimizeGroupingSet(group_by);
}
private:
struct NodeWithInfo
{
QueryTreeNodePtr node;
bool parents_are_only_deterministic = false;
};

static bool canBeEliminated(QueryTreeNodePtr & node, const QueryTreeNodePtrWithHashSet & group_by_keys)
{
@ -48,16 +60,17 @@ private:
if (!function || function->getArguments().getNodes().empty())
return false;

QueryTreeNodes candidates;
std::vector<NodeWithInfo> candidates;
auto & function_arguments = function->getArguments().getNodes();
bool is_deterministic = function->getFunction()->isDeterministicInScopeOfQuery();
for (auto it = function_arguments.rbegin(); it != function_arguments.rend(); ++it)
candidates.push_back(*it);
candidates.push_back({ *it, is_deterministic });

// Using DFS we traverse function tree and try to find if it uses other keys as function arguments.
// TODO: Also process CONSTANT here. We can simplify GROUP BY x, x + 1 to GROUP BY x.
while (!candidates.empty())
{
auto candidate = candidates.back();
auto [candidate, parents_are_only_deterministic] = candidates.back();
candidates.pop_back();

bool found = group_by_keys.contains(candidate);
@ -73,8 +86,9 @@ private:

if (!found)
{
bool is_deterministic_function = parents_are_only_deterministic && function->getFunction()->isDeterministicInScopeOfQuery();
for (auto it = arguments.rbegin(); it != arguments.rend(); ++it)
candidates.push_back(*it);
candidates.push_back({ *it, is_deterministic_function });
}
break;
}
@ -82,6 +96,10 @@ private:
if (!found)
return false;
break;
case QueryTreeNodeType::CONSTANT:
if (!parents_are_only_deterministic)
return false;
break;
default:
return false;
}
@ -105,9 +123,10 @@ private:
}
};

void OptimizeGroupByFunctionKeysPass::run(QueryTreeNodePtr query_tree_node, ContextPtr /*context*/)
void OptimizeGroupByFunctionKeysPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
OptimizeGroupByFunctionKeysVisitor().visit(query_tree_node);
OptimizeGroupByFunctionKeysVisitor visitor(std::move(context));
visitor.visit(query_tree_node);
}

}
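
The new NodeWithInfo struct threads one extra bit through the DFS: whether every function between the GROUP BY key candidate and the current argument is deterministic in the scope of the query. Only under that condition may a CONSTANT argument be skipped, which is what enables simplifications like GROUP BY x, x + 1 to GROUP BY x. Below is a self-contained toy model of just the flag propagation; the types and acceptance rule are deliberately simplified illustrations, not the actual pass.

#include <memory>
#include <set>
#include <string>
#include <vector>

/// Toy expression tree: leaves are columns or constants, inner nodes are calls.
struct Node
{
    std::string name;
    bool is_constant = false;
    bool is_deterministic = true; /// false for e.g. rand()
    std::vector<std::shared_ptr<Node>> args;
};

/// Can `root` (a function used as a GROUP BY key) be dropped, given the other keys?
bool canBeEliminated(const std::shared_ptr<Node> & root, const std::set<std::string> & group_by_keys)
{
    struct NodeWithInfo { std::shared_ptr<Node> node; bool parents_are_only_deterministic; };
    std::vector<NodeWithInfo> candidates;
    for (const auto & arg : root->args)
        candidates.push_back({arg, root->is_deterministic});

    while (!candidates.empty())
    {
        auto [node, parents_are_only_deterministic] = candidates.back();
        candidates.pop_back();

        if (group_by_keys.count(node->name))
            continue; /// the argument is itself a GROUP BY key

        if (node->is_constant)
        {
            if (!parents_are_only_deterministic)
                return false; /// constant under a non-deterministic call: keep the key
            continue;
        }

        if (node->args.empty())
            return false; /// a plain column that is not a key

        /// Descend, tightening the determinism flag along the path.
        bool flag = parents_are_only_deterministic && node->is_deterministic;
        for (const auto & arg : node->args)
            candidates.push_back({arg, flag});
    }
    return true;
}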
@ -1,11 +1,13 @@
#include <Analyzer/Passes/OptimizeRedundantFunctionsInOrderByPass.h>

#include <Functions/IFunction.h>

#include <Analyzer/ColumnNode.h>
#include <Analyzer/FunctionNode.h>
#include <Analyzer/HashUtils.h>
#include <Analyzer/InDepthQueryTreeVisitor.h>
#include <Analyzer/QueryNode.h>
#include <Analyzer/SortNode.h>
#include <Functions/IFunction.h>

namespace DB
{
@ -13,9 +15,12 @@ namespace DB
namespace
{

class OptimizeRedundantFunctionsInOrderByVisitor : public InDepthQueryTreeVisitor<OptimizeRedundantFunctionsInOrderByVisitor>
class OptimizeRedundantFunctionsInOrderByVisitor : public InDepthQueryTreeVisitorWithContext<OptimizeRedundantFunctionsInOrderByVisitor>
{
public:
using Base = InDepthQueryTreeVisitorWithContext<OptimizeRedundantFunctionsInOrderByVisitor>;
using Base::Base;

static bool needChildVisit(QueryTreeNodePtr & node, QueryTreeNodePtr & /*parent*/)
{
if (node->as<FunctionNode>())
@ -25,6 +30,9 @@ public:

void visitImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_redundant_functions_in_order_by)
return;

auto * query = node->as<QueryNode>();
if (!query)
return;
@ -116,9 +124,10 @@ private:

}

void OptimizeRedundantFunctionsInOrderByPass::run(QueryTreeNodePtr query_tree_node, ContextPtr /*context*/)
void OptimizeRedundantFunctionsInOrderByPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
OptimizeRedundantFunctionsInOrderByVisitor().visit(query_tree_node);
OptimizeRedundantFunctionsInOrderByVisitor visitor(std::move(context));
visitor.visit(query_tree_node);
}

}
@ -1943,7 +1943,7 @@ void QueryAnalyzer::validateTableExpressionModifiers(const QueryTreeNodePtr & ta

if (!table_node && !table_function_node && !query_node && !union_node)
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Unexpected table expression. Expected table, table function, query or union node. Actual {}",
"Unexpected table expression. Expected table, table function, query or union node. Table node: {}, scope node: {}",
table_expression_node->formatASTForErrorMessage(),
scope.scope_node->formatASTForErrorMessage());

@ -4366,12 +4366,9 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
{
if (!AggregateFunctionFactory::instance().isAggregateFunctionName(function_name))
{
std::string error_message = fmt::format("Aggregate function with name '{}' does not exists. In scope {}",
function_name,
scope.scope_node->formatASTForErrorMessage());

AggregateFunctionFactory::instance().appendHintsMessage(error_message, function_name);
throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION, error_message);
throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION, "Aggregate function with name '{}' does not exists. In scope {}{}",
function_name, scope.scope_node->formatASTForErrorMessage(),
getHintsErrorMessageSuffix(AggregateFunctionFactory::instance().getHints(function_name)));
}

if (!function_lambda_arguments_indexes.empty())
@ -5726,7 +5723,7 @@ void QueryAnalyzer::resolveQueryJoinTreeNode(QueryTreeNodePtr & join_tree_node,
case QueryTreeNodeType::IDENTIFIER:
{
throw Exception(ErrorCodes::LOGICAL_ERROR,
"Identifiers in FROM section must be already resolved. In scope {}",
"Identifiers in FROM section must be already resolved. Node {}, scope {}",
join_tree_node->formatASTForErrorMessage(),
scope.scope_node->formatASTForErrorMessage());
}
@ -20,15 +20,17 @@ namespace DB
namespace
{

class SumIfToCountIfVisitor : public InDepthQueryTreeVisitor<SumIfToCountIfVisitor>
class SumIfToCountIfVisitor : public InDepthQueryTreeVisitorWithContext<SumIfToCountIfVisitor>
{
public:
explicit SumIfToCountIfVisitor(ContextPtr & context_)
: context(context_)
{}
using Base = InDepthQueryTreeVisitorWithContext<SumIfToCountIfVisitor>;
using Base::Base;

void visitImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_rewrite_sum_if_to_count_if)
return;

auto * function_node = node->as<FunctionNode>();
if (!function_node || !function_node->isAggregateFunction())
return;
@ -56,7 +58,7 @@ public:
if (!isInt64OrUInt64FieldType(constant_value_literal.getType()))
return;

if (constant_value_literal.get<UInt64>() != 1 || context->getSettingsRef().aggregate_functions_null_for_empty)
if (constant_value_literal.get<UInt64>() != 1 || getSettings().aggregate_functions_null_for_empty)
return;

function_node_arguments_nodes[0] = std::move(function_node_arguments_nodes[1]);
@ -122,7 +124,7 @@ public:
auto & not_function_arguments = not_function->getArguments().getNodes();
not_function_arguments.push_back(nested_if_function_arguments_nodes[0]);

not_function->resolveAsFunction(FunctionFactory::instance().get("not", context)->build(not_function->getArgumentColumns()));
not_function->resolveAsFunction(FunctionFactory::instance().get("not", getContext())->build(not_function->getArgumentColumns()));

function_node_arguments_nodes[0] = std::move(not_function);
function_node_arguments_nodes.resize(1);
@ -143,8 +145,6 @@ private:

function_node.resolveAsAggregateFunction(std::move(aggregate_function));
}

ContextPtr & context;
};

}
@ -25,11 +25,17 @@ bool isUniqFunction(const String & function_name)
function_name == "uniqTheta";
}

class UniqInjectiveFunctionsEliminationVisitor : public InDepthQueryTreeVisitor<UniqInjectiveFunctionsEliminationVisitor>
class UniqInjectiveFunctionsEliminationVisitor : public InDepthQueryTreeVisitorWithContext<UniqInjectiveFunctionsEliminationVisitor>
{
public:
static void visitImpl(QueryTreeNodePtr & node)
using Base = InDepthQueryTreeVisitorWithContext<UniqInjectiveFunctionsEliminationVisitor>;
using Base::Base;

void visitImpl(QueryTreeNodePtr & node)
{
if (!getSettings().optimize_injective_functions_inside_uniq)
return;

auto * function_node = node->as<FunctionNode>();
if (!function_node || !function_node->isAggregateFunction() || !isUniqFunction(function_node->getFunctionName()))
return;
@ -81,9 +87,9 @@ public:

}

void UniqInjectiveFunctionsEliminationPass::run(QueryTreeNodePtr query_tree_node, ContextPtr)
void UniqInjectiveFunctionsEliminationPass::run(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
UniqInjectiveFunctionsEliminationVisitor visitor;
UniqInjectiveFunctionsEliminationVisitor visitor(std::move(context));
visitor.visit(query_tree_node);
}
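
Every hunk above follows the same migration: the visitors now derive from InDepthQueryTreeVisitorWithContext, inherit its ContextPtr constructor, and check their controlling setting at the top of visitImpl(); correspondingly (see the QueryTreePassManager hunk below) the passes are registered unconditionally. A schematic sketch of the shape, using optimize_multiif_to_if as the example setting; the class and function names here are illustrative, not actual source:

#include <Analyzer/InDepthQueryTreeVisitor.h>

namespace DB
{

/// Schematic visitor: the Context travels in the base class, and the settings
/// guard lives inside visitImpl() rather than at pass-registration time.
class ExampleVisitor : public InDepthQueryTreeVisitorWithContext<ExampleVisitor>
{
public:
    using Base = InDepthQueryTreeVisitorWithContext<ExampleVisitor>;
    using Base::Base; /// inherits the explicit ContextPtr constructor

    void visitImpl(QueryTreeNodePtr & node)
    {
        if (!getSettings().optimize_multiif_to_if) /// per-pass setting guard
            return;
        /// ... match and rewrite `node` here ...
    }
};

void runExamplePass(QueryTreeNodePtr query_tree_node, ContextPtr context)
{
    ExampleVisitor visitor(std::move(context)); /// Context is moved in, as in the hunks above
    visitor.visit(query_tree_node);
}

}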
@ -1,5 +1,7 @@
#include <Analyzer/QueryNode.h>

#include <fmt/core.h>

#include <Common/SipHash.h>
#include <Common/FieldVisitorToString.h>

@ -17,7 +19,6 @@
#include <Parsers/ASTSetQuery.h>

#include <Analyzer/Utils.h>
#include <fmt/core.h>

namespace DB
{
@ -36,7 +37,7 @@ QueryNode::QueryNode(ContextMutablePtr context_, SettingsChanges settings_change
}

QueryNode::QueryNode(ContextMutablePtr context_)
: QueryNode(context_, {} /*settings_changes*/)
: QueryNode(std::move(context_), {} /*settings_changes*/)
{}

void QueryNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const
@ -185,10 +186,7 @@ void QueryNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, s
{
buffer << '\n' << std::string(indent + 2, ' ') << "SETTINGS";
for (const auto & change : settings_changes)
{
buffer << fmt::format(" {}={}", change.name, toString(change.value));
}
buffer << '\n';
}
}
@ -1,6 +1,7 @@
#include <memory>
#include <Analyzer/QueryTreePassManager.h>

#include <memory>

#include <Common/Exception.h>

#include <IO/WriteHelpers.h>
@ -133,7 +134,6 @@ private:
* TODO: Support setting optimize_aggregators_of_group_by_keys.
* TODO: Support setting optimize_duplicate_order_by_and_distinct.
* TODO: Support setting optimize_monotonous_functions_in_order_by.
* TODO: Support settings.optimize_or_like_chain.
* TODO: Add optimizations based on function semantics. Example: SELECT * FROM test_table WHERE id != id. (id is not nullable column).
*/

@ -210,53 +210,31 @@ void QueryTreePassManager::dump(WriteBuffer & buffer, size_t up_to_pass_index)

void addQueryTreePasses(QueryTreePassManager & manager)
{
auto context = manager.getContext();
const auto & settings = context->getSettingsRef();

manager.addPass(std::make_unique<QueryAnalysisPass>());
manager.addPass(std::make_unique<FunctionToSubcolumnsPass>());

if (settings.optimize_functions_to_subcolumns)
manager.addPass(std::make_unique<FunctionToSubcolumnsPass>());

if (settings.count_distinct_optimization)
manager.addPass(std::make_unique<CountDistinctPass>());

if (settings.optimize_rewrite_sum_if_to_count_if)
manager.addPass(std::make_unique<SumIfToCountIfPass>());

if (settings.optimize_normalize_count_variants)
manager.addPass(std::make_unique<NormalizeCountVariantsPass>());
manager.addPass(std::make_unique<CountDistinctPass>());
manager.addPass(std::make_unique<SumIfToCountIfPass>());
manager.addPass(std::make_unique<NormalizeCountVariantsPass>());

manager.addPass(std::make_unique<CustomizeFunctionsPass>());

if (settings.optimize_arithmetic_operations_in_aggregate_functions)
manager.addPass(std::make_unique<AggregateFunctionsArithmericOperationsPass>());

if (settings.optimize_injective_functions_inside_uniq)
manager.addPass(std::make_unique<UniqInjectiveFunctionsEliminationPass>());

if (settings.optimize_group_by_function_keys)
manager.addPass(std::make_unique<OptimizeGroupByFunctionKeysPass>());

if (settings.optimize_multiif_to_if)
manager.addPass(std::make_unique<MultiIfToIfPass>());
manager.addPass(std::make_unique<AggregateFunctionsArithmericOperationsPass>());
manager.addPass(std::make_unique<UniqInjectiveFunctionsEliminationPass>());
manager.addPass(std::make_unique<OptimizeGroupByFunctionKeysPass>());

manager.addPass(std::make_unique<MultiIfToIfPass>());
manager.addPass(std::make_unique<IfConstantConditionPass>());
manager.addPass(std::make_unique<IfChainToMultiIfPass>());

if (settings.optimize_if_chain_to_multiif)
manager.addPass(std::make_unique<IfChainToMultiIfPass>());

if (settings.optimize_redundant_functions_in_order_by)
manager.addPass(std::make_unique<OptimizeRedundantFunctionsInOrderByPass>());
manager.addPass(std::make_unique<OptimizeRedundantFunctionsInOrderByPass>());

manager.addPass(std::make_unique<OrderByTupleEliminationPass>());
manager.addPass(std::make_unique<OrderByLimitByDuplicateEliminationPass>());

if (settings.optimize_syntax_fuse_functions)
manager.addPass(std::make_unique<FuseFunctionsPass>());
manager.addPass(std::make_unique<FuseFunctionsPass>());

if (settings.optimize_if_transform_strings_to_enum)
manager.addPass(std::make_unique<IfTransformStringsToEnumPass>());
manager.addPass(std::make_unique<IfTransformStringsToEnumPass>());

manager.addPass(std::make_unique<ConvertOrLikeChainPass>());
@ -79,7 +79,7 @@ namespace
request.SetMaxKeys(1);
auto outcome = client.ListObjects(request);
if (!outcome.IsSuccess())
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
throw Exception::createDeprecated(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
return outcome.GetResult().GetContents();
}

@ -233,7 +233,7 @@ void BackupWriterS3::removeFile(const String & file_name)
request.SetKey(fs::path(s3_uri.key) / file_name);
auto outcome = client->DeleteObject(request);
if (!outcome.IsSuccess() && !isNotFoundError(outcome.GetError().GetErrorType()))
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
throw Exception::createDeprecated(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
}

void BackupWriterS3::removeFiles(const Strings & file_names)
@ -291,7 +291,7 @@ void BackupWriterS3::removeFilesBatch(const Strings & file_names)

auto outcome = client->DeleteObjects(request);
if (!outcome.IsSuccess() && !isNotFoundError(outcome.GetError().GetErrorType()))
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
throw Exception::createDeprecated(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
}
}
@ -93,7 +93,7 @@ namespace
catch (...)
{
if (coordination)
coordination->setError(current_host, Exception{getCurrentExceptionCode(), getCurrentExceptionMessage(true, true)});
coordination->setError(current_host, Exception(getCurrentExceptionMessageAndPattern(true, true), getCurrentExceptionCode()));
}
}

@ -451,7 +451,7 @@ void ClientBase::onData(Block & block, ASTPtr parsed_query)
catch (const Exception &)
{
/// Catch client errors like NO_ROW_DELIMITER
throw LocalFormatError(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
throw LocalFormatError(getCurrentExceptionMessageAndPattern(print_stack_trace), getCurrentExceptionCode());
}

/// Received data block is immediately displayed to the user.
@ -629,7 +629,7 @@ try
}
catch (...)
{
throw LocalFormatError(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
throw LocalFormatError(getCurrentExceptionMessageAndPattern(print_stack_trace), getCurrentExceptionCode());
}

@ -1897,7 +1897,7 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text)
{
// Surprisingly, this is a client error. A server error would
// have been reported without throwing (see onReceiveSeverException()).
client_exception = std::make_unique<Exception>(getCurrentExceptionMessage(print_stack_trace), getCurrentExceptionCode());
client_exception = std::make_unique<Exception>(getCurrentExceptionMessageAndPattern(print_stack_trace), getCurrentExceptionCode());
have_error = true;
}

@ -187,7 +187,7 @@ void LocalConnection::sendQuery(
catch (...)
{
state->io.onException();
state->exception = std::make_unique<Exception>("Unknown exception", ErrorCodes::UNKNOWN_EXCEPTION);
state->exception = std::make_unique<Exception>(ErrorCodes::UNKNOWN_EXCEPTION, "Unknown exception");
}
}

@ -291,7 +291,7 @@ bool LocalConnection::poll(size_t)
catch (...)
{
state->io.onException();
state->exception = std::make_unique<Exception>(ErrorCodes::UNKNOWN_EXCEPTION, "Unknown exception");
}
}
@ -83,7 +83,7 @@ template <is_decimal T>
UInt64 ColumnDecimal<T>::get64([[maybe_unused]] size_t n) const
{
if constexpr (sizeof(T) > sizeof(UInt64))
throw Exception(String("Method get64 is not supported for ") + getFamilyName(), ErrorCodes::NOT_IMPLEMENTED);
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method get64 is not supported for {}", getFamilyName());
else
return static_cast<NativeT>(data[n]);
}

@ -35,7 +35,7 @@ ColumnNullable::ColumnNullable(MutableColumnPtr && nested_column_, MutableColumn
nested_column = getNestedColumn().convertToFullColumnIfConst();

if (!getNestedColumn().canBeInsideNullable())
throw Exception{getNestedColumn().getName() + " cannot be inside Nullable column", ErrorCodes::ILLEGAL_COLUMN};
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "{} cannot be inside Nullable column", getNestedColumn().getName());

if (isColumnConst(*null_map))
throw Exception(ErrorCodes::ILLEGAL_COLUMN, "ColumnNullable cannot have constant null map");
@ -207,8 +207,9 @@ private:
if (size >= MMAP_THRESHOLD)
{
if (alignment > mmap_min_alignment)
throw DB::Exception(fmt::format("Too large alignment {}: more than page size when allocating {}.",
ReadableSize(alignment), ReadableSize(size)), DB::ErrorCodes::BAD_ARGUMENTS);
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS,
"Too large alignment {}: more than page size when allocating {}.",
ReadableSize(alignment), ReadableSize(size));

buf = mmap(getMmapHint(), size, PROT_READ | PROT_WRITE,
mmap_flags, -1, 0);
@ -66,6 +66,9 @@ AsynchronousMetrics::AsynchronousMetrics(
openFileIfExists("/proc/uptime", uptime);
openFileIfExists("/proc/net/dev", net_dev);

openFileIfExists("/sys/fs/cgroup/memory/memory.limit_in_bytes", cgroupmem_limit_in_bytes);
openFileIfExists("/sys/fs/cgroup/memory/memory.usage_in_bytes", cgroupmem_usage_in_bytes);

openSensors();
openBlockDevices();
openEDAC();
@ -879,6 +882,35 @@ void AsynchronousMetrics::update(TimePoint update_time)
}
}

if (cgroupmem_limit_in_bytes && cgroupmem_usage_in_bytes)
{
try {
cgroupmem_limit_in_bytes->rewind();
cgroupmem_usage_in_bytes->rewind();

uint64_t cgroup_mem_limit_in_bytes = 0;
uint64_t cgroup_mem_usage_in_bytes = 0;

readText(cgroup_mem_limit_in_bytes, *cgroupmem_limit_in_bytes);
readText(cgroup_mem_usage_in_bytes, *cgroupmem_usage_in_bytes);

if (cgroup_mem_limit_in_bytes && cgroup_mem_usage_in_bytes)
{
new_values["CgroupMemoryTotal"] = { cgroup_mem_limit_in_bytes, "The total amount of memory in cgroup, in bytes." };
new_values["CgroupMemoryUsed"] = { cgroup_mem_usage_in_bytes, "The amount of memory used in cgroup, in bytes." };
}
else
{
LOG_DEBUG(log, "Cannot read statistics about the cgroup memory total and used. Total got '{}', Used got '{}'.",
cgroup_mem_limit_in_bytes, cgroup_mem_usage_in_bytes);
}
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}

if (meminfo)
{
try

@ -108,6 +108,9 @@ private:
std::optional<ReadBufferFromFilePRead> uptime;
std::optional<ReadBufferFromFilePRead> net_dev;

std::optional<ReadBufferFromFilePRead> cgroupmem_limit_in_bytes;
std::optional<ReadBufferFromFilePRead> cgroupmem_usage_in_bytes;

std::vector<std::unique_ptr<ReadBufferFromFilePRead>> thermal;

std::unordered_map<String /* device name */,
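
The new metrics simply read two integers from cgroup v1 memory files. A standalone sketch of the same idea; note the real implementation keeps both files open as ReadBufferFromFilePRead members and rewinds them on each update, which this sketch simplifies by reopening:

#include <cstdint>
#include <fstream>
#include <iostream>
#include <optional>
#include <string>

/// Read a single unsigned integer from a file, or nothing on failure.
std::optional<uint64_t> readUInt64FromFile(const std::string & path)
{
    std::ifstream in(path);
    uint64_t value = 0;
    if (in >> value)
        return value;
    return std::nullopt;
}

int main()
{
    auto limit = readUInt64FromFile("/sys/fs/cgroup/memory/memory.limit_in_bytes");
    auto usage = readUInt64FromFile("/sys/fs/cgroup/memory/memory.usage_in_bytes");

    /// Report only when both values are present and non-zero, as above.
    if (limit && usage && *limit && *usage)
        std::cout << "CgroupMemoryTotal=" << *limit << " CgroupMemoryUsed=" << *usage << '\n';
    else
        std::cout << "cgroup memory statistics not available\n";
}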
@ -140,7 +140,7 @@ void CancelToken::raise()
{
std::unique_lock lock(signal_mutex);
if (exception_code != 0)
throw DB::Exception(
throw DB::Exception::createRuntime(
std::exchange(exception_code, 0),
std::exchange(exception_message, {}));
else
@ -88,7 +88,7 @@ public:
{
/// A more understandable error message.
if (e.code() == DB::ErrorCodes::CANNOT_READ_ALL_DATA || e.code() == DB::ErrorCodes::ATTEMPT_TO_READ_AFTER_EOF)
throw DB::ParsingException("File " + path + " is empty. You must fill it manually with appropriate value.", e.code());
throw DB::ParsingException(e.code(), "File {} is empty. You must fill it manually with appropriate value.", path);
else
throw;
}
@ -39,6 +39,15 @@ enum class WeekModeFlag : UInt8
};
using YearWeek = std::pair<UInt16, UInt8>;

/// Modes for toDayOfWeek() function.
enum class WeekDayMode
{
WeekStartsMonday1 = 0,
WeekStartsMonday0 = 1,
WeekStartsSunday0 = 2,
WeekStartsSunday1 = 3
};

/** Lookup table to conversion of time to date, and to month / year / day of week / day of month and so on.
* First time was implemented for OLAPServer, that needed to do billions of such transformations.
*/
@ -619,9 +628,28 @@ public:
template <typename DateOrTime>
inline Int16 toYear(DateOrTime v) const { return lut[toLUTIndex(v)].year; }

/// 1-based, starts on Monday
template <typename DateOrTime>
inline UInt8 toDayOfWeek(DateOrTime v) const { return lut[toLUTIndex(v)].day_of_week; }

template <typename DateOrTime>
inline UInt8 toDayOfWeek(DateOrTime v, UInt8 week_day_mode) const
{
WeekDayMode mode = check_week_day_mode(week_day_mode);

UInt8 res = toDayOfWeek(v);
using enum WeekDayMode;
bool start_from_sunday = (mode == WeekStartsSunday0 || mode == WeekStartsSunday1);
bool zero_based = (mode == WeekStartsMonday0 || mode == WeekStartsSunday0);

if (start_from_sunday)
res = res % 7 + 1;
if (zero_based)
--res;

return res;
}

template <typename DateOrTime>
inline UInt8 toDayOfMonth(DateOrTime v) const { return lut[toLUTIndex(v)].day_of_month; }

@ -844,6 +872,12 @@ public:
return week_format;
}

/// Check and change mode to effective.
inline WeekDayMode check_week_day_mode(UInt8 mode) const /// NOLINT
{
return static_cast<WeekDayMode>(mode & 3);
}

/** Calculate weekday from d.
* Returns 0 for monday, 1 for tuesday...
*/
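
The four modes pack into the two low bits (check_week_day_mode masks with 3). A standalone restatement of the arithmetic above, with the resulting conventions spelled out as assertions:

#include <cassert>
#include <cstdint>

/// `monday1` is the existing 1-based Monday-first weekday (Monday = 1 ... Sunday = 7).
uint8_t applyWeekDayMode(uint8_t monday1, uint8_t week_day_mode)
{
    uint8_t mode = week_day_mode & 3;   /// check_week_day_mode()
    bool start_from_sunday = (mode == 2 || mode == 3);
    bool zero_based = (mode == 1 || mode == 2);

    uint8_t res = monday1;
    if (start_from_sunday)
        res = res % 7 + 1;  /// Sunday (7) wraps to 1, Monday (1) becomes 2, ...
    if (zero_based)
        --res;
    return res;
}

int main()
{
    /// For a Sunday (Monday-first value 7):
    assert(applyWeekDayMode(7, 0) == 7); /// WeekStartsMonday1: Mon=1 .. Sun=7
    assert(applyWeekDayMode(7, 1) == 6); /// WeekStartsMonday0: Mon=0 .. Sun=6
    assert(applyWeekDayMode(7, 2) == 0); /// WeekStartsSunday0: Sun=0 .. Sat=6
    assert(applyWeekDayMode(7, 3) == 1); /// WeekStartsSunday1: Sun=1 .. Sat=7
}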
@ -252,7 +252,7 @@ uint64_t readOffset(std::string_view & sp, bool is64_bit)
// Read "len" bytes
std::string_view readBytes(std::string_view & sp, uint64_t len)
{
SAFE_CHECK(len <= sp.size(), "invalid string length: " + std::to_string(len) + " vs. " + std::to_string(sp.size()));
SAFE_CHECK(len <= sp.size(), "invalid string length: {} vs. {}", len, sp.size());
std::string_view ret(sp.data(), len);
sp.remove_prefix(len);
return ret;
@ -953,7 +953,7 @@ bool Dwarf::findDebugInfoOffset(uintptr_t address, std::string_view aranges, uin

Dwarf::Die Dwarf::getDieAtOffset(const CompilationUnit & cu, uint64_t offset) const
{
SAFE_CHECK(offset < info_.size(), fmt::format("unexpected offset {}, info size {}", offset, info_.size()));
SAFE_CHECK(offset < info_.size(), "unexpected offset {}, info size {}", offset, info_.size());
Die die;
std::string_view sp{info_.data() + offset, cu.offset + cu.size - offset};
die.offset = offset;
@ -188,7 +188,7 @@ static void tryLogCurrentExceptionImpl(Poco::Logger * logger, const std::string
{
PreformattedMessage message = getCurrentExceptionMessageAndPattern(true);
if (!start_of_message.empty())
message.message = fmt::format("{}: {}", start_of_message, message.message);
message.text = fmt::format("{}: {}", start_of_message, message.text);

LOG_ERROR(logger, message);
}
@ -339,7 +339,7 @@ std::string getExtraExceptionInfo(const std::exception & e)

std::string getCurrentExceptionMessage(bool with_stacktrace, bool check_embedded_stacktrace /*= false*/, bool with_extra_info /*= true*/)
{
return getCurrentExceptionMessageAndPattern(with_stacktrace, check_embedded_stacktrace, with_extra_info).message;
return getCurrentExceptionMessageAndPattern(with_stacktrace, check_embedded_stacktrace, with_extra_info).text;
}

PreformattedMessage getCurrentExceptionMessageAndPattern(bool with_stacktrace, bool check_embedded_stacktrace /*= false*/, bool with_extra_info /*= true*/)
@ -481,7 +481,7 @@ void tryLogException(std::exception_ptr e, Poco::Logger * logger, const std::str

std::string getExceptionMessage(const Exception & e, bool with_stacktrace, bool check_embedded_stacktrace)
{
return getExceptionMessageAndPattern(e, with_stacktrace, check_embedded_stacktrace).message;
return getExceptionMessageAndPattern(e, with_stacktrace, check_embedded_stacktrace).text;
}

PreformattedMessage getExceptionMessageAndPattern(const Exception & e, bool with_stacktrace, bool check_embedded_stacktrace)
@ -577,10 +577,6 @@ ParsingException::ParsingException(const std::string & msg, int code)
: Exception(msg, code)
{
}
ParsingException::ParsingException(int code, const std::string & message)
: Exception(message, code)
{
}

/// We use additional field formatted_message_ to make this method const.
std::string ParsingException::displayText() const
@ -16,26 +16,6 @@

namespace Poco { class Logger; }

/// Extract format string from a string literal and constructs consteval fmt::format_string
template <typename... Args>
struct FormatStringHelperImpl
{
std::string_view message_format_string;
fmt::format_string<Args...> fmt_str;
template<typename T>
consteval FormatStringHelperImpl(T && str) : message_format_string(tryGetStaticFormatString(str)), fmt_str(std::forward<T>(str)) {}
template<typename T>
FormatStringHelperImpl(fmt::basic_runtime<T> && str) : message_format_string(), fmt_str(std::forward<fmt::basic_runtime<T>>(str)) {}

PreformattedMessage format(Args && ...args) const
{
return PreformattedMessage{fmt::format(fmt_str, std::forward<Args...>(args)...), message_format_string};
}
};

template <typename... Args>
using FormatStringHelper = FormatStringHelperImpl<std::type_identity_t<Args>...>;

namespace DB
{

@ -48,6 +28,17 @@ public:

Exception() = default;

Exception(const PreformattedMessage & msg, int code): Exception(msg.text, code)
{
message_format_string = msg.format_string;
}

Exception(PreformattedMessage && msg, int code): Exception(std::move(msg.text), code)
{
message_format_string = msg.format_string;
}

protected:
// used to remove the sensitive information from exceptions if query_masking_rules is configured
struct MessageMasked
{
@ -62,11 +53,16 @@ public:
// delegating constructor to mask sensitive information from the message
Exception(const std::string & msg, int code, bool remote_ = false): Exception(MessageMasked(msg), code, remote_) {}
Exception(std::string && msg, int code, bool remote_ = false): Exception(MessageMasked(std::move(msg)), code, remote_) {}
Exception(PreformattedMessage && msg, int code): Exception(std::move(msg.message), code)

public:
/// This creator is for exceptions that should format a message using fmt::format from the variadic ctor Exception(code, fmt, ...),
/// but were not rewritten yet. It will be removed.
static Exception createDeprecated(const std::string & msg, int code, bool remote_ = false)
{
message_format_string = msg.format_string;
return Exception(msg, code, remote_);
}

/// Message must be a compile-time constant
template<typename T, typename = std::enable_if_t<std::is_convertible_v<T, String>>>
Exception(int code, T && message)
: Exception(message, code)
@ -74,9 +70,11 @@ public:
message_format_string = tryGetStaticFormatString(message);
}

template<> Exception(int code, const String & message) : Exception(message, code) {}
template<> Exception(int code, String & message) : Exception(message, code) {}
template<> Exception(int code, String && message) : Exception(std::move(message), code) {}
/// These creators are for messages that were received by network or generated by a third-party library in runtime.
/// Please use a constructor for all other cases.
static Exception createRuntime(int code, const String & message) { return Exception(message, code); }
static Exception createRuntime(int code, String & message) { return Exception(message, code); }
static Exception createRuntime(int code, String && message) { return Exception(std::move(message), code); }

// Format message with fmt::format, like the logging functions.
template <typename... Args>
@ -167,11 +165,9 @@ private:
/// more convenient calculation of problem line number.
class ParsingException : public Exception
{
ParsingException(const std::string & msg, int code);
public:
ParsingException();
ParsingException(const std::string & msg, int code);
ParsingException(int code, const std::string & message);
ParsingException(int code, std::string && message) : Exception(message, code) {}

// Format message with fmt::format, like the logging functions.
template <typename... Args>
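
The header above now distinguishes three ways to build an Exception. Illustrative call sites follow; the error codes and variables are stand-ins drawn from elsewhere in this diff, and in real code each translation unit declares the error codes it uses:

#include <Common/Exception.h>

/// 1. Preferred: a compile-time format string, argument count checked at compile time.
void throwStatic(const std::string & name)
{
    throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Unknown setting: {}", name);
}

/// 2. For messages produced at runtime (network, third-party libraries):
/// no format string exists, and createRuntime() says so explicitly.
void throwRuntime(std::string message_from_library)
{
    throw DB::Exception::createRuntime(DB::ErrorCodes::OPENSSL_ERROR, std::move(message_from_library));
}

/// 3. Old call sites that still build messages by concatenation; marked for removal.
void throwDeprecated(const std::string & concatenated_message)
{
    throw DB::Exception::createDeprecated(concatenated_message, DB::ErrorCodes::CHECKSUM_DOESNT_MATCH);
}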
7
src/Common/LoggingFormatStringHelpers.cpp
Normal file
@ -0,0 +1,7 @@
#include <Common/LoggingFormatStringHelpers.h>

[[noreturn]] void functionThatFailsCompilationOfConstevalFunctions(const char * error)
{
throw std::runtime_error(error);
}
@ -2,24 +2,70 @@
#include <base/defines.h>
#include <fmt/format.h>

struct PreformattedMessage;
consteval void formatStringCheckArgsNumImpl(std::string_view str, size_t nargs);
template <typename T> constexpr std::string_view tryGetStaticFormatString(T && x);

/// Extract format string from a string literal and constructs consteval fmt::format_string
template <typename... Args>
struct FormatStringHelperImpl
{
std::string_view message_format_string;
fmt::format_string<Args...> fmt_str;
template<typename T>
consteval FormatStringHelperImpl(T && str) : message_format_string(tryGetStaticFormatString(str)), fmt_str(std::forward<T>(str))
{
formatStringCheckArgsNumImpl(message_format_string, sizeof...(Args));
}
template<typename T>
FormatStringHelperImpl(fmt::basic_runtime<T> && str) : message_format_string(), fmt_str(std::forward<fmt::basic_runtime<T>>(str)) {}

PreformattedMessage format(Args && ...args) const;
};

template <typename... Args>
using FormatStringHelper = FormatStringHelperImpl<std::type_identity_t<Args>...>;

/// Saves a format string for already formatted message
struct PreformattedMessage
{
String message;
std::string text;
std::string_view format_string;

operator const String & () const { return message; }
operator String () && { return std::move(message); }
template <typename... Args>
static PreformattedMessage create(FormatStringHelper<Args...> fmt, Args &&... args);

operator const std::string & () const { return text; }
operator std::string () && { return std::move(text); }
operator fmt::format_string<> () const { UNREACHABLE(); }
};

template <typename... Args>
PreformattedMessage FormatStringHelperImpl<Args...>::format(Args && ...args) const
{
return PreformattedMessage{fmt::format(fmt_str, std::forward<Args>(args)...), message_format_string};
}

template <typename... Args>
PreformattedMessage PreformattedMessage::create(FormatStringHelper<Args...> fmt, Args && ...args)
{
return fmt.format(std::forward<Args>(args)...);
}

template<typename T> struct is_fmt_runtime : std::false_type {};
template<typename T> struct is_fmt_runtime<fmt::basic_runtime<T>> : std::true_type {};

template <typename T> constexpr std::string_view tryGetStaticFormatString(T && x)
{
/// Failure of this asserting indicates that something went wrong during type deduction.
/// For example, a string literal was implicitly converted to std::string. It should not happen.
/// Format string for an exception or log message must be a string literal (compile-time constant).
/// Failure of this assertion may indicate one of the following issues:
/// - A message was already formatted into std::string before passing to Exception(...) or LOG_XXXXX(...).
/// Please use variadic constructor of Exception.
/// Consider using PreformattedMessage or LogToStr if you want to avoid double formatting and/or copy-paste.
/// - A string literal was converted to std::string (or const char *).
/// - Use Exception::createRuntime or fmt::runtime if there's no format string
/// and a message is generated in runtime by a third-party library
/// or deserialized from somewhere.
static_assert(!std::is_same_v<std::string, std::decay_t<T>>);

if constexpr (is_fmt_runtime<std::decay_t<T>>::value)
@ -53,3 +99,60 @@ template <typename T, typename... Ts> constexpr auto firstArg(T && x, Ts &&...)
/// For implicit conversion of fmt::basic_runtime<> to char* for std::string ctor
template <typename T, typename... Ts> constexpr auto firstArg(fmt::basic_runtime<T> && data, Ts &&...) { return data.str.data(); }

consteval ssize_t formatStringCountArgsNum(const char * const str, size_t len)
{
/// It does not count named args, but we don't use them
size_t cnt = 0;
size_t i = 0;
while (i + 1 < len)
{
if (str[i] == '{' && str[i + 1] == '}')
{
i += 2;
cnt += 1;
}
else if (str[i] == '{')
{
/// Ignore checks for complex formatting like "{:.3f}"
return -1;
}
else
{
i += 1;
}
}
return cnt;
}

[[noreturn]] void functionThatFailsCompilationOfConstevalFunctions(const char * error);

/// fmt::format checks that there are enough arguments, but ignores extra arguments (e.g. fmt::format("{}", 1, 2) compiles)
/// This function will fail to compile if the number of "{}" substitutions does not exactly match
consteval void formatStringCheckArgsNumImpl(std::string_view str, size_t nargs)
{
if (str.empty())
return;
ssize_t cnt = formatStringCountArgsNum(str.data(), str.size());
if (0 <= cnt && cnt != nargs)
functionThatFailsCompilationOfConstevalFunctions("unexpected number of arguments in a format string");
}

template <typename... Args>
struct CheckArgsNumHelperImpl
{
template<typename T>
consteval CheckArgsNumHelperImpl(T && str)
{
formatStringCheckArgsNumImpl(tryGetStaticFormatString(str), sizeof...(Args));
}

/// No checks for fmt::runtime and PreformattedMessage
template<typename T> CheckArgsNumHelperImpl(fmt::basic_runtime<T> &&) {}
template<> CheckArgsNumHelperImpl(PreformattedMessage &) {}
template<> CheckArgsNumHelperImpl(const PreformattedMessage &) {}
template<> CheckArgsNumHelperImpl(PreformattedMessage &&) {}

};

template <typename... Args> using CheckArgsNumHelper = CheckArgsNumHelperImpl<std::type_identity_t<Args>...>;
template <typename... Args> void formatStringCheckArgsNum(CheckArgsNumHelper<Args...>, Args &&...) {}
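
When the "{}" count disagrees with sizeof...(Args), the consteval check calls a function that is not usable in constant evaluation, so the mismatch surfaces as a compile error. A runtime restatement of the counting rule, with examples of what the new check accepts and rejects:

#include <cassert>
#include <string_view>

/// Non-consteval restatement of formatStringCountArgsNum() above.
int countArgs(std::string_view str)
{
    int cnt = 0;
    size_t i = 0;
    while (i + 1 < str.size())
    {
        if (str[i] == '{' && str[i + 1] == '}') { i += 2; ++cnt; }
        else if (str[i] == '{') return -1; /// complex specs like "{:.3f}": check is skipped
        else ++i;
    }
    return cnt;
}

int main()
{
    assert(countArgs("Loaded {} tables in {} ms") == 2); /// with 2 arguments: compiles
    assert(countArgs("Loaded {} tables") == 1);          /// with 2 arguments: now a compile error
    assert(countArgs("took {:.3f} sec") == -1);          /// complex spec: not checked
}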
@ -187,7 +187,7 @@ public:
{
throw Exception(
ErrorCodes::BAD_ARGUMENTS,
"Value with key `{}` is used twice in the SET query",
"Value with key `{}` is used twice in the SET query (collection name: {})",
name, query.collection_name);
}
}
@ -211,9 +211,8 @@ PoolWithFailoverBase<TNestedPool>::get(size_t max_ignored_errors, bool fallback_
max_ignored_errors, fallback_to_stale_replicas,
try_get_entry, get_priority);
if (results.empty() || results[0].entry.isNull())
throw DB::Exception(
"PoolWithFailoverBase::getMany() returned less than min_entries entries.",
DB::ErrorCodes::LOGICAL_ERROR);
throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR,
"PoolWithFailoverBase::getMany() returned less than min_entries entries.");
return results[0].entry;
}

@ -320,10 +319,8 @@ PoolWithFailoverBase<TNestedPool>::getMany(
try_results.resize(up_to_date_count);
}
else
throw DB::Exception(
"Could not find enough connections to up-to-date replicas. Got: " + std::to_string(up_to_date_count)
+ ", needed: " + std::to_string(min_entries),
DB::ErrorCodes::ALL_REPLICAS_ARE_STALE);
throw DB::Exception(DB::ErrorCodes::ALL_REPLICAS_ARE_STALE,
"Could not find enough connections to up-to-date replicas. Got: {}, needed: {}", up_to_date_count, max_entries);

return try_results;
}
@ -62,10 +62,10 @@ public:
, replacement(replacement_string)
{
if (!regexp.ok())
throw DB::Exception(
"SensitiveDataMasker: cannot compile re2: " + regexp_string_ + ", error: " + regexp.error()
+ ". Look at https://github.com/google/re2/wiki/Syntax for reference.",
DB::ErrorCodes::CANNOT_COMPILE_REGEXP);
throw DB::Exception(DB::ErrorCodes::CANNOT_COMPILE_REGEXP,
"SensitiveDataMasker: cannot compile re2: {}, error: {}. "
"Look at https://github.com/google/re2/wiki/Syntax for reference.",
regexp_string_, regexp.error());
}

uint64_t apply(std::string & data) const
@ -100,7 +100,7 @@ size_t TLDListsHolder::parseAndAddTldList(const std::string & name, const std::s
tld_list_tmp.emplace(line, TLDType::TLD_REGULAR);
}
if (!in.eof())
throw Exception(ErrorCodes::LOGICAL_ERROR, "Not all list had been read", name);
throw Exception(ErrorCodes::LOGICAL_ERROR, "Not all list had been read: {}", name);

TLDList tld_list(tld_list_tmp.size());
for (const auto & [host, type] : tld_list_tmp)
@ -58,7 +58,7 @@ UInt64 Throttler::add(size_t amount)
}

if (limit && count_value > limit)
throw Exception(limit_exceeded_exception_message + std::string(" Maximum: ") + toString(limit), ErrorCodes::LIMIT_EXCEEDED);
throw Exception::createDeprecated(limit_exceeded_exception_message + std::string(" Maximum: ") + toString(limit), ErrorCodes::LIMIT_EXCEEDED);

/// Wait unless there is positive amount of tokens - throttling
Int64 sleep_time = 0;
@ -41,7 +41,7 @@ To assert_cast(From && from)
}
catch (const std::exception & e)
{
throw DB::Exception(e.what(), DB::ErrorCodes::LOGICAL_ERROR);
throw DB::Exception::createDeprecated(e.what(), DB::ErrorCodes::LOGICAL_ERROR);
}

throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Bad cast from type {} to {}",
@ -57,6 +57,7 @@ namespace
if (_is_clients_log || _logger->is((PRIORITY))) \
{ \
std::string formatted_message = numArgs(__VA_ARGS__) > 1 ? fmt::format(__VA_ARGS__) : firstArg(__VA_ARGS__); \
formatStringCheckArgsNum(__VA_ARGS__); \
if (auto _channel = _logger->getChannel()) \
{ \
std::string file_function; \
@ -171,9 +171,8 @@ TEST(Common, RWLockDeadlock)
auto holder2 = lock2->getLock(RWLockImpl::Read, "q1", std::chrono::milliseconds(100));
if (!holder2)
{
throw Exception(
"Locking attempt timed out! Possible deadlock avoided. Client should retry.",
ErrorCodes::DEADLOCK_AVOIDED);
throw Exception(ErrorCodes::DEADLOCK_AVOIDED,
"Locking attempt timed out! Possible deadlock avoided. Client should retry.");
}
}
catch (const Exception & e)
@ -202,9 +201,8 @@ TEST(Common, RWLockDeadlock)
auto holder1 = lock1->getLock(RWLockImpl::Read, "q3", std::chrono::milliseconds(100));
if (!holder1)
{
throw Exception(
"Locking attempt timed out! Possible deadlock avoided. Client should retry.",
ErrorCodes::DEADLOCK_AVOIDED);
throw Exception(ErrorCodes::DEADLOCK_AVOIDED,
"Locking attempt timed out! Possible deadlock avoided. Client should retry.");
}
}
catch (const Exception & e)
@ -37,7 +37,7 @@ To typeid_cast(From & from)
}
catch (const std::exception & e)
{
throw DB::Exception(e.what(), DB::ErrorCodes::LOGICAL_ERROR);
throw DB::Exception::createDeprecated(e.what(), DB::ErrorCodes::LOGICAL_ERROR);
}

throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Bad cast from type {} to {}",
@ -58,7 +58,7 @@ To typeid_cast(From * from)
}
catch (const std::exception & e)
{
throw DB::Exception(e.what(), DB::ErrorCodes::LOGICAL_ERROR);
throw DB::Exception::createDeprecated(e.what(), DB::ErrorCodes::LOGICAL_ERROR);
}
}

@ -93,6 +93,6 @@ To typeid_cast(const std::shared_ptr<From> & from)
}
catch (const std::exception & e)
{
throw DB::Exception(e.what(), DB::ErrorCodes::LOGICAL_ERROR);
throw DB::Exception::createDeprecated(e.what(), DB::ErrorCodes::LOGICAL_ERROR);
}
}
@ -86,7 +86,7 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c
{
message << ". The mismatch is caused by single bit flip in data block at byte " << (bit_pos / 8) << ", bit " << (bit_pos % 8) << ". "
<< message_hardware_failure;
throw Exception(message.str(), ErrorCodes::CHECKSUM_DOESNT_MATCH);
throw Exception::createDeprecated(message.str(), ErrorCodes::CHECKSUM_DOESNT_MATCH);
}

flip_bit(tmp_data, bit_pos); /// Restore
@ -101,10 +101,10 @@ static void validateChecksum(char * data, size_t size, const Checksum expected_c
{
message << ". The mismatch is caused by single bit flip in checksum. "
<< message_hardware_failure;
throw Exception(message.str(), ErrorCodes::CHECKSUM_DOESNT_MATCH);
throw Exception::createDeprecated(message.str(), ErrorCodes::CHECKSUM_DOESNT_MATCH);
}

throw Exception(message.str(), ErrorCodes::CHECKSUM_DOESNT_MATCH);
throw Exception::createDeprecated(message.str(), ErrorCodes::CHECKSUM_DOESNT_MATCH);
}

static void readHeaderAndGetCodecAndSize(
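
For context, validateChecksum() distinguishes three cases on mismatch: a single bit flip in the data block, a single bit flip in the stored checksum, or a genuine corruption; the first two point at hardware failure. A sketch of the diagnosis idea with a stand-in checksum function (the real code hashes the compressed block with a 128-bit hash):

#include <cstddef>
#include <cstdint>
#include <vector>

/// Stand-in checksum (FNV-1a); purely illustrative.
uint64_t checksum(const std::vector<uint8_t> & data)
{
    uint64_t h = 1469598103934665603ULL;
    for (uint8_t b : data) { h ^= b; h *= 1099511628211ULL; }
    return h;
}

/// Returns the bit position whose flip explains the mismatch, or -1.
long findSingleBitFlip(std::vector<uint8_t> data, uint64_t expected)
{
    for (size_t bit = 0; bit < data.size() * 8; ++bit)
    {
        data[bit / 8] ^= static_cast<uint8_t>(1u << (bit % 8)); /// flip
        bool matches = (checksum(data) == expected);
        data[bit / 8] ^= static_cast<uint8_t>(1u << (bit % 8)); /// restore
        if (matches)
            return static_cast<long>(bit); /// likely a hardware failure, not bad input
    }
    return -1;
}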
@ -30,7 +30,7 @@ protected:
bool isGenericCompression() const override { return false; }

private:
UInt8 delta_bytes_size;
const UInt8 delta_bytes_size;
};

@ -68,8 +68,8 @@ void compressDataForType(const char * source, UInt32 source_size, char * dest)
if (source_size % sizeof(T) != 0)
throw Exception(ErrorCodes::CANNOT_COMPRESS, "Cannot delta compress, data size {} is not aligned to {}", source_size, sizeof(T));

T prev_src{};
const char * source_end = source + source_size;
T prev_src = 0;
const char * const source_end = source + source_size;
while (source < source_end)
{
T curr_src = unalignedLoad<T>(source);
@ -84,17 +84,17 @@ void compressDataForType(const char * source, UInt32 source_size, char * dest)
template <typename T>
void decompressDataForType(const char * source, UInt32 source_size, char * dest, UInt32 output_size)
{
const char * output_end = dest + output_size;
const char * const output_end = dest + output_size;

if (source_size % sizeof(T) != 0)
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot delta decompress, data size {} is not aligned to {}", source_size, sizeof(T));

T accumulator{};
const char * source_end = source + source_size;
const char * const source_end = source + source_size;
while (source < source_end)
{
accumulator += unalignedLoad<T>(source);
if (dest + sizeof(accumulator) > output_end)
if (dest + sizeof(accumulator) > output_end) [[unlikely]]
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress the data");
unalignedStore<T>(dest, accumulator);

@ -140,7 +140,7 @@ void CompressionCodecDelta::doDecompressData(const char * source, UInt32 source_

UInt8 bytes_size = source[0];

if (bytes_size == 0)
if (!(bytes_size == 1 || bytes_size == 2 || bytes_size == 4 || bytes_size == 8))
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress. File has wrong header");

UInt8 bytes_to_skip = uncompressed_size % bytes_size;
@ -190,7 +190,7 @@ UInt8 getDeltaBytesSize(const IDataType * column_type)
void registerCodecDelta(CompressionCodecFactory & factory)
{
UInt8 method_code = static_cast<UInt8>(CompressionMethodByte::Delta);
factory.registerCompressionCodecWithType("Delta", method_code, [&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr
auto codec_builder = [&](const ASTPtr & arguments, const IDataType * column_type) -> CompressionCodecPtr
{
UInt8 delta_bytes_size = 0;

@ -215,7 +215,8 @@ void registerCodecDelta(CompressionCodecFactory & factory)
}

return std::make_shared<CompressionCodecDelta>(delta_bytes_size);
});
};
factory.registerCompressionCodecWithType("Delta", method_code, codec_builder);
}

CompressionCodecPtr getCompressionCodecDelta(UInt8 delta_bytes_size)
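
The codec itself is unchanged in substance: compression stores each value minus its predecessor, decompression keeps a running sum, and the new [[unlikely]] bound check rejects truncated input. For example, {10, 12, 12, 15} encodes as {10, 2, 0, 3}. A minimal single-width restatement:

#include <cstdint>
#include <vector>

/// Delta encoding for one fixed width (UInt32); the real codec is templated
/// over 1/2/4/8-byte widths and works on unaligned byte buffers.
std::vector<uint32_t> deltaCompress(const std::vector<uint32_t> & src)
{
    std::vector<uint32_t> out;
    uint32_t prev = 0;
    for (uint32_t v : src) { out.push_back(v - prev); prev = v; }
    return out;
}

std::vector<uint32_t> deltaDecompress(const std::vector<uint32_t> & deltas)
{
    std::vector<uint32_t> out;
    uint32_t accumulator = 0;
    for (uint32_t d : deltas) { accumulator += d; out.push_back(accumulator); }
    return out;
}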
@ -141,7 +141,7 @@ size_t encrypt(std::string_view plaintext, char * ciphertext_and_tag, Encryption
reinterpret_cast<const uint8_t*>(key.data()), key.size(),
tag_size, nullptr);
if (!ok_init)
throw Exception(lastErrorString(), ErrorCodes::OPENSSL_ERROR);
throw Exception::createDeprecated(lastErrorString(), ErrorCodes::OPENSSL_ERROR);

/// encrypt data using context and given nonce.
size_t out_len;
@ -152,7 +152,7 @@ size_t encrypt(std::string_view plaintext, char * ciphertext_and_tag, Encryption
reinterpret_cast<const uint8_t *>(plaintext.data()), plaintext.size(),
nullptr, 0);
if (!ok_open)
throw Exception(lastErrorString(), ErrorCodes::OPENSSL_ERROR);
throw Exception::createDeprecated(lastErrorString(), ErrorCodes::OPENSSL_ERROR);

return out_len;
}
@ -171,7 +171,7 @@ size_t decrypt(std::string_view ciphertext, char * plaintext, EncryptionMethod m
reinterpret_cast<const uint8_t*>(key.data()), key.size(),
tag_size, nullptr);
if (!ok_init)
throw Exception(lastErrorString(), ErrorCodes::OPENSSL_ERROR);
throw Exception::createDeprecated(lastErrorString(), ErrorCodes::OPENSSL_ERROR);

/// decrypt data using given nonce
size_t out_len;
@ -182,7 +182,7 @@ size_t decrypt(std::string_view ciphertext, char * plaintext, EncryptionMethod m
reinterpret_cast<const uint8_t *>(ciphertext.data()), ciphertext.size(),
nullptr, 0);
if (!ok_open)
throw Exception(lastErrorString(), ErrorCodes::OPENSSL_ERROR);
throw Exception::createDeprecated(lastErrorString(), ErrorCodes::OPENSSL_ERROR);

return out_len;
}
@ -11,19 +11,18 @@
|
||||
#include <IO/ReadBufferFromMemory.h>
|
||||
#include <IO/BitHelpers.h>
|
||||
|
||||
#include <bitset>
|
||||
#include <cstring>
|
||||
#include <algorithm>
|
||||
#include <type_traits>
|
||||
|
||||
#include <bitset>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/** Gorilla column codec implementation.
|
||||
*
|
||||
* Based on Gorilla paper: http://www.vldb.org/pvldb/vol8/p1816-teller.pdf
|
||||
* Based on Gorilla paper: https://dl.acm.org/doi/10.14778/2824032.2824078
|
||||
*
|
||||
* This codec is best used against monotonic floating sequences, like CPU usage percentage
|
||||
* or any other gauge.
|
||||
@ -125,7 +124,7 @@ protected:
|
||||
bool isGenericCompression() const override { return false; }
|
||||
|
||||
private:
|
||||
UInt8 data_bytes_size;
|
||||
const UInt8 data_bytes_size;
|
||||
};
|
||||
|
||||
|
||||
@ -139,7 +138,7 @@ namespace ErrorCodes
|
||||
namespace
|
||||
{
|
||||
|
||||
constexpr inline UInt8 getBitLengthOfLength(UInt8 data_bytes_size)
|
||||
constexpr UInt8 getBitLengthOfLength(UInt8 data_bytes_size)
|
||||
{
|
||||
// 1-byte value is 8 bits, and we need 4 bits to represent 8 : 1000,
|
||||
// 2-byte 16 bits => 5
|
||||
@ -147,21 +146,20 @@ constexpr inline UInt8 getBitLengthOfLength(UInt8 data_bytes_size)
|
||||
// 8-byte 64 bits => 7
|
||||
const UInt8 bit_lengths[] = {0, 4, 5, 0, 6, 0, 0, 0, 7};
|
||||
assert(data_bytes_size >= 1 && data_bytes_size < sizeof(bit_lengths) && bit_lengths[data_bytes_size] != 0);
|
||||
|
||||
return bit_lengths[data_bytes_size];
|
||||
}
|
||||
|
||||
|
||||
UInt32 getCompressedHeaderSize(UInt8 data_bytes_size)
|
||||
{
|
||||
const UInt8 items_count_size = 4;
|
||||
|
||||
constexpr UInt8 items_count_size = 4;
|
||||
return items_count_size + data_bytes_size;
|
||||
}
|
||||
|
||||
UInt32 getCompressedDataSize(UInt8 data_bytes_size, UInt32 uncompressed_size)
|
||||
{
|
||||
const UInt32 items_count = uncompressed_size / data_bytes_size;
|
||||
|
||||
static const auto DATA_BIT_LENGTH = getBitLengthOfLength(data_bytes_size);
|
||||
// -1 since there must be at least 1 non-zero bit.
|
||||
static const auto LEADING_ZEROES_BIT_LENGTH = DATA_BIT_LENGTH - 1;
|
||||
@ -182,7 +180,7 @@ struct BinaryValueInfo
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
BinaryValueInfo getLeadingAndTrailingBits(const T & value)
|
||||
BinaryValueInfo getBinaryValueInfo(const T & value)
|
||||
{
|
||||
constexpr UInt8 bit_size = sizeof(T) * 8;
|
||||
|
||||
@ -190,28 +188,25 @@ BinaryValueInfo getLeadingAndTrailingBits(const T & value)
|
||||
const UInt8 tz = getTrailingZeroBits(value);
|
||||
const UInt8 data_size = value == 0 ? 0 : static_cast<UInt8>(bit_size - lz - tz);
|
||||
|
||||
return BinaryValueInfo{lz, data_size, tz};
|
||||
return {lz, data_size, tz};
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest, UInt32 dest_size)
|
||||
{
|
||||
static const auto DATA_BIT_LENGTH = getBitLengthOfLength(sizeof(T));
|
||||
// -1 since there must be at least 1 non-zero bit.
|
||||
static const auto LEADING_ZEROES_BIT_LENGTH = DATA_BIT_LENGTH - 1;
|
||||
|
||||
if (source_size % sizeof(T) != 0)
|
||||
throw Exception(ErrorCodes::CANNOT_COMPRESS, "Cannot compress, data size {} is not aligned to {}", source_size, sizeof(T));
|
||||
const char * source_end = source + source_size;
|
||||
const char * dest_start = dest;
|
||||
const char * dest_end = dest + dest_size;
|
||||
|
||||
const char * const source_end = source + source_size;
|
||||
const char * const dest_start = dest;
|
||||
const char * const dest_end = dest + dest_size;
|
||||
|
||||
const UInt32 items_count = source_size / sizeof(T);
|
||||
|
||||
unalignedStoreLE<UInt32>(dest, items_count);
|
||||
dest += sizeof(items_count);
|
||||
|
||||
T prev_value{};
|
||||
T prev_value = 0;
|
||||
// That would cause first XORed value to be written in-full.
|
||||
BinaryValueInfo prev_xored_info{0, 0, 0};
|
||||
|
||||
@ -226,13 +221,17 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest,

     BitWriter writer(dest, dest_end - dest);

+    static const auto DATA_BIT_LENGTH = getBitLengthOfLength(sizeof(T));
+    // -1 since there must be at least 1 non-zero bit.
+    static const auto LEADING_ZEROES_BIT_LENGTH = DATA_BIT_LENGTH - 1;
+
     while (source < source_end)
     {
         const T curr_value = unalignedLoadLE<T>(source);
         source += sizeof(curr_value);

         const auto xored_data = curr_value ^ prev_value;
-        const BinaryValueInfo curr_xored_info = getLeadingAndTrailingBits(xored_data);
+        const BinaryValueInfo curr_xored_info = getBinaryValueInfo(xored_data);

         if (xored_data == 0)
         {
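This loop is the heart of the Gorilla scheme: each value is XORed with its predecessor, and only the non-zero "window" of the XOR is written, preceded by control bits. A runnable trace of that decision logic, with the `BitWriter` replaced by `printf` (the "10"/"11" prefixes and field widths follow the Gorilla paper's 64-bit layout; ClickHouse's exact stream format is not fully shown in this hunk):

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t values[] = {42, 42, 43, 1024};
    uint64_t prev_value = 0;
    for (const uint64_t curr_value : values)
    {
        const uint64_t xored_data = curr_value ^ prev_value;
        if (xored_data == 0)
        {
            printf("%llu: control bit 0 (value repeats)\n",
                   static_cast<unsigned long long>(curr_value));
        }
        else
        {
            const int lz = std::countl_zero(xored_data);
            const int tz = std::countr_zero(xored_data);
            const int data_bits = 64 - lz - tz;
            // A full encoder would emit "10" when the new window fits inside the
            // previous one, and "11" + leading-zero count + length otherwise.
            printf("%llu: control bits 11, lz=%d, payload=%d significant bits\n",
                   static_cast<unsigned long long>(curr_value), lz, data_bits);
        }
        prev_value = curr_value;
    }
    return 0;
}
```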
@ -265,11 +264,7 @@ UInt32 compressDataForType(const char * source, UInt32 source_size, char * dest,
 template <typename T>
 void decompressDataForType(const char * source, UInt32 source_size, char * dest)
 {
-    static const auto DATA_BIT_LENGTH = getBitLengthOfLength(sizeof(T));
-    // -1 since there must be at least 1 non-zero bit.
-    static const auto LEADING_ZEROES_BIT_LENGTH = DATA_BIT_LENGTH - 1;
-
-    const char * source_end = source + source_size;
+    const char * const source_end = source + source_size;

     if (source + sizeof(UInt32) > source_end)
         return;
@ -277,7 +272,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)
     const UInt32 items_count = unalignedLoadLE<UInt32>(source);
     source += sizeof(items_count);

-    T prev_value{};
+    T prev_value = 0;

     // decoding first item
     if (source + sizeof(T) > source_end || items_count < 1)
@ -293,13 +288,17 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)

     BinaryValueInfo prev_xored_info{0, 0, 0};

+    static const auto DATA_BIT_LENGTH = getBitLengthOfLength(sizeof(T));
+    // -1 since there must be at least 1 non-zero bit.
+    static const auto LEADING_ZEROES_BIT_LENGTH = DATA_BIT_LENGTH - 1;
+
     // since data is tightly packed, up to 1 bit per value, and last byte is padded with zeroes,
     // we have to keep track of items to avoid reading more that there is.
     for (UInt32 items_read = 1; items_read < items_count && !reader.eof(); ++items_read)
     {
         T curr_value = prev_value;
         BinaryValueInfo curr_xored_info = prev_xored_info;
-        T xored_data{};
+        T xored_data = 0;

         if (reader.readBit() == 1)
         {
@ -314,7 +313,7 @@ void decompressDataForType(const char * source, UInt32 source_size, char * dest)

         if (curr_xored_info.leading_zero_bits == 0
             && curr_xored_info.data_bits == 0
-            && curr_xored_info.trailing_zero_bits == 0)
+            && curr_xored_info.trailing_zero_bits == 0) [[unlikely]]
         {
             throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress gorilla-encoded data: corrupted input data.");
         }
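On the decode side, the loop above does the inverse: it shifts the payload back by the trailing-zero count and XORs with the previous value. The `[[unlikely]]` added here simply marks the corrupted-input branch as cold. A minimal check of the round-trip invariant, independent of ClickHouse's `BitReader`:

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t prev_value = 42;
    const uint64_t curr_value = 42 + 1024;

    // What the encoder writes: the significant window of the XOR.
    const uint64_t xored_data = prev_value ^ curr_value;
    const int tz = std::countr_zero(xored_data);
    const uint64_t payload = xored_data >> tz;

    // What the decoder computes from (payload, tz) and the previous value.
    const uint64_t decoded = prev_value ^ (payload << tz);
    assert(decoded == curr_value);
    return 0;
}
```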
@ -403,7 +402,7 @@ UInt32 CompressionCodecGorilla::doCompressData(const char * source, UInt32 sourc
             break;
     }

-    return 1 + 1 + result_size;
+    return 2 + bytes_to_skip + result_size;
 }

 void CompressionCodecGorilla::doDecompressData(const char * source, UInt32 source_size, char * dest, UInt32 uncompressed_size) const
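The one-line fix above corrects the reported compressed size. Judging by the new expression, the frame appears to hold 2 header bytes, then `bytes_to_skip` raw tail bytes (the remainder of the input that is not a multiple of the value width, stored verbatim), then `result_size` bytes of bit-packed data; the old `1 + 1` under-counted whenever that tail was non-empty. A back-of-the-envelope check under that assumed layout:

```cpp
#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t source_size = 17;      // e.g. two UInt64 values plus 1 stray byte
    const uint32_t data_bytes_size = 8;
    const uint32_t bytes_to_skip = source_size % data_bytes_size;  // 1 raw tail byte
    const uint32_t result_size = 42;      // whatever the bit writer produced

    // Corrected accounting: header + raw tail + packed payload.
    assert(2 + bytes_to_skip + result_size == 45);
    // The old "1 + 1 + result_size" would have reported 44, dropping the tail byte.
    return 0;
}
```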
@ -11,13 +11,6 @@
 namespace DB
 {

-class ICompressionCodec;
-
-using CompressionCodecPtr = std::shared_ptr<ICompressionCodec>;
-using Codecs = std::vector<CompressionCodecPtr>;
-
-class IDataType;
-
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size);

 /**
@ -120,7 +113,7 @@ protected:
     /// Return size of compressed data without header
     virtual UInt32 getMaxCompressedDataSize(UInt32 uncompressed_size) const { return uncompressed_size; }

-    /// Actually compress data, without header
+    /// Actually compress data without header
     virtual UInt32 doCompressData(const char * source, UInt32 source_size, char * dest) const = 0;

     /// Actually decompress data without header
@ -134,4 +127,7 @@ private:
     CodecMode decompressMode{CodecMode::Synchronous};
 };

+using CompressionCodecPtr = std::shared_ptr<ICompressionCodec>;
+using Codecs = std::vector<CompressionCodecPtr>;
+
 }
@ -29,11 +29,13 @@ namespace ErrorCodes
     extern const int AMBIGUOUS_COLUMN_NAME;
 }

-template <typename ReturnType>
-static ReturnType onError(const std::string & message [[maybe_unused]], int code [[maybe_unused]])
+template <typename ReturnType, typename... FmtArgs>
+static ReturnType onError(int code [[maybe_unused]],
+                          FormatStringHelper<FmtArgs...> fmt_string [[maybe_unused]],
+                          FmtArgs && ...fmt_args [[maybe_unused]])
 {
     if constexpr (std::is_same_v<ReturnType, void>)
-        throw Exception(message, code);
+        throw Exception(code, std::move(fmt_string), std::forward<FmtArgs>(fmt_args)...);
     else
         return false;
 }
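This refactor pushes the format string and its arguments into `onError` itself, so the message is only rendered on the throwing path and the format string can be validated at compile time. A standalone analogue using C++23's `std::format_string` in place of ClickHouse's `FormatStringHelper` (an assumption; the two are not identical):

```cpp
#include <format>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>

template <typename ReturnType, typename... FmtArgs>
static ReturnType onError(int code [[maybe_unused]],
                          std::format_string<FmtArgs...> fmt_string [[maybe_unused]],
                          FmtArgs && ...fmt_args [[maybe_unused]])
{
    if constexpr (std::is_same_v<ReturnType, void>)
        throw std::runtime_error(
            "code " + std::to_string(code) + ": "
            + std::format(fmt_string, std::forward<FmtArgs>(fmt_args)...));
    else
        return false;  // bool mode: signal failure without formatting anything
}

int main()
{
    bool ok = onError<bool>(49, "mismatch: {} vs {}", 1, 2);  // returns false
    try
    {
        onError<void>(49, "mismatch: {} vs {}", 1, 2);        // throws
    }
    catch (const std::exception &) {}
    return ok ? 1 : 0;
}
```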
@ -44,13 +46,13 @@ static ReturnType checkColumnStructure(const ColumnWithTypeAndName & actual, con
     std::string_view context_description, bool allow_materialize, int code)
 {
     if (actual.name != expected.name)
-        return onError<ReturnType>("Block structure mismatch in " + std::string(context_description) + " stream: different names of columns:\n"
-            + actual.dumpStructure() + "\n" + expected.dumpStructure(), code);
+        return onError<ReturnType>(code, "Block structure mismatch in {} stream: different names of columns:\n{}\n{}",
+                                   context_description, actual.dumpStructure(), expected.dumpStructure());

     if ((actual.type && !expected.type) || (!actual.type && expected.type)
         || (actual.type && expected.type && !actual.type->equals(*expected.type)))
-        return onError<ReturnType>("Block structure mismatch in " + std::string(context_description) + " stream: different types:\n"
-            + actual.dumpStructure() + "\n" + expected.dumpStructure(), code);
+        return onError<ReturnType>(code, "Block structure mismatch in {} stream: different types:\n{}\n{}",
+                                   context_description, actual.dumpStructure(), expected.dumpStructure());

     if (!actual.column || !expected.column)
         return ReturnType(true);
@ -74,22 +76,18 @@ static ReturnType checkColumnStructure(const ColumnWithTypeAndName & actual, con
     if (actual_column_maybe_agg && expected_column_maybe_agg)
     {
         if (!actual_column_maybe_agg->getAggregateFunction()->haveSameStateRepresentation(*expected_column_maybe_agg->getAggregateFunction()))
-            return onError<ReturnType>(
-                fmt::format(
+            return onError<ReturnType>(code,
                     "Block structure mismatch in {} stream: different columns:\n{}\n{}",
                     context_description,
                     actual.dumpStructure(),
-                    expected.dumpStructure()),
-                code);
+                    expected.dumpStructure());
     }
     else if (actual_column->getName() != expected.column->getName())
-        return onError<ReturnType>(
-            fmt::format(
+        return onError<ReturnType>(code,
                 "Block structure mismatch in {} stream: different columns:\n{}\n{}",
                 context_description,
                 actual.dumpStructure(),
-                expected.dumpStructure()),
-            code);
+                expected.dumpStructure());

     if (isColumnConst(*actual.column) && isColumnConst(*expected.column)
         && !actual.column->empty() && !expected.column->empty()) /// don't check values in empty columns
@ -98,14 +96,12 @@ static ReturnType checkColumnStructure(const ColumnWithTypeAndName & actual, con
         Field expected_value = assert_cast<const ColumnConst &>(*expected.column).getField();

         if (actual_value != expected_value)
-            return onError<ReturnType>(
-                fmt::format(
+            return onError<ReturnType>(code,
                     "Block structure mismatch in {} stream: different values of constants in column '{}': actual: {}, expected: {}",
                     context_description,
                     actual.name,
                     applyVisitor(FieldVisitorToString(), actual_value),
-                    applyVisitor(FieldVisitorToString(), expected_value)),
-                code);
+                    applyVisitor(FieldVisitorToString(), expected_value));
     }

     return ReturnType(true);
@ -117,8 +113,8 @@ static ReturnType checkBlockStructure(const Block & lhs, const Block & rhs, std:
 {
     size_t columns = rhs.columns();
     if (lhs.columns() != columns)
-        return onError<ReturnType>("Block structure mismatch in " + std::string(context_description) + " stream: different number of columns:\n"
-            + lhs.dumpStructure() + "\n" + rhs.dumpStructure(), ErrorCodes::LOGICAL_ERROR);
+        return onError<ReturnType>(ErrorCodes::LOGICAL_ERROR, "Block structure mismatch in {} stream: different number of columns:\n{}\n{}",
+                                   context_description, lhs.dumpStructure(), rhs.dumpStructure());

     for (size_t i = 0; i < columns; ++i)
     {
@ -95,7 +95,7 @@ void MySQLClient::handshake()
     packet_endpoint->resetSequenceId();

     if (packet_response.getType() == PACKET_ERR)
-        throw Exception(packet_response.err.error_message, ErrorCodes::UNKNOWN_PACKET_FROM_SERVER);
+        throw Exception::createDeprecated(packet_response.err.error_message, ErrorCodes::UNKNOWN_PACKET_FROM_SERVER);
     else if (packet_response.getType() == PACKET_AUTH_SWITCH)
         throw Exception(ErrorCodes::UNKNOWN_PACKET_FROM_SERVER, "Access denied for user {}", user);
 }
@ -110,7 +110,7 @@ void MySQLClient::writeCommand(char command, String query)
     switch (packet_response.getType())
     {
         case PACKET_ERR:
-            throw Exception(packet_response.err.error_message, ErrorCodes::UNKNOWN_PACKET_FROM_SERVER);
+            throw Exception::createDeprecated(packet_response.err.error_message, ErrorCodes::UNKNOWN_PACKET_FROM_SERVER);
         case PACKET_OK:
             break;
         default:
@ -128,7 +128,7 @@ void MySQLClient::registerSlaveOnMaster(UInt32 slave_id)
     packet_endpoint->receivePacket(packet_response);
     packet_endpoint->resetSequenceId();
     if (packet_response.getType() == PACKET_ERR)
-        throw Exception(packet_response.err.error_message, ErrorCodes::UNKNOWN_PACKET_FROM_SERVER);
+        throw Exception::createDeprecated(packet_response.err.error_message, ErrorCodes::UNKNOWN_PACKET_FROM_SERVER);
 }

 void MySQLClient::ping()
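Context for the `createDeprecated` changes: in this release line, the primary `Exception` constructors take a format string that is checked at compile time, so messages that only exist at runtime (here, error strings received from the MySQL server) need an explicit escape hatch. An alternative to such an escape hatch is to pass the runtime string as an argument to a trivial `"{}"` template, sketched below with a hypothetical fmt-checked exception type (not ClickHouse's actual class):

```cpp
#include <format>
#include <stdexcept>
#include <string>
#include <utility>

// Hypothetical Exception whose only constructor demands a compile-time
// format string, mimicking the constraint this diff works around.
struct Exception : std::runtime_error
{
    template <typename... Args>
    Exception(int code, std::format_string<Args...> fmt, Args &&... args)
        : std::runtime_error(std::format(fmt, std::forward<Args>(args)...)
                             + " (code " + std::to_string(code) + ")")
    {
    }
};

int main()
{
    const std::string server_message = "Access denied";  // arrives at runtime
    try
    {
        // A runtime string cannot be the format string itself (it is not
        // constant-evaluated), but it can be an argument to "{}":
        throw Exception(1045, "{}", server_message);
    }
    catch (const std::exception &) {}
    return 0;
}
```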
@ -111,7 +111,7 @@ namespace MySQLReplication
         else if (query.starts_with("XA"))
         {
             if (query.starts_with("XA ROLLBACK"))
-                throw ReplicationError("ParseQueryEvent: Unsupported query event:" + query, ErrorCodes::LOGICAL_ERROR);
+                throw ReplicationError(ErrorCodes::LOGICAL_ERROR, "ParseQueryEvent: Unsupported query event: {}", query);
             typ = QUERY_EVENT_XA;
             if (!query.starts_with("XA COMMIT"))
                 transaction_complete = false;
@ -247,7 +247,7 @@ namespace MySQLReplication
                     break;
                 }
                 default:
-                    throw ReplicationError("ParseMetaData: Unhandled data type:" + std::to_string(typ), ErrorCodes::UNKNOWN_EXCEPTION);
+                    throw ReplicationError(ErrorCodes::UNKNOWN_EXCEPTION, "ParseMetaData: Unhandled data type: {}", std::to_string(typ));
             }
         }
     }
@ -770,8 +770,8 @@ namespace MySQLReplication
                         break;
                     }
                     default:
-                        throw ReplicationError(
-                            "ParseRow: Unhandled MySQL field type:" + std::to_string(field_type), ErrorCodes::UNKNOWN_EXCEPTION);
+                        throw ReplicationError(ErrorCodes::UNKNOWN_EXCEPTION,
+                            "ParseRow: Unhandled MySQL field type: {}", std::to_string(field_type));
                 }
             }
             null_index++;
@ -873,7 +873,7 @@ namespace MySQLReplication
                 break;
             }
             default:
-                throw ReplicationError("Position update with unsupported event", ErrorCodes::LOGICAL_ERROR);
+                throw ReplicationError(ErrorCodes::LOGICAL_ERROR, "Position update with unsupported event");
         }
     }

@ -901,11 +901,11 @@ namespace MySQLReplication
         switch (header)
         {
             case PACKET_EOF:
-                throw ReplicationError("Master maybe lost", ErrorCodes::CANNOT_READ_ALL_DATA);
+                throw ReplicationError(ErrorCodes::CANNOT_READ_ALL_DATA, "Master maybe lost");
             case PACKET_ERR:
                 ERRPacket err;
                 err.readPayloadWithUnpacked(payload);
-                throw ReplicationError(err.error_message, ErrorCodes::UNKNOWN_EXCEPTION);
+                throw ReplicationError::createDeprecated(err.error_message, ErrorCodes::UNKNOWN_EXCEPTION);
         }
         // skip the generic response packets header flag.
         payload.ignore(1);
@ -74,7 +74,7 @@ ConnectionHolderPtr PoolWithFailover::get()
     if (replicas_with_priority.empty())
         throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "No address specified");

-    DB::WriteBufferFromOwnString error_message;
+    PreformattedMessage error_message;
     for (size_t try_idx = 0; try_idx < max_tries; ++try_idx)
    {
         for (auto & priority : replicas_with_priority)
@ -107,7 +107,7 @@ ConnectionHolderPtr PoolWithFailover::get()
             catch (const pqxx::broken_connection & pqxx_error)
             {
                 LOG_ERROR(log, "Connection error: {}", pqxx_error.what());
-                error_message << fmt::format(
+                error_message = PreformattedMessage::create(
                     "Try {}. Connection to {} failed with error: {}\n",
                     try_idx + 1, DB::backQuote(replica.connection_info.host_port), pqxx_error.what());

@ -131,7 +131,7 @@ ConnectionHolderPtr PoolWithFailover::get()
         }
     }

-    throw DB::Exception(DB::ErrorCodes::POSTGRESQL_CONNECTION_FAILURE, error_message.str());
+    throw DB::Exception(error_message, DB::ErrorCodes::POSTGRESQL_CONNECTION_FAILURE);
 }
 }
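This change swaps an accumulating `WriteBuffer` for a `PreformattedMessage`: since `error_message = ...` is an assignment rather than an append, the final exception now reports only the most recent failure instead of a concatenation of every attempt. A small sketch of that keep-last-error retry shape, with ClickHouse types replaced by `std::string` and `std::runtime_error`, and hypothetical replica addresses:

```cpp
#include <cstdio>
#include <format>
#include <stdexcept>
#include <string>

// Stand-in connect that always fails, for demonstration purposes.
static bool tryConnect(const std::string &) { return false; }

int main()
{
    const std::string hosts[] = {"replica-1:5432", "replica-2:5432"};  // hypothetical
    const size_t max_tries = 3;

    std::string error_message;  // keeps only the latest failure, like the new code
    for (size_t try_idx = 0; try_idx < max_tries; ++try_idx)
        for (const auto & host : hosts)
        {
            if (tryConnect(host))
                return 0;
            error_message = std::format("Try {}. Connection to {} failed\n", try_idx + 1, host);
        }

    // The final error reports the last failure rather than every attempt.
    try { throw std::runtime_error(error_message); }
    catch (const std::exception & e) { std::puts(e.what()); }
    return 1;
}
```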