mirror of https://github.com/ClickHouse/ClickHouse.git
Merge remote-tracking branch 'upstream/master' into HEAD
This commit is contained in:
commit 92fa39798b
34 .github/workflows/release.yml vendored
@@ -12,38 +12,10 @@ jobs:
   ReleasePublish:
     runs-on: [self-hosted, style-checker]
     steps:
-      - name: Set envs
+      - name: Deploy packages and assets
         run: |
-          cat >> "$GITHUB_ENV" << 'EOF'
-          JFROG_API_KEY=${{ secrets.JFROG_ARTIFACTORY_API_KEY }}
-          TEMP_PATH=${{runner.temp}}/release_packages
-          REPO_COPY=${{runner.temp}}/release_packages/ClickHouse
-          EOF
-      - name: Check out repository code
-        uses: ClickHouse/checkout@v1
-        with:
-          # Always use the most recent script version
-          ref: master
-      - name: Download packages and push to Artifactory
-        run: |
-          rm -rf "$TEMP_PATH" && mkdir -p "$TEMP_PATH"
-          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
-          cd "$REPO_COPY"
-          # Download and push packages to artifactory
-          python3 ./tests/ci/push_to_artifactory.py --release '${{ github.ref }}' \
-            --commit '${{ github.sha }}' --artifactory-url '${{ secrets.JFROG_ARTIFACTORY_URL }}' --all
-          # Download macos binaries to ${{runner.temp}}/download_binary
-          python3 ./tests/ci/download_binary.py --version '${{ github.ref }}' \
-            --commit '${{ github.sha }}' binary_darwin binary_darwin_aarch64
-          mv '${{runner.temp}}/download_binary/'clickhouse-* '${{runner.temp}}/push_to_artifactory'
-      - name: Upload packages to release assets
-        uses: svenstaro/upload-release-action@v2
-        with:
-          repo_token: ${{ secrets.GITHUB_TOKEN }}
-          file: ${{runner.temp}}/push_to_artifactory/*
-          overwrite: true
-          tag: ${{ github.ref }}
-          file_glob: true
+          GITHUB_TAG="${GITHUB_REF#refs/tags/}"
+          curl '${{ secrets.PACKAGES_RELEASE_URL }}/release/'"${GITHUB_TAG}"'?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true' -d ''
 ############################################################################################
 ##################################### Docker images #######################################
 ############################################################################################
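Note: the hunk above replaces the Artifactory upload steps with a single call to a packages-release service. A minimal Python sketch of the same request that the new curl line makes (the endpoint and query parameters are taken from the workflow above; the service's response format is not shown in this commit and is assumed not to matter to the caller):

```python
import os
import requests  # assumed to be available on the CI runner


def trigger_package_release(base_url: str, github_ref: str) -> None:
    """Mirror of the curl call in the new 'Deploy packages and assets' step."""
    tag = github_ref.removeprefix("refs/tags/")
    # The workflow asks the service to also sync the macOS binaries and sends an empty POST body (-d '').
    params = [("binary", "binary_darwin"), ("binary", "binary_darwin_aarch64"), ("sync", "true")]
    response = requests.post(f"{base_url}/release/{tag}", params=params, data="")
    response.raise_for_status()


if __name__ == "__main__":
    trigger_package_release(os.environ["PACKAGES_RELEASE_URL"], os.environ["GITHUB_REF"])
```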
16 SECURITY.md
@@ -13,9 +13,10 @@ The following versions of ClickHouse server are currently being supported with s
 
 | Version | Supported |
 |:-|:-|
+| 23.1 | ✔️ |
 | 22.12 | ✔️ |
 | 22.11 | ✔️ |
-| 22.10 | ✔️ |
+| 22.10 | ❌ |
 | 22.9 | ❌ |
 | 22.8 | ✔️ |
 | 22.7 | ❌ |
@@ -25,18 +26,7 @@ The following versions of ClickHouse server are currently being supported with s
 | 22.3 | ✔️ |
 | 22.2 | ❌ |
 | 22.1 | ❌ |
-| 21.12 | ❌ |
-| 21.11 | ❌ |
-| 21.10 | ❌ |
-| 21.9 | ❌ |
-| 21.8 | ❌ |
-| 21.7 | ❌ |
-| 21.6 | ❌ |
-| 21.5 | ❌ |
-| 21.4 | ❌ |
-| 21.3 | ❌ |
-| 21.2 | ❌ |
-| 21.1 | ❌ |
+| 21.* | ❌ |
 | 20.* | ❌ |
 | 19.* | ❌ |
 | 18.* | ❌ |
@@ -2,11 +2,11 @@
 
 # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54470)
-SET(VERSION_MAJOR 22)
-SET(VERSION_MINOR 13)
+SET(VERSION_REVISION 54471)
+SET(VERSION_MAJOR 23)
+SET(VERSION_MINOR 2)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 688e488e930c83eefeac4f87c4cc029cc5b231e3)
-SET(VERSION_DESCRIBE v22.13.1.1-testing)
-SET(VERSION_STRING 22.13.1.1)
+SET(VERSION_GITHASH dcaac47702510cc87ddf266bc524f6b7ce0a8e6e)
+SET(VERSION_DESCRIBE v23.2.1.1-testing)
+SET(VERSION_STRING 23.2.1.1)
 # end of autochange
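The block above is the autogenerated version file bumped for the next release cycle. If these values are needed outside of CMake (for example in a release script), a small parser like the following works; the file path is an assumption, since the file name is not shown in this diff:

```python
import re
from pathlib import Path

# Path assumed; the hunk above does not name the file.
VERSIONS_FILE = Path("cmake/autogenerated_versions.txt")


def read_versions(path: Path = VERSIONS_FILE) -> dict:
    """Extract SET(NAME value) pairs such as VERSION_MAJOR or VERSION_STRING."""
    pattern = re.compile(r"SET\((VERSION_[A-Z_]+)\s+(\S+)\)")
    return {name: value for name, value in pattern.findall(path.read_text())}

# Example: after this commit, read_versions()["VERSION_STRING"] would be "23.2.1.1".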
2 contrib/NuRaft vendored
@@ -1 +1 @@
-Subproject commit afc36dfa9b0beb45bc4cd935060631cc80ba04a5
+Subproject commit 545b8c810a956b2efdc116e86be219af7e83d68a
2 contrib/arrow vendored
@@ -1 +1 @@
-Subproject commit 450a5638704386356f8e520080468fc9bc8bcaf8
+Subproject commit d03245f801f798c63ee9a7d2b8914a9e5c5cd666
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="22.12.3.5"
+ARG VERSION="23.1.1.3077"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -21,7 +21,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="22.12.3.5"
+ARG VERSION="23.1.1.3077"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # set non-empty deb_location_url url to create a docker image
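Both Dockerfile hunks only bump ARG VERSION to the new release. For illustration, a hedged sketch of how the package URLs could be composed from these ARGs (the exact file-name pattern used by the Dockerfiles' download loop is an assumption here, not shown in the hunks above):

```python
# Sketch only: composes package URLs the way the Dockerfile ARGs suggest.
REPO_CHANNEL = "stable"
TGZ_REPOSITORY = f"https://packages.clickhouse.com/tgz/{REPO_CHANNEL}"
VERSION = "23.1.1.3077"
PACKAGES = ["clickhouse-client", "clickhouse-server", "clickhouse-common-static"]
ARCH = "amd64"  # TARGETARCH in the Dockerfile, defaulting to amd64

# Assumed naming scheme: <package>-<version>-<arch>.tgz
for package in PACKAGES:
    print(f"{TGZ_REPOSITORY}/{package}-{VERSION}-{ARCH}.tgz")
```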
@@ -146,6 +146,12 @@ def prepare_for_hung_check(drop_databases):
             "KILL QUERY WHERE query LIKE 'SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u %'"
         )
     )
+    # Long query from 02136_kill_scalar_queries
+    call_with_retry(
+        make_query_command(
+            "KILL QUERY WHERE query LIKE 'SELECT (SELECT number FROM system.numbers WHERE number = 1000000000000)%'"
+        )
+    )
 
     if drop_databases:
         for i in range(5):
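The added hunk relies on two helpers from the stress-test harness, `call_with_retry` and `make_query_command`, which are not part of this diff. A minimal sketch of what they plausibly look like (bodies assumed, not taken from this commit):

```python
import subprocess
import time


def make_query_command(query: str) -> str:
    # Wraps a query into a clickhouse-client invocation; the exact flags used by
    # the real harness are not part of this diff and are omitted here.
    return f'clickhouse client -q "{query}"'


def call_with_retry(cmd: str, timeout: int = 30, retry_count: int = 5) -> None:
    # Runs a shell command, retrying a few times with a pause between attempts.
    for _ in range(retry_count):
        try:
            if subprocess.call(cmd, shell=True, timeout=timeout) == 0:
                return
        except subprocess.TimeoutExpired:
            pass
        time.sleep(5)
```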
21 docs/changelogs/v22.10.7.13-stable.md Normal file
@@ -0,0 +1,21 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.10.7.13-stable (d261d9036cc) FIXME as compared to v22.10.6.3-stable (645a66d221f)

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Backported in [#44998](https://github.com/ClickHouse/ClickHouse/issues/44998): Another fix for `Cannot read all data` error which could happen while reading `LowCardinality` dictionary from remote fs. Fixes [#44709](https://github.com/ClickHouse/ClickHouse/issues/44709). [#44875](https://github.com/ClickHouse/ClickHouse/pull/44875) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#45551](https://github.com/ClickHouse/ClickHouse/issues/45551): Fix `SELECT ... FROM system.dictionaries` exception when there is a dictionary with a bad structure (e.g. incorrect type in xml config). [#45399](https://github.com/ClickHouse/ClickHouse/pull/45399) ([Aleksei Filatov](https://github.com/aalexfvk)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Automatically merge green backport PRs and green approved PRs [#41110](https://github.com/ClickHouse/ClickHouse/pull/41110) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Improve release scripts [#45074](https://github.com/ClickHouse/ClickHouse/pull/45074) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix wrong approved_at, simplify conditions [#45302](https://github.com/ClickHouse/ClickHouse/pull/45302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Get rid of artifactory in favor of r2 + ch-repos-manager [#45421](https://github.com/ClickHouse/ClickHouse/pull/45421) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
592 docs/changelogs/v23.1.1.3077-stable.md Normal file
@@ -0,0 +1,592 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.1.1.3077-stable (dcaac477025) FIXME as compared to v22.12.1.1752-stable (688e488e930)

#### Backward Incompatible Change
* Remove query `SYSTEM RESTART DISK`. [#44647](https://github.com/ClickHouse/ClickHouse/pull/44647) ([alesapin](https://github.com/alesapin)).
|
||||
* Disallow Gorilla compression on columns of non-Float32 or non-Float64 type. [#45252](https://github.com/ClickHouse/ClickHouse/pull/45252) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Remove PREALLOCATE for HASHED/SPARSE_HASHED dictionaries. [#45388](https://github.com/ClickHouse/ClickHouse/pull/45388) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Parallel quorum inserts might work incorrectly with `*MergeTree` tables created with deprecated syntax. Therefore, parallel quorum inserts support is completely disabled for such tables. It does not affect tables created with a new syntax. [#45430](https://github.com/ClickHouse/ClickHouse/pull/45430) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
|
||||
#### New Feature
|
||||
* Add `quantileInterpolatedWeighted`/`quantilesInterpolatedWeighted` functions. [#38252](https://github.com/ClickHouse/ClickHouse/pull/38252) ([Bharat Nallan](https://github.com/bharatnc)).
|
||||
* Add an experimental inverted index as a new secondary index type for efficient text search. [#38667](https://github.com/ClickHouse/ClickHouse/pull/38667) ([larryluogit](https://github.com/larryluogit)).
|
||||
* Add column `ptr` to `system.trace_log` for `trace_type = 'MemorySample'`. This column contains an address of allocation. Added function `flameGraph` which can build flamegraph containing allocated and not released memory. Reworking of [#38391](https://github.com/ClickHouse/ClickHouse/issues/38391). [#38953](https://github.com/ClickHouse/ClickHouse/pull/38953) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Dictionary source for extracting keys by traversing regular expressions tree. [#40878](https://github.com/ClickHouse/ClickHouse/pull/40878) ([Vage Ogannisian](https://github.com/nooblose)).
|
||||
* Added parametrized view functionality, now it's possible to specify query parameters for View table engine. resolves [#40907](https://github.com/ClickHouse/ClickHouse/issues/40907). [#41687](https://github.com/ClickHouse/ClickHouse/pull/41687) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||
* Added an extendable and configurable scheduling subsystem for IO requests (not yet integrated with the IO code itself). [#41840](https://github.com/ClickHouse/ClickHouse/pull/41840) ([Sergei Trifonov](https://github.com/serxa)).
|
||||
* Added `SYSTEM DROP DATABASE REPLICA` that removes metadata of dead replica of `Replicated` database. Resolves [#41794](https://github.com/ClickHouse/ClickHouse/issues/41794). [#42807](https://github.com/ClickHouse/ClickHouse/pull/42807) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Array join supports the Map type, similar to the explode function in Spark. [#43239](https://github.com/ClickHouse/ClickHouse/pull/43239) ([李扬](https://github.com/taiyang-li)).
|
||||
* Support SQL standard binary and hex string literals. [#43785](https://github.com/ClickHouse/ClickHouse/pull/43785) ([Mo Xuan](https://github.com/mo-avatar)).
|
||||
* Add experimental query result cache. [#43797](https://github.com/ClickHouse/ClickHouse/pull/43797) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Format DateTime values in Joda-Time style. Refer to https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html. [#43818](https://github.com/ClickHouse/ClickHouse/pull/43818) ([李扬](https://github.com/taiyang-li)).
|
||||
* Follow-up to [#40878](https://github.com/ClickHouse/ClickHouse/issues/40878), supporting the regexp dictionary. [#43858](https://github.com/ClickHouse/ClickHouse/pull/43858) ([Han Fei](https://github.com/hanfei1991)).
|
||||
* Implemented a fractional second formatter (`%f`) for formatDateTime. [#44060](https://github.com/ClickHouse/ClickHouse/pull/44060) ([ltrk2](https://github.com/ltrk2)).
|
||||
* Added the `age` function to calculate the difference between two dates, or dates with time, expressed as a number of full units (see the short example after this list). Closes [#41115](https://github.com/ClickHouse/ClickHouse/issues/41115). [#44421](https://github.com/ClickHouse/ClickHouse/pull/44421) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Implemented a fractional second formatter (%f) for formatDateTime. This is slightly modified PR [#44060](https://github.com/ClickHouse/ClickHouse/issues/44060) by @ltrk2. [#44497](https://github.com/ClickHouse/ClickHouse/pull/44497) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Add null source for dictionaries. Closes [#44240](https://github.com/ClickHouse/ClickHouse/issues/44240). [#44502](https://github.com/ClickHouse/ClickHouse/pull/44502) ([mayamika](https://github.com/mayamika)).
|
||||
* We can use `s3_storage_class` to set different tier. Such as ``` <disks> <s3> <type>s3</type> <endpoint>xxx</endpoint> <access_key_id>xxx</access_key_id> <secret_access_key>xxx</secret_access_key> <s3_storage_class>STANDARD/INTELLIGENT_TIERING</s3_storage_class> </s3> </disks> ``` Closes [#44443](https://github.com/ClickHouse/ClickHouse/issues/44443). [#44707](https://github.com/ClickHouse/ClickHouse/pull/44707) ([chen](https://github.com/xiedeyantu)).
|
||||
* Try to detect header with column names (and maybe types) for CSV/TSV/CustomSeparated input formats. Add settings `input_format_tsv/csv/custom_detect_header` that enables this behaviour (enabled by default). Closes [#44640](https://github.com/ClickHouse/ClickHouse/issues/44640). [#44953](https://github.com/ClickHouse/ClickHouse/pull/44953) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Insert default values in case of missing elements in JSON object while parsing named tuple. Add setting `input_format_json_defaults_for_missing_elements_in_named_tuple` that controls this behaviour. Closes [#45142](https://github.com/ClickHouse/ClickHouse/issues/45142)#issuecomment-1380153217. [#45231](https://github.com/ClickHouse/ClickHouse/pull/45231) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Add total memory and used memory metrics with respect to cgroup in AsyncMetrics (https://github.com/ClickHouse/ClickHouse/issues/37983). [#45301](https://github.com/ClickHouse/ClickHouse/pull/45301) ([sichenzhao](https://github.com/sichenzhao)).
|
||||
* Introduce non-throwing variants of hasToken and hasTokenCaseInsensitive. [#45341](https://github.com/ClickHouse/ClickHouse/pull/45341) ([ltrk2](https://github.com/ltrk2)).
|
||||
|
||||
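To make a couple of the entries above concrete, here is a small hedged example exercising the new `age` function and the fractional-second `%f` formatter mentioned in this list. It uses the third-party `clickhouse-driver` package and assumes a server listening on localhost; the printed values are illustrative only.

```python
from clickhouse_driver import Client  # third-party package, assumed installed

client = Client("localhost")

# age(): difference between two dates expressed in full units (here: days).
rows = client.execute("SELECT age('day', toDate('2023-01-01'), toDate('2023-01-25'))")
print(rows)  # expected: [(24,)]

# formatDateTime with the new %f placeholder for fractional seconds.
rows = client.execute("SELECT formatDateTime(toDateTime64('2023-01-25 12:00:00.123', 3), '%F %T.%f')")
print(rows)  # prints the timestamp including its fractional-second part
```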
#### Performance Improvement
|
||||
* Added sharding support in HashedDictionary to allow parallel load (almost linear scaling based on number of shards). [#40003](https://github.com/ClickHouse/ClickHouse/pull/40003) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Do not load inactive parts at startup of `MergeTree` tables. [#42181](https://github.com/ClickHouse/ClickHouse/pull/42181) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Speed up query parsing. [#42284](https://github.com/ClickHouse/ClickHouse/pull/42284) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Always replace OR chain `expr = x1 OR ... OR expr = xN` to `expr IN (x1, ..., xN)` in case if `expr` is a `LowCardinality` column. Setting `optimize_min_equality_disjunction_chain_length` is ignored in this case. [#42889](https://github.com/ClickHouse/ClickHouse/pull/42889) ([Guo Wangyang](https://github.com/guowangy)).
|
||||
* In the original implementation, the memory of ThreadGroupStatus::finished_threads_counters_memory was released by moving it to a temporary std::vector, which soon expired and got destructed. This method is viable, but not straightforward enough. To enhance code readability, this commit releases the memory in the vector by first resizing it to 0 and then shrinking the capacity accordingly. [#43586](https://github.com/ClickHouse/ClickHouse/pull/43586) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
|
||||
* As a follow-up of [#42214](https://github.com/ClickHouse/ClickHouse/issues/42214), this PR tries to optimize the column-wise ternary logic evaluation by achieving auto-vectorization. In the performance test of this [microbenchmark](https://github.com/ZhiguoZh/ClickHouse/blob/20221123-ternary-logic-opt-example/src/Functions/examples/associative_applier_perf.cpp), we've observed a peak **performance gain** of **21x** on the ICX device (Intel Xeon Platinum 8380 CPU). [#43669](https://github.com/ClickHouse/ClickHouse/pull/43669) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
|
||||
* Improved latency of reading from storage `S3` and table function `s3` with large number of small files. Now settings `remote_filesystem_read_method` and `remote_filesystem_read_prefetch` take effect while reading from storage `S3`. [#43726](https://github.com/ClickHouse/ClickHouse/pull/43726) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Avoid acquiring read locks in system.tables if possible. [#43840](https://github.com/ClickHouse/ClickHouse/pull/43840) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* The performance experiments of SSB (Star Schema Benchmark) on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) shows that this change could effectively decrease the lock contention for ThreadPoolImpl::mutex by **75%**, increasing the CPU utilization and improving the overall performance by **2.4%**. [#44308](https://github.com/ClickHouse/ClickHouse/pull/44308) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
|
||||
* Now optimisation is applied only if the cached HT size is sufficiently large (thresholds were determined empirically and hardcoded). [#44455](https://github.com/ClickHouse/ClickHouse/pull/44455) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Previously, the whole struct field was loaded even when only one field of the struct needed to be read. [#44484](https://github.com/ClickHouse/ClickHouse/pull/44484) ([lgbo](https://github.com/lgbo-ustc)).
|
||||
* Small performance improvement for asynchronous reading from remote fs. [#44868](https://github.com/ClickHouse/ClickHouse/pull/44868) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Switched to faster shared (RW) mutex implementation. Performance may be improved in queries with a lot of thread synchronization or for data structures experiencing heavy contention. [#45007](https://github.com/ClickHouse/ClickHouse/pull/45007) ([Sergei Trifonov](https://github.com/serxa)).
|
||||
* Add fast path for: - col like '%%' - col like '%' - col not like '%' - col not like '%' - match(col, '.*'). [#45244](https://github.com/ClickHouse/ClickHouse/pull/45244) ([李扬](https://github.com/taiyang-li)).
|
||||
* todo. [#45289](https://github.com/ClickHouse/ClickHouse/pull/45289) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
|
||||
#### Improvement
|
||||
* Refactor and improve streaming engines Kafka/RabbitMQ/NATS and add support for all formats, also refactor formats a bit: - Fix producing messages in row-based formats with suffixes/prefixes. Now every message is formatted completely with all delimiters and can be parsed back using input format. - Support block-based formats like Native, Parquet, ORC, etc. Every block is formatted as a separate message. The number of rows in one message depends on block size, so you can control it via setting `max_block_size`. - Add new engine settings `kafka_max_rows_per_message/rabbitmq_max_rows_per_message/nats_max_rows_per_message`. They control the number of rows formatted in one message in row-based formats. Default value: 1. - Fix high memory consumption in NATS table engine. - Support arbitrary binary data in NATS producer (previously it worked only with strings containing \0 at the end). - Add missing Kafka/RabbitMQ/NATS engine settings in documentation. - Refactor producing and consuming in Kafka/RabbitMQ/NATS, separate it from WriteBuffers/ReadBuffers semantic. - Refactor output formats: remove callbacks on each row used in Kafka/RabbitMQ/NATS (now we don't use callbacks there), allow to use IRowOutputFormat directly, clarify row end and row between delimiters, make it possible to reset output format to start formatting again. - Add proper implementation in formatRow function (bonus after formats refactoring). [#42777](https://github.com/ClickHouse/ClickHouse/pull/42777) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Support `optimize_or_like_chain` in the new infrastructure. Part of [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#42797](https://github.com/ClickHouse/ClickHouse/pull/42797) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Improve the Asterisk and ColumnMatcher parsers. Part of [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#42884](https://github.com/ClickHouse/ClickHouse/pull/42884) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Implement `optimize_redundant_functions_in_order_by` on top of QueryTree. Part of [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#42970](https://github.com/ClickHouse/ClickHouse/pull/42970) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Support `optimize_group_by_function_keys` in the new analyzer architecture. Also, add support for optimizing GROUPING SETS keys. Part of [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#43261](https://github.com/ClickHouse/ClickHouse/pull/43261) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Improve reading CSV field in CustomSeparated/Template format. Closes [#42352](https://github.com/ClickHouse/ClickHouse/issues/42352) Closes [#39620](https://github.com/ClickHouse/ClickHouse/issues/39620). [#43332](https://github.com/ClickHouse/ClickHouse/pull/43332) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Support reading/writing `Nested` tables as `List` of `Struct` in CapnProto format. Read/write `Decimal32/64` as `Int32/64`. Closes [#43319](https://github.com/ClickHouse/ClickHouse/issues/43319). [#43379](https://github.com/ClickHouse/ClickHouse/pull/43379) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Unify query elapsed time measurements. [#43455](https://github.com/ClickHouse/ClickHouse/pull/43455) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Support a scalar subqueries cache. Implementation: added a map from the hash of the node (without alias) to the evaluated value in Context. Testing: added a test case with the new analyzer in 02174_cte_scalar_cache.sql. [#43640](https://github.com/ClickHouse/ClickHouse/pull/43640) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||
* Improve automatic usage of structure from insertion table in table functions file/hdfs/s3 when virtual columns present in select query, it fixes possible error `Block structure mismatch` or `number of columns mismatch`. [#43695](https://github.com/ClickHouse/ClickHouse/pull/43695) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Add support for signed arguments in range(). Fixes [#43333](https://github.com/ClickHouse/ClickHouse/issues/43333). [#43733](https://github.com/ClickHouse/ClickHouse/pull/43733) ([sanyu](https://github.com/wineternity)).
|
||||
* Remove redundant sorting, for example, sorting related ORDER BY clauses in subqueries. Implemented on top of query plan. It does similar optimization as `optimize_duplicate_order_by_and_distinct` regarding `ORDER BY` clauses, but more generic, since it's applied to any redundant sorting steps (not only caused by ORDER BY clause) and applied to subqueries of any depth. Related to [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#43905](https://github.com/ClickHouse/ClickHouse/pull/43905) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Added mmap support for StorageFile, which should improve the performance of clickhouse-local. [#43927](https://github.com/ClickHouse/ClickHouse/pull/43927) ([pufit](https://github.com/pufit)).
|
||||
* Add ability to disable deduplication for BACKUP (for backups without deduplication ATTACH can be used instead of full RESTORE), example `BACKUP foo TO S3(...) SETTINGS deduplicate_files=0` (default `deduplicate_files=1`). [#43947](https://github.com/ClickHouse/ClickHouse/pull/43947) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Make `system.replicas` table do parallel fetches of replicas statuses. Closes [#43918](https://github.com/ClickHouse/ClickHouse/issues/43918). [#43998](https://github.com/ClickHouse/ClickHouse/pull/43998) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Refactor and improve schema inference for text formats. Add new setting `schema_inference_make_columns_nullable` that controls making result types `Nullable` (enabled by default);. [#44019](https://github.com/ClickHouse/ClickHouse/pull/44019) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Better support for PROXYv1. [#44135](https://github.com/ClickHouse/ClickHouse/pull/44135) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Add information about the latest part check by cleanup thread into `system.parts` table. [#44244](https://github.com/ClickHouse/ClickHouse/pull/44244) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* Disable functions in readonly for inserts. [#44290](https://github.com/ClickHouse/ClickHouse/pull/44290) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||
* Add a setting `simultaneous_parts_removal_limit` to allow to limit the number of parts being processed by one iteration of CleanupThread. [#44461](https://github.com/ClickHouse/ClickHouse/pull/44461) ([Dmitry Novik](https://github.com/novikd)).
|
||||
* If user only need virtual columns, we don't need to initialize ReadBufferFromS3. May be helpful to [#44246](https://github.com/ClickHouse/ClickHouse/issues/44246). [#44493](https://github.com/ClickHouse/ClickHouse/pull/44493) ([chen](https://github.com/xiedeyantu)).
|
||||
* Prevent duplicate column names hints. Closes [#44130](https://github.com/ClickHouse/ClickHouse/issues/44130). [#44519](https://github.com/ClickHouse/ClickHouse/pull/44519) ([Joanna Hulboj](https://github.com/jh0x)).
|
||||
* Allow macro substitution in endpoint of disks resolve [#40951](https://github.com/ClickHouse/ClickHouse/issues/40951). [#44533](https://github.com/ClickHouse/ClickHouse/pull/44533) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||
* Added a `message_format_string` column to `system.text_log`. The column contains a pattern that was used to format the message. [#44543](https://github.com/ClickHouse/ClickHouse/pull/44543) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Improve schema inference when `input_format_json_read_object_as_string` is enabled. [#44546](https://github.com/ClickHouse/ClickHouse/pull/44546) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Add user-level setting `database_replicated_allow_replicated_engine_arguments` which allow to ban creation of `ReplicatedMergeTree` tables with arguments in `DatabaseReplicated`. [#44566](https://github.com/ClickHouse/ClickHouse/pull/44566) ([alesapin](https://github.com/alesapin)).
|
||||
* Prevent users from mistakenly specifying zero (invalid) value for `index_granularity`. This closes [#44536](https://github.com/ClickHouse/ClickHouse/issues/44536). [#44578](https://github.com/ClickHouse/ClickHouse/pull/44578) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Added possibility to set path to service keytab file in `keytab` parameter in `kerberos` section of config.xml. [#44594](https://github.com/ClickHouse/ClickHouse/pull/44594) ([Roman Vasin](https://github.com/rvasin)).
|
||||
* Use already written part of the query for fuzzy search (pass to skim). [#44600](https://github.com/ClickHouse/ClickHouse/pull/44600) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Enable input_format_json_read_objects_as_strings by default to be able to read nested JSON objects while JSON Object type is experimental. [#44657](https://github.com/ClickHouse/ClickHouse/pull/44657) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* When users do duplicate async inserts, we should dedup inside the memory before we query keeper. [#44682](https://github.com/ClickHouse/ClickHouse/pull/44682) ([Han Fei](https://github.com/hanfei1991)).
|
||||
* Input/output Avro bool type as ClickHouse bool type. [#44684](https://github.com/ClickHouse/ClickHouse/pull/44684) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Don't parse beyond the quotes when reading UUIDs. [#44686](https://github.com/ClickHouse/ClickHouse/pull/44686) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Infer UInt64 in case of Int64 overflow and fix some transforms in schema inference. [#44696](https://github.com/ClickHouse/ClickHouse/pull/44696) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Previously dependency resolving inside DatabaseReplicated was done in a hacky way and now it is done right using an explicit graph. [#44697](https://github.com/ClickHouse/ClickHouse/pull/44697) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||
* Support Bool type in Arrow/Parquet/ORC. Closes [#43970](https://github.com/ClickHouse/ClickHouse/issues/43970). [#44698](https://github.com/ClickHouse/ClickHouse/pull/44698) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix `output_format_pretty_row_numbers` does not preserve the counter across the blocks. Closes [#44815](https://github.com/ClickHouse/ClickHouse/issues/44815). [#44832](https://github.com/ClickHouse/ClickHouse/pull/44832) ([flynn](https://github.com/ucasfl)).
|
||||
* Extend function "toDayOfWeek" with a mode argument describing if a) the week starts on Monday or Sunday and b) if counting starts at 0 or 1. [#44860](https://github.com/ClickHouse/ClickHouse/pull/44860) ([李扬](https://github.com/taiyang-li)).
|
||||
* Don't report errors in system.errors due to parts being merged concurrently with the background cleanup process. [#44874](https://github.com/ClickHouse/ClickHouse/pull/44874) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Optimize and fix metrics for Distributed async INSERT. [#44922](https://github.com/ClickHouse/ClickHouse/pull/44922) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Added settings to disallow concurrent backups and restores. Resolves [#43891](https://github.com/ClickHouse/ClickHouse/issues/43891). Implementation: added server-level settings to disallow concurrent backups and restores, which are read and set when BackupWorker is created in Context; the settings are set to true by default; before starting a backup or restore, added a check to see if any other backups/restores are running (for internal requests it checks whether the request comes from the same node, using backup_uuid). [#45072](https://github.com/ClickHouse/ClickHouse/pull/45072) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||
* Add a cache for async block IDs. This reduces the number of ZooKeeper requests when async insert deduplication is enabled. [#45106](https://github.com/ClickHouse/ClickHouse/pull/45106) ([Han Fei](https://github.com/hanfei1991)).
|
||||
* CRC32 changes to address the WeakHash collision issue in PowerPC. [#45144](https://github.com/ClickHouse/ClickHouse/pull/45144) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)).
|
||||
* Optimize memory consumption during backup to S3: files to S3 now will be copied directly without using `WriteBufferFromS3` (which could use a lot of memory). [#45188](https://github.com/ClickHouse/ClickHouse/pull/45188) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Use structure from insertion table in generateRandom without arguments. [#45239](https://github.com/ClickHouse/ClickHouse/pull/45239) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Use `GetObjectAttributes` request instead of `HeadObject` request to get the size of an object in AWS S3. This change fixes handling endpoints without explicit region, for example. [#45288](https://github.com/ClickHouse/ClickHouse/pull/45288) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Add `<storage_policy>` config parameter for system logs. [#45320](https://github.com/ClickHouse/ClickHouse/pull/45320) ([Stig Bakken](https://github.com/stigsb)).
|
||||
* Remove redundant sorting, for example, sorting related ORDER BY clauses in subqueries. Implemented on top of query plan. It does similar optimization as `optimize_duplicate_order_by_and_distinct` regarding `ORDER BY` clauses, but more generic, since it's applied to any redundant sorting steps (not only caused by ORDER BY clause) and applied to subqueries of any depth. Related to [#42648](https://github.com/ClickHouse/ClickHouse/issues/42648). [#45420](https://github.com/ClickHouse/ClickHouse/pull/45420) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Allow implicit conversion of floats stored in string fields of JSON to integers in `JSONExtract` functions. E.g. `JSONExtract('{"a": "1000.111"}', 'a', 'UInt64')` -> `1000`, previously it returned 0 (see the short check after this list). [#45432](https://github.com/ClickHouse/ClickHouse/pull/45432) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Added fields `supports_parallel_parsing` and `supports_parallel_formatting` to table `system.formats` for better introspection. [#45499](https://github.com/ClickHouse/ClickHouse/pull/45499) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Attempt to improve fsync latency (by syncing all files at once during fetches and small files after mutations) and one tiny fix for fsync_part_directory. [#45537](https://github.com/ClickHouse/ClickHouse/pull/45537) ([Azat Khuzhin](https://github.com/azat)).
|
||||
|
||||
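One of the items above changes `JSONExtract` semantics for floats stored as strings; a quick hedged check of the new behaviour, under the same assumptions as the previous sketch (third-party `clickhouse-driver`, local server):

```python
from clickhouse_driver import Client  # third-party package, assumed installed

client = Client("localhost")

# Floats stored in JSON string fields are now implicitly converted to integers.
value = client.execute("""SELECT JSONExtract('{"a": "1000.111"}', 'a', 'UInt64')""")[0][0]
print(value)  # 1000 on 23.1; earlier releases returned 0
```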
#### Bug Fix
|
||||
* Fix HTTP requests without path for AWS. After updating AWS SDK the sdk no longer adds a slash to requesting paths so we need to do it in our PocoHTTPClient to keep HTTP requests correct. [#45238](https://github.com/ClickHouse/ClickHouse/pull/45238) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
* Fix backup if mutations get killed during the backup process. [#45351](https://github.com/ClickHouse/ClickHouse/pull/45351) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||
|
||||
#### Build/Testing/Packaging Improvement
|
||||
* Builtin skim for fuzzy search in clickhouse client/local history. [#44239](https://github.com/ClickHouse/ClickHouse/pull/44239) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Memory limit for server is set now in AST fuzz tests to avoid OOMs. [#44282](https://github.com/ClickHouse/ClickHouse/pull/44282) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* In rare cases, we don't rebuild binaries, because another task with a similar prefix succeeded. E.g. `binary_darwin` didn't restart because `binary_darwin_aarch64`. [#44311](https://github.com/ClickHouse/ClickHouse/pull/44311) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* The "universal.sh" now fetches a SSE2 build on systems which don't have SSE4.2. [#44366](https://github.com/ClickHouse/ClickHouse/pull/44366) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Retry the integration tests on compressing errors. [#44529](https://github.com/ClickHouse/ClickHouse/pull/44529) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* 1. Added pytest-random by default in the integration tests runner. 2. Disabled TSAN checks for tests with gRPC (like https://s3.amazonaws.com/clickhouse-test-reports/42807/e9d7407a58f6e3f7d88c0c534685704f23560704/integration_tests__tsan__[4/6].html). 3. Clean up tables after tests in ODBC. [#44711](https://github.com/ClickHouse/ClickHouse/pull/44711) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||
* We removed support for shared linking because of Rust. Actually, Rust is only an excuse for this removal, and we wanted to remove it nevertheless. [#44828](https://github.com/ClickHouse/ClickHouse/pull/44828) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Checks will try to download images before running integration tests. If image, proxy or whatever is broken in infrastructure it will not make tests flaky. Images will be cached locally and download time will not be added to random tests. Compose images are now changed to be used without correct environment from helpers/cluster.py. [#44848](https://github.com/ClickHouse/ClickHouse/pull/44848) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||
* Fix zookeeper downloading, update the version, and optimize the image size. [#44853](https://github.com/ClickHouse/ClickHouse/pull/44853) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* The performance tests were silently broken because `Errors` wasn't detected in the status message. [#44867](https://github.com/ClickHouse/ClickHouse/pull/44867) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Remove the dependency on the `adduser` tool from the packages, because we don't use it. This fixes [#44934](https://github.com/ClickHouse/ClickHouse/issues/44934). [#45011](https://github.com/ClickHouse/ClickHouse/pull/45011) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* SQLite library is updated to the latest. It is used for the SQLite database and table integration engines. Also, fixed a false-positive TSan report. This closes [#45027](https://github.com/ClickHouse/ClickHouse/issues/45027). [#45031](https://github.com/ClickHouse/ClickHouse/pull/45031) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix report sending in the case when FastTest failed. [#45588](https://github.com/ClickHouse/ClickHouse/pull/45588) ([Dmitry Novik](https://github.com/novikd)).
|
||||
|
||||
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
||||
|
||||
* #40651 [#41404](https://github.com/ClickHouse/ClickHouse/issues/41404). [#42126](https://github.com/ClickHouse/ClickHouse/pull/42126) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Fix possible use-of-uninitialized value after executing expressions after sorting. Closes [#43386](https://github.com/ClickHouse/ClickHouse/issues/43386). CC: @nickitat. [#43635](https://github.com/ClickHouse/ClickHouse/pull/43635) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Better handling of NULL in aggregate combinators, fix possible segfault/logical error while using optimization `optimize_rewrite_sum_if_to_count_if`. Closes [#43758](https://github.com/ClickHouse/ClickHouse/issues/43758). [#43813](https://github.com/ClickHouse/ClickHouse/pull/43813) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix CREATE USER/ROLE query settings constraints. [#43993](https://github.com/ClickHouse/ClickHouse/pull/43993) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Fix wrong behavior of `JOIN ON t1.x = t2.x AND 1 = 1`, forbid such queries. [#44016](https://github.com/ClickHouse/ClickHouse/pull/44016) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fixed bug with non-parsable default value for EPHEMERAL column in table metadata. [#44026](https://github.com/ClickHouse/ClickHouse/pull/44026) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix parsing of bad version from compatibility setting. [#44224](https://github.com/ClickHouse/ClickHouse/pull/44224) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Bring interval subtraction from datetime in line with addition. [#44241](https://github.com/ClickHouse/ClickHouse/pull/44241) ([ltrk2](https://github.com/ltrk2)).
|
||||
* Fix double-free in HashTable::clearAndShrink() with zero elements in it. [#44256](https://github.com/ClickHouse/ClickHouse/pull/44256) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Remove limits on maximum size of the result for view. [#44261](https://github.com/ClickHouse/ClickHouse/pull/44261) ([lizhuoyu5](https://github.com/lzydmxy)).
|
||||
* Fix possible logical error in cache if `do_not_evict_index_and_mrk_files=1`. Closes [#42142](https://github.com/ClickHouse/ClickHouse/issues/42142). [#44268](https://github.com/ClickHouse/ClickHouse/pull/44268) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix possible too early cache write interruption in write-through cache (caching could be stopped due to false assumption when it shouldn't have). [#44289](https://github.com/ClickHouse/ClickHouse/pull/44289) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix possible crash in case function `IN` with constant arguments was used as a constant argument together with `LowCardinality`. Fixes [#44221](https://github.com/ClickHouse/ClickHouse/issues/44221). [#44346](https://github.com/ClickHouse/ClickHouse/pull/44346) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix support for complex parameters (like arrays) of parametric aggregate functions. This closes [#30975](https://github.com/ClickHouse/ClickHouse/issues/30975). The aggregate function `sumMapFiltered` was unusable in distributed queries before this change. [#44358](https://github.com/ClickHouse/ClickHouse/pull/44358) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix possible nullptr dereference in JoinSwitcher with `allow_experimental_analyzer`. [#44371](https://github.com/ClickHouse/ClickHouse/pull/44371) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix reading ObjectId in BSON schema inference. [#44382](https://github.com/ClickHouse/ClickHouse/pull/44382) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix race which can lead to premature temp parts removal before merge finished in ReplicatedMergeTree. This issue could lead to errors like `No such file or directory: xxx`. Fixes [#43983](https://github.com/ClickHouse/ClickHouse/issues/43983). [#44383](https://github.com/ClickHouse/ClickHouse/pull/44383) ([alesapin](https://github.com/alesapin)).
|
||||
* Some invalid `SYSTEM ... ON CLUSTER` queries worked in an unexpected way if a cluster name was not specified. It's fixed, now invalid queries throw `SYNTAX_ERROR` as they should. Fixes [#44264](https://github.com/ClickHouse/ClickHouse/issues/44264). [#44387](https://github.com/ClickHouse/ClickHouse/pull/44387) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Fix reading Map type in ORC format. [#44400](https://github.com/ClickHouse/ClickHouse/pull/44400) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix reading columns that are not presented in input data in Parquet/ORC formats. Previously it could lead to error `INCORRECT_NUMBER_OF_COLUMNS`. Closes [#44333](https://github.com/ClickHouse/ClickHouse/issues/44333). [#44405](https://github.com/ClickHouse/ClickHouse/pull/44405) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Previously bar() function used the same '▋' (U+258B "Left five eighths block") character to display both 5/8 and 6/8 bars. This change corrects this behavior by using '▊' (U+258A "Left three quarters block") for displaying 6/8 bar. [#44410](https://github.com/ClickHouse/ClickHouse/pull/44410) ([Alexander Gololobov](https://github.com/davenger)).
|
||||
* Placing profile settings after profile settings constraints in the configuration file made constraints ineffective. [#44411](https://github.com/ClickHouse/ClickHouse/pull/44411) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||
* Fix `SYNTAX_ERROR` while running `EXPLAIN AST INSERT` queries with data. Closes [#44207](https://github.com/ClickHouse/ClickHouse/issues/44207). [#44413](https://github.com/ClickHouse/ClickHouse/pull/44413) ([save-my-heart](https://github.com/save-my-heart)).
|
||||
* Fix reading bool value with CRLF in CSV format. Closes [#44401](https://github.com/ClickHouse/ClickHouse/issues/44401). [#44442](https://github.com/ClickHouse/ClickHouse/pull/44442) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Don't execute and/or/if/multiIf on LowCardinality dictionary, so the result type cannot be LowCardinality. It could lead to error `Illegal column ColumnLowCardinality` in some cases. Fixes [#43603](https://github.com/ClickHouse/ClickHouse/issues/43603). [#44469](https://github.com/ClickHouse/ClickHouse/pull/44469) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix mutations with setting `max_streams_for_merge_tree_reading`. [#44472](https://github.com/ClickHouse/ClickHouse/pull/44472) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix potential null pointer dereference with GROUPING SETS in ASTSelectQuery::formatImpl ([#43049](https://github.com/ClickHouse/ClickHouse/issues/43049)). [#44479](https://github.com/ClickHouse/ClickHouse/pull/44479) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Validate types in table function arguments, CAST function arguments, JSONAsObject schema inference according to settings. [#44501](https://github.com/ClickHouse/ClickHouse/pull/44501) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix IN function with LC and const column, close [#44503](https://github.com/ClickHouse/ClickHouse/issues/44503). [#44506](https://github.com/ClickHouse/ClickHouse/pull/44506) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* Fixed a bug in normalization of a `DEFAULT` expression in `CREATE TABLE` statement. The second argument of function `in` (or the right argument of operator `IN`) might be replaced with the result of its evaluation during CREATE query execution. Fixes [#44496](https://github.com/ClickHouse/ClickHouse/issues/44496). [#44547](https://github.com/ClickHouse/ClickHouse/pull/44547) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||
* Projections do not work in presence of WITH ROLLUP, WITH CUBE and WITH TOTALS. In previous versions, a query produced an exception instead of skipping the usage of projections. This closes [#44614](https://github.com/ClickHouse/ClickHouse/issues/44614). This closes [#42772](https://github.com/ClickHouse/ClickHouse/issues/42772). [#44615](https://github.com/ClickHouse/ClickHouse/pull/44615) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix bug in experimental analyzer and `aggregate_functions_null_for_empty = 1`. Close [#44644](https://github.com/ClickHouse/ClickHouse/issues/44644). [#44648](https://github.com/ClickHouse/ClickHouse/pull/44648) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Async blocks were not cleaned up because the function `get all blocks sorted by time` didn't return async blocks. [#44651](https://github.com/ClickHouse/ClickHouse/pull/44651) ([Han Fei](https://github.com/hanfei1991)).
|
||||
* Fix `LOGICAL_ERROR` `The top step of the right pipeline should be ExpressionStep` for JOIN with subquery, UNION, and TOTALS. Fixes [#43687](https://github.com/ClickHouse/ClickHouse/issues/43687). [#44673](https://github.com/ClickHouse/ClickHouse/pull/44673) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Avoid std::out_of_range exception in StorageExecutable. [#44681](https://github.com/ClickHouse/ClickHouse/pull/44681) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Do not apply `optimize_syntax_fuse_functions` to quantiles on AST, close [#44712](https://github.com/ClickHouse/ClickHouse/issues/44712). [#44713](https://github.com/ClickHouse/ClickHouse/pull/44713) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix bug with wrong type in Merge table and PREWHERE, close [#43324](https://github.com/ClickHouse/ClickHouse/issues/43324). [#44716](https://github.com/ClickHouse/ClickHouse/pull/44716) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix possible crash during shutdown (while destroying TraceCollector). Fixes [#44757](https://github.com/ClickHouse/ClickHouse/issues/44757). [#44758](https://github.com/ClickHouse/ClickHouse/pull/44758) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix a possible crash in distributed query processing. The crash could happen if a query with totals or extremes returned an empty result and there are mismatched types in the Distributed and the local tables. Fixes [#44738](https://github.com/ClickHouse/ClickHouse/issues/44738). [#44760](https://github.com/ClickHouse/ClickHouse/pull/44760) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix fsync for fetches (`min_compressed_bytes_to_fsync_after_fetch`)/small files (ttl.txt, columns.txt) in mutations (`min_rows_to_fsync_after_merge`/`min_compressed_bytes_to_fsync_after_merge`). [#44781](https://github.com/ClickHouse/ClickHouse/pull/44781) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* A rare race condition was possible when querying the `system.parts` or `system.parts_columns` tables in the presence of parts being moved between disks. Introduced in [#41145](https://github.com/ClickHouse/ClickHouse/issues/41145). [#44809](https://github.com/ClickHouse/ClickHouse/pull/44809) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix the error `Context has expired` which could appear with enabled projections optimization. Can be reproduced for queries with specific functions, like `dictHas/dictGet` which use context in runtime. Fixes [#44844](https://github.com/ClickHouse/ClickHouse/issues/44844). [#44850](https://github.com/ClickHouse/ClickHouse/pull/44850) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Another fix for `Cannot read all data` error which could happen while reading `LowCardinality` dictionary from remote fs. Fixes [#44709](https://github.com/ClickHouse/ClickHouse/issues/44709). [#44875](https://github.com/ClickHouse/ClickHouse/pull/44875) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Ignore hwmon sensors on label read issues. [#44895](https://github.com/ClickHouse/ClickHouse/pull/44895) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Use `max_delay_to_insert` value in case calculated time to delay INSERT exceeds the setting value. Related to [#44902](https://github.com/ClickHouse/ClickHouse/issues/44902). [#44916](https://github.com/ClickHouse/ClickHouse/pull/44916) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Fix error `Different order of columns in UNION subquery` for queries with `UNION`. Fixes [#44866](https://github.com/ClickHouse/ClickHouse/issues/44866). [#44920](https://github.com/ClickHouse/ClickHouse/pull/44920) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Delay for INSERT can be calculated incorrectly, which can lead to always using `max_delay_to_insert` setting as delay instead of a correct value. Using simple formula `max_delay_to_insert * (parts_over_threshold/max_allowed_parts_over_threshold)` i.e. delay grows proportionally to parts over threshold. Closes [#44902](https://github.com/ClickHouse/ClickHouse/issues/44902). [#44954](https://github.com/ClickHouse/ClickHouse/pull/44954) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Fix an ALTER TABLE ... TTL error when a wide part has a lightweight delete mask. [#44959](https://github.com/ClickHouse/ClickHouse/pull/44959) ([Mingliang Pan](https://github.com/liangliangpan)).
|
||||
* Follow-up fix for Replace domain IP types (IPv4, IPv6) with native [#43221](https://github.com/ClickHouse/ClickHouse/issues/43221). [#45024](https://github.com/ClickHouse/ClickHouse/pull/45024) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Follow-up fix for Replace domain IP types (IPv4, IPv6) with native https://github.com/ClickHouse/ClickHouse/pull/43221. [#45043](https://github.com/ClickHouse/ClickHouse/pull/45043) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* A buffer overflow was possible in the parser. Found by fuzzer. [#45047](https://github.com/ClickHouse/ClickHouse/pull/45047) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix possible cannot-read-all-data error in storage FileLog. Closes [#45051](https://github.com/ClickHouse/ClickHouse/issues/45051), [#38257](https://github.com/ClickHouse/ClickHouse/issues/38257). [#45057](https://github.com/ClickHouse/ClickHouse/pull/45057) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Memory efficient aggregation (setting `distributed_aggregation_memory_efficient`) is disabled when grouping sets are present in the query. [#45058](https://github.com/ClickHouse/ClickHouse/pull/45058) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Fix `RANGE_HASHED` dictionary to count range columns as part of primary key during updates when `update_field` is specified. Closes [#44588](https://github.com/ClickHouse/ClickHouse/issues/44588). [#45061](https://github.com/ClickHouse/ClickHouse/pull/45061) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||
* Fix error `Cannot capture column` for `LowCardinality` captured argument of nested lambda. Fixes [#45028](https://github.com/ClickHouse/ClickHouse/issues/45028). [#45065](https://github.com/ClickHouse/ClickHouse/pull/45065) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix the wrong query result of `additional_table_filters` (additional filter was not applied) in case if minmax/count projection is used. [#45133](https://github.com/ClickHouse/ClickHouse/pull/45133) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fixed bug in `histogram` function accepting negative values. [#45147](https://github.com/ClickHouse/ClickHouse/pull/45147) ([simpleton](https://github.com/rgzntrade)).
|
||||
* Follow-up fix for Replace domain IP types (IPv4, IPv6) with native https://github.com/ClickHouse/ClickHouse/pull/43221. [#45150](https://github.com/ClickHouse/ClickHouse/pull/45150) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix wrong column nullability in StorageJoin, close [#44940](https://github.com/ClickHouse/ClickHouse/issues/44940). [#45184](https://github.com/ClickHouse/ClickHouse/pull/45184) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix `background_fetches_pool_size` settings reload (increase at runtime). [#45189](https://github.com/ClickHouse/ClickHouse/pull/45189) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Correctly process `SELECT` queries on KV engines (e.g. KeeperMap, EmbeddedRocksDB) using `IN` on the key with subquery producing different type. [#45215](https://github.com/ClickHouse/ClickHouse/pull/45215) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix logical error in SEMI JOIN & join_use_nulls in some cases, close [#45163](https://github.com/ClickHouse/ClickHouse/issues/45163), close [#45209](https://github.com/ClickHouse/ClickHouse/issues/45209). [#45230](https://github.com/ClickHouse/ClickHouse/pull/45230) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix heap-use-after-free in reading from s3. [#45253](https://github.com/ClickHouse/ClickHouse/pull/45253) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix bug when the Avro Union type is ['null', Nested type], closes [#45275](https://github.com/ClickHouse/ClickHouse/issues/45275). Fix bug that incorrectly infer `bytes` type to `Float`. [#45276](https://github.com/ClickHouse/ClickHouse/pull/45276) ([flynn](https://github.com/ucasfl)).
|
||||
* Throw a correct exception when explicit PREWHERE cannot be used with table using storage engine `Merge`. [#45319](https://github.com/ClickHouse/ClickHouse/pull/45319) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Under WSL1 Ubuntu self-extracting clickhouse fails to decompress due to inconsistency - /proc/self/maps reporting 32bit file's inode, while stat reporting 64bit inode. [#45339](https://github.com/ClickHouse/ClickHouse/pull/45339) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix race in Distributed table startup (that could lead to processing file of async INSERT multiple times). [#45360](https://github.com/ClickHouse/ClickHouse/pull/45360) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix possible crash while reading from storage `S3` and table function `s3` in case when `ListObject` request has failed. [#45371](https://github.com/ClickHouse/ClickHouse/pull/45371) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fixed some bugs in JOINs with WHERE by disabling the "move to prewhere" optimization for it, close [#44062](https://github.com/ClickHouse/ClickHouse/issues/44062). [#45391](https://github.com/ClickHouse/ClickHouse/pull/45391) ([Vladimir C](https://github.com/vdimir)).
|
||||
* Fix `SELECT ... FROM system.dictionaries` exception when there is a dictionary with a bad structure (e.g. incorrect type in xml config). [#45399](https://github.com/ClickHouse/ClickHouse/pull/45399) ([Aleksei Filatov](https://github.com/aalexfvk)).
|
||||
* Fix s3Cluster schema inference when structure from insertion table is used in `INSERT INTO ... SELECT * FROM s3Cluster` queries. [#45422](https://github.com/ClickHouse/ClickHouse/pull/45422) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix bug in JSON/BSONEachRow parsing with HTTP that could lead to using default values for some columns instead of values from data. [#45424](https://github.com/ClickHouse/ClickHouse/pull/45424) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixed a bug (`Code: 632. DB::Exception: Unexpected data ... after parsed IPv6 value ...`) with typed parsing of IP types from a text source. [#45425](https://github.com/ClickHouse/ClickHouse/pull/45425) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Add a check for empty regular expressions. Closes [#45297](https://github.com/ClickHouse/ClickHouse/issues/45297). [#45428](https://github.com/ClickHouse/ClickHouse/pull/45428) ([Han Fei](https://github.com/hanfei1991)).
* Fix a possible (most likely distributed) query hang. [#45448](https://github.com/ClickHouse/ClickHouse/pull/45448) ([Azat Khuzhin](https://github.com/azat)).
* Fix disabled two-level aggregation from HTTP. [#45450](https://github.com/ClickHouse/ClickHouse/pull/45450) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible deadlock with `allow_asynchronous_read_from_io_pool_for_merge_tree` enabled in case of exception from `ThreadPool::schedule`. [#45481](https://github.com/ClickHouse/ClickHouse/pull/45481) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible in-use table after DETACH. [#45493](https://github.com/ClickHouse/ClickHouse/pull/45493) ([Azat Khuzhin](https://github.com/azat)).
* Fix rare abort in case when query is canceled and parallel parsing was used during its execution. [#45498](https://github.com/ClickHouse/ClickHouse/pull/45498) ([Anton Popov](https://github.com/CurtizJ)).
* Fix a race between Distributed table creation and INSERT into it (could lead to CANNOT_LINK during INSERT into the table). [#45502](https://github.com/ClickHouse/ClickHouse/pull/45502) ([Azat Khuzhin](https://github.com/azat)).
* Add proper default (SLRU) to cache policy getter. Closes [#45514](https://github.com/ClickHouse/ClickHouse/issues/45514). [#45524](https://github.com/ClickHouse/ClickHouse/pull/45524) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove AST-based optimization `optimize_fuse_sum_count_avg`, close [#45439](https://github.com/ClickHouse/ClickHouse/issues/45439). [#45558](https://github.com/ClickHouse/ClickHouse/pull/45558) ([Vladimir C](https://github.com/vdimir)).
#### Bug-fix
* Disallow `arrayJoin` in mutations, closes [#42637](https://github.com/ClickHouse/ClickHouse/issues/42637). Implementation: added a new parameter `disallow_arrayjoin` to `ActionsVisitor::Data`, which is set by `MutationsIterator` when it appends an expression; `ActionsVisitor` checks it and throws an error when `arrayJoin` is used in a mutation. Testing: added the test `02504_disallow_arrayjoin_in_mutations.sql`. [#44447](https://github.com/ClickHouse/ClickHouse/pull/44447) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
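
  A minimal sketch of the now-rejected pattern, using a hypothetical table that is not taken from the PR itself:

  ```sql
  -- Hypothetical table for illustration.
  CREATE TABLE t_demo (id UInt64, v UInt64) ENGINE = MergeTree ORDER BY id;

  -- After this change, using arrayJoin inside a mutation is expected to be
  -- rejected with an exception instead of being accepted.
  ALTER TABLE t_demo UPDATE v = arrayJoin([1, 2, 3]) WHERE 1;
  ```
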
* Fix for qualified asterisks with an alias table name and a column transformer. Resolves [#44736](https://github.com/ClickHouse/ClickHouse/issues/44736). [#44755](https://github.com/ClickHouse/ClickHouse/pull/44755) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Update the backup/restore status when concurrent backups and restores are not allowed. Resolves [#45486](https://github.com/ClickHouse/ClickHouse/issues/45486). Implementation: moved the concurrent backup/restore check inside the try-catch block that sets the status, so that other nodes in the cluster are aware of failures; renamed `backup_uuid` to `restore_uuid` in `RestoreSettings`. [#45497](https://github.com/ClickHouse/ClickHouse/pull/45497) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
#### Build Improvement
* crc32 fix for s390x. [#43706](https://github.com/ClickHouse/ClickHouse/pull/43706) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* Fixed endian issues in transform function for s390x. [#45522](https://github.com/ClickHouse/ClickHouse/pull/45522) ([Harry Lee](https://github.com/HarryLeeIBM)).
#### Feature
* Record server startup time in ProfileEvents. Resolves [#43188](https://github.com/ClickHouse/ClickHouse/issues/43188). Implementation: added `ProfileEvents::ServerStartupMilliseconds` and recorded the time from the start of `main` until the server starts listening on sockets. Testing: added the test `02532_profileevents_server_startup_time.sql`. [#45250](https://github.com/ClickHouse/ClickHouse/pull/45250) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
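
  A hedged example of how the new counter can be inspected, assuming it is exposed through `system.events` like other global profile events:

  ```sql
  -- Time from the start of main() until the server began listening on sockets.
  SELECT value AS startup_ms
  FROM system.events
  WHERE event = 'ServerStartupMilliseconds';
  ```
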
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "If user only need virtual columns, we don't need to initialize ReadBufferFromS3"'. [#44939](https://github.com/ClickHouse/ClickHouse/pull/44939) ([Anton Popov](https://github.com/CurtizJ)).
* NO CL ENTRY: 'Revert "Custom reading for mutation"'. [#45121](https://github.com/ClickHouse/ClickHouse/pull/45121) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Revert "Custom reading for mutation""'. [#45122](https://github.com/ClickHouse/ClickHouse/pull/45122) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* NO CL ENTRY: 'Revert "update function DAYOFWEEK and add new function WEEKDAY for mysql/spark compatiability"'. [#45221](https://github.com/ClickHouse/ClickHouse/pull/45221) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Validate function arguments in query tree"'. [#45299](https://github.com/ClickHouse/ClickHouse/pull/45299) ([Maksim Kita](https://github.com/kitaisreal)).
* NO CL ENTRY: 'Revert "Revert "Validate function arguments in query tree""'. [#45300](https://github.com/ClickHouse/ClickHouse/pull/45300) ([Maksim Kita](https://github.com/kitaisreal)).
* NO CL ENTRY: 'Revert "Support optimize_or_like_chain in QueryTreePassManager"'. [#45406](https://github.com/ClickHouse/ClickHouse/pull/45406) ([Anton Popov](https://github.com/CurtizJ)).
* NO CL ENTRY: 'Resubmit Support optimize_or_like_chain in QueryTreePassManager'. [#45410](https://github.com/ClickHouse/ClickHouse/pull/45410) ([Dmitry Novik](https://github.com/novikd)).
* NO CL ENTRY: 'Revert "Remove redundant sorting"'. [#45414](https://github.com/ClickHouse/ClickHouse/pull/45414) ([Igor Nikonov](https://github.com/devcrafter)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Automatically merge green backport PRs and green approved PRs [#41110](https://github.com/ClickHouse/ClickHouse/pull/41110) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix assertion in async read buffer from remote [#41231](https://github.com/ClickHouse/ClickHouse/pull/41231) ([Kseniia Sumarokova](https://github.com/kssenii)).
* add retries on ConnectionError [#42991](https://github.com/ClickHouse/ClickHouse/pull/42991) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Update aws-c* submodules [#43020](https://github.com/ClickHouse/ClickHouse/pull/43020) ([Vitaly Baranov](https://github.com/vitlibar)).
* Replace domain IP types (IPv4, IPv6) with native [#43221](https://github.com/ClickHouse/ClickHouse/pull/43221) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix aggregate functions optimisation in AggregateFunctionsArithmericOperationsPass [#43372](https://github.com/ClickHouse/ClickHouse/pull/43372) ([Dmitry Novik](https://github.com/novikd)).
* Improve pytest --pdb experience by preserving dockerd on SIGINT [#43392](https://github.com/ClickHouse/ClickHouse/pull/43392) ([Azat Khuzhin](https://github.com/azat)).
* RFC: tests: add stacktraces for hung queries [#43396](https://github.com/ClickHouse/ClickHouse/pull/43396) ([Azat Khuzhin](https://github.com/azat)).
* Followup fixes for systemd notification ([#43400](https://github.com/ClickHouse/ClickHouse/issues/43400)) [#43597](https://github.com/ClickHouse/ClickHouse/pull/43597) ([Alexander Gololobov](https://github.com/davenger)).
* Refactor FunctionNode [#43761](https://github.com/ClickHouse/ClickHouse/pull/43761) ([Dmitry Novik](https://github.com/novikd)).
* Some cleanup: grace hash join [#43851](https://github.com/ClickHouse/ClickHouse/pull/43851) ([Igor Nikonov](https://github.com/devcrafter)).
* Temporary files evict fs cache - 2nd approach [#43972](https://github.com/ClickHouse/ClickHouse/pull/43972) ([Vladimir C](https://github.com/vdimir)).
* Randomize setting `enable_memory_bound_merging_of_aggregation_results` in tests [#43986](https://github.com/ClickHouse/ClickHouse/pull/43986) ([Nikita Taranov](https://github.com/nickitat)).
* Analyzer aggregate functions passes small fixes [#44013](https://github.com/ClickHouse/ClickHouse/pull/44013) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix wrong char in command [#44018](https://github.com/ClickHouse/ClickHouse/pull/44018) ([alesapin](https://github.com/alesapin)).
* Analyzer support Set index [#44097](https://github.com/ClickHouse/ClickHouse/pull/44097) ([Maksim Kita](https://github.com/kitaisreal)).
* Provide monotonicity info for `toUnixTimestamp64*` [#44116](https://github.com/ClickHouse/ClickHouse/pull/44116) ([Nikita Taranov](https://github.com/nickitat)).
* Avoid loading toolchain files multiple times [#44122](https://github.com/ClickHouse/ClickHouse/pull/44122) ([Azat Khuzhin](https://github.com/azat)).
* tests: exclude flaky columns from SHOW CLUSTERS test [#44123](https://github.com/ClickHouse/ClickHouse/pull/44123) ([Azat Khuzhin](https://github.com/azat)).
* Bump libdivide (to gain some new optimizations) [#44132](https://github.com/ClickHouse/ClickHouse/pull/44132) ([Azat Khuzhin](https://github.com/azat)).
* Make atomic counter relaxed in blockNumber() [#44193](https://github.com/ClickHouse/ClickHouse/pull/44193) ([Igor Nikonov](https://github.com/devcrafter)).
* Try fix flaky 01072_window_view_multiple_columns_groupby [#44195](https://github.com/ClickHouse/ClickHouse/pull/44195) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Apply new code of named collections (from [#43147](https://github.com/ClickHouse/ClickHouse/issues/43147)) to external table engines part 1 [#44204](https://github.com/ClickHouse/ClickHouse/pull/44204) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add some settings under `compatibility` [#44209](https://github.com/ClickHouse/ClickHouse/pull/44209) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Recommend Slack over Telegram in the "Question" issue template [#44222](https://github.com/ClickHouse/ClickHouse/pull/44222) ([Ivan Blinkov](https://github.com/blinkov)).
* Forbid paths in timezone names [#44225](https://github.com/ClickHouse/ClickHouse/pull/44225) ([Kruglov Pavel](https://github.com/Avogar)).
* Analyzer storage view crash fix [#44230](https://github.com/ClickHouse/ClickHouse/pull/44230) ([Maksim Kita](https://github.com/kitaisreal)).
* Add ThreadsInOvercommitTracker metric [#44233](https://github.com/ClickHouse/ClickHouse/pull/44233) ([Dmitry Novik](https://github.com/novikd)).
* Analyzer expired Context crash fix [#44234](https://github.com/ClickHouse/ClickHouse/pull/44234) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix grace join memory consumption, pt1 [#44238](https://github.com/ClickHouse/ClickHouse/pull/44238) ([Vladimir C](https://github.com/vdimir)).
* Fixed use-after-free of BLAKE3 error message [#44242](https://github.com/ClickHouse/ClickHouse/pull/44242) ([Joanna Hulboj](https://github.com/jh0x)).
* Fix deadlock in StorageSystemDatabases [#44272](https://github.com/ClickHouse/ClickHouse/pull/44272) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Get rid of global Git object [#44273](https://github.com/ClickHouse/ClickHouse/pull/44273) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update version after release [#44275](https://github.com/ClickHouse/ClickHouse/pull/44275) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update version_date.tsv and changelogs after v22.12.1.1752-stable [#44281](https://github.com/ClickHouse/ClickHouse/pull/44281) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Do not hold data parts during insert [#44299](https://github.com/ClickHouse/ClickHouse/pull/44299) ([Anton Popov](https://github.com/CurtizJ)).
* Another fix `test_server_reload` [#44306](https://github.com/ClickHouse/ClickHouse/pull/44306) ([Antonio Andelic](https://github.com/antonio2368)).
* Update version_date.tsv and changelogs after v22.9.7.34-stable [#44309](https://github.com/ClickHouse/ClickHouse/pull/44309) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* tests/perf: fix dependency check during DROP [#44312](https://github.com/ClickHouse/ClickHouse/pull/44312) ([Azat Khuzhin](https://github.com/azat)).
* (unused openssl integration, not for production) a follow-up [#44325](https://github.com/ClickHouse/ClickHouse/pull/44325) ([Boris Kuschel](https://github.com/bkuschel)).
* Replace old named collections code with new (from [#43147](https://github.com/ClickHouse/ClickHouse/issues/43147)) part 2 [#44327](https://github.com/ClickHouse/ClickHouse/pull/44327) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Disable "git-import" test in debug mode [#44328](https://github.com/ClickHouse/ClickHouse/pull/44328) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Check s3 part upload settings [#44335](https://github.com/ClickHouse/ClickHouse/pull/44335) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix typo [#44337](https://github.com/ClickHouse/ClickHouse/pull/44337) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for PowerBI [#44338](https://github.com/ClickHouse/ClickHouse/pull/44338) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#36038](https://github.com/ClickHouse/ClickHouse/issues/36038) [#44339](https://github.com/ClickHouse/ClickHouse/pull/44339) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#29386](https://github.com/ClickHouse/ClickHouse/issues/29386) [#44340](https://github.com/ClickHouse/ClickHouse/pull/44340) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#22929](https://github.com/ClickHouse/ClickHouse/issues/22929) [#44341](https://github.com/ClickHouse/ClickHouse/pull/44341) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#29883](https://github.com/ClickHouse/ClickHouse/issues/29883) [#44342](https://github.com/ClickHouse/ClickHouse/pull/44342) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix Docker [#44343](https://github.com/ClickHouse/ClickHouse/pull/44343) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test "02481_async_insert_dedup.python" [#44349](https://github.com/ClickHouse/ClickHouse/pull/44349) ([Han Fei](https://github.com/hanfei1991)).
* Add a test for [#22160](https://github.com/ClickHouse/ClickHouse/issues/22160) [#44355](https://github.com/ClickHouse/ClickHouse/pull/44355) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#34708](https://github.com/ClickHouse/ClickHouse/issues/34708) [#44356](https://github.com/ClickHouse/ClickHouse/pull/44356) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#30679](https://github.com/ClickHouse/ClickHouse/issues/30679) [#44357](https://github.com/ClickHouse/ClickHouse/pull/44357) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#34669](https://github.com/ClickHouse/ClickHouse/issues/34669) [#44359](https://github.com/ClickHouse/ClickHouse/pull/44359) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#34724](https://github.com/ClickHouse/ClickHouse/issues/34724) [#44360](https://github.com/ClickHouse/ClickHouse/pull/44360) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Try restarting ZK cluster on failed connection in `test_keeper_zookeeper_converted` [#44363](https://github.com/ClickHouse/ClickHouse/pull/44363) ([Antonio Andelic](https://github.com/antonio2368)).
* Disable grace_hash in test 00172_parallel_join [#44367](https://github.com/ClickHouse/ClickHouse/pull/44367) ([Vladimir C](https://github.com/vdimir)).
* Add check for submodules sanity [#44386](https://github.com/ClickHouse/ClickHouse/pull/44386) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Lock table for share during startup for database ordinary [#44393](https://github.com/ClickHouse/ClickHouse/pull/44393) ([alesapin](https://github.com/alesapin)).
* Implement a custom central checkout action [#44399](https://github.com/ClickHouse/ClickHouse/pull/44399) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Try fix some tests [#44406](https://github.com/ClickHouse/ClickHouse/pull/44406) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Better ParserAllCollectionsOfLiterals [#44408](https://github.com/ClickHouse/ClickHouse/pull/44408) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix bug with merge/mutate pool size increase [#44436](https://github.com/ClickHouse/ClickHouse/pull/44436) ([alesapin](https://github.com/alesapin)).
* Update 01072_window_view_multiple_columns_groupby.sh [#44438](https://github.com/ClickHouse/ClickHouse/pull/44438) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Disable buggy tsan assertion for integration test [#44444](https://github.com/ClickHouse/ClickHouse/pull/44444) ([alesapin](https://github.com/alesapin)).
* Respect the setting `schema_inference_make_columns_nullable` in Parquet/ORC/Arrow formats [#44446](https://github.com/ClickHouse/ClickHouse/pull/44446) ([Kruglov Pavel](https://github.com/Avogar)).
* Add tests as examples with errors of date(time) and string comparison that we should eliminate [#44462](https://github.com/ClickHouse/ClickHouse/pull/44462) ([Ilya Yatsishin](https://github.com/qoega)).
* Parallel parts cleanup with zero copy replication [#44466](https://github.com/ClickHouse/ClickHouse/pull/44466) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix incorrect usages of `getPartName()` [#44468](https://github.com/ClickHouse/ClickHouse/pull/44468) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flaky test `roaring_memory_tracking` [#44470](https://github.com/ClickHouse/ClickHouse/pull/44470) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Clarify query_id in test 01092_memory_profiler [#44483](https://github.com/ClickHouse/ClickHouse/pull/44483) ([Vladimir C](https://github.com/vdimir)).
* Default value for optional in SortNode::updateTreeHashImpl [#44491](https://github.com/ClickHouse/ClickHouse/pull/44491) ([Vladimir C](https://github.com/vdimir)).
* Do not try to remove WAL/move broken parts for static storage [#44495](https://github.com/ClickHouse/ClickHouse/pull/44495) ([Azat Khuzhin](https://github.com/azat)).
* Removed parent pid check that breaks in containers [#44499](https://github.com/ClickHouse/ClickHouse/pull/44499) ([Alexander Gololobov](https://github.com/davenger)).
* Analyzer duplicate alias crash fix [#44508](https://github.com/ClickHouse/ClickHouse/pull/44508) ([Maksim Kita](https://github.com/kitaisreal)).
* Minor code polishing [#44513](https://github.com/ClickHouse/ClickHouse/pull/44513) ([alesapin](https://github.com/alesapin)).
* Better error message if named collection does not exist [#44517](https://github.com/ClickHouse/ClickHouse/pull/44517) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add the lambda to collect data for workflow_jobs [#44520](https://github.com/ClickHouse/ClickHouse/pull/44520) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Introduce groupArrayLast() (useful to store last X values) [#44521](https://github.com/ClickHouse/ClickHouse/pull/44521) ([Azat Khuzhin](https://github.com/azat)).
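
  A small usage sketch; the exact result ordering follows the new function's upstream documentation:

  ```sql
  -- Keep only the last 3 values per group.
  SELECT groupArrayLast(3)(number) FROM numbers(10);
  -- Expected to return the trailing values, e.g. [7, 8, 9].
  ```
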
* Infer numbers starting from zero as strings in TSV [#44522](https://github.com/ClickHouse/ClickHouse/pull/44522) ([Kruglov Pavel](https://github.com/Avogar)).
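
  An illustrative check, assuming the `format` table function is used for ad-hoc schema inference:

  ```sql
  -- A value with a leading zero should now be inferred as a string,
  -- so the zero is preserved.
  DESC format(TSV, '00123');
  -- Expected: c1  Nullable(String)
  ```
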
* Fix wrong condition for enabling async reading from MergeTree. [#44530](https://github.com/ClickHouse/ClickHouse/pull/44530) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* tests: capture dmesg in integration tests [#44535](https://github.com/ClickHouse/ClickHouse/pull/44535) ([Azat Khuzhin](https://github.com/azat)).
* Analyzer support distributed queries processing [#44540](https://github.com/ClickHouse/ClickHouse/pull/44540) ([Maksim Kita](https://github.com/kitaisreal)).
* Followup [#43761](https://github.com/ClickHouse/ClickHouse/issues/43761) [#44541](https://github.com/ClickHouse/ClickHouse/pull/44541) ([Dmitry Novik](https://github.com/novikd)).
* Drop unused columns after join on/using [#44545](https://github.com/ClickHouse/ClickHouse/pull/44545) ([Vladimir C](https://github.com/vdimir)).
* Improve inferring arrays with nulls in JSON formats [#44550](https://github.com/ClickHouse/ClickHouse/pull/44550) ([Kruglov Pavel](https://github.com/Avogar)).
* Make BC check optional (if env var set) [#44564](https://github.com/ClickHouse/ClickHouse/pull/44564) ([alesapin](https://github.com/alesapin)).
* Fix extremely slow stack traces in debug build [#44569](https://github.com/ClickHouse/ClickHouse/pull/44569) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better command line argument name in `clickhouse-benchmark` [#44570](https://github.com/ClickHouse/ClickHouse/pull/44570) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix HDFS test [#44572](https://github.com/ClickHouse/ClickHouse/pull/44572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test_distributed_queries_stress [#44573](https://github.com/ClickHouse/ClickHouse/pull/44573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Switch "contrib/sysroot" back to master. [#44574](https://github.com/ClickHouse/ClickHouse/pull/44574) ([Vitaly Baranov](https://github.com/vitlibar)).
* Non-significant changes [#44575](https://github.com/ClickHouse/ClickHouse/pull/44575) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fuzzer HTML: fix trash [#44580](https://github.com/ClickHouse/ClickHouse/pull/44580) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better diagnostics on server stop for the stress test [#44593](https://github.com/ClickHouse/ClickHouse/pull/44593) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The position of the log message about the server environment was wrong [#44595](https://github.com/ClickHouse/ClickHouse/pull/44595) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad punctuation in log [#44596](https://github.com/ClickHouse/ClickHouse/pull/44596) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix misleading log message [#44598](https://github.com/ClickHouse/ClickHouse/pull/44598) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad log message about MergeTree metadata cache. [#44599](https://github.com/ClickHouse/ClickHouse/pull/44599) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Slightly cleanup interactive line reader code [#44601](https://github.com/ClickHouse/ClickHouse/pull/44601) ([Azat Khuzhin](https://github.com/azat)).
* Rename `runlog.log` to `run.log` in tests [#44603](https://github.com/ClickHouse/ClickHouse/pull/44603) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix hung query in stress test [#44604](https://github.com/ClickHouse/ClickHouse/pull/44604) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve variable name [#44605](https://github.com/ClickHouse/ClickHouse/pull/44605) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Faster server startup after stress test [#44606](https://github.com/ClickHouse/ClickHouse/pull/44606) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix log messages in Coordination [#44607](https://github.com/ClickHouse/ClickHouse/pull/44607) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable Analyzer in fuzz and stress tests [#44609](https://github.com/ClickHouse/ClickHouse/pull/44609) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better log message [#44610](https://github.com/ClickHouse/ClickHouse/pull/44610) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Maybe fix a bogus MSan error [#44611](https://github.com/ClickHouse/ClickHouse/pull/44611) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix "too large allocation" message from MSan [#44613](https://github.com/ClickHouse/ClickHouse/pull/44613) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not fail the AST fuzzer if sanitizer is out of memory [#44616](https://github.com/ClickHouse/ClickHouse/pull/44616) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test `01111_create_drop_replicated_db_stress` [#44617](https://github.com/ClickHouse/ClickHouse/pull/44617) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* tests/integration: suppress exceptions during logging (due to pytest) [#44618](https://github.com/ClickHouse/ClickHouse/pull/44618) ([Azat Khuzhin](https://github.com/azat)).
* Fix rust modules rebuild (previously ignores changes in cargo config.toml) [#44623](https://github.com/ClickHouse/ClickHouse/pull/44623) ([Azat Khuzhin](https://github.com/azat)).
* Sometimes spot instances fail more than 20 times in a row [#44626](https://github.com/ClickHouse/ClickHouse/pull/44626) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix restart after quorum insert [#44628](https://github.com/ClickHouse/ClickHouse/pull/44628) ([alesapin](https://github.com/alesapin)).
* Revert "Merge pull request [#38953](https://github.com/ClickHouse/ClickHouse/issues/38953) from ClickHouse/add-allocation-ptr-to-trace-log [#44629](https://github.com/ClickHouse/ClickHouse/pull/44629) ([Raúl Marín](https://github.com/Algunenano)).
* Fix lambdas parsing [#44639](https://github.com/ClickHouse/ClickHouse/pull/44639) ([Nikolay Degterinsky](https://github.com/evillique)).
* Function viewExplain accepts SELECT and settings [#44641](https://github.com/ClickHouse/ClickHouse/pull/44641) ([Vladimir C](https://github.com/vdimir)).
* Fix test `02015_async_inserts_2` [#44642](https://github.com/ClickHouse/ClickHouse/pull/44642) ([Anton Popov](https://github.com/CurtizJ)).
* Fix flaky test `test_keeper_multinode_simple` [#44645](https://github.com/ClickHouse/ClickHouse/pull/44645) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add +x flag for run-fuzzer.sh [#44649](https://github.com/ClickHouse/ClickHouse/pull/44649) ([alesapin](https://github.com/alesapin)).
* Custom reading for mutation [#44653](https://github.com/ClickHouse/ClickHouse/pull/44653) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix flaky test test_backup_restore_on_cluster [#44660](https://github.com/ClickHouse/ClickHouse/pull/44660) ([Vitaly Baranov](https://github.com/vitlibar)).
* tests/integration: add missing kazoo client termination [#44666](https://github.com/ClickHouse/ClickHouse/pull/44666) ([Azat Khuzhin](https://github.com/azat)).
* Move dmesg dumping out from runner to ci-runner.py [#44667](https://github.com/ClickHouse/ClickHouse/pull/44667) ([Azat Khuzhin](https://github.com/azat)).
* Remove questdb (it makes a little sense but the test was flaky) [#44669](https://github.com/ClickHouse/ClickHouse/pull/44669) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix minor typo: replace validate_bugix_check with validate_bugfix_check [#44672](https://github.com/ClickHouse/ClickHouse/pull/44672) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Fix parsing of ANY operator [#44678](https://github.com/ClickHouse/ClickHouse/pull/44678) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix test `01130_in_memory_parts` [#44683](https://github.com/ClickHouse/ClickHouse/pull/44683) ([Anton Popov](https://github.com/CurtizJ)).
* Remove old code [#44685](https://github.com/ClickHouse/ClickHouse/pull/44685) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test git-import [#44687](https://github.com/ClickHouse/ClickHouse/pull/44687) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve odbc test [#44688](https://github.com/ClickHouse/ClickHouse/pull/44688) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add retries to HTTP requests in ClickHouse test [#44689](https://github.com/ClickHouse/ClickHouse/pull/44689) ([alesapin](https://github.com/alesapin)).
* Fix flaky tests [#44690](https://github.com/ClickHouse/ClickHouse/pull/44690) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix flaky test "01502_long_log_tinylog_deadlock_race" [#44693](https://github.com/ClickHouse/ClickHouse/pull/44693) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve handling of old parts [#44694](https://github.com/ClickHouse/ClickHouse/pull/44694) ([Raúl Marín](https://github.com/Algunenano)).
* Update entrypoint.sh [#44699](https://github.com/ClickHouse/ClickHouse/pull/44699) ([Denny Crane](https://github.com/den-crane)).
* tests: more fixes for test_keeper_auth [#44702](https://github.com/ClickHouse/ClickHouse/pull/44702) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash on delete from materialized view [#44705](https://github.com/ClickHouse/ClickHouse/pull/44705) ([Alexander Gololobov](https://github.com/davenger)).
* Fix flaky filelog tests with database ordinary [#44706](https://github.com/ClickHouse/ClickHouse/pull/44706) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Make lightweight deletes always synchronous [#44718](https://github.com/ClickHouse/ClickHouse/pull/44718) ([Alexander Gololobov](https://github.com/davenger)).
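
  For context, a sketch of the affected statement; the table name is hypothetical, and on this release the experimental lightweight-delete setting may still need to be enabled first:

  ```sql
  -- Per this change, the statement returns only after the delete is applied.
  DELETE FROM t_demo WHERE id = 42;
  ```
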
* Fix deadlock in attach thread [#44719](https://github.com/ClickHouse/ClickHouse/pull/44719) ([alesapin](https://github.com/alesapin)).
* A few improvements to AST Fuzzer [#44720](https://github.com/ClickHouse/ClickHouse/pull/44720) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test [#44721](https://github.com/ClickHouse/ClickHouse/pull/44721) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Rename log in stress test [#44722](https://github.com/ClickHouse/ClickHouse/pull/44722) ([alesapin](https://github.com/alesapin)).
* Debug deadlock in stress test [#44723](https://github.com/ClickHouse/ClickHouse/pull/44723) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test "02102_row_binary_with_names_and_types.sh" [#44724](https://github.com/ClickHouse/ClickHouse/pull/44724) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Slightly better some tests [#44725](https://github.com/ClickHouse/ClickHouse/pull/44725) ([alesapin](https://github.com/alesapin)).
* Fix cases when clickhouse-server takes long time to start in functional tests with MSan [#44726](https://github.com/ClickHouse/ClickHouse/pull/44726) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Perf test: Log the time spent waiting for file sync [#44737](https://github.com/ClickHouse/ClickHouse/pull/44737) ([Raúl Marín](https://github.com/Algunenano)).
* Fix flaky test 02448_clone_replica_lost_part [#44759](https://github.com/ClickHouse/ClickHouse/pull/44759) ([alesapin](https://github.com/alesapin)).
* Build rust modules from the binary directory [#44762](https://github.com/ClickHouse/ClickHouse/pull/44762) ([Azat Khuzhin](https://github.com/azat)).
* Remove database ordinary from stress test [#44763](https://github.com/ClickHouse/ClickHouse/pull/44763) ([alesapin](https://github.com/alesapin)).
* Fix flaky test 02479_mysql_connect_to_self [#44768](https://github.com/ClickHouse/ClickHouse/pull/44768) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Print fatal messages in Fuzzer [#44769](https://github.com/ClickHouse/ClickHouse/pull/44769) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix incorrect docs [#44795](https://github.com/ClickHouse/ClickHouse/pull/44795) ([Kruglov Pavel](https://github.com/Avogar)).
* Added table name to error message [#44806](https://github.com/ClickHouse/ClickHouse/pull/44806) ([Alexander Gololobov](https://github.com/davenger)).
* Retry packages download if GitHub returned HTTP 500. [#44807](https://github.com/ClickHouse/ClickHouse/pull/44807) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Slightly better docs [#44808](https://github.com/ClickHouse/ClickHouse/pull/44808) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix total trash in stress test [#44810](https://github.com/ClickHouse/ClickHouse/pull/44810) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix ASan builds for glibc 2.36+ [#44811](https://github.com/ClickHouse/ClickHouse/pull/44811) ([Azat Khuzhin](https://github.com/azat)).
* Remove the remainings of TestFlows [#44812](https://github.com/ClickHouse/ClickHouse/pull/44812) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `grep` [#44813](https://github.com/ClickHouse/ClickHouse/pull/44813) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad cast in monotonicity analysis [#44818](https://github.com/ClickHouse/ClickHouse/pull/44818) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Modern tools, part 1 [#44819](https://github.com/ClickHouse/ClickHouse/pull/44819) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Modern tools in CI, part 2. [#44820](https://github.com/ClickHouse/ClickHouse/pull/44820) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix data race in DDLWorker [#44821](https://github.com/ClickHouse/ClickHouse/pull/44821) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix tests for bridges [#44822](https://github.com/ClickHouse/ClickHouse/pull/44822) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test_multiple_disks::test_jbod_overflow [#44823](https://github.com/ClickHouse/ClickHouse/pull/44823) ([Azat Khuzhin](https://github.com/azat)).
* Less OOM in stress test [#44824](https://github.com/ClickHouse/ClickHouse/pull/44824) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix misleading integration tests reports for parametrized tests [#44825](https://github.com/ClickHouse/ClickHouse/pull/44825) ([Azat Khuzhin](https://github.com/azat)).
* Fix two typos [#44826](https://github.com/ClickHouse/ClickHouse/pull/44826) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Adjust CSS [#44829](https://github.com/ClickHouse/ClickHouse/pull/44829) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix fuzzer report [#44830](https://github.com/ClickHouse/ClickHouse/pull/44830) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* check-style: check base for std::cerr/cout too [#44833](https://github.com/ClickHouse/ClickHouse/pull/44833) ([Azat Khuzhin](https://github.com/azat)).
* Try fixing `test_keeper_snapshot_small_distance` with ZK restart [#44834](https://github.com/ClickHouse/ClickHouse/pull/44834) ([Antonio Andelic](https://github.com/antonio2368)).
* Exclude cargo shared libraries from the artifacts [#44836](https://github.com/ClickHouse/ClickHouse/pull/44836) ([Azat Khuzhin](https://github.com/azat)).
* Add a tiny but important logging [#44837](https://github.com/ClickHouse/ClickHouse/pull/44837) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Escape submodules in style-check [#44838](https://github.com/ClickHouse/ClickHouse/pull/44838) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Move `test_dies_with_parent` to another module [#44839](https://github.com/ClickHouse/ClickHouse/pull/44839) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Remove unneeded softlink to official dev docs [#44841](https://github.com/ClickHouse/ClickHouse/pull/44841) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix data race in StorageS3 [#44842](https://github.com/ClickHouse/ClickHouse/pull/44842) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix rare race which can lead to queue hang [#44847](https://github.com/ClickHouse/ClickHouse/pull/44847) ([alesapin](https://github.com/alesapin)).
* No more retries in integration tests [#44851](https://github.com/ClickHouse/ClickHouse/pull/44851) ([Ilya Yatsishin](https://github.com/qoega)).
* Document usage of check_cxx_source_compiles instead of check_cxx_source_runs [#44854](https://github.com/ClickHouse/ClickHouse/pull/44854) ([Robert Schulze](https://github.com/rschu1ze)).
* More cases of OOM in Fuzzer [#44855](https://github.com/ClickHouse/ClickHouse/pull/44855) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix: sorted DISTINCT with empty string [#44856](https://github.com/ClickHouse/ClickHouse/pull/44856) ([Igor Nikonov](https://github.com/devcrafter)).
* Try to fix MSan build [#44857](https://github.com/ClickHouse/ClickHouse/pull/44857) ([Nikolay Degterinsky](https://github.com/evillique)).
* Cleanup setup_minio.sh [#44858](https://github.com/ClickHouse/ClickHouse/pull/44858) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Wait for ZK process to stop in tests using snapshot [#44859](https://github.com/ClickHouse/ClickHouse/pull/44859) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix flaky test and several typos [#44870](https://github.com/ClickHouse/ClickHouse/pull/44870) ([alesapin](https://github.com/alesapin)).
* Upload status files to S3 report for bugfix check [#44871](https://github.com/ClickHouse/ClickHouse/pull/44871) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix flaky test `02503_insert_storage_snapshot` [#44873](https://github.com/ClickHouse/ClickHouse/pull/44873) ([alesapin](https://github.com/alesapin)).
* Revert some changes from [#42777](https://github.com/ClickHouse/ClickHouse/issues/42777) to fix performance tests [#44876](https://github.com/ClickHouse/ClickHouse/pull/44876) ([Kruglov Pavel](https://github.com/Avogar)).
* Rewrite test_postgres_protocol test [#44880](https://github.com/ClickHouse/ClickHouse/pull/44880) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix ConcurrentBoundedQueue::emplace() return value in case of finished queue [#44881](https://github.com/ClickHouse/ClickHouse/pull/44881) ([Azat Khuzhin](https://github.com/azat)).
* Validate function arguments in query tree [#44882](https://github.com/ClickHouse/ClickHouse/pull/44882) ([Dmitry Novik](https://github.com/novikd)).
* Rework CI reports to have a class and clarify the logic [#44883](https://github.com/ClickHouse/ClickHouse/pull/44883) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* fix-typo [#44886](https://github.com/ClickHouse/ClickHouse/pull/44886) ([Enrique Herreros](https://github.com/eherrerosj)).
* Store ZK generated data in `test_keeper_snapshot_small_distance` [#44888](https://github.com/ClickHouse/ClickHouse/pull/44888) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix "AttributeError: 'BuildResult' object has no attribute 'libraries'" in BuilderReport and BuilderSpecialReport [#44890](https://github.com/ClickHouse/ClickHouse/pull/44890) ([Robert Schulze](https://github.com/rschu1ze)).
* Convert integration test_dictionaries_update_field to a stateless [#44891](https://github.com/ClickHouse/ClickHouse/pull/44891) ([Azat Khuzhin](https://github.com/azat)).
* Upgrade googletest to latest HEAD [#44894](https://github.com/ClickHouse/ClickHouse/pull/44894) ([Robert Schulze](https://github.com/rschu1ze)).
* Try fix rabbitmq potential leak [#44897](https://github.com/ClickHouse/ClickHouse/pull/44897) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Try to fix flaky `test_storage_kafka::test_kafka_produce_key_timestamp` [#44898](https://github.com/ClickHouse/ClickHouse/pull/44898) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix flaky `test_concurrent_queries_restriction_by_query_kind` [#44903](https://github.com/ClickHouse/ClickHouse/pull/44903) ([Antonio Andelic](https://github.com/antonio2368)).
* Avoid Keeper crash on shutdown (fix `test_keeper_snapshot_on_exit`) [#44908](https://github.com/ClickHouse/ClickHouse/pull/44908) ([Antonio Andelic](https://github.com/antonio2368)).
* Do not merge over a gap with outdated undeleted parts [#44909](https://github.com/ClickHouse/ClickHouse/pull/44909) ([Sema Checherinda](https://github.com/CheSema)).
* Fix logging message in MergeTreeDataMergerMutator (about merged parts) [#44917](https://github.com/ClickHouse/ClickHouse/pull/44917) ([Azat Khuzhin](https://github.com/azat)).
* Fix flaky test `test_lost_part` [#44921](https://github.com/ClickHouse/ClickHouse/pull/44921) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add fast and cancellable shared_mutex alternatives [#44924](https://github.com/ClickHouse/ClickHouse/pull/44924) ([Sergei Trifonov](https://github.com/serxa)).
* Fix deadlock in Keeper's changelog [#44937](https://github.com/ClickHouse/ClickHouse/pull/44937) ([Antonio Andelic](https://github.com/antonio2368)).
* Stop merges to avoid a race between merge and freeze. [#44938](https://github.com/ClickHouse/ClickHouse/pull/44938) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix memory leak in Aws::InitAPI [#44942](https://github.com/ClickHouse/ClickHouse/pull/44942) ([Vitaly Baranov](https://github.com/vitlibar)).
* Change error code on invalid background_pool_size config [#44947](https://github.com/ClickHouse/ClickHouse/pull/44947) ([Raúl Marín](https://github.com/Algunenano)).
* Fix exception fix in TraceCollector dtor [#44948](https://github.com/ClickHouse/ClickHouse/pull/44948) ([Robert Schulze](https://github.com/rschu1ze)).
* Parallel distributed insert select with s3Cluster [3] [#44955](https://github.com/ClickHouse/ClickHouse/pull/44955) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Do not check read result consistency when unwinding [#44956](https://github.com/ClickHouse/ClickHouse/pull/44956) ([Alexander Gololobov](https://github.com/davenger)).
* Up the log level of tables dependencies graphs [#44957](https://github.com/ClickHouse/ClickHouse/pull/44957) ([Vitaly Baranov](https://github.com/vitlibar)).
* Hipster's HTML [#44961](https://github.com/ClickHouse/ClickHouse/pull/44961) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Docs: Mention non-standard DOTALL behavior of ClickHouse's match() [#44977](https://github.com/ClickHouse/ClickHouse/pull/44977) ([Robert Schulze](https://github.com/rschu1ze)).
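
  The documented behavior in one line (assuming C-style escapes in the string literal):

  ```sql
  -- '.' also matches the newline here, so this returns 1 rather than 0.
  SELECT match('a\nb', 'a.b');
  ```
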
* tests: fix test_replicated_users flakiness [#44978](https://github.com/ClickHouse/ClickHouse/pull/44978) ([Azat Khuzhin](https://github.com/azat)).
* Check what happens if we disable some checks in storage Merge. [#44983](https://github.com/ClickHouse/ClickHouse/pull/44983) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix check for not existing input in ActionsDAG [#44987](https://github.com/ClickHouse/ClickHouse/pull/44987) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Update version_date.tsv and changelogs after v22.12.2.25-stable [#44988](https://github.com/ClickHouse/ClickHouse/pull/44988) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix test test_grpc_protocol/test.py::test_progress [#44996](https://github.com/ClickHouse/ClickHouse/pull/44996) ([Vitaly Baranov](https://github.com/vitlibar)).
* Improve S3 EC2 metadata tests [#45001](https://github.com/ClickHouse/ClickHouse/pull/45001) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix minmax_count_projection with _partition_value [#45003](https://github.com/ClickHouse/ClickHouse/pull/45003) ([Amos Bird](https://github.com/amosbird)).
* Fix strange trash in Fuzzer [#45006](https://github.com/ClickHouse/ClickHouse/pull/45006) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add `dmesg.log` to Fuzzer [#45008](https://github.com/ClickHouse/ClickHouse/pull/45008) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `01961_roaring_memory_tracking` test, again [#45009](https://github.com/ClickHouse/ClickHouse/pull/45009) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Recognize more ok cases for Fuzzer [#45012](https://github.com/ClickHouse/ClickHouse/pull/45012) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Supposedly fix the "Download script failed" error [#45013](https://github.com/ClickHouse/ClickHouse/pull/45013) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add snapshot creation retry in Keeper tests using ZooKeeper [#45016](https://github.com/ClickHouse/ClickHouse/pull/45016) ([Antonio Andelic](https://github.com/antonio2368)).
* test for [#20098](https://github.com/ClickHouse/ClickHouse/issues/20098) [#45017](https://github.com/ClickHouse/ClickHouse/pull/45017) ([Denny Crane](https://github.com/den-crane)).
* test for [#26473](https://github.com/ClickHouse/ClickHouse/issues/26473) [#45018](https://github.com/ClickHouse/ClickHouse/pull/45018) ([Denny Crane](https://github.com/den-crane)).
* Remove the remainings of Testflows (2). [#45021](https://github.com/ClickHouse/ClickHouse/pull/45021) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Enable the check that was commented [#45022](https://github.com/ClickHouse/ClickHouse/pull/45022) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix false positive in Fuzzer [#45025](https://github.com/ClickHouse/ClickHouse/pull/45025) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix false positive in Fuzzer, alternative variant [#45026](https://github.com/ClickHouse/ClickHouse/pull/45026) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix function `range` (the bug was unreleased) [#45030](https://github.com/ClickHouse/ClickHouse/pull/45030) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix OOM in Fuzzer [#45032](https://github.com/ClickHouse/ClickHouse/pull/45032) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Less OOM in Stress test [#45033](https://github.com/ClickHouse/ClickHouse/pull/45033) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#31361](https://github.com/ClickHouse/ClickHouse/issues/31361) [#45034](https://github.com/ClickHouse/ClickHouse/pull/45034) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test for [#38729](https://github.com/ClickHouse/ClickHouse/issues/38729) [#45035](https://github.com/ClickHouse/ClickHouse/pull/45035) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix typos [#45036](https://github.com/ClickHouse/ClickHouse/pull/45036) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* I didn't understand the logic of this test, @azat [#45037](https://github.com/ClickHouse/ClickHouse/pull/45037) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Small fixes for Coordination unit tests [#45039](https://github.com/ClickHouse/ClickHouse/pull/45039) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix flaky test (hilarious) [#45042](https://github.com/ClickHouse/ClickHouse/pull/45042) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Non significant changes [#45046](https://github.com/ClickHouse/ClickHouse/pull/45046) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Don't fix parallel formatting [#45050](https://github.com/ClickHouse/ClickHouse/pull/45050) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix (benign) data race in clickhouse-client [#45053](https://github.com/ClickHouse/ClickHouse/pull/45053) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Analyzer aggregation without column fix [#45055](https://github.com/ClickHouse/ClickHouse/pull/45055) ([Maksim Kita](https://github.com/kitaisreal)).
* Analyzer ARRAY JOIN crash fix [#45059](https://github.com/ClickHouse/ClickHouse/pull/45059) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix data race in openSQLiteDB [#45062](https://github.com/ClickHouse/ClickHouse/pull/45062) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Analyzer function IN crash fix [#45064](https://github.com/ClickHouse/ClickHouse/pull/45064) ([Maksim Kita](https://github.com/kitaisreal)).
* JIT compilation float to bool conversion fix [#45067](https://github.com/ClickHouse/ClickHouse/pull/45067) ([Maksim Kita](https://github.com/kitaisreal)).
* Update version_date.tsv and changelogs after v22.11.3.47-stable [#45069](https://github.com/ClickHouse/ClickHouse/pull/45069) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.10.5.54-stable [#45071](https://github.com/ClickHouse/ClickHouse/pull/45071) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.3.16.1190-lts [#45073](https://github.com/ClickHouse/ClickHouse/pull/45073) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Improve release scripts [#45074](https://github.com/ClickHouse/ClickHouse/pull/45074) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Change the color of links in dark reports a little bit [#45077](https://github.com/ClickHouse/ClickHouse/pull/45077) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix Fuzzer script [#45082](https://github.com/ClickHouse/ClickHouse/pull/45082) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Try fixing KeeperMap tests [#45094](https://github.com/ClickHouse/ClickHouse/pull/45094) ([Antonio Andelic](https://github.com/antonio2368)).
* Update version_date.tsv and changelogs after v22.8.12.45-lts [#45098](https://github.com/ClickHouse/ClickHouse/pull/45098) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Try to fix flaky test_create_user_and_login/test.py::test_login_as_dropped_user_xml [#45099](https://github.com/ClickHouse/ClickHouse/pull/45099) ([Ilya Yatsishin](https://github.com/qoega)).
* Update version_date.tsv and changelogs after v22.10.6.3-stable [#45107](https://github.com/ClickHouse/ClickHouse/pull/45107) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Docs: Make heading consistent with other headings in System Table docs [#45109](https://github.com/ClickHouse/ClickHouse/pull/45109) ([Robert Schulze](https://github.com/rschu1ze)).
* Update version_date.tsv and changelogs after v22.11.4.3-stable [#45110](https://github.com/ClickHouse/ClickHouse/pull/45110) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.12.3.5-stable [#45113](https://github.com/ClickHouse/ClickHouse/pull/45113) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Docs: Rewrite awkwardly phrased sentence about flush interval [#45114](https://github.com/ClickHouse/ClickHouse/pull/45114) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix data race in s3Cluster. [#45123](https://github.com/ClickHouse/ClickHouse/pull/45123) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Pull SQLancer image before check run [#45125](https://github.com/ClickHouse/ClickHouse/pull/45125) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix flaky azure test [#45134](https://github.com/ClickHouse/ClickHouse/pull/45134) ([alesapin](https://github.com/alesapin)).
* Minor cleanup in stress/run.sh [#45136](https://github.com/ClickHouse/ClickHouse/pull/45136) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Performance report: "Partial queries" --> "Backward-incompatible queries" [#45152](https://github.com/ClickHouse/ClickHouse/pull/45152) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix flaky test_tcp_handler_interserver_listen_host [#45156](https://github.com/ClickHouse/ClickHouse/pull/45156) ([Ilya Yatsishin](https://github.com/qoega)).
* Clean trash from changelog for v22.3.16.1190-lts [#45159](https://github.com/ClickHouse/ClickHouse/pull/45159) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Disable `test_storage_rabbitmq` [#45161](https://github.com/ClickHouse/ClickHouse/pull/45161) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_ttl_move_memory_usage as too flaky. [#45162](https://github.com/ClickHouse/ClickHouse/pull/45162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* More logging to facilitate debugging of flaky test_ttl_replicated [#45165](https://github.com/ClickHouse/ClickHouse/pull/45165) ([Alexander Gololobov](https://github.com/davenger)).
* Try to fix flaky test_ttl_move_memory_usage [#45168](https://github.com/ClickHouse/ClickHouse/pull/45168) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flaky test test_multiple_disks/test.py::test_rename [#45180](https://github.com/ClickHouse/ClickHouse/pull/45180) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Calculate only required columns in system.detached_parts [#45181](https://github.com/ClickHouse/ClickHouse/pull/45181) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Restart NightlyBuilds if the runner died [#45187](https://github.com/ClickHouse/ClickHouse/pull/45187) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix part ID generation for IP types for backward compatibility [#45191](https://github.com/ClickHouse/ClickHouse/pull/45191) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix integration test test_replicated_users::test_rename_replicated [#45192](https://github.com/ClickHouse/ClickHouse/pull/45192) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add CACHE_INVALIDATOR for sqlancer builds [#45201](https://github.com/ClickHouse/ClickHouse/pull/45201) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix possible stack-use-after-return in LimitReadBuffer [#45203](https://github.com/ClickHouse/ClickHouse/pull/45203) ([Kruglov Pavel](https://github.com/Avogar)).
* Disable check to make test_overcommit_tracker not flaky [#45206](https://github.com/ClickHouse/ClickHouse/pull/45206) ([Dmitry Novik](https://github.com/novikd)).
* Fix flaky test `01961_roaring_memory_tracking` (3) [#45208](https://github.com/ClickHouse/ClickHouse/pull/45208) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove trash from stress test [#45211](https://github.com/ClickHouse/ClickHouse/pull/45211) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* remove unused function [#45212](https://github.com/ClickHouse/ClickHouse/pull/45212) ([flynn](https://github.com/ucasfl)).
* Fix flaky `test_keeper_three_nodes_two_alive` [#45213](https://github.com/ClickHouse/ClickHouse/pull/45213) ([Antonio Andelic](https://github.com/antonio2368)).
* Fuzz PREWHERE clause [#45222](https://github.com/ClickHouse/ClickHouse/pull/45222) ([Alexander Gololobov](https://github.com/davenger)).
* Added a test for merge join key condition with big int & decimal [#45228](https://github.com/ClickHouse/ClickHouse/pull/45228) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix rare logical error: `Too large alignment` [#45229](https://github.com/ClickHouse/ClickHouse/pull/45229) ([Anton Popov](https://github.com/CurtizJ)).
* Update version_date.tsv and changelogs after v22.3.17.13-lts [#45234](https://github.com/ClickHouse/ClickHouse/pull/45234) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* More verbose logs about replication log entries [#45235](https://github.com/ClickHouse/ClickHouse/pull/45235) ([Alexander Tokmakov](https://github.com/tavplubix)).
* One more attempt to fix race in TCPHandler [#45240](https://github.com/ClickHouse/ClickHouse/pull/45240) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Update clickhouse-test [#45251](https://github.com/ClickHouse/ClickHouse/pull/45251) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Planner small fixes [#45254](https://github.com/ClickHouse/ClickHouse/pull/45254) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix log level "Test" for send_logs_level in client [#45273](https://github.com/ClickHouse/ClickHouse/pull/45273) ([Azat Khuzhin](https://github.com/azat)).
* tests: fix clickhouse binaries detection [#45283](https://github.com/ClickHouse/ClickHouse/pull/45283) ([Azat Khuzhin](https://github.com/azat)).
* tests/ci: encode HTML entities in the reports [#45284](https://github.com/ClickHouse/ClickHouse/pull/45284) ([Azat Khuzhin](https://github.com/azat)).
* Disable `02151_hash_table_sizes_stats_distributed` under TSAN [#45287](https://github.com/ClickHouse/ClickHouse/pull/45287) ([Nikita Taranov](https://github.com/nickitat)).
* Fix wrong approved_at, simplify conditions [#45302](https://github.com/ClickHouse/ClickHouse/pull/45302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Disable 02028_create_select_settings with Ordinary [#45307](https://github.com/ClickHouse/ClickHouse/pull/45307) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Save message format strings for DB::Exception [#45342](https://github.com/ClickHouse/ClickHouse/pull/45342) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Slightly better output for glibc check [#45353](https://github.com/ClickHouse/ClickHouse/pull/45353) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add checks for compilation of regexps [#45356](https://github.com/ClickHouse/ClickHouse/pull/45356) ([Anton Popov](https://github.com/CurtizJ)).
* Analyzer compound identifier typo correction fix [#45357](https://github.com/ClickHouse/ClickHouse/pull/45357) ([Maksim Kita](https://github.com/kitaisreal)).
* Bump to newer version of debug-action [#45359](https://github.com/ClickHouse/ClickHouse/pull/45359) ([Ilya Yatsishin](https://github.com/qoega)).
* Improve failed kafka startup logging [#45369](https://github.com/ClickHouse/ClickHouse/pull/45369) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix flaky ttl test [#45370](https://github.com/ClickHouse/ClickHouse/pull/45370) ([alesapin](https://github.com/alesapin)).
* Add detailed profile events for throttling [#45373](https://github.com/ClickHouse/ClickHouse/pull/45373) ([Sergei Trifonov](https://github.com/serxa)).
* Update .gitignore [#45378](https://github.com/ClickHouse/ClickHouse/pull/45378) ([Nikolay Degterinsky](https://github.com/evillique)).
* Make test simpler to see errors [#45402](https://github.com/ClickHouse/ClickHouse/pull/45402) ([Ilya Yatsishin](https://github.com/qoega)).
* Reduce an amount of trash in `tests_system_merges` [#45403](https://github.com/ClickHouse/ClickHouse/pull/45403) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix reading from encrypted disk with passed file size [#45418](https://github.com/ClickHouse/ClickHouse/pull/45418) ([Anton Popov](https://github.com/CurtizJ)).
* Add delete by ttl for zookeeper_log [#45419](https://github.com/ClickHouse/ClickHouse/pull/45419) ([Nikita Taranov](https://github.com/nickitat)).
* Get rid of artifactory in favor of r2 + ch-repos-manager [#45421](https://github.com/ClickHouse/ClickHouse/pull/45421) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Minor improvements around reading from remote [#45442](https://github.com/ClickHouse/ClickHouse/pull/45442) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Docs: Beautify section on secondary index types [#45444](https://github.com/ClickHouse/ClickHouse/pull/45444) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix Buffer's offsets mismatch logical error in stress test [#45446](https://github.com/ClickHouse/ClickHouse/pull/45446) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Better formatting for exception messages [#45449](https://github.com/ClickHouse/ClickHouse/pull/45449) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add default GRANULARITY argument for secondary indexes [#45451](https://github.com/ClickHouse/ClickHouse/pull/45451) ([Nikolay Degterinsky](https://github.com/evillique)).
* Cleanup of inverted index [#45460](https://github.com/ClickHouse/ClickHouse/pull/45460) ([Robert Schulze](https://github.com/rschu1ze)).
* CherryPick: Fix a wrong staring search date [#45466](https://github.com/ClickHouse/ClickHouse/pull/45466) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix typos [#45470](https://github.com/ClickHouse/ClickHouse/pull/45470) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix possible aborts in arrow lib [#45478](https://github.com/ClickHouse/ClickHouse/pull/45478) ([Kruglov Pavel](https://github.com/Avogar)).
* Add more retries to AST Fuzzer [#45479](https://github.com/ClickHouse/ClickHouse/pull/45479) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix schema inference from insertion table in hdfsCluster [#45483](https://github.com/ClickHouse/ClickHouse/pull/45483) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove unnecessary getTotalRowCount function calls [#45485](https://github.com/ClickHouse/ClickHouse/pull/45485) ([Maksim Kita](https://github.com/kitaisreal)).
* Use new copy s3 functions in S3ObjectStorage [#45487](https://github.com/ClickHouse/ClickHouse/pull/45487) ([Vitaly Baranov](https://github.com/vitlibar)).
* Forward declaration of ConcurrentBoundedQueue in ThreadStatus [#45489](https://github.com/ClickHouse/ClickHouse/pull/45489) ([Azat Khuzhin](https://github.com/azat)).
* Revert "Merge pull request [#44922](https://github.com/ClickHouse/ClickHouse/issues/44922) from azat/dist/async-INSERT-metrics" [#45492](https://github.com/ClickHouse/ClickHouse/pull/45492) ([Azat Khuzhin](https://github.com/azat)).
* Docs: Fix weird formatting [#45495](https://github.com/ClickHouse/ClickHouse/pull/45495) ([Robert Schulze](https://github.com/rschu1ze)).
* Docs: Fix link to writing guide [#45496](https://github.com/ClickHouse/ClickHouse/pull/45496) ([Robert Schulze](https://github.com/rschu1ze)).
* Improve logging for TeePopen.timeout exceeded [#45504](https://github.com/ClickHouse/ClickHouse/pull/45504) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix MSan build once again (too heavy translation units) [#45512](https://github.com/ClickHouse/ClickHouse/pull/45512) ([Nikolay Degterinsky](https://github.com/evillique)).
* Additional check in MergeTreeReadPool [#45515](https://github.com/ClickHouse/ClickHouse/pull/45515) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update test_system_merges/test.py [#45516](https://github.com/ClickHouse/ClickHouse/pull/45516) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Revert "Merge pull request [#45493](https://github.com/ClickHouse/ClickHouse/issues/45493) from azat/fix-detach" [#45545](https://github.com/ClickHouse/ClickHouse/pull/45545) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update stress [#45546](https://github.com/ClickHouse/ClickHouse/pull/45546) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Ignore utf errors in clickhouse-test reportLogStats [#45556](https://github.com/ClickHouse/ClickHouse/pull/45556) ([Vladimir C](https://github.com/vdimir)).
* Resubmit "Fix possible in-use table after DETACH" [#45566](https://github.com/ClickHouse/ClickHouse/pull/45566) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Typo: "Granulesis" --> "Granules" [#45598](https://github.com/ClickHouse/ClickHouse/pull/45598) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix version in autogenerated_versions.txt [#45624](https://github.com/ClickHouse/ClickHouse/pull/45624) ([Dmitry Novik](https://github.com/novikd)).
@ -21,6 +21,13 @@ ENGINE = HDFS(URI, format)
`SELECT` queries, the format must be supported for input, and to perform
`INSERT` queries – for output. The available formats are listed in the
[Formats](../../../interfaces/formats.md#formats) section.
- [PARTITION BY expr]
### PARTITION BY
`PARTITION BY` — Optional. In most cases you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
**Example:**
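The example body itself falls outside this hunk; as a hedged sketch only, monthly partitioning for an HDFS-backed table could look like the following (the URI, table name, and columns are illustrative, and the `{_partition_id}` placeholder in the path is an assumption about how partitioned writes name their files):

```sql
CREATE TABLE hdfs_engine_table (event_date Date, name String, value UInt32)
ENGINE = HDFS('hdfs://hdfs1:9000/other_storage/data_{_partition_id}', 'TSV')
PARTITION BY toYYYYMM(event_date);
```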
@ -13,6 +13,7 @@ This engine provides integration with [Amazon S3](https://aws.amazon.com/s3/) ec
``` sql
CREATE TABLE s3_engine_table (name String, value UInt32)
ENGINE = S3(path, [aws_access_key_id, aws_secret_access_key,] format, [compression])
[PARTITION BY expr]
[SETTINGS ...]
```
@ -23,6 +24,12 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will autodetect compression by file extension.
### PARTITION BY
`PARTITION BY` — Optional. In most cases you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
**Example**
``` sql
@ -43,12 +43,12 @@ To use the index, no special functions or syntax are required. Typical string se
examples, consider:
```sql
SELECT * from tab WHERE s == 'Hello World;;
SELECT * from tab WHERE s == 'Hello World;
SELECT * from tab WHERE s IN (‘Hello’, ‘World’);
SELECT * from tab WHERE s LIKE ‘%Hello%’;
SELECT * from tab WHERE multiSearchAny(s, ‘Hello’, ‘World’);
SELECT * from tab WHERE hasToken(s, ‘Hello’);
SELECT * from tab WHERE multiSearchAll(s, [‘Hello’, ‘World’])
SELECT * from tab WHERE multiSearchAll(s, [‘Hello’, ‘World’]);
```
The inverted index also works on columns of type `Array(String)`, `Array(FixedString)`, `Map(String)` and `Map(FixedString)`.
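For context, the statements above assume a table with an inverted index on `s`. A minimal, hedged sketch (the table layout is illustrative; the feature was experimental at this point, so the enabling setting shown here is an assumption):

```sql
SET allow_experimental_inverted_index = true;

CREATE TABLE tab
(
    key UInt64,
    s String,
    INDEX inv_idx(s) TYPE inverted(0) GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY key;
```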
@ -77,7 +77,7 @@ Use the `ORDER BY tuple()` syntax, if you do not need sorting. See [Selecting th
#### PARTITION BY
`PARTITION BY` — The [partitioning key](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md). Optional. In most cases you don't need partition key, and in most other cases you don't need partition key more granular than by months. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead make client identifier or name the first column in the ORDER BY expression).
`PARTITION BY` — The [partitioning key](/docs/en/engines/table-engines/mergetree-family/custom-partitioning-key.md). Optional. In most cases, you don't need a partition key, and if you do need to partition, generally you do not need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
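A short sketch of the advice above, with the client identifier leading the sorting key while partitioning stays coarse (all names here are hypothetical):

```sql
-- client_id goes first in ORDER BY; partitioning stays monthly
CREATE TABLE events
(
    client_id  UInt64,
    event_date Date,
    payload    String
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(event_date)
ORDER BY (client_id, event_date);
```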
@ -470,6 +470,9 @@ The `set` index can be used with all functions. Function subsets for other index
| [empty](/docs/en/sql-reference/functions/array-functions#function-empty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| [notEmpty](/docs/en/sql-reference/functions/array-functions#function-notempty) | ✔ | ✔ | ✗ | ✗ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasTokenOrNull | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasTokenCaseInsensitive | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasTokenCaseInsensitiveOrNull | ✗ | ✗ | ✗ | ✔ | ✗ |
Functions with a constant argument that is less than ngram size can’t be used by `ngrambf_v1` for query optimization.
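To make the table above concrete, a hedged sketch of a `tokenbf_v1` index that `hasToken` can use (the bloom filter parameters are common illustrative values, not a recommendation):

```sql
CREATE TABLE logs
(
    ts      DateTime,
    message String,
    INDEX message_tokens message TYPE tokenbf_v1(10240, 3, 0) GRANULARITY 4
)
ENGINE = MergeTree
ORDER BY ts;

-- the index can be consulted for token-based predicates such as:
SELECT count() FROM logs WHERE hasToken(message, 'error');
```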
@ -86,3 +86,9 @@ $ echo -e "1,2\n3,4" | clickhouse-local -q "CREATE TABLE table (a Int64, b Int64
- `SELECT ... SAMPLE`
- Indices
- Replication
## PARTITION BY
`PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
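As a brief sketch only, the per-partition files described here might be declared like this for the File engine (column names are illustrative):

```sql
CREATE TABLE file_engine_table (event_date Date, value UInt32)
ENGINE = File(CSV)
PARTITION BY toYYYYMM(event_date);
```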
@ -96,3 +96,9 @@ SELECT * FROM url_engine_table
- `ALTER` and `SELECT...SAMPLE` operations.
- Indexes.
- Replication.
## PARTITION BY
`PARTITION BY` — Optional. It is possible to create separate files by partitioning the data on a partition key. In most cases, you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
For partitioning by month, use the `toYYYYMM(date_column)` expression, where `date_column` is a column with a date of the type [Date](/docs/en/sql-reference/data-types/date.md). The partition names here have the `"YYYYMM"` format.
@ -119,9 +119,9 @@ We use [CSVWithNames](../../interfaces/formats.md#csvwithnames) format as the da
We disable `format_csv_allow_single_quotes` as only double quotes are used for data fields and single quotes can be inside the values and should not confuse the CSV parser.
We disable [input_format_null_as_default](../../operations/settings/settings.md#settings-input-format-null-as-default) as our data does not have [NULL](../../sql-reference/syntax.md#null-literal). Otherwise ClickHouse will try to parse `\N` sequences and can be confused with `\` in data.
We disable [input_format_null_as_default](../../operations/settings/settings-formats.md#settings-input-format-null-as-default) as our data does not have [NULL](../../sql-reference/syntax.md#null-literal). Otherwise ClickHouse will try to parse `\N` sequences and can be confused with `\` in data.
The setting [date_time_input_format best_effort](../../operations/settings/settings.md#settings-date_time_input_format) allows to parse [DateTime](../../sql-reference/data-types/datetime.md) fields in wide variety of formats. For example, ISO-8601 without seconds like '2000-01-01 01:02' will be recognized. Without this setting only fixed DateTime format is allowed.
The setting [date_time_input_format best_effort](../../operations/settings/settings-formats.md#settings-date_time_input_format) allows to parse [DateTime](../../sql-reference/data-types/datetime.md) fields in wide variety of formats. For example, ISO-8601 without seconds like '2000-01-01 01:02' will be recognized. Without this setting only fixed DateTime format is allowed.
## Denormalize the Data {#denormalize-data}
@ -60,7 +60,7 @@ ls -1 flightlist_*.csv.gz | xargs -P100 -I{} bash -c 'gzip -c -d "{}" | clickhou
`xargs -P100` specifies to use up to 100 parallel workers but as we only have 30 files, the number of workers will be only 30.
- For every file, `xargs` will run a script with `bash -c`. The script has substitution in form of `{}` and the `xargs` command will substitute the filename to it (we have asked it for `xargs` with `-I{}`).
- The script will decompress the file (`gzip -c -d "{}"`) to standard output (`-c` parameter) and the output is redirected to `clickhouse-client`.
- We also asked to parse [DateTime](../../sql-reference/data-types/datetime.md) fields with extended parser ([--date_time_input_format best_effort](../../operations/settings/settings.md#settings-date_time_input_format)) to recognize ISO-8601 format with timezone offsets.
- We also asked to parse [DateTime](../../sql-reference/data-types/datetime.md) fields with extended parser ([--date_time_input_format best_effort](../../operations/settings/settings-formats.md#settings-date_time_input_format)) to recognize ISO-8601 format with timezone offsets.
Finally, `clickhouse-client` will do insertion. It will read input data in [CSVWithNames](../../interfaces/formats.md#csvwithnames) format.
@ -22,8 +22,8 @@ functions in ClickHouse. The sample datasets include:
- The [Cell Towers dataset](../getting-started/example-datasets/cell-towers.md) imports a CSV into ClickHouse
- The [NYPD Complaint Data](../getting-started/example-datasets/nypd_complaint_data.md) demonstrates how to use data inference to simplify creating tables
- The ["What's on the Menu?" dataset](../getting-started/example-datasets/menus.md) has an example of denormalizing data
- The [Getting Data Into ClickHouse - Part 1](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1) provides examples of defining a schema and loading a small Hacker News dataset
- The [Getting Data Into ClickHouse - Part 2 - A JSON detour](https://clickhouse.com/blog/getting-data-into-clickhouse-part-2-json) shows how JSON data can be loaded
- The [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3) has examples of loading data from s3
- [Getting Data Into ClickHouse - Part 1](https://clickhouse.com/blog/getting-data-into-clickhouse-part-1) provides examples of defining a schema and loading a small Hacker News dataset
- [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3) has examples of loading data from s3
- [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse) shows how to generate random data if none of the above fit your needs.
View the **Tutorials and Datasets** menu for a complete list of sample datasets.
@ -85,7 +85,7 @@ The supported formats are:
| [MySQLDump](#mysqldump) | ✔ | ✗ |
You can control some format processing parameters with the ClickHouse settings. For more information read the [Settings](/docs/en/operations/settings/settings.md) section.
You can control some format processing parameters with the ClickHouse settings. For more information read the [Settings](/docs/en/operations/settings/settings-formats.md) section.
## TabSeparated {#tabseparated}
@ -148,10 +148,10 @@ Only a small set of symbols are escaped. You can easily stumble onto a string va
Arrays are written as a list of comma-separated values in square brackets. Number items in the array are formatted as normally. `Date` and `DateTime` types are written in single quotes. Strings are written in single quotes with the same escaping rules as above.
[NULL](/docs/en/sql-reference/syntax.md) is formatted according to setting [format_tsv_null_representation](/docs/en/operations/settings/settings.md/#format_tsv_null_representation) (default value is `\N`).
[NULL](/docs/en/sql-reference/syntax.md) is formatted according to setting [format_tsv_null_representation](/docs/en/operations/settings/settings-formats.md/#format_tsv_null_representation) (default value is `\N`).
In input data, ENUM values can be represented as names or as ids. First, we try to match the input value to the ENUM name. If we fail and the input value is a number, we try to match this number to ENUM id.
If input data contains only ENUM ids, it's recommended to enable the setting [input_format_tsv_enum_as_number](/docs/en/operations/settings/settings.md/#input_format_tsv_enum_as_number) to optimize ENUM parsing.
If input data contains only ENUM ids, it's recommended to enable the setting [input_format_tsv_enum_as_number](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_enum_as_number) to optimize ENUM parsing.
Each element of [Nested](/docs/en/sql-reference/data-types/nested-data-structures/nested.md) structures is represented as an array.
@ -183,12 +183,13 @@ SELECT * FROM nestedt FORMAT TSV
### TabSeparated format settings {#tabseparated-format-settings}
- [format_tsv_null_representation](/docs/en/operations/settings/settings.md/#format_tsv_null_representation) - custom NULL representation in TSV format. Default value - `\N`.
- [input_format_tsv_empty_as_default](/docs/en/operations/settings/settings.md/#input_format_tsv_empty_as_default) - treat empty fields in TSV input as default values. Default value - `false`. For complex default expressions [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings.md/#input_format_defaults_for_omitted_fields) must be enabled too.
- [input_format_tsv_enum_as_number](/docs/en/operations/settings/settings.md/#input_format_tsv_enum_as_number) - treat inserted enum values in TSV formats as enum indices. Default value - `false`.
- [input_format_tsv_use_best_effort_in_schema_inference](/docs/en/operations/settings/settings.md/#input_format_tsv_use_best_effort_in_schema_inference) - use some tweaks and heuristics to infer schema in TSV format. If disabled, all fields will be inferred as Strings. Default value - `true`.
- [output_format_tsv_crlf_end_of_line](/docs/en/operations/settings/settings.md/#output_format_tsv_crlf_end_of_line) - if it is set true, end of line in TSV output format will be `\r\n` instead of `\n`. Default value - `false`.
- [input_format_tsv_skip_first_lines](/docs/en/operations/settings/settings.md/#input_format_tsv_skip_first_lines) - skip specified number of lines at the beginning of data. Default value - `0`.
- [format_tsv_null_representation](/docs/en/operations/settings/settings-formats.md/#format_tsv_null_representation) - custom NULL representation in TSV format. Default value - `\N`.
- [input_format_tsv_empty_as_default](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_empty_as_default) - treat empty fields in TSV input as default values. Default value - `false`. For complex default expressions [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) must be enabled too.
- [input_format_tsv_enum_as_number](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_enum_as_number) - treat inserted enum values in TSV formats as enum indices. Default value - `false`.
- [input_format_tsv_use_best_effort_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_use_best_effort_in_schema_inference) - use some tweaks and heuristics to infer schema in TSV format. If disabled, all fields will be inferred as Strings. Default value - `true`.
- [output_format_tsv_crlf_end_of_line](/docs/en/operations/settings/settings-formats.md/#output_format_tsv_crlf_end_of_line) - if it is set true, end of line in TSV output format will be `\r\n` instead of `\n`. Default value - `false`.
- [input_format_tsv_skip_first_lines](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_skip_first_lines) - skip specified number of lines at the beginning of data. Default value - `0`.
- [input_format_tsv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_detect_header) - automatically detect header with names and types in TSV format. Default value - `true`.
## TabSeparatedRaw {#tabseparatedraw}
@ -204,8 +205,8 @@ Differs from the `TabSeparated` format in that the column names are written in t
During parsing, the first row is expected to contain the column names. You can use column names to determine their position and to check their correctness.
:::warning
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings.md/#input_format_with_names_use_header) is set to 1,
the columns from the input data will be mapped to the columns of the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
the columns from the input data will be mapped to the columns of the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
Otherwise, the first row will be skipped.
:::
@ -216,10 +217,10 @@ This format is also available under the name `TSVWithNames`.
Differs from the `TabSeparated` format in that the column names are written to the first row, while the column types are in the second row.
:::warning
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings.md/#input_format_with_names_use_header) is set to 1,
the columns from the input data will be mapped to the columns in the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
the columns from the input data will be mapped to the columns in the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
Otherwise, the first row will be skipped.
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings.md/#input_format_with_types_use_header) is set to 1,
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
:::
@ -427,49 +428,50 @@ Both data output and parsing are supported in this format. For parsing, any orde
Parsing allows the presence of the additional field `tskv` without the equal sign or a value. This field is ignored.
During import, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
During import, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
## CSV {#csv}
Comma Separated Values format ([RFC](https://tools.ietf.org/html/rfc4180)).
When formatting, rows are enclosed in double quotes. A double quote inside a string is output as two double quotes in a row. There are no other rules for escaping characters. Date and date-time are enclosed in double quotes. Numbers are output without quotes. Values are separated by a delimiter character, which is `,` by default. The delimiter character is defined in the setting [format_csv_delimiter](/docs/en/operations/settings/settings.md/#format_csv_delimiter). Rows are separated using the Unix line feed (LF). Arrays are serialized in CSV as follows: first, the array is serialized to a string as in TabSeparated format, and then the resulting string is output to CSV in double quotes. Tuples in CSV format are serialized as separate columns (that is, their nesting in the tuple is lost).
When formatting, rows are enclosed in double quotes. A double quote inside a string is output as two double quotes in a row. There are no other rules for escaping characters. Date and date-time are enclosed in double quotes. Numbers are output without quotes. Values are separated by a delimiter character, which is `,` by default. The delimiter character is defined in the setting [format_csv_delimiter](/docs/en/operations/settings/settings-formats.md/#format_csv_delimiter). Rows are separated using the Unix line feed (LF). Arrays are serialized in CSV as follows: first, the array is serialized to a string as in TabSeparated format, and then the resulting string is output to CSV in double quotes. Tuples in CSV format are serialized as separate columns (that is, their nesting in the tuple is lost).
``` bash
$ clickhouse-client --format_csv_delimiter="|" --query="INSERT INTO test.csv FORMAT CSV" < data.csv
```
\*By default, the delimiter is `,`. See the [format_csv_delimiter](/docs/en/operations/settings/settings.md/#format_csv_delimiter) setting for more information.
\*By default, the delimiter is `,`. See the [format_csv_delimiter](/docs/en/operations/settings/settings-formats.md/#format_csv_delimiter) setting for more information.
When parsing, all values can be parsed either with or without quotes. Both double and single quotes are supported. Rows can also be arranged without quotes. In this case, they are parsed up to the delimiter character or line feed (CR or LF). In violation of the RFC, when parsing rows without quotes, the leading and trailing spaces and tabs are ignored. For the line feed, Unix (LF), Windows (CR LF) and Mac OS Classic (CR LF) types are all supported.
`NULL` is formatted according to setting [format_csv_null_representation](/docs/en/operations/settings/settings.md/#format_csv_null_representation) (default value is `\N`).
`NULL` is formatted according to setting [format_csv_null_representation](/docs/en/operations/settings/settings-formats.md/#format_csv_null_representation) (default value is `\N`).
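A small sketch of overriding that representation (the expected output is shown as a comment and assumes otherwise default settings):

```sql
SET format_csv_null_representation = 'My NULL';
SELECT NULL AS x FORMAT CSV;
-- My NULL
```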
In input data, ENUM values can be represented as names or as ids. First, we try to match the input value to the ENUM name. If we fail and the input value is a number, we try to match this number to the ENUM id.
If input data contains only ENUM ids, it's recommended to enable the setting [input_format_csv_enum_as_number](/docs/en/operations/settings/settings.md/#input_format_csv_enum_as_number) to optimize ENUM parsing.
If input data contains only ENUM ids, it's recommended to enable the setting [input_format_csv_enum_as_number](/docs/en/operations/settings/settings-formats.md/#input_format_csv_enum_as_number) to optimize ENUM parsing.
The CSV format supports the output of totals and extremes the same way as `TabSeparated`.
### CSV format settings {#csv-format-settings}
- [format_csv_delimiter](/docs/en/operations/settings/settings.md/#format_csv_delimiter) - the character to be considered as a delimiter in CSV data. Default value - `,`.
- [format_csv_allow_single_quotes](/docs/en/operations/settings/settings.md/#format_csv_allow_single_quotes) - allow strings in single quotes. Default value - `true`.
- [format_csv_allow_double_quotes](/docs/en/operations/settings/settings.md/#format_csv_allow_double_quotes) - allow strings in double quotes. Default value - `true`.
- [format_csv_null_representation](/docs/en/operations/settings/settings.md/#format_tsv_null_representation) - custom NULL representation in CSV format. Default value - `\N`.
- [input_format_csv_empty_as_default](/docs/en/operations/settings/settings.md/#input_format_csv_empty_as_default) - treat empty fields in CSV input as default values. Default value - `true`. For complex default expressions, [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings.md/#input_format_defaults_for_omitted_fields) must be enabled too.
- [input_format_csv_enum_as_number](/docs/en/operations/settings/settings.md/#input_format_csv_enum_as_number) - treat inserted enum values in CSV formats as enum indices. Default value - `false`.
- [input_format_csv_use_best_effort_in_schema_inference](/docs/en/operations/settings/settings.md/#input_format_csv_use_best_effort_in_schema_inference) - use some tweaks and heuristics to infer schema in CSV format. If disabled, all fields will be inferred as Strings. Default value - `true`.
- [input_format_csv_arrays_as_nested_csv](/docs/en/operations/settings/settings.md/#input_format_csv_arrays_as_nested_csv) - when reading Array from CSV, expect that its elements were serialized in nested CSV and then put into string. Default value - `false`.
- [output_format_csv_crlf_end_of_line](/docs/en/operations/settings/settings.md/#output_format_csv_crlf_end_of_line) - if it is set to true, end of line in CSV output format will be `\r\n` instead of `\n`. Default value - `false`.
- [input_format_csv_skip_first_lines](/docs/en/operations/settings/settings.md/#input_format_csv_skip_first_lines) - skip the specified number of lines at the beginning of data. Default value - `0`.
- [format_csv_delimiter](/docs/en/operations/settings/settings-formats.md/#format_csv_delimiter) - the character to be considered as a delimiter in CSV data. Default value - `,`.
- [format_csv_allow_single_quotes](/docs/en/operations/settings/settings-formats.md/#format_csv_allow_single_quotes) - allow strings in single quotes. Default value - `true`.
- [format_csv_allow_double_quotes](/docs/en/operations/settings/settings-formats.md/#format_csv_allow_double_quotes) - allow strings in double quotes. Default value - `true`.
- [format_csv_null_representation](/docs/en/operations/settings/settings-formats.md/#format_tsv_null_representation) - custom NULL representation in CSV format. Default value - `\N`.
- [input_format_csv_empty_as_default](/docs/en/operations/settings/settings-formats.md/#input_format_csv_empty_as_default) - treat empty fields in CSV input as default values. Default value - `true`. For complex default expressions, [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) must be enabled too.
- [input_format_csv_enum_as_number](/docs/en/operations/settings/settings-formats.md/#input_format_csv_enum_as_number) - treat inserted enum values in CSV formats as enum indices. Default value - `false`.
- [input_format_csv_use_best_effort_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_csv_use_best_effort_in_schema_inference) - use some tweaks and heuristics to infer schema in CSV format. If disabled, all fields will be inferred as Strings. Default value - `true`.
- [input_format_csv_arrays_as_nested_csv](/docs/en/operations/settings/settings-formats.md/#input_format_csv_arrays_as_nested_csv) - when reading Array from CSV, expect that its elements were serialized in nested CSV and then put into string. Default value - `false`.
- [output_format_csv_crlf_end_of_line](/docs/en/operations/settings/settings-formats.md/#output_format_csv_crlf_end_of_line) - if it is set to true, end of line in CSV output format will be `\r\n` instead of `\n`. Default value - `false`.
- [input_format_csv_skip_first_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_first_lines) - skip the specified number of lines at the beginning of data. Default value - `0`.
- [input_format_csv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_csv_detect_header) - automatically detect header with names and types in CSV format. Default value - `true`.
## CSVWithNames {#csvwithnames}
Also prints the header row with column names, similar to [TabSeparatedWithNames](#tabseparatedwithnames).
:::warning
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings.md/#input_format_with_names_use_header) is set to 1,
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
Otherwise, the first row will be skipped.
:::
@ -478,16 +480,18 @@ Otherwise, the first row will be skipped.
Also prints two header rows with column names and types, similar to [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).
:::warning
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings.md/#input_format_with_names_use_header) is set to 1,
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
Otherwise, the first row will be skipped.
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings.md/#input_format_with_types_use_header) is set to 1,
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
:::
## CustomSeparated {#format-customseparated}
Similar to [Template](#format-template), but it prints or reads all names and types of columns and uses escaping rule from [format_custom_escaping_rule](/docs/en/operations/settings/settings.md/#format_custom_escaping_rule) setting and delimiters from [format_custom_field_delimiter](/docs/en/operations/settings/settings.md/#format_custom_field_delimiter), [format_custom_row_before_delimiter](/docs/en/operations/settings/settings.md/#format_custom_row_before_delimiter), [format_custom_row_after_delimiter](/docs/en/operations/settings/settings.md/#format_custom_row_after_delimiter), [format_custom_row_between_delimiter](/docs/en/operations/settings/settings.md/#format_custom_row_between_delimiter), [format_custom_result_before_delimiter](/docs/en/operations/settings/settings.md/#format_custom_result_before_delimiter) and [format_custom_result_after_delimiter](/docs/en/operations/settings/settings.md/#format_custom_result_after_delimiter) settings, not from format strings.
Similar to [Template](#format-template), but it prints or reads all names and types of columns and uses escaping rule from [format_custom_escaping_rule](/docs/en/operations/settings/settings-formats.md/#format_custom_escaping_rule) setting and delimiters from [format_custom_field_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_field_delimiter), [format_custom_row_before_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_before_delimiter), [format_custom_row_after_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_after_delimiter), [format_custom_row_between_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_between_delimiter), [format_custom_result_before_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_result_before_delimiter) and [format_custom_result_after_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_result_after_delimiter) settings, not from format strings.
If setting [input_format_custom_detect_header](/docs/en/operations/settings/settings.md/#input_format_custom_detect_header) is enabled, ClickHouse will automatically detect header with names and types if any.
There is also `CustomSeparatedIgnoreSpaces` format, which is similar to [TemplateIgnoreSpaces](#templateignorespaces).
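As a hedged sketch of how those settings combine (the delimiter values are chosen purely for illustration):

```sql
SET format_custom_escaping_rule = 'CSV',
    format_custom_field_delimiter = ';',
    format_custom_row_between_delimiter = '\n';

SELECT number, toString(number) AS s FROM numbers(3) FORMAT CustomSeparated;
```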
@ -496,8 +500,8 @@ There is also `CustomSeparatedIgnoreSpaces` format, which is similar to [Templat
Also prints the header row with column names, similar to [TabSeparatedWithNames](#tabseparatedwithnames).
:::warning
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings.md/#input_format_with_names_use_header) is set to 1,
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
Otherwise, the first row will be skipped.
:::
@ -506,10 +510,10 @@ Otherwise, the first row will be skipped.
Also prints two header rows with column names and types, similar to [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).
:::warning
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings.md/#input_format_with_names_use_header) is set to 1,
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
Otherwise, the first row will be skipped.
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings.md/#input_format_with_types_use_header) is set to 1,
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
:::
@ -535,11 +539,11 @@ To read data output by this format you can use [MySQLDump](#mysqldump) input for
### SQLInsert format settings {#sqlinsert-format-settings}
- [output_format_sql_insert_max_batch_size](/docs/en/operations/settings/settings.md/#output_format_sql_insert_max_batch_size) - The maximum number of rows in one INSERT statement. Default value - `65505`.
- [output_format_sql_insert_table_name](/docs/en/operations/settings/settings.md/#output_format_sql_insert_table_name) - The name of the table in the output INSERT query. Default value - `'table'`.
- [output_format_sql_insert_include_column_names](/docs/en/operations/settings/settings.md/#output_format_sql_insert_include_column_names) - Include column names in INSERT query. Default value - `true`.
- [output_format_sql_insert_use_replace](/docs/en/operations/settings/settings.md/#output_format_sql_insert_use_replace) - Use REPLACE statement instead of INSERT. Default value - `false`.
- [output_format_sql_insert_quote_names](/docs/en/operations/settings/settings.md/#output_format_sql_insert_quote_names) - Quote column names with "\`" characters. Default value - `true`.
- [output_format_sql_insert_max_batch_size](/docs/en/operations/settings/settings-formats.md/#output_format_sql_insert_max_batch_size) - The maximum number of rows in one INSERT statement. Default value - `65505`.
- [output_format_sql_insert_table_name](/docs/en/operations/settings/settings-formats.md/#output_format_sql_insert_table_name) - The name of the table in the output INSERT query. Default value - `'table'`.
- [output_format_sql_insert_include_column_names](/docs/en/operations/settings/settings-formats.md/#output_format_sql_insert_include_column_names) - Include column names in INSERT query. Default value - `true`.
- [output_format_sql_insert_use_replace](/docs/en/operations/settings/settings-formats.md/#output_format_sql_insert_use_replace) - Use REPLACE statement instead of INSERT. Default value - `false`.
- [output_format_sql_insert_quote_names](/docs/en/operations/settings/settings-formats.md/#output_format_sql_insert_quote_names) - Quote column names with "\`" characters. Default value - `true`.
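A brief sketch of these settings in action (the table name is arbitrary; the comment describes the expected shape of the output rather than exact text):

```sql
SET output_format_sql_insert_table_name = 'target_table',
    output_format_sql_insert_max_batch_size = 2;

SELECT number AS id FROM numbers(3) FORMAT SQLInsert;
-- emits INSERT statements for target_table, batching at most two rows per statement
```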
## JSON {#json}
@ -599,7 +603,7 @@ SELECT SearchPhrase, count() AS c FROM test.hits GROUP BY SearchPhrase WITH TOTA
}
```
The JSON is compatible with JavaScript. To ensure this, some characters are additionally escaped: the slash `/` is escaped as `\/`; alternative line breaks `U+2028` and `U+2029`, which break some browsers, are escaped as `\uXXXX`. ASCII control characters are escaped: backspace, form feed, line feed, carriage return, and horizontal tab are replaced with `\b`, `\f`, `\n`, `\r`, `\t` , as well as the remaining bytes in the 00-1F range using `\uXXXX` sequences. Invalid UTF-8 sequences are changed to the replacement character � so the output text will consist of valid UTF-8 sequences. For compatibility with JavaScript, Int64 and UInt64 integers are enclosed in double quotes by default. To remove the quotes, you can set the configuration parameter [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings.md/#output_format_json_quote_64bit_integers) to 0.
The JSON is compatible with JavaScript. To ensure this, some characters are additionally escaped: the slash `/` is escaped as `\/`; alternative line breaks `U+2028` and `U+2029`, which break some browsers, are escaped as `\uXXXX`. ASCII control characters are escaped: backspace, form feed, line feed, carriage return, and horizontal tab are replaced with `\b`, `\f`, `\n`, `\r`, `\t` , as well as the remaining bytes in the 00-1F range using `\uXXXX` sequences. Invalid UTF-8 sequences are changed to the replacement character � so the output text will consist of valid UTF-8 sequences. For compatibility with JavaScript, Int64 and UInt64 integers are enclosed in double quotes by default. To remove the quotes, you can set the configuration parameter [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) to 0.
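A minimal sketch of toggling that quoting behaviour:

```sql
-- with the default, x is rendered as "42"; with the setting disabled it is rendered as 42
SET output_format_json_quote_64bit_integers = 0;
SELECT toUInt64(42) AS x FORMAT JSON;
```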
`rows` – The total number of output rows.
@ -610,14 +614,14 @@ If the query contains GROUP BY, rows_before_limit_at_least is the exact number o
`extremes` – Extreme values (when extremes are set to 1).
ClickHouse supports [NULL](/docs/en/sql-reference/syntax.md), which is displayed as `null` in the JSON output. To enable `+nan`, `-nan`, `+inf`, `-inf` values in output, set the [output_format_json_quote_denormals](/docs/en/operations/settings/settings.md/#output_format_json_quote_denormals) to 1.
ClickHouse supports [NULL](/docs/en/sql-reference/syntax.md), which is displayed as `null` in the JSON output. To enable `+nan`, `-nan`, `+inf`, `-inf` values in output, set the [output_format_json_quote_denormals](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_denormals) to 1.
**See Also**
- [JSONEachRow](#jsoneachrow) format
- [output_format_json_array_of_rows](/docs/en/operations/settings/settings.md/#output_format_json_array_of_rows) setting
- [output_format_json_array_of_rows](/docs/en/operations/settings/settings-formats.md/#output_format_json_array_of_rows) setting
For JSON input format, if setting [input_format_json_validate_types_from_metadata](/docs/en/operations/settings/settings.md/#input_format_json_validate_types_from_metadata) is set to 1,
For JSON input format, if setting [input_format_json_validate_types_from_metadata](/docs/en/operations/settings/settings-formats.md/#input_format_json_validate_types_from_metadata) is set to 1,
the types from metadata in input data will be compared with the types of the corresponding columns from the table.
## JSONStrings {#jsonstrings}
@ -690,8 +694,8 @@ Example:
}
```
During import, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
Columns that are not present in the block will be filled with default values (you can use the [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings.md/#input_format_defaults_for_omitted_fields) setting here)
During import, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
Columns that are not present in the block will be filled with default values (you can use the [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) setting here)
## JSONColumnsWithMetadata {#jsoncolumnsmonoblock}
@ -739,7 +743,7 @@ Example:
}
```
For JSONColumnsWithMetadata input format, if setting [input_format_json_validate_types_from_metadata](/docs/en/operations/settings/settings.md/#input_format_json_validate_types_from_metadata) is set to 1,
For JSONColumnsWithMetadata input format, if setting [input_format_json_validate_types_from_metadata](/docs/en/operations/settings/settings-formats.md/#input_format_json_validate_types_from_metadata) is set to 1,
the types from metadata in input data will be compared with the types of the corresponding columns from the table.
## JSONAsString {#jsonasstring}
@ -891,7 +895,7 @@ Example:
]
```
Columns that are not present in the block will be filled with default values (you can use [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings.md/#input_format_defaults_for_omitted_fields) setting here)
Columns that are not present in the block will be filled with default values (you can use [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) setting here)
## JSONEachRow {#jsoneachrow}
@ -905,7 +909,7 @@ Example:
{"num":44,"str":"hello","arr":[0,1,2,3]}
```
While importing data columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
While importing data columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
## JSONStringsEachRow {#jsonstringseachrow}
@ -960,8 +964,8 @@ Differs from `JSONEachRow`/`JSONStringsEachRow` in that ClickHouse will also yie
Differs from `JSONCompactEachRow` format in that it also prints the header row with column names, similar to [TabSeparatedWithNames](#tabseparatedwithnames).
:::warning
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings.md/#input_format_with_names_use_header) is set to 1,
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
Otherwise, the first row will be skipped.
|
||||
:::
|
||||
|
||||
@ -970,10 +974,10 @@ Otherwise, the first row will be skipped.
|
||||
Differs from `JSONCompactEachRow` format in that it also prints two header rows with column names and types, similar to [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).
|
||||
|
||||
:::warning
|
||||
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings.md/#input_format_with_names_use_header) is set to 1,
|
||||
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
|
||||
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
Otherwise, the first row will be skipped.
|
||||
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings.md/#input_format_with_types_use_header) is set to 1,
|
||||
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
|
||||
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
|
||||
:::
|
||||
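To illustrate the two header rows described above, a minimal sketch (the query and column names are illustrative; output shown approximately):

```sql
SELECT toUInt32(number) AS num, concat('row ', toString(number)) AS str
FROM numbers(2)
FORMAT JSONCompactEachRowWithNamesAndTypes;
-- ["num", "str"]
-- ["UInt32", "String"]
-- [0, "row 0"]
-- [1, "row 1"]
```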
|
||||
@ -982,8 +986,8 @@ the types from input data will be compared with the types of the corresponding c
|
||||
Differs from `JSONCompactStringsEachRow` in that it also prints the header row with column names, similar to [TabSeparatedWithNames](#tabseparatedwithnames).
|
||||
|
||||
:::warning
|
||||
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings.md/#input_format_with_names_use_header) is set to 1,
|
||||
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
|
||||
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
Otherwise, the first row will be skipped.
|
||||
:::
|
||||
|
||||
@ -992,10 +996,10 @@ Otherwise, the first row will be skipped.
|
||||
Differs from `JSONCompactStringsEachRow` in that it also prints two header rows with column names and types, similar to [TabSeparatedWithNamesAndTypes](#tabseparatedwithnamesandtypes).
|
||||
|
||||
:::warning
|
||||
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings.md/#input_format_with_names_use_header) is set to 1,
|
||||
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
|
||||
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
Otherwise, the first row will be skipped.
|
||||
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings.md/#input_format_with_types_use_header) is set to 1,
|
||||
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
|
||||
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
|
||||
:::
|
||||
|
||||
@ -1021,7 +1025,7 @@ Example:
|
||||
}
|
||||
```
|
||||
|
||||
To use an object name as a column value, you can use the special setting [format_json_object_each_row_column_for_object_name](/docs/en/operations/settings/settings.md/#format_json_object_each_row_column_for_object_name). Set its value to the name of the column that is used as the JSON key for each row in the resulting object.
To use an object name as a column value, you can use the special setting [format_json_object_each_row_column_for_object_name](/docs/en/operations/settings/settings-formats.md/#format_json_object_each_row_column_for_object_name). Set its value to the name of the column that is used as the JSON key for each row in the resulting object.
|
||||
Examples:
|
||||
|
||||
For output:
|
||||
@ -1095,7 +1099,7 @@ ClickHouse ignores spaces between elements and commas after the objects. You can
|
||||
|
||||
ClickHouse substitutes omitted values with the default values for the corresponding [data types](/docs/en/sql-reference/data-types/index.md).
|
||||
|
||||
If `DEFAULT expr` is specified, ClickHouse uses different substitution rules depending on the [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings.md/#input_format_defaults_for_omitted_fields) setting.
|
||||
If `DEFAULT expr` is specified, ClickHouse uses different substitution rules depending on the [input_format_defaults_for_omitted_fields](/docs/en/operations/settings/settings-formats.md/#input_format_defaults_for_omitted_fields) setting.
|
||||
|
||||
Consider the following table:
|
||||
|
||||
@ -1140,7 +1144,7 @@ Any set of bytes can be output in the strings. Use the `JSONEachRow` format if y
|
||||
|
||||
### Usage of Nested Structures {#jsoneachrow-nested}
|
||||
|
||||
If you have a table with [Nested](/docs/en/sql-reference/data-types/nested-data-structures/nested.md) data type columns, you can insert JSON data with the same structure. Enable this feature with the [input_format_import_nested_json](/docs/en/operations/settings/settings.md/#input_format_import_nested_json) setting.
|
||||
If you have a table with [Nested](/docs/en/sql-reference/data-types/nested-data-structures/nested.md) data type columns, you can insert JSON data with the same structure. Enable this feature with the [input_format_import_nested_json](/docs/en/operations/settings/settings-formats.md/#input_format_import_nested_json) setting.
|
||||
|
||||
For example, consider the following table:
|
||||
|
||||
@ -1154,7 +1158,7 @@ As you can see in the `Nested` data type description, ClickHouse treats each com
|
||||
INSERT INTO json_each_row_nested FORMAT JSONEachRow {"n.s": ["abc", "def"], "n.i": [1, 23]}
|
||||
```
|
||||
|
||||
To insert data as a hierarchical JSON object, set [input_format_import_nested_json=1](/docs/en/operations/settings/settings.md/#input_format_import_nested_json).
|
||||
To insert data as a hierarchical JSON object, set [input_format_import_nested_json=1](/docs/en/operations/settings/settings-formats.md/#input_format_import_nested_json).
|
||||
|
||||
``` json
|
||||
{
|
||||
@ -1199,20 +1203,20 @@ SELECT * FROM json_each_row_nested
|
||||
|
||||
### JSON formats settings {#json-formats-settings}
|
||||
|
||||
- [input_format_import_nested_json](/docs/en/operations/settings/settings.md/#input_format_import_nested_json) - map nested JSON data to nested tables (it works for JSONEachRow format). Default value - `false`.
|
||||
- [input_format_json_read_bools_as_numbers](/docs/en/operations/settings/settings.md/#input_format_json_read_bools_as_numbers) - allow to parse bools as numbers in JSON input formats. Default value - `true`.
|
||||
- [input_format_json_read_numbers_as_strings](/docs/en/operations/settings/settings.md/#input_format_json_read_numbers_as_strings) - allow to parse numbers as strings in JSON input formats. Default value - `false`.
|
||||
- [input_format_json_read_objects_as_strings](/docs/en/operations/settings/settings.md/#input_format_json_read_objects_as_strings) - allow to parse JSON objects as strings in JSON input formats. Default value - `false`.
|
||||
- [input_format_json_named_tuples_as_objects](/docs/en/operations/settings/settings.md/#input_format_json_named_tuples_as_objects) - parse named tuple columns as JSON objects. Default value - `true`.
|
||||
- [input_format_json_defaults_for_missing_elements_in_named_tuple](/docs/en/operations/settings/settings.md/#input_format_json_defaults_for_missing_elements_in_named_tuple) - insert default values for missing elements in JSON object while parsing named tuple. Default value - `true`.
|
||||
- [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings.md/#output_format_json_quote_64bit_integers) - controls quoting of 64-bit integers in JSON output format. Default value - `true`.
|
||||
- [output_format_json_quote_64bit_floats](/docs/en/operations/settings/settings.md/#output_format_json_quote_64bit_floats) - controls quoting of 64-bit floats in JSON output format. Default value - `false`.
|
||||
- [output_format_json_quote_denormals](/docs/en/operations/settings/settings.md/#output_format_json_quote_denormals) - enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format. Default value - `false`.
|
||||
- [output_format_json_quote_decimals](/docs/en/operations/settings/settings.md/#output_format_json_quote_decimals) - controls quoting of decimals in JSON output format. Default value - `false`.
|
||||
- [output_format_json_escape_forward_slashes](/docs/en/operations/settings/settings.md/#output_format_json_escape_forward_slashes) - controls escaping forward slashes for string outputs in JSON output format. Default value - `true`.
|
||||
- [output_format_json_named_tuples_as_objects](/docs/en/operations/settings/settings.md/#output_format_json_named_tuples_as_objects) - serialize named tuple columns as JSON objects. Default value - `true`.
|
||||
- [output_format_json_array_of_rows](/docs/en/operations/settings/settings.md/#output_format_json_array_of_rows) - output a JSON array of all rows in JSONEachRow(Compact) format. Default value - `false`.
|
||||
- [output_format_json_validate_utf8](/docs/en/operations/settings/settings.md/#output_format_json_validate_utf8) - enables validation of UTF-8 sequences in JSON output formats (note that it doesn't impact formats JSON/JSONCompact/JSONColumnsWithMetadata, they always validate utf8). Default value - `false`.
|
||||
- [input_format_import_nested_json](/docs/en/operations/settings/settings-formats.md/#input_format_import_nested_json) - map nested JSON data to nested tables (it works for JSONEachRow format). Default value - `false`.
|
||||
- [input_format_json_read_bools_as_numbers](/docs/en/operations/settings/settings-formats.md/#input_format_json_read_bools_as_numbers) - allow to parse bools as numbers in JSON input formats. Default value - `true`.
|
||||
- [input_format_json_read_numbers_as_strings](/docs/en/operations/settings/settings-formats.md/#input_format_json_read_numbers_as_strings) - allow to parse numbers as strings in JSON input formats. Default value - `false`.
|
||||
- [input_format_json_read_objects_as_strings](/docs/en/operations/settings/settings-formats.md/#input_format_json_read_objects_as_strings) - allow to parse JSON objects as strings in JSON input formats. Default value - `false`.
|
||||
- [input_format_json_named_tuples_as_objects](/docs/en/operations/settings/settings-formats.md/#input_format_json_named_tuples_as_objects) - parse named tuple columns as JSON objects. Default value - `true`.
|
||||
- [input_format_json_defaults_for_missing_elements_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_defaults_for_missing_elements_in_named_tuple) - insert default values for missing elements in JSON object while parsing named tuple. Default value - `true`.
|
||||
- [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) - controls quoting of 64-bit integers in JSON output format. Default value - `true`.
|
||||
- [output_format_json_quote_64bit_floats](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_floats) - controls quoting of 64-bit floats in JSON output format. Default value - `false`.
|
||||
- [output_format_json_quote_denormals](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_denormals) - enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format. Default value - `false`.
|
||||
- [output_format_json_quote_decimals](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_decimals) - controls quoting of decimals in JSON output format. Default value - `false`.
|
||||
- [output_format_json_escape_forward_slashes](/docs/en/operations/settings/settings-formats.md/#output_format_json_escape_forward_slashes) - controls escaping forward slashes for string outputs in JSON output format. Default value - `true`.
|
||||
- [output_format_json_named_tuples_as_objects](/docs/en/operations/settings/settings-formats.md/#output_format_json_named_tuples_as_objects) - serialize named tuple columns as JSON objects. Default value - `true`.
|
||||
- [output_format_json_array_of_rows](/docs/en/operations/settings/settings-formats.md/#output_format_json_array_of_rows) - output a JSON array of all rows in JSONEachRow(Compact) format. Default value - `false`.
|
||||
- [output_format_json_validate_utf8](/docs/en/operations/settings/settings-formats.md/#output_format_json_validate_utf8) - enables validation of UTF-8 sequences in JSON output formats (note that it doesn't impact formats JSON/JSONCompact/JSONColumnsWithMetadata, they always validate utf8). Default value - `false`.
|
||||
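As a rough sketch of how a few of the settings listed above combine (values and output are illustrative):

```sql
SET output_format_json_quote_64bit_integers = 0,
    output_format_json_named_tuples_as_objects = 1;

SELECT toUInt64(1) AS id,
       CAST(('a', 1), 'Tuple(name String, value UInt8)') AS t
FORMAT JSONEachRow;
-- {"id":1,"t":{"name":"a","value":1}}
```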
|
||||
## BSONEachRow {#bsoneachrow}
|
||||
|
||||
@ -1274,8 +1278,8 @@ Note: this format don't work properly on Big-Endian platforms.
|
||||
|
||||
### BSON format settings {#bson-format-settings}
|
||||
|
||||
- [output_format_bson_string_as_string](/docs/en/operations/settings/settings.md/#output_format_bson_string_as_string) - use BSON String type instead of Binary for String columns. Default value - `false`.
|
||||
- [input_format_bson_skip_fields_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings.md/#input_format_bson_skip_fields_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for format BSONEachRow. Default value - `false`.
|
||||
- [output_format_bson_string_as_string](/docs/en/operations/settings/settings-formats.md/#output_format_bson_string_as_string) - use BSON String type instead of Binary for String columns. Default value - `false`.
|
||||
- [input_format_bson_skip_fields_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_bson_skip_fields_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for format BSONEachRow. Default value - `false`.
|
||||
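A minimal sketch of exporting with the string setting above; the file name `data.bson` is an assumption:

```sql
SET output_format_bson_string_as_string = 1;
-- String columns are written as BSON String rather than Binary
SELECT 'hello' AS greeting, toUInt32(42) AS answer
INTO OUTFILE 'data.bson'
FORMAT BSONEachRow;
```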
|
||||
## Native {#native}
|
||||
|
||||
@ -1408,12 +1412,12 @@ Differs from [PrettySpaceNoEscapes](#prettyspacenoescapes) in that up to 10,000
|
||||
|
||||
## Pretty formats settings {#pretty-formats-settings}
|
||||
|
||||
- [output_format_pretty_max_rows](/docs/en/operations/settings/settings.md/#output_format_pretty_max_rows) - rows limit for Pretty formats. Default value - `10000`.
|
||||
- [output_format_pretty_max_column_pad_width](/docs/en/operations/settings/settings.md/#output_format_pretty_max_column_pad_width) - maximum width to pad all values in a column in Pretty formats. Default value - `250`.
|
||||
- [output_format_pretty_max_value_width](/docs/en/operations/settings/settings.md/#output_format_pretty_max_value_width) - Maximum width of value to display in Pretty formats. If greater - it will be cut. Default value - `10000`.
|
||||
- [output_format_pretty_color](/docs/en/operations/settings/settings.md/#output_format_pretty_color) - use ANSI escape sequences to paint colors in Pretty formats. Default value - `true`.
|
||||
- [output_format_pretty_grid_charset](/docs/en/operations/settings/settings.md/#output_format_pretty_grid_charset) - Charset for printing grid borders. Available charsets: ASCII, UTF-8. Default value - `UTF-8`.
|
||||
- [output_format_pretty_row_numbers](/docs/en/operations/settings/settings.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `false`.
|
||||
- [output_format_pretty_max_rows](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_max_rows) - rows limit for Pretty formats. Default value - `10000`.
|
||||
- [output_format_pretty_max_column_pad_width](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_max_column_pad_width) - maximum width to pad all values in a column in Pretty formats. Default value - `250`.
|
||||
- [output_format_pretty_max_value_width](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_max_value_width) - Maximum width of value to display in Pretty formats. If greater - it will be cut. Default value - `10000`.
|
||||
- [output_format_pretty_color](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_color) - use ANSI escape sequences to paint colors in Pretty formats. Default value - `true`.
|
||||
- [output_format_pretty_grid_charset](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_grid_charset) - Charset for printing grid borders. Available charsets: ASCII, UTF-8. Default value - `UTF-8`.
|
||||
- [output_format_pretty_row_numbers](/docs/en/operations/settings/settings-formats.md/#output_format_pretty_row_numbers) - Add row numbers before each row for pretty output format. Default value - `false`.
|
||||
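A minimal sketch combining two of the settings above (output is described, not reproduced exactly):

```sql
SET output_format_pretty_row_numbers = 1, output_format_pretty_max_rows = 3;
-- only the first rows up to the limit are rendered, each prefixed with its row number
SELECT number FROM numbers(10) FORMAT PrettyCompact;
```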
|
||||
## RowBinary {#rowbinary}
|
||||
|
||||
@ -1438,8 +1442,8 @@ Similar to [RowBinary](#rowbinary), but with added header:
|
||||
- N `String`s specifying column names
|
||||
|
||||
:::warning
|
||||
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings.md/#input_format_with_names_use_header) is set to 1,
|
||||
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
|
||||
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
Otherwise, the first row will be skipped.
|
||||
:::
|
||||
|
||||
@ -1452,16 +1456,16 @@ Similar to [RowBinary](#rowbinary), but with added header:
|
||||
- N `String`s specifying column types
|
||||
|
||||
:::warning
|
||||
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings.md/#input_format_with_names_use_header) is set to 1,
|
||||
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
|
||||
the columns from input data will be mapped to the columns from the table by their names, columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
Otherwise, the first row will be skipped.
|
||||
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings.md/#input_format_with_types_use_header) is set to 1,
|
||||
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
|
||||
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
|
||||
:::
|
||||
|
||||
## RowBinary format settings {#row-binary-format-settings}
|
||||
|
||||
- [format_binary_max_string_size](/docs/en/operations/settings/settings.md/#format_binary_max_string_size) - The maximum allowed size for String in RowBinary format. Default value - `1GiB`.
|
||||
- [format_binary_max_string_size](/docs/en/operations/settings/settings-formats.md/#format_binary_max_string_size) - The maximum allowed size for String in RowBinary format. Default value - `1GiB`.
|
||||
|
||||
## Values {#data-format-values}
|
||||
|
||||
@ -1473,9 +1477,9 @@ This is the format that is used in `INSERT INTO t VALUES ...`, but you can also
|
||||
|
||||
## Values format settings {#values-format-settings}
|
||||
|
||||
- [input_format_values_interpret_expressions](/docs/en/operations/settings/settings.md/#input_format_values_interpret_expressions) - if the field could not be parsed by streaming parser, run SQL parser and try to interpret it as SQL expression. Default value - `true`.
|
||||
- [input_format_values_deduce_templates_of_expressions](/docs/en/operations/settings/settings.md/#input_format_values_deduce_templates_of_expressions) - if the field could not be parsed by streaming parser, run SQL parser, deduce template of the SQL expression, try to parse all rows using template and then interpret expression for all rows. Default value - `true`.
|
||||
- [input_format_values_accurate_types_of_literals](/docs/en/operations/settings/settings.md/#input_format_values_accurate_types_of_literals) - when parsing and interpreting expressions using template, check actual type of literal to avoid possible overflow and precision issues. Default value - `true`.
|
||||
- [input_format_values_interpret_expressions](/docs/en/operations/settings/settings-formats.md/#input_format_values_interpret_expressions) - if the field could not be parsed by streaming parser, run SQL parser and try to interpret it as SQL expression. Default value - `true`.
|
||||
- [input_format_values_deduce_templates_of_expressions](/docs/en/operations/settings/settings-formats.md/#input_format_values_deduce_templates_of_expressions) - if the field could not be parsed by streaming parser, run SQL parser, deduce template of the SQL expression, try to parse all rows using template and then interpret expression for all rows. Default value - `true`.
|
||||
- [input_format_values_accurate_types_of_literals](/docs/en/operations/settings/settings-formats.md/#input_format_values_accurate_types_of_literals) - when parsing and interpreting expressions using template, check actual type of literal to avoid possible overflow and precision issues. Default value - `true`.
|
||||
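For illustration, a sketch of expression interpretation in the Values format; the table `test.visits` is an assumption:

```sql
SET input_format_values_interpret_expressions = 1;
-- now() is not a literal, so the streaming parser falls back to the SQL parser for it
INSERT INTO test.visits VALUES (1, now());
```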
|
||||
|
||||
## Vertical {#vertical}
|
||||
@ -1615,7 +1619,7 @@ The table below shows supported data types and how they match ClickHouse [data t
|
||||
| `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` |
|
||||
| `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` |
|
||||
|
||||
For working with `Enum` in the CapnProto format, use the [format_capn_proto_enum_comparising_mode](/docs/en/operations/settings/settings.md/#format_capn_proto_enum_comparising_mode) setting.
For working with `Enum` in the CapnProto format, use the [format_capn_proto_enum_comparising_mode](/docs/en/operations/settings/settings-formats.md/#format_capn_proto_enum_comparising_mode) setting.
|
||||
|
||||
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` type also can be nested.
|
||||
|
||||
@ -1714,7 +1718,7 @@ something_weird{problem="division by zero"} +Inf -3982045
|
||||
|
||||
## Protobuf {#protobuf}
|
||||
|
||||
Protobuf is a [Protocol Buffers](https://developers.google.com/protocol-buffers/) format.
Protobuf is a [Protocol Buffers](https://protobuf.dev/) format.
|
||||
|
||||
This format requires an external format schema. The schema is cached between queries.
|
||||
ClickHouse supports both `proto2` and `proto3` syntaxes. Repeated/optional/required fields are supported.
|
||||
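A minimal sketch of selecting data with an external schema; the file `schemafile.proto`, the message name `MessageType`, and the table `test.table` are assumptions:

```sql
SELECT * FROM test.table
FORMAT Protobuf
SETTINGS format_schema = 'schemafile:MessageType';
```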
@ -1809,7 +1813,7 @@ The table below shows supported data types and how they match ClickHouse [data t
|
||||
| `long (timestamp-millis)` \** | [DateTime64(3)](/docs/en/sql-reference/data-types/datetime.md) | `long (timestamp-millis)` \* |
|
||||
| `long (timestamp-micros)` \** | [DateTime64(6)](/docs/en/sql-reference/data-types/datetime.md) | `long (timestamp-micros)` \* |
|
||||
|
||||
\* `bytes` is default, controlled by [output_format_avro_string_column_pattern](/docs/en/operations/settings/settings.md/#output_format_avro_string_column_pattern)
|
||||
\* `bytes` is default, controlled by [output_format_avro_string_column_pattern](/docs/en/operations/settings/settings-formats.md/#output_format_avro_string_column_pattern)
|
||||
\** [Avro logical types](https://avro.apache.org/docs/current/spec.html#Logical+Types)
|
||||
|
||||
Unsupported Avro data types: `record` (non-root), `map`
|
||||
@ -1831,7 +1835,7 @@ Unused fields are skipped.
|
||||
|
||||
Data types of ClickHouse table columns can differ from the corresponding fields of the Avro data inserted. When inserting data, ClickHouse interprets data types according to the table above and then [casts](/docs/en/sql-reference/functions/type-conversion-functions.md/#type_conversion_function-cast) the data to corresponding column type.
|
||||
|
||||
While importing data, when a field is not found in the schema and setting [input_format_avro_allow_missing_fields](/docs/en/operations/settings/settings.md/#input_format_avro_allow_missing_fields) is enabled, the default value will be used instead of an error.
While importing data, when a field is not found in the schema and setting [input_format_avro_allow_missing_fields](/docs/en/operations/settings/settings-formats.md/#input_format_avro_allow_missing_fields) is enabled, the default value will be used instead of an error.
|
||||
|
||||
### Selecting Data {#selecting-data-1}
|
||||
|
||||
@ -1846,7 +1850,7 @@ Column names must:
|
||||
- start with `[A-Za-z_]`
|
||||
- subsequently contain only `[A-Za-z0-9_]`
|
||||
|
||||
Output Avro file compression and sync interval can be configured with [output_format_avro_codec](/docs/en/operations/settings/settings.md/#output_format_avro_codec) and [output_format_avro_sync_interval](/docs/en/operations/settings/settings.md/#output_format_avro_sync_interval) respectively.
|
||||
Output Avro file compression and sync interval can be configured with [output_format_avro_codec](/docs/en/operations/settings/settings-formats.md/#output_format_avro_codec) and [output_format_avro_sync_interval](/docs/en/operations/settings/settings-formats.md/#output_format_avro_sync_interval) respectively.
|
||||
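For example, a sketch of exporting with an explicit codec and sync interval; the table `test.hits` and the file name are assumptions:

```sql
SET output_format_avro_codec = 'snappy', output_format_avro_sync_interval = 16384;
SELECT * FROM test.hits
INTO OUTFILE 'hits.avro'
FORMAT Avro;
```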
|
||||
## AvroConfluent {#data-format-avro-confluent}
|
||||
|
||||
@ -1856,7 +1860,7 @@ Each Avro message embeds a schema id that can be resolved to the actual schema w
|
||||
|
||||
Schemas are cached once resolved.
|
||||
|
||||
Schema Registry URL is configured with [format_avro_schema_registry_url](/docs/en/operations/settings/settings.md/#format_avro_schema_registry_url).
|
||||
Schema Registry URL is configured with [format_avro_schema_registry_url](/docs/en/operations/settings/settings-formats.md/#format_avro_schema_registry_url).
|
||||
|
||||
### Data Types Matching {#data_types-matching-1}
|
||||
|
||||
@ -1954,12 +1958,12 @@ To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/t
|
||||
|
||||
### Parquet format settings {#parquet-format-settings}
|
||||
|
||||
- [output_format_parquet_row_group_size](/docs/en/operations/settings/settings.md/#output_format_parquet_row_group_size) - row group size in rows while data output. Default value - `1000000`.
|
||||
- [output_format_parquet_string_as_string](/docs/en/operations/settings/settings.md/#output_format_parquet_string_as_string) - use Parquet String type instead of Binary for String columns. Default value - `false`.
|
||||
- [input_format_parquet_import_nested](/docs/en/operations/settings/settings.md/#input_format_parquet_import_nested) - allow inserting array of structs into [Nested](/docs/en/sql-reference/data-types/nested-data-structures/nested.md) table in Parquet input format. Default value - `false`.
|
||||
- [input_format_parquet_case_insensitive_column_matching](/docs/en/operations/settings/settings.md/#input_format_parquet_case_insensitive_column_matching) - ignore case when matching Parquet columns with ClickHouse columns. Default value - `false`.
|
||||
- [input_format_parquet_allow_missing_columns](/docs/en/operations/settings/settings.md/#input_format_parquet_allow_missing_columns) - allow missing columns while reading Parquet data. Default value - `false`.
|
||||
- [input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings.md/#input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Parquet format. Default value - `false`.
|
||||
- [output_format_parquet_row_group_size](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_row_group_size) - row group size in rows while data output. Default value - `1000000`.
|
||||
- [output_format_parquet_string_as_string](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_string_as_string) - use Parquet String type instead of Binary for String columns. Default value - `false`.
|
||||
- [input_format_parquet_import_nested](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_import_nested) - allow inserting array of structs into [Nested](/docs/en/sql-reference/data-types/nested-data-structures/nested.md) table in Parquet input format. Default value - `false`.
|
||||
- [input_format_parquet_case_insensitive_column_matching](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_case_insensitive_column_matching) - ignore case when matching Parquet columns with ClickHouse columns. Default value - `false`.
|
||||
- [input_format_parquet_allow_missing_columns](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_allow_missing_columns) - allow missing columns while reading Parquet data. Default value - `false`.
|
||||
- [input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Parquet format. Default value - `false`.
|
||||
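A minimal sketch of importing a Parquet file with one of the settings above; the file `data.parquet` and the table `target_table` are assumptions:

```sql
SET input_format_parquet_allow_missing_columns = 1;
-- columns missing from the Parquet file are filled with defaults instead of raising an error
INSERT INTO target_table
SELECT * FROM file('data.parquet', 'Parquet');
```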
|
||||
## Arrow {#data-format-arrow}
|
||||
|
||||
@ -1997,7 +2001,7 @@ The table below shows supported data types and how they match ClickHouse [data t
|
||||
|
||||
Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.
|
||||
|
||||
The `DICTIONARY` type is supported for `INSERT` queries, and for `SELECT` queries there is an [output_format_arrow_low_cardinality_as_dictionary](/docs/en/operations/settings/settings.md/#output-format-arrow-low-cardinality-as-dictionary) setting that allows to output [LowCardinality](/docs/en/sql-reference/data-types/lowcardinality.md) type as a `DICTIONARY` type.
|
||||
The `DICTIONARY` type is supported for `INSERT` queries, and for `SELECT` queries there is an [output_format_arrow_low_cardinality_as_dictionary](/docs/en/operations/settings/settings-formats.md/#output-format-arrow-low-cardinality-as-dictionary) setting that allows to output [LowCardinality](/docs/en/sql-reference/data-types/lowcardinality.md) type as a `DICTIONARY` type.
|
||||
|
||||
Unsupported Arrow data types: `FIXED_SIZE_BINARY`, `JSON`, `UUID`, `ENUM`.
|
||||
|
||||
@ -2021,12 +2025,12 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filenam
|
||||
|
||||
### Arrow format settings {#parquet-format-settings}
|
||||
|
||||
- [output_format_arrow_low_cardinality_as_dictionary](/docs/en/operations/settings/settings.md/#output_format_arrow_low_cardinality_as_dictionary) - enable output ClickHouse LowCardinality type as Dictionary Arrow type. Default value - `false`.
|
||||
- [output_format_arrow_string_as_string](/docs/en/operations/settings/settings.md/#output_format_arrow_string_as_string) - use Arrow String type instead of Binary for String columns. Default value - `false`.
|
||||
- [input_format_arrow_import_nested](/docs/en/operations/settings/settings.md/#input_format_arrow_import_nested) - allow inserting array of structs into Nested table in Arrow input format. Default value - `false`.
|
||||
- [input_format_arrow_case_insensitive_column_matching](/docs/en/operations/settings/settings.md/#input_format_arrow_case_insensitive_column_matching) - ignore case when matching Arrow columns with ClickHouse columns. Default value - `false`.
|
||||
- [input_format_arrow_allow_missing_columns](/docs/en/operations/settings/settings.md/#input_format_arrow_allow_missing_columns) - allow missing columns while reading Arrow data. Default value - `false`.
|
||||
- [input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings.md/#input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Arrow format. Default value - `false`.
|
||||
- [output_format_arrow_low_cardinality_as_dictionary](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_low_cardinality_as_dictionary) - enable output ClickHouse LowCardinality type as Dictionary Arrow type. Default value - `false`.
|
||||
- [output_format_arrow_string_as_string](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_string_as_string) - use Arrow String type instead of Binary for String columns. Default value - `false`.
|
||||
- [input_format_arrow_import_nested](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_import_nested) - allow inserting array of structs into Nested table in Arrow input format. Default value - `false`.
|
||||
- [input_format_arrow_case_insensitive_column_matching](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_case_insensitive_column_matching) - ignore case when matching Arrow columns with ClickHouse columns. Default value - `false`.
|
||||
- [input_format_arrow_allow_missing_columns](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_allow_missing_columns) - allow missing columns while reading Arrow data. Default value - `false`.
|
||||
- [input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Arrow format. Default value - `false`.
|
||||
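A minimal sketch of exporting LowCardinality data as an Arrow `DICTIONARY` column; the file name is an assumption:

```sql
SET output_format_arrow_low_cardinality_as_dictionary = 1;
SELECT toLowCardinality(toString(number % 3)) AS category
FROM numbers(10)
INTO OUTFILE 'data.arrow'
FORMAT Arrow;
```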
|
||||
## ArrowStream {#data-format-arrow-stream}
|
||||
|
||||
@ -2081,11 +2085,11 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT ORC" > {filename.
|
||||
|
||||
### Arrow format settings {#parquet-format-settings}
|
||||
|
||||
- [output_format_arrow_string_as_string](/docs/en/operations/settings/settings.md/#output_format_arrow_string_as_string) - use Arrow String type instead of Binary for String columns. Default value - `false`.
|
||||
- [input_format_arrow_import_nested](/docs/en/operations/settings/settings.md/#input_format_arrow_import_nested) - allow inserting array of structs into Nested table in Arrow input format. Default value - `false`.
|
||||
- [input_format_arrow_case_insensitive_column_matching](/docs/en/operations/settings/settings.md/#input_format_arrow_case_insensitive_column_matching) - ignore case when matching Arrow columns with ClickHouse columns. Default value - `false`.
|
||||
- [input_format_arrow_allow_missing_columns](/docs/en/operations/settings/settings.md/#input_format_arrow_allow_missing_columns) - allow missing columns while reading Arrow data. Default value - `false`.
|
||||
- [input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings.md/#input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Arrow format. Default value - `false`.
|
||||
- [output_format_arrow_string_as_string](/docs/en/operations/settings/settings-formats.md/#output_format_arrow_string_as_string) - use Arrow String type instead of Binary for String columns. Default value - `false`.
|
||||
- [input_format_arrow_import_nested](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_import_nested) - allow inserting array of structs into Nested table in Arrow input format. Default value - `false`.
|
||||
- [input_format_arrow_case_insensitive_column_matching](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_case_insensitive_column_matching) - ignore case when matching Arrow columns with ClickHouse columns. Default value - `false`.
|
||||
- [input_format_arrow_allow_missing_columns](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_allow_missing_columns) - allow missing columns while reading Arrow data. Default value - `false`.
|
||||
- [input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_arrow_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Arrow format. Default value - `false`.
|
||||
|
||||
|
||||
To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/table-engines/integrations/hdfs.md).
|
||||
@ -2133,13 +2137,13 @@ When working with the `Regexp` format, you can use the following settings:
|
||||
|
||||
**Usage**
|
||||
|
||||
The regular expression from [format_regexp](/docs/en/operations/settings/settings.md/#format_regexp) setting is applied to every line of imported data. The number of subpatterns in the regular expression must be equal to the number of columns in imported dataset.
|
||||
The regular expression from [format_regexp](/docs/en/operations/settings/settings-formats.md/#format_regexp) setting is applied to every line of imported data. The number of subpatterns in the regular expression must be equal to the number of columns in imported dataset.
|
||||
|
||||
Lines of the imported data must be separated by newline character `'\n'` or DOS-style newline `"\r\n"`.
|
||||
|
||||
The content of every matched subpattern is parsed with the method of corresponding data type, according to [format_regexp_escaping_rule](/docs/en/operations/settings/settings.md/#format_regexp_escaping_rule) setting.
|
||||
The content of every matched subpattern is parsed with the method of corresponding data type, according to [format_regexp_escaping_rule](/docs/en/operations/settings/settings-formats.md/#format_regexp_escaping_rule) setting.
|
||||
|
||||
If the regular expression does not match the line and [format_regexp_skip_unmatched](/docs/en/operations/settings/settings.md/#format_regexp_skip_unmatched) is set to 1, the line is silently skipped. Otherwise, an exception is thrown.
If the regular expression does not match the line and [format_regexp_skip_unmatched](/docs/en/operations/settings/settings-formats.md/#format_regexp_skip_unmatched) is set to 1, the line is silently skipped. Otherwise, an exception is thrown.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -2197,8 +2201,8 @@ in the server configuration.
|
||||
|
||||
## Skipping Errors {#skippingerrors}
|
||||
|
||||
Some formats such as `CSV`, `TabSeparated`, `TSKV`, `JSONEachRow`, `Template`, `CustomSeparated` and `Protobuf` can skip broken row if parsing error occurred and continue parsing from the beginning of next row. See [input_format_allow_errors_num](/docs/en/operations/settings/settings.md/#input_format_allow_errors_num) and
|
||||
[input_format_allow_errors_ratio](/docs/en/operations/settings/settings.md/#input_format_allow_errors_ratio) settings.
|
||||
Some formats such as `CSV`, `TabSeparated`, `TSKV`, `JSONEachRow`, `Template`, `CustomSeparated` and `Protobuf` can skip broken row if parsing error occurred and continue parsing from the beginning of next row. See [input_format_allow_errors_num](/docs/en/operations/settings/settings-formats.md/#input_format_allow_errors_num) and
|
||||
[input_format_allow_errors_ratio](/docs/en/operations/settings/settings-formats.md/#input_format_allow_errors_ratio) settings.
|
||||
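For example, a sketch of tolerating a bounded number of malformed rows on import (see the limitations below); the file `events.csv` and the table `events` are assumptions:

```sql
SET input_format_allow_errors_num = 10, input_format_allow_errors_ratio = 0.1;
-- up to 10 broken rows, or 10% of the input, are skipped instead of aborting the INSERT
INSERT INTO events
SELECT * FROM file('events.csv', 'CSV');
```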
Limitations:
|
||||
- In case of parsing error `JSONEachRow` skips all data until the new line (or EOF), so rows must be delimited by `\n` to count errors correctly.
|
||||
- `Template` and `CustomSeparated` use delimiter after the last column and delimiter between rows to find the beginning of next row, so skipping errors works only if at least one of them is not empty.
|
||||
@ -2277,17 +2281,17 @@ $ clickhouse-client --query="SELECT * FROM msgpack FORMAT MsgPack" > tmp_msgpack
|
||||
|
||||
### MsgPack format settings {#msgpack-format-settings}
|
||||
|
||||
- [input_format_msgpack_number_of_columns](/docs/en/operations/settings/settings.md/#input_format_msgpack_number_of_columns) - the number of columns in inserted MsgPack data. Used for automatic schema inference from data. Default value - `0`.
|
||||
- [output_format_msgpack_uuid_representation](/docs/en/operations/settings/settings.md/#output_format_msgpack_uuid_representation) - the way how to output UUID in MsgPack format. Default value - `EXT`.
|
||||
- [input_format_msgpack_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_msgpack_number_of_columns) - the number of columns in inserted MsgPack data. Used for automatic schema inference from data. Default value - `0`.
|
||||
- [output_format_msgpack_uuid_representation](/docs/en/operations/settings/settings-formats.md/#output_format_msgpack_uuid_representation) - the way how to output UUID in MsgPack format. Default value - `EXT`.
|
||||
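A rough sketch of reading MsgPack data with an explicit column count for schema inference; the file `data.msgpack` is an assumption:

```sql
SET input_format_msgpack_number_of_columns = 3;
-- MsgPack carries no column names, so the column count guides schema inference
SELECT * FROM file('data.msgpack', 'MsgPack');
```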
|
||||
## MySQLDump {#mysqldump}
|
||||
|
||||
ClickHouse supports reading MySQL [dumps](https://dev.mysql.com/doc/refman/8.0/en/mysqldump.html).
|
||||
It reads all data from the INSERT queries belonging to one table in the dump. If there is more than one table, by default it reads data from the first one.
You can specify the name of the table to read data from using the [input_format_mysql_dump_table_name](/docs/en/operations/settings/settings.md/#input_format_mysql_dump_table_name) setting.
If setting [input_format_mysql_dump_map_columns](/docs/en/operations/settings/settings.md/#input_format_mysql_dump_map_columns) is set to 1 and
You can specify the name of the table to read data from using the [input_format_mysql_dump_table_name](/docs/en/operations/settings/settings-formats.md/#input_format_mysql_dump_table_name) setting.
If setting [input_format_mysql_dump_map_columns](/docs/en/operations/settings/settings-formats.md/#input_format_mysql_dump_map_columns) is set to 1 and
the dump contains a CREATE query for the specified table or column names in the INSERT query, the columns from input data will be mapped to the columns from the table by their names,
|
||||
columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
columns with unknown names will be skipped if setting [input_format_skip_unknown_fields](/docs/en/operations/settings/settings-formats.md/#input_format_skip_unknown_fields) is set to 1.
|
||||
This format supports schema inference: if the dump contains CREATE query for the specified table, the structure is extracted from it, otherwise schema is inferred from the data of INSERT queries.
|
||||
|
||||
Examples:
|
||||
|
@ -558,6 +558,8 @@ and if the value is not a number, ClickHouse treats it as a string.
|
||||
If you don't want ClickHouse to try to determine complex types using some parsers and heuristics, you can disable setting `input_format_csv_use_best_effort_in_schema_inference`
|
||||
and ClickHouse will treat all columns as Strings.
|
||||
|
||||
If setting `input_format_csv_detect_header` is enabled, ClickHouse will try to detect the header with column names (and maybe types) while inferring schema. This setting is enabled by default.
|
||||
|
||||
**Examples:**
|
||||
|
||||
Integers, Floats, Bools, Strings:
|
||||
@ -669,6 +671,61 @@ DESC format(CSV, '"[1,2,3]",42.42,Hello World!')
|
||||
└──────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||
```
|
||||
|
||||
Examples of header auto-detection (when `input_format_csv_detect_header` is enabled):

Only names:
```sql
SELECT * FROM format(CSV,
$$"number","string","array"
42,"Hello","[1, 2, 3]"
43,"World","[4, 5, 6]"
$$)
```

```response
┌─number─┬─string─┬─array───┐
│     42 │ Hello  │ [1,2,3] │
│     43 │ World  │ [4,5,6] │
└────────┴────────┴─────────┘
```

Names and types:

```sql
DESC format(CSV,
$$"number","string","array"
"UInt32","String","Array(UInt16)"
42,"Hello","[1, 2, 3]"
43,"World","[4, 5, 6]"
$$)
```

```response
┌─name───┬─type──────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ number │ UInt32        │              │                    │         │                  │                │
│ string │ String        │              │                    │         │                  │                │
│ array  │ Array(UInt16) │              │                    │         │                  │                │
└────────┴───────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

Note that the header can be detected only if there is at least one column with a non-String type. If all columns have String type, the header is not detected:

```sql
SELECT * FROM format(CSV,
$$"first_column","second_column"
"Hello","World"
"World","Hello"
$$)
```

```response
┌─c1───────────┬─c2────────────┐
│ first_column │ second_column │
│ Hello        │ World         │
│ World        │ Hello         │
└──────────────┴───────────────┘
```

## TSV/TSKV {#tsv-tskv}
|
||||
|
||||
In TSV/TSKV formats ClickHouse extracts column value from the row according to tabular delimiters and then parses extracted value using
|
||||
@ -677,6 +734,7 @@ the recursive parser to determine the most appropriate type. If the type cannot
|
||||
If you don't want ClickHouse to try to determine complex types using some parsers and heuristics, you can disable setting `input_format_tsv_use_best_effort_in_schema_inference`
|
||||
and ClickHouse will treat all columns as Strings.
|
||||
|
||||
If setting `input_format_tsv_detect_header` is enabled, ClickHouse will try to detect the header with column names (and maybe types) while inferring schema. This setting is enabled by default.
|
||||
|
||||
**Examples:**
|
||||
|
||||
@ -799,6 +857,61 @@ DESC format(TSV, '[1,2,3] 42.42 Hello World!')
|
||||
└──────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||
```
|
||||
|
||||
Examples of header auto-detection (when `input_format_tsv_detect_header` is enabled):

Only names:
```sql
SELECT * FROM format(TSV,
$$number	string	array
42	Hello	[1, 2, 3]
43	World	[4, 5, 6]
$$);
```

```response
┌─number─┬─string─┬─array───┐
│     42 │ Hello  │ [1,2,3] │
│     43 │ World  │ [4,5,6] │
└────────┴────────┴─────────┘
```

Names and types:

```sql
DESC format(TSV,
$$number	string	array
UInt32	String	Array(UInt16)
42	Hello	[1, 2, 3]
43	World	[4, 5, 6]
$$)
```

```response
┌─name───┬─type──────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ number │ UInt32        │              │                    │         │                  │                │
│ string │ String        │              │                    │         │                  │                │
│ array  │ Array(UInt16) │              │                    │         │                  │                │
└────────┴───────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
```

Note that the header can be detected only if there is at least one column with a non-String type. If all columns have String type, the header is not detected:

```sql
SELECT * FROM format(TSV,
$$first_column	second_column
Hello	World
World	Hello
$$)
```

```response
┌─c1───────────┬─c2────────────┐
│ first_column │ second_column │
│ Hello        │ World         │
│ World        │ Hello         │
└──────────────┴───────────────┘
```

## Values {#values}
|
||||
|
||||
In Values format ClickHouse extracts column value from the row and then parses it using
|
||||
@ -911,6 +1024,8 @@ DESC format(TSV, '[1,2,3] 42.42 Hello World!')
|
||||
In CustomSeparated format ClickHouse first extracts all column values from the row according to specified delimiters and then tries to infer
|
||||
the data type for each value according to escaping rule.
|
||||
|
||||
If setting `input_format_custom_detect_header` is enabled, ClickHouse will try to detect the header with column names (and maybe types) while inferring schema. This setting is enabled by default.
|
||||
|
||||
**Example**
|
||||
|
||||
```sql
|
||||
@ -937,6 +1052,34 @@ $$)
|
||||
└──────┴────────────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||
```
|
||||
|
||||
Example of header auto-detection (when `input_format_custom_detect_header` is enabled):

```sql
SET format_custom_row_before_delimiter = '<row_before_delimiter>',
    format_custom_row_after_delimiter = '<row_after_delimiter>\n',
    format_custom_row_between_delimiter = '<row_between_delimiter>\n',
    format_custom_result_before_delimiter = '<result_before_delimiter>\n',
    format_custom_result_after_delimiter = '<result_after_delimiter>\n',
    format_custom_field_delimiter = '<field_delimiter>',
    format_custom_escaping_rule = 'Quoted'

DESC format(CustomSeparated, $$<result_before_delimiter>
<row_before_delimiter>'number'<field_delimiter>'string'<field_delimiter>'array'<row_after_delimiter>
<row_between_delimiter>
<row_before_delimiter>42.42<field_delimiter>'Some string 1'<field_delimiter>[1, NULL, 3]<row_after_delimiter>
<row_between_delimiter>
<row_before_delimiter>NULL<field_delimiter>'Some string 3'<field_delimiter>[1, 2, NULL]<row_after_delimiter>
<result_after_delimiter>
$$)
```

```response
┌─number─┬─string────────┬─array──────┐
│  42.42 │ Some string 1 │ [1,NULL,3] │
│   ᴺᵁᴸᴸ │ Some string 3 │ [1,2,NULL] │
└────────┴───────────────┴────────────┘
```

## Template {#template}
|
||||
|
||||
In Template format ClickHouse first extracts all column values from the row according to the specified template and then tries to infer the
|
||||
@ -1193,7 +1336,7 @@ DESC format(JSONEachRow, $$
|
||||
└──────────┴──────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
|
||||
```
|
||||
|
||||
Note: Parsing datetimes during schema inference respect setting [date_time_input_format](/docs/en/operations/settings/settings.md#date_time_input_format)
|
||||
Note: Parsing datetimes during schema inference respect setting [date_time_input_format](/docs/en/operations/settings/settings-formats.md#date_time_input_format)
|
||||
|
||||
### input_format_try_infer_dates
|
||||
|
||||
|
@ -22,5 +22,6 @@ Additional cache types:
|
||||
- [Dictionaries](../sql-reference/dictionaries/index.md) data cache.
|
||||
- Schema inference cache.
|
||||
- [Filesystem cache](storing-data.md) over S3, Azure, Local and other disks.
|
||||
- [(Experimental) Query result cache](query-result-cache.md).
|
||||
|
||||
To drop one of the caches, use [SYSTEM DROP ... CACHE](../sql-reference/statements/system.md#drop-mark-cache) statements.
|
||||
|
99
docs/en/operations/query-result-cache.md
Normal file
99
docs/en/operations/query-result-cache.md
Normal file
@ -0,0 +1,99 @@
|
||||
---
|
||||
slug: /en/operations/query-result-cache
|
||||
sidebar_position: 65
|
||||
sidebar_label: Query Result Cache [experimental]
|
||||
---
|
||||
|
||||
# Query Result Cache [experimental]
|
||||
|
||||
The query result cache allows SELECT queries to be computed just once, with further executions of the same query served directly from the cache.
Depending on the type of the queries, this can dramatically reduce latency and resource consumption of the ClickHouse server.
|
||||
|
||||
## Background, Design and Limitations
|
||||
|
||||
Query result caches can generally be viewed as transactionally consistent or inconsistent.
|
||||
|
||||
- In transactionally consistent caches, the database invalidates (discards) cached query results if the result of the SELECT query changes
|
||||
or potentially changes. In ClickHouse, operations which change the data include inserts/updates/deletes in/of/from tables or collapsing
|
||||
merges. Transactionally consistent caching is especially suitable for OLTP databases, for example
|
||||
[MySQL](https://dev.mysql.com/doc/refman/5.6/en/query-cache.html) (which removed query result cache after v8.0) and
|
||||
[Oracle](https://docs.oracle.com/database/121/TGDBA/tune_result_cache.htm).
|
||||
- In transactionally inconsistent caches, slight inaccuracies in query results are accepted under the assumption that all cache entries are
|
||||
assigned a validity period after which they expire (e.g. 1 minute) and that the underlying data changes only little during this period.
|
||||
This approach is overall more suitable for OLAP databases. As an example where transactionally inconsistent caching is sufficient,
|
||||
consider an hourly sales report in a reporting tool which is simultaneously accessed by multiple users. Sales data changes typically
|
||||
slowly enough that the database only needs to compute the report once (represented by the first SELECT query). Further queries can be
|
||||
served directly from the query result cache. In this example, a reasonable validity period could be 30 min.
|
||||
|
||||
Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result,
|
||||
the same caching logic and configuration is often duplicated. With ClickHouse's query result cache, the caching logic moves to the server
|
||||
side. This reduces maintenance effort and avoids redundancy.
|
||||
|
||||
:::warning
|
||||
The query result cache is an experimental feature that should not be used in production. There are known cases (e.g. in distributed query
|
||||
processing) where wrong results are returned.
|
||||
:::
|
||||
|
||||
## Configuration Settings and Usage
|
||||
|
||||
Parameter [enable_experimental_query_result_cache](settings/settings.md#enable-experimental-query-result-cache) controls whether query
|
||||
results are inserted into / retrieved from the cache for the current query or session. For example, the first execution of query
|
||||
|
||||
``` sql
|
||||
SELECT some_expensive_calculation(column_1, column_2)
|
||||
FROM table
|
||||
SETTINGS enable_experimental_query_result_cache = true;
|
||||
```
|
||||
|
||||
stores the query result into the query result cache. Subsequent executions of the same query (also with parameter
|
||||
`enable_experimental_query_result_cache = true`) will read the computed result directly from the cache.
|
||||
|
||||
Sometimes, it is desirable to use the query result cache only passively, i.e. to allow reading from it but not writing into it (if the cache
|
||||
result is not stored yet). Parameter [enable_experimental_query_result_cache_passive_usage](settings/settings.md#enable-experimental-query-result-cache-passive-usage)
|
||||
can be used instead of `enable_experimental_query_result_cache` for that purpose.
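
For example, a passive-only variant of the query above (purely illustrative, reusing the placeholder names from that example) looks like this:

``` sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache_passive_usage = true;
```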
|
||||
|
||||
For maximum control, it is generally recommended to provide settings "enable_experimental_query_result_cache" or
|
||||
"enable_experimental_query_result_cache_passive_usage" only with specific queries. It is also possible to enable caching at user or profile
|
||||
level, but one should keep in mind that all SELECT queries may then return cached results, including monitoring or debugging queries to system
|
||||
tables.
|
||||
|
||||
The query result cache can be cleared using statement `SYSTEM DROP QUERY RESULT CACHE`. The content of the query result cache is displayed
|
||||
in system table `SYSTEM.QUERY_RESULT_CACHE`. The numbers of query result cache hits and misses are shown as events "QueryResultCacheHits" and
|
||||
"QueryResultCacheMisses" in system table `SYSTEM.EVENTS`. Both counters are only updated for SELECT queries which run with settings
|
||||
"enable_experimental_query_result_cache = true" or "enable_experimental_query_result_cache_passive_usage = true". Other queries do not
|
||||
affect the cache miss counter.
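
As a quick sketch, assuming the event names above, the hit and miss counters can be inspected directly:

``` sql
SELECT event, value
FROM system.events
WHERE event IN ('QueryResultCacheHits', 'QueryResultCacheMisses');
```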
|
||||
|
||||
The query result cache exists once per ClickHouse server process. However, cache results are by default not shared between users. This can
|
||||
be changed (see below) but doing so is not recommended for security reasons.
|
||||
|
||||
Query results are referenced in the query result cache by the [Abstract Syntax Tree (AST)](https://en.wikipedia.org/wiki/Abstract_syntax_tree)
|
||||
of their query. This means that caching is agnostic to upper/lowercase, for example `SELECT 1` and `select 1` are treated as the same query.
|
||||
To make the matching more natural, all query-level settings related to the query result cache are removed from the AST.
|
||||
|
||||
If the query was aborted due to an exception or user cancellation, no entry is written into the query result cache.
|
||||
|
||||
The size of the query result cache, the maximum number of cache entries and the maximum size of cache entries (in bytes and in records) can
|
||||
be configured using different [server configuration options](server-configuration-parameters/settings.md#server_configuration_parameters_query-result-cache).
|
||||
|
||||
To define how long a query must run at minimum for its result to be cached, you can use the setting
|
||||
[query_result_cache_min_query_duration](settings/settings.md#query-result-cache-min-query-duration). For example, the result of query
|
||||
|
||||
``` sql
|
||||
SELECT some_expensive_calculation(column_1, column_2)
|
||||
FROM table
|
||||
SETTINGS enable_experimental_query_result_cache = true, query_result_cache_min_query_duration = 5000;
|
||||
```
|
||||
|
||||
is only cached if the query runs longer than 5 seconds. It is also possible to specify how often a query needs to run until its result is
|
||||
cached; for that, use the setting [query_result_cache_min_query_runs](settings/settings.md#query-result-cache-min-query-runs).
|
||||
|
||||
Entries in the query result cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds but a
|
||||
different value can be specified at session, profile or query level using setting [query_result_cache_ttl](settings/settings.md#query-result-cache-ttl).
|
||||
|
||||
Also, results of queries with non-deterministic functions such as `rand()` and `now()` are not cached. This can be overruled using
|
||||
setting [query_result_cache_store_results_of_queries_with_nondeterministic_functions](settings/settings.md#query-result-cache-store-results-of-queries-with-nondeterministic-functions).
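
For example, a minimal sketch that opts a non-deterministic query into the cache (illustrative only):

``` sql
SELECT now(), rand()
SETTINGS enable_experimental_query_result_cache = true,
         query_result_cache_store_results_of_queries_with_nondeterministic_functions = true;
```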
|
||||
|
||||
Finally, entries in the query result cache are not shared between users, for security reasons. For example, user A must not be able to bypass a
|
||||
row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can
|
||||
be marked accessible by other users (i.e. shared) by supplying setting
|
||||
[query_result_cache_share_between_users](settings/settings.md#query-result-cache-share-between-users).
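
If sharing is really needed, a sketch (reusing the placeholder query from above, and again not recommended) would be:

``` sql
SELECT some_expensive_calculation(column_1, column_2)
FROM table
SETTINGS enable_experimental_query_result_cache = true,
         query_result_cache_share_between_users = true;
```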
|
@ -1270,6 +1270,32 @@ If the table does not exist, ClickHouse will create it. If the structure of the
|
||||
</query_log>
|
||||
```
|
||||
|
||||
## query_result_cache {#server_configuration_parameters_query-result-cache}
|
||||
|
||||
[Query result cache](../query-result-cache.md) configuration.
|
||||
|
||||
The following settings are available:
|
||||
|
||||
- `size`: The maximum cache size in bytes. 0 means the query result cache is disabled. Default value: `1073741824` (1 GiB).
|
||||
- `max_entries`: The maximum number of SELECT query results stored in the cache. Default value: `1024`.
|
||||
- `max_entry_size`: The maximum size in bytes a SELECT query result may have in order to be saved in the cache. Default value: `1048576` (1 MiB).
|
||||
- `max_entry_records`: The maximum number of records a SELECT query result may have in order to be saved in the cache. Default value: `30000000` (30 million).
|
||||
|
||||
:::warning
|
||||
Data for the query result cache is allocated in DRAM. If memory is scarce, make sure to set a small value for `size` or disable the query result cache altogether.
|
||||
:::
|
||||
|
||||
**Example**
|
||||
|
||||
```xml
|
||||
<query_result_cache>
|
||||
<size>1073741824</size>
|
||||
<max_entries>1024</max_entries>
|
||||
<max_entry_size>1048576</max_entry_size>
|
||||
<max_entry_records>30000000</max_entry_records>
|
||||
</query_result_cache>
|
||||
```
|
||||
|
||||
## query_thread_log {#server_configuration_parameters-query_thread_log}
|
||||
|
||||
Setting for logging threads of queries received with the [log_query_threads=1](../../operations/settings/settings.md#settings-log-query-threads) setting.
|
||||
|
@ -1,6 +1,6 @@
|
||||
---
|
||||
sidebar_label: Settings Overview
|
||||
sidebar_position: 51
|
||||
sidebar_position: 1
|
||||
slug: /en/operations/settings/
|
||||
pagination_next: en/operations/settings/settings
|
||||
---
|
||||
|
@ -106,14 +106,20 @@ Possible values:
|
||||
Default value: 1.
|
||||
|
||||
The delay (in milliseconds) for `INSERT` is calculated by the formula:
|
||||
|
||||
```code
|
||||
max_k = parts_to_throw_insert - parts_to_delay_insert
|
||||
k = 1 + parts_count_in_partition - parts_to_delay_insert
|
||||
delay_milliseconds = pow(max_delay_to_insert * 1000, k / max_k)
|
||||
```
|
||||
For example, if a partition has 299 active parts and parts_to_throw_insert = 300, parts_to_delay_insert = 150, max_delay_to_insert = 1, `INSERT` is delayed for `pow( 1 * 1000, (1 + 299 - 150) / (300 - 150) ) = 1000` milliseconds.
|
||||
|
||||
For example if a partition has 299 active parts and parts_to_throw_insert = 300, parts_to_delay_insert = 150, max_delay_to_insert = 1, `INSERT` is delayed for `pow( 1 * 1000, (1 + 299 - 150) / (300 - 150) ) = 1000` milliseconds.
|
||||
Starting from version 23.1, the formula has been changed to:
|
||||
```code
|
||||
allowed_parts_over_threshold = parts_to_throw_insert - parts_to_delay_insert
|
||||
parts_over_threshold = parts_count_in_partition - parts_to_delay_insert + 1
|
||||
delay_milliseconds = max(min_delay_to_insert_ms, (max_delay_to_insert * 1000) * parts_over_threshold / allowed_parts_over_threshold)
|
||||
```
|
||||
For example, if a partition has 224 active parts and parts_to_throw_insert = 300, parts_to_delay_insert = 150, max_delay_to_insert = 1, min_delay_to_insert_ms = 10, `INSERT` is delayed for `max( 10, 1 * 1000 * (224 - 150 + 1) / (300 - 150) ) = 500` milliseconds.
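
As a sanity check, the delay from this example can be reproduced with a one-line calculation (the numbers are just the illustrative values above):

``` sql
SELECT greatest(10, 1 * 1000 * (224 - 150 + 1) / (300 - 150)) AS delay_milliseconds;  -- 500
```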
|
||||
|
||||
## max_parts_in_total {#max-parts-in-total}
|
||||
|
||||
|
1486
docs/en/operations/settings/settings-formats.md
Normal file
File diff suppressed because it is too large
@ -9,6 +9,7 @@ Columns:
|
||||
|
||||
- `metric` ([String](../../sql-reference/data-types/string.md)) — Metric name.
|
||||
- `value` ([Float64](../../sql-reference/data-types/float.md)) — Metric value.
|
||||
- `description` ([String](../../sql-reference/data-types/string.md)) — Metric description.
|
||||
|
||||
**Example**
|
||||
|
||||
@ -17,18 +18,18 @@ SELECT * FROM system.asynchronous_metrics LIMIT 10
|
||||
```
|
||||
|
||||
``` text
|
||||
┌─metric──────────────────────────────────┬──────value─┐
|
||||
│ jemalloc.background_thread.run_interval │ 0 │
|
||||
│ jemalloc.background_thread.num_runs │ 0 │
|
||||
│ jemalloc.background_thread.num_threads │ 0 │
|
||||
│ jemalloc.retained │ 422551552 │
|
||||
│ jemalloc.mapped │ 1682989056 │
|
||||
│ jemalloc.resident │ 1656446976 │
|
||||
│ jemalloc.metadata_thp │ 0 │
|
||||
│ jemalloc.metadata │ 10226856 │
|
||||
│ UncompressedCacheCells │ 0 │
|
||||
│ MarkCacheFiles │ 0 │
|
||||
└─────────────────────────────────────────┴────────────┘
|
||||
┌─metric──────────────────────────────────┬──────value─┬─description────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||
│ AsynchronousMetricsCalculationTimeSpent │ 0.00179053 │ Time in seconds spent for calculation of asynchronous metrics (this is the overhead of asynchronous metrics). │
|
||||
│ NumberOfDetachedByUserParts │ 0 │ The total number of parts detached from MergeTree tables by users with the `ALTER TABLE DETACH` query (as opposed to unexpected, broken or ignored parts). The server does not care about detached parts and they can be removed. │
|
||||
│ NumberOfDetachedParts │ 0 │ The total number of parts detached from MergeTree tables. A part can be detached by a user with the `ALTER TABLE DETACH` query or by the server itself it the part is broken, unexpected or unneeded. The server does not care about detached parts and they can be removed. │
|
||||
│ TotalRowsOfMergeTreeTables │ 2781309 │ Total amount of rows (records) stored in all tables of MergeTree family. │
|
||||
│ TotalBytesOfMergeTreeTables │ 7741926 │ Total amount of bytes (compressed, including data and indices) stored in all tables of MergeTree family. │
|
||||
│ NumberOfTables │ 93 │ Total number of tables summed across the databases on the server, excluding the databases that cannot contain MergeTree tables. The excluded database engines are those who generate the set of tables on the fly, like `Lazy`, `MySQL`, `PostgreSQL`, `SQlite`. │
|
||||
│ NumberOfDatabases │ 6 │ Total number of databases on the server. │
|
||||
│ MaxPartCountForPartition │ 6 │ Maximum number of parts per partition across all partitions of all tables of MergeTree family. Values larger than 300 indicates misconfiguration, overload, or massive data loading. │
|
||||
│ ReplicasSumMergesInQueue │ 0 │ Sum of merge operations in the queue (still to be applied) across Replicated tables. │
|
||||
│ ReplicasSumInsertsInQueue │ 0 │ Sum of INSERT operations in the queue (still to be replicated) across Replicated tables. │
|
||||
└─────────────────────────────────────────┴────────────┴────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
@ -72,3 +72,10 @@ If procfs is supported and enabled on the system, ClickHouse server collects the
|
||||
- `OSWriteChars`
|
||||
- `OSReadBytes`
|
||||
- `OSWriteBytes`
|
||||
|
||||
## Related content
|
||||
|
||||
- Blog: [System Tables and a window into the internals of ClickHouse](https://clickhouse.com/blog/clickhouse-debugging-issues-with-system-tables)
|
||||
- Blog: [Essential monitoring queries - part 1 - INSERT queries](https://clickhouse.com/blog/monitoring-troubleshooting-insert-queries-clickhouse)
|
||||
- Blog: [Essential monitoring queries - part 2 - SELECT queries](https://clickhouse.com/blog/monitoring-troubleshooting-select-queries-clickhouse)
|
||||
|
||||
|
@ -54,7 +54,9 @@ Functions:
|
||||
|
||||
- [toLowCardinality](../../sql-reference/functions/type-conversion-functions.md#tolowcardinality)
|
||||
|
||||
## See Also
|
||||
## Related content
|
||||
|
||||
- [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/).
|
||||
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf).
|
||||
- [Reducing ClickHouse Storage Cost with the Low Cardinality Type – Lessons from an Instana Engineer](https://www.instana.com/blog/reducing-clickhouse-storage-cost-with-the-low-cardinality-type-lessons-from-an-instana-engineer/)
|
||||
- [String Optimization (video presentation in Russian)](https://youtu.be/rqf-ILRgBdY?list=PL0Z2YDlm0b3iwXCpEFiOOYmwXzVmjJfEt). [Slides in English](https://github.com/ClickHouse/clickhouse-presentations/raw/master/meetup19/string_optimization.pdf)
|
||||
- Blog: [Optimizing ClickHouse with Schemas and Codecs](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema)
|
||||
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
|
||||
|
@ -529,6 +529,7 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
|
||||
Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 second.
|
||||
E.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for `day` unit, 0 months for `month` unit, 0 years for `year` unit.
|
||||
|
||||
For an alternative to `age`, see function `date\_diff`.
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -600,8 +601,12 @@ Result:
|
||||
|
||||
## date\_diff
|
||||
|
||||
Returns the count of the specified `unit` boundaries crossed between the `startdate` and `enddate`.
|
||||
The difference is calculated using relative units, e.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for day unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for month unit (see [toRelativeMonthNum](#torelativemonthnum)), 1 year for year unit (see [toRelativeYearNum](#torelativeyearnum)).
|
||||
Returns the count of the specified `unit` boundaries crossed between the `startdate` and the `enddate`.
|
||||
The difference is calculated using relative units, e.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for unit `day` (see [toRelativeDayNum](#torelativedaynum)), 1 month for unit `month` (see [toRelativeMonthNum](#torelativemonthnum)) and 1 year for unit `year` (see [toRelativeYearNum](#torelativeyearnum)).
|
||||
|
||||
If unit `week` was specified, `date\_diff` assumes that weeks start on Monday. Note that this behavior is different from that of function `toWeek()` in which weeks start by default on Sunday.
|
||||
|
||||
For an alternative to `date\_diff`, see function `age`.
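
As a quick illustration of the boundary-crossing semantics described above:

``` sql
SELECT
    dateDiff('day',   toDate('2021-12-29'), toDate('2022-01-01')) AS days,   -- 3
    dateDiff('month', toDate('2021-12-29'), toDate('2022-01-01')) AS months, -- 1
    dateDiff('year',  toDate('2021-12-29'), toDate('2022-01-01')) AS years;  -- 1
```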
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -1582,3 +1587,8 @@ Result:
|
||||
│ 2020-01-01 │
|
||||
└────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Related content
|
||||
|
||||
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
|
||||
|
||||
|
@ -588,3 +588,6 @@ Result:
|
||||
│ aeca2A │
|
||||
└───────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Related content
|
||||
- Blog: [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse)
|
||||
|
@ -115,3 +115,7 @@ Returns the exclusive upper bound of the corresponding hopping window.
|
||||
hopEnd(bounds_tuple);
|
||||
hopEnd(time_attr, hop_interval, window_interval [, timezone]);
|
||||
```
|
||||
|
||||
## Related content
|
||||
|
||||
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
|
@ -1402,6 +1402,8 @@ The output value is a timestamp in UTC, not in the timezone of `DateTime64`.
|
||||
|
||||
```sql
|
||||
toUnixTimestamp64Milli(value)
|
||||
toUnixTimestamp64Micro(value)
|
||||
toUnixTimestamp64Nano(value)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
@ -1455,7 +1457,9 @@ Converts an `Int64` to a `DateTime64` value with fixed sub-second precision and
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
fromUnixTimestamp64Milli(value [, ti])
|
||||
fromUnixTimestamp64Milli(value [, timezone])
|
||||
fromUnixTimestamp64Micro(value [, timezone])
|
||||
fromUnixTimestamp64Nano(value [, timezone])
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
@ -158,8 +158,6 @@ For examples of columns TTL modifying, see [Column TTL](/docs/en/engines/table-e
|
||||
|
||||
If the `IF EXISTS` clause is specified, the query won’t return an error if the column does not exist.
|
||||
|
||||
The query also can change the order of the columns using `FIRST | AFTER` clause, see [ADD COLUMN](#alter_add-column) description.
|
||||
|
||||
When changing the type, values are converted as if the [toType](/docs/en/sql-reference/functions/type-conversion-functions.md) functions were applied to them. If only the default expression is changed, the query does not do anything complex, and is completed almost instantly.
|
||||
|
||||
Example:
|
||||
@ -170,6 +168,40 @@ ALTER TABLE visits MODIFY COLUMN browser Array(String)
|
||||
|
||||
Changing the column type is the only complex action – it changes the contents of files with data. For large tables, this may take a long time.
|
||||
|
||||
The query can also change the order of the columns using the `FIRST | AFTER` clause (see the [ADD COLUMN](#alter_add-column) description), but the column type is mandatory in this case.
|
||||
|
||||
Example:
|
||||
|
||||
```sql
|
||||
CREATE TABLE users (
|
||||
c1 Int16,
|
||||
c2 String
|
||||
) ENGINE = MergeTree
|
||||
ORDER BY c1;
|
||||
|
||||
DESCRIBE users;
|
||||
┌─name─┬─type───┬
|
||||
│ c1 │ Int16 │
|
||||
│ c2 │ String │
|
||||
└──────┴────────┴
|
||||
|
||||
ALTER TABLE users MODIFY COLUMN c2 String FIRST;
|
||||
|
||||
DESCRIBE users;
|
||||
┌─name─┬─type───┬
|
||||
│ c2 │ String │
|
||||
│ c1 │ Int16 │
|
||||
└──────┴────────┴
|
||||
|
||||
ALTER TABLE users ALTER COLUMN c2 TYPE String AFTER c1;
|
||||
|
||||
DESCRIBE users;
|
||||
┌─name─┬─type───┬
|
||||
│ c1 │ Int16 │
|
||||
│ c2 │ String │
|
||||
└──────┴────────┴
|
||||
```
|
||||
|
||||
The `ALTER` query is atomic. For MergeTree tables it is also lock-free.
|
||||
|
||||
The `ALTER` query for changing columns is replicated. The instructions are saved in ZooKeeper, then each replica applies them. All `ALTER` queries are run in the same order. The query waits for the appropriate actions to be completed on the other replicas. However, a query to change columns in a replicated table can be interrupted, and all actions will be performed asynchronously.
|
||||
|
@ -502,3 +502,9 @@ Result:
|
||||
│ t1 │ The temporary table │
|
||||
└──────┴─────────────────────┘
|
||||
```
|
||||
|
||||
|
||||
## Related content
|
||||
|
||||
- Blog: [Optimizing ClickHouse with Schemas and Codecs](https://clickhouse.com/blog/optimize-clickhouse-codecs-compression-schema)
|
||||
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
|
||||
|
@ -350,3 +350,7 @@ The window view is useful in the following scenarios:
|
||||
|
||||
* **Monitoring**: Aggregate and calculate the metrics logs by time, and output the results to a target table. The dashboard can use the target table as a source table.
|
||||
* **Analyzing**: Automatically aggregate and preprocess data in the time window. This can be useful when analyzing a large number of logs. The preprocessing eliminates repeated calculations in multiple queries and reduces query latency.
|
||||
|
||||
## Related Content
|
||||
|
||||
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
|
||||
|
@ -276,14 +276,12 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
|
||||
|
||||
With `indexes` = 1, the `Indexes` key is added. It contains an array of used indexes. Each index is described as JSON with `Type` key (a string `MinMax`, `Partition`, `PrimaryKey` or `Skip`) and optional keys:
|
||||
|
||||
- `Name` — An index name (for now, is used only for `Skip` index).
|
||||
- `Keys` — An array of columns used by the index.
|
||||
- `Condition` — A string with condition used.
|
||||
- `Description` — An index (for now, is used only for `Skip` index).
|
||||
- `Initial Parts` — A number of parts before the index is applied.
|
||||
- `Selected Parts` — A number of parts after the index is applied.
|
||||
- `Initial Granules` — A number of granules before the index is applied.
|
||||
- `Selected Granulesis` — A number of granules after the index is applied.
|
||||
- `Name` — The index name (currently only used for `Skip` indexes).
|
||||
- `Keys` — The array of columns used by the index.
|
||||
- `Condition` — The used condition.
|
||||
- `Description` — The index description (currently only used for `Skip` indexes).
|
||||
- `Parts` — The number of parts before/after the index is applied.
|
||||
- `Granules` — The number of granules before/after the index is applied.
|
||||
|
||||
Example:
|
||||
|
||||
@ -294,46 +292,36 @@ Example:
|
||||
"Type": "MinMax",
|
||||
"Keys": ["y"],
|
||||
"Condition": "(y in [1, +inf))",
|
||||
"Initial Parts": 5,
|
||||
"Selected Parts": 4,
|
||||
"Initial Granules": 12,
|
||||
"Selected Granules": 11
|
||||
"Parts": 5/4,
|
||||
"Granules": 12/11
|
||||
},
|
||||
{
|
||||
"Type": "Partition",
|
||||
"Keys": ["y", "bitAnd(z, 3)"],
|
||||
"Condition": "and((bitAnd(z, 3) not in [1, 1]), and((y in [1, +inf)), (bitAnd(z, 3) not in [1, 1])))",
|
||||
"Initial Parts": 4,
|
||||
"Selected Parts": 3,
|
||||
"Initial Granules": 11,
|
||||
"Selected Granules": 10
|
||||
"Parts": 4/3,
|
||||
"Granules": 11/10
|
||||
},
|
||||
{
|
||||
"Type": "PrimaryKey",
|
||||
"Keys": ["x", "y"],
|
||||
"Condition": "and((x in [11, +inf)), (y in [1, +inf)))",
|
||||
"Initial Parts": 3,
|
||||
"Selected Parts": 2,
|
||||
"Initial Granules": 10,
|
||||
"Selected Granules": 6
|
||||
"Parts": 3/2,
|
||||
"Granules": 10/6
|
||||
},
|
||||
{
|
||||
"Type": "Skip",
|
||||
"Name": "t_minmax",
|
||||
"Description": "minmax GRANULARITY 2",
|
||||
"Initial Parts": 2,
|
||||
"Selected Parts": 1,
|
||||
"Initial Granules": 6,
|
||||
"Selected Granules": 2
|
||||
"Parts": 2/1,
|
||||
"Granules": 6/2
|
||||
},
|
||||
{
|
||||
"Type": "Skip",
|
||||
"Name": "t_set",
|
||||
"Description": "set GRANULARITY 2",
|
||||
"Initial Parts": 1,
|
||||
"Selected Parts": 1,
|
||||
"Initial Granules": 2,
|
||||
"Selected Granules": 1
|
||||
"": 1/1,
|
||||
"Granules": 2/1
|
||||
}
|
||||
]
|
||||
```
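
For reference, output of this shape can be produced by a query along the following lines (`example_table`, its columns and its indexes are hypothetical, chosen to mirror the example above):

```sql
EXPLAIN json = 1, indexes = 1, description = 0
SELECT count()
FROM example_table
WHERE x >= 11 AND y >= 1
FORMAT TSVRaw;
```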
|
||||
|
@ -299,3 +299,8 @@ ARRAY JOIN nest AS n, arrayEnumerate(`nest.x`) AS num;
|
||||
## Implementation Details
|
||||
|
||||
The query execution order is optimized when running `ARRAY JOIN`. Although `ARRAY JOIN` must always be specified before the [WHERE](../../../sql-reference/statements/select/where.md)/[PREWHERE](../../../sql-reference/statements/select/prewhere.md) clause in a query, technically they can be performed in any order, unless result of `ARRAY JOIN` is used for filtering. The processing order is controlled by the query optimizer.
|
||||
|
||||
|
||||
## Related content
|
||||
|
||||
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
|
||||
|
@ -23,7 +23,9 @@ FROM table2
|
||||
```
|
||||
The condition could be any expression based on your requirements.
|
||||
|
||||
**Examples**
|
||||
## Examples
|
||||
|
||||
Here is a simple example that returns the numbers 1 to 10 that are _not_ a part of the numbers 3 to 8:
|
||||
|
||||
Query:
|
||||
|
||||
@ -33,7 +35,7 @@ SELECT number FROM numbers(1,10) EXCEPT SELECT number FROM numbers(3,6);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─number─┐
|
||||
│ 1 │
|
||||
│ 2 │
|
||||
@ -42,28 +44,109 @@ Result:
|
||||
└────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
`EXCEPT` and `INTERSECT` can often be used interchangeably with different Boolean logic, and they are both useful if you have two tables that share a common column (or columns). For example, suppose we have a few million rows of historical cryptocurrency data that contains trade prices and volume:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE t1(one String, two String, three String) ENGINE=Memory();
|
||||
CREATE TABLE t2(four String, five String, six String) ENGINE=Memory();
|
||||
```sql
|
||||
CREATE TABLE crypto_prices
|
||||
(
|
||||
trade_date Date,
|
||||
crypto_name String,
|
||||
volume Float32,
|
||||
price Float32,
|
||||
market_cap Float32,
|
||||
change_1_day Float32
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PRIMARY KEY (crypto_name, trade_date);
|
||||
|
||||
INSERT INTO t1 VALUES ('q', 'm', 'b'), ('s', 'd', 'f'), ('l', 'p', 'o'), ('s', 'd', 'f'), ('s', 'd', 'f'), ('k', 't', 'd'), ('l', 'p', 'o');
|
||||
INSERT INTO t2 VALUES ('q', 'm', 'b'), ('b', 'd', 'k'), ('s', 'y', 't'), ('s', 'd', 'f'), ('m', 'f', 'o'), ('k', 'k', 'd');
|
||||
INSERT INTO crypto_prices
|
||||
SELECT *
|
||||
FROM s3(
|
||||
'https://learn-clickhouse.s3.us-east-2.amazonaws.com/crypto_prices.csv',
|
||||
'CSVWithNames'
|
||||
);
|
||||
|
||||
SELECT * FROM t1 EXCEPT SELECT * FROM t2;
|
||||
SELECT * FROM crypto_prices
|
||||
WHERE crypto_name = 'Bitcoin'
|
||||
ORDER BY trade_date DESC
|
||||
LIMIT 10;
|
||||
```
|
||||
|
||||
```response
|
||||
┌─trade_date─┬─crypto_name─┬──────volume─┬────price─┬───market_cap─┬──change_1_day─┐
|
||||
│ 2020-11-02 │ Bitcoin │ 30771456000 │ 13550.49 │ 251119860000 │ -0.013585099 │
|
||||
│ 2020-11-01 │ Bitcoin │ 24453857000 │ 13737.11 │ 254569760000 │ -0.0031840964 │
|
||||
│ 2020-10-31 │ Bitcoin │ 30306464000 │ 13780.99 │ 255372070000 │ 0.017308505 │
|
||||
│ 2020-10-30 │ Bitcoin │ 30581486000 │ 13546.52 │ 251018150000 │ 0.008084608 │
|
||||
│ 2020-10-29 │ Bitcoin │ 56499500000 │ 13437.88 │ 248995320000 │ 0.012552661 │
|
||||
│ 2020-10-28 │ Bitcoin │ 35867320000 │ 13271.29 │ 245899820000 │ -0.02804481 │
|
||||
│ 2020-10-27 │ Bitcoin │ 33749879000 │ 13654.22 │ 252985950000 │ 0.04427984 │
|
||||
│ 2020-10-26 │ Bitcoin │ 29461459000 │ 13075.25 │ 242251000000 │ 0.0033826586 │
|
||||
│ 2020-10-25 │ Bitcoin │ 24406921000 │ 13031.17 │ 241425220000 │ -0.0058658565 │
|
||||
│ 2020-10-24 │ Bitcoin │ 24542319000 │ 13108.06 │ 242839880000 │ 0.013650347 │
|
||||
└────────────┴─────────────┴─────────────┴──────────┴──────────────┴───────────────┘
|
||||
```
|
||||
|
||||
Now suppose we have a table named `holdings` that contains a list of cryptocurrencies that we own, along with the number of coins:
|
||||
|
||||
```sql
|
||||
CREATE TABLE holdings
|
||||
(
|
||||
crypto_name String,
|
||||
quantity UInt64
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PRIMARY KEY (crypto_name);
|
||||
|
||||
INSERT INTO holdings VALUES
|
||||
('Bitcoin', 1000),
|
||||
('Bitcoin', 200),
|
||||
('Ethereum', 250),
|
||||
('Ethereum', 5000),
|
||||
('DOGEFI', 10),
|
||||
('Bitcoin Diamond', 5000);
|
||||
```
|
||||
|
||||
We can use `EXCEPT` to answer a question like **"Which of the coins we own have never traded below $10?"**:
|
||||
|
||||
```sql
|
||||
SELECT crypto_name FROM holdings
|
||||
EXCEPT
|
||||
SELECT crypto_name FROM crypto_prices
|
||||
WHERE price < 10;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─one─┬─two─┬─three─┐
|
||||
│ l │ p │ o │
|
||||
│ k │ t │ d │
|
||||
│ l │ p │ o │
|
||||
└─────┴─────┴───────┘
|
||||
```response
|
||||
┌─crypto_name─┐
|
||||
│ Bitcoin │
|
||||
│ Bitcoin │
|
||||
└─────────────┘
|
||||
```
|
||||
|
||||
This means that, of the four cryptocurrencies we own, only Bitcoin has never dropped below $10 (based on the limited data we have in this example).
|
||||
|
||||
## EXCEPT DISTINCT
|
||||
|
||||
Notice in the previous query we had multiple Bitcoin holdings in the result. You can add `DISTINCT` to `EXCEPT` to eliminate duplicate rows from the result:
|
||||
|
||||
```sql
|
||||
SELECT crypto_name FROM holdings
|
||||
EXCEPT DISTINCT
|
||||
SELECT crypto_name FROM crypto_prices
|
||||
WHERE price < 10;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─crypto_name─┐
|
||||
│ Bitcoin │
|
||||
└─────────────┘
|
||||
```
|
||||
|
||||
|
||||
**See Also**
|
||||
|
||||
- [UNION](union.md#union-clause)
|
||||
|
@ -24,17 +24,17 @@ FROM table2
|
||||
```
|
||||
The condition could be any expression based on your requirements.
|
||||
|
||||
**Examples**
|
||||
## Examples
|
||||
|
||||
Query:
|
||||
Here is a simple example that intersects the numbers 1 to 10 with the numbers 3 to 8:
|
||||
|
||||
``` sql
|
||||
```sql
|
||||
SELECT number FROM numbers(1,10) INTERSECT SELECT number FROM numbers(3,6);
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─number─┐
|
||||
│ 3 │
|
||||
│ 4 │
|
||||
@ -45,29 +45,112 @@ Result:
|
||||
└────────┘
|
||||
```
|
||||
|
||||
Query:
|
||||
`INTERSECT` is useful if you have two tables that share a common column (or columns). You can intersect the results of two queries, as long as the results contain the same columns. For example, suppose we have a few million rows of historical cryptocurrency data that contains trade prices and volume:
|
||||
|
||||
``` sql
|
||||
CREATE TABLE t1(one String, two String, three String) ENGINE=Memory();
|
||||
CREATE TABLE t2(four String, five String, six String) ENGINE=Memory();
|
||||
```sql
|
||||
CREATE TABLE crypto_prices
|
||||
(
|
||||
trade_date Date,
|
||||
crypto_name String,
|
||||
volume Float32,
|
||||
price Float32,
|
||||
market_cap Float32,
|
||||
change_1_day Float32
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PRIMARY KEY (crypto_name, trade_date);
|
||||
|
||||
INSERT INTO t1 VALUES ('q', 'm', 'b'), ('s', 'd', 'f'), ('l', 'p', 'o'), ('s', 'd', 'f'), ('s', 'd', 'f'), ('k', 't', 'd'), ('l', 'p', 'o');
|
||||
INSERT INTO t2 VALUES ('q', 'm', 'b'), ('b', 'd', 'k'), ('s', 'y', 't'), ('s', 'd', 'f'), ('m', 'f', 'o'), ('k', 'k', 'd');
|
||||
INSERT INTO crypto_prices
|
||||
SELECT *
|
||||
FROM s3(
|
||||
'https://learn-clickhouse.s3.us-east-2.amazonaws.com/crypto_prices.csv',
|
||||
'CSVWithNames'
|
||||
);
|
||||
|
||||
SELECT * FROM t1 INTERSECT SELECT * FROM t2;
|
||||
SELECT * FROM crypto_prices
|
||||
WHERE crypto_name = 'Bitcoin'
|
||||
ORDER BY trade_date DESC
|
||||
LIMIT 10;
|
||||
```
|
||||
|
||||
```response
|
||||
┌─trade_date─┬─crypto_name─┬──────volume─┬────price─┬───market_cap─┬──change_1_day─┐
|
||||
│ 2020-11-02 │ Bitcoin │ 30771456000 │ 13550.49 │ 251119860000 │ -0.013585099 │
|
||||
│ 2020-11-01 │ Bitcoin │ 24453857000 │ 13737.11 │ 254569760000 │ -0.0031840964 │
|
||||
│ 2020-10-31 │ Bitcoin │ 30306464000 │ 13780.99 │ 255372070000 │ 0.017308505 │
|
||||
│ 2020-10-30 │ Bitcoin │ 30581486000 │ 13546.52 │ 251018150000 │ 0.008084608 │
|
||||
│ 2020-10-29 │ Bitcoin │ 56499500000 │ 13437.88 │ 248995320000 │ 0.012552661 │
|
||||
│ 2020-10-28 │ Bitcoin │ 35867320000 │ 13271.29 │ 245899820000 │ -0.02804481 │
|
||||
│ 2020-10-27 │ Bitcoin │ 33749879000 │ 13654.22 │ 252985950000 │ 0.04427984 │
|
||||
│ 2020-10-26 │ Bitcoin │ 29461459000 │ 13075.25 │ 242251000000 │ 0.0033826586 │
|
||||
│ 2020-10-25 │ Bitcoin │ 24406921000 │ 13031.17 │ 241425220000 │ -0.0058658565 │
|
||||
│ 2020-10-24 │ Bitcoin │ 24542319000 │ 13108.06 │ 242839880000 │ 0.013650347 │
|
||||
└────────────┴─────────────┴─────────────┴──────────┴──────────────┴───────────────┘
|
||||
```
|
||||
|
||||
Now suppose we have a table named `holdings` that contains a list of cryptocurrencies that we own, along with the number of coins:
|
||||
|
||||
```sql
|
||||
CREATE TABLE holdings
|
||||
(
|
||||
crypto_name String,
|
||||
quantity UInt64
|
||||
)
|
||||
ENGINE = MergeTree
|
||||
PRIMARY KEY (crypto_name);
|
||||
|
||||
INSERT INTO holdings VALUES
|
||||
('Bitcoin', 1000),
|
||||
('Bitcoin', 200),
|
||||
('Ethereum', 250),
|
||||
('Ethereum', 5000),
|
||||
('DOGEFI', 10),
|
||||
('Bitcoin Diamond', 5000);
|
||||
```
|
||||
|
||||
We can use `INTERSECT` to answer questions like **"Which of the coins we own have traded at a price greater than $100?"**:
|
||||
|
||||
```sql
|
||||
SELECT crypto_name FROM holdings
|
||||
INTERSECT
|
||||
SELECT crypto_name FROM crypto_prices
|
||||
WHERE price > 100
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─one─┬─two─┬─three─┐
|
||||
│ q │ m │ b │
|
||||
│ s │ d │ f │
|
||||
│ s │ d │ f │
|
||||
│ s │ d │ f │
|
||||
└─────┴─────┴───────┘
|
||||
```response
|
||||
┌─crypto_name─┐
|
||||
│ Bitcoin │
|
||||
│ Bitcoin │
|
||||
│ Ethereum │
|
||||
│ Ethereum │
|
||||
└─────────────┘
|
||||
```
|
||||
|
||||
This means that, at some point in time, Bitcoin and Ethereum traded above $100, while DOGEFI and Bitcoin Diamond never have (at least based on the data we have in this example).
|
||||
|
||||
## INTERSECT DISTINCT
|
||||
|
||||
Notice in the previous query we had multiple Bitcoin and Ethereum holdings that traded above $100. It might be nice to remove duplicate rows (since they only repeat what we already know). You can add `DISTINCT` to `INTERSECT` to eliminate duplicate rows from the result:
|
||||
|
||||
```sql
|
||||
SELECT crypto_name FROM holdings
|
||||
INTERSECT DISTINCT
|
||||
SELECT crypto_name FROM crypto_prices
|
||||
WHERE price > 100;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
```response
|
||||
┌─crypto_name─┐
|
||||
│ Bitcoin │
|
||||
│ Ethereum │
|
||||
└─────────────┘
|
||||
```
|
||||
|
||||
|
||||
**See Also**
|
||||
|
||||
- [UNION](union.md#union-clause)
|
||||
|
@ -543,3 +543,7 @@ Result:
|
||||
│ 7 │ original │ 7 │
|
||||
└─────┴──────────┴───────┘
|
||||
```
|
||||
|
||||
## Related content
|
||||
|
||||
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
|
||||
|
@ -72,7 +72,7 @@ For more convenient (automatic) cache management, see disable_internal_dns_cache
|
||||
|
||||
## DROP MARK CACHE
|
||||
|
||||
Resets the mark cache. Used in development of ClickHouse and performance tests.
|
||||
Resets the mark cache.
|
||||
|
||||
## DROP REPLICA
|
||||
|
||||
@ -94,13 +94,18 @@ The fourth one is useful to remove metadata of dead replica when all other repli
|
||||
|
||||
## DROP UNCOMPRESSED CACHE
|
||||
|
||||
Reset the uncompressed data cache. Used in development of ClickHouse and performance tests.
|
||||
For manage uncompressed data cache parameters use following server level settings [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size) and query/user/profile level settings [use_uncompressed_cache](../../operations/settings/settings.md#setting-use_uncompressed_cache)
|
||||
Reset the uncompressed data cache.
|
||||
The uncompressed data cache is enabled/disabled with the query/user/profile-level setting [use_uncompressed_cache](../../operations/settings/settings.md#setting-use_uncompressed_cache).
|
||||
Its size can be configured using the server-level setting [uncompressed_cache_size](../../operations/server-configuration-parameters/settings.md#server-settings-uncompressed_cache_size).
|
||||
|
||||
## DROP COMPILED EXPRESSION CACHE
|
||||
|
||||
Reset the compiled expression cache. Used in development of ClickHouse and performance tests.
|
||||
Compiled expression cache used when query/user/profile enable option [compile-expressions](../../operations/settings/settings.md#compile-expressions)
|
||||
Reset the compiled expression cache.
|
||||
The compiled expression cache is enabled/disabled with the query/user/profile-level setting [compile_expressions](../../operations/settings/settings.md#compile-expressions).
|
||||
|
||||
## DROP QUERY RESULT CACHE
|
||||
|
||||
Resets the [query result cache](../../operations/query-result-cache.md).
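
For example, to clear it manually:

```sql
SYSTEM DROP QUERY RESULT CACHE;
```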
|
||||
|
||||
## FLUSH LOGS
|
||||
|
||||
|
@ -14,7 +14,7 @@ The `INSERT` query uses both parsers:
|
||||
INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')
|
||||
```
|
||||
|
||||
The `INSERT INTO t VALUES` fragment is parsed by the full parser, and the data `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` is parsed by the fast stream parser. You can also turn on the full parser for the data by using the [input_format_values_interpret_expressions](../operations/settings/settings.md#settings-input_format_values_interpret_expressions) setting. When `input_format_values_interpret_expressions = 1`, ClickHouse first tries to parse values with the fast stream parser. If it fails, ClickHouse tries to use the full parser for the data, treating it like an SQL [expression](#syntax-expressions).
|
||||
The `INSERT INTO t VALUES` fragment is parsed by the full parser, and the data `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` is parsed by the fast stream parser. You can also turn on the full parser for the data by using the [input_format_values_interpret_expressions](../operations/settings/settings-formats.md#settings-input_format_values_interpret_expressions) setting. When `input_format_values_interpret_expressions = 1`, ClickHouse first tries to parse values with the fast stream parser. If it fails, ClickHouse tries to use the full parser for the data, treating it like an SQL [expression](#syntax-expressions).
|
||||
|
||||
Data can have any format. When a query is received, the server calculates no more than [max_query_size](../operations/settings/settings.md#settings-max_query_size) bytes of the request in RAM (by default, 1 MB), and the rest is stream parsed.
|
||||
It allows for avoiding issues with large `INSERT` queries.
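
As a rough sketch (reusing the table `t` from the fragment above and assuming compatible column types), enabling the setting allows expressions inside the inserted data:

```sql
SET input_format_values_interpret_expressions = 1;
INSERT INTO t VALUES (1, 'Hello, world'), (1 + 1, upper('abc'));
```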
|
||||
|
@ -51,4 +51,7 @@ SELECT * FROM random;
|
||||
│ [] │ 68091.8197 │ ('2037-10-02 12:44:23.368','039ecab7-81c2-45ee-208c-844e5c6c5652') │
|
||||
│ [8,-83,0,-22,65,9,-30,28,64] │ -186233.4909 │ ('2062-01-11 00:06:04.124','69563ea1-5ad1-f870-16d8-67061da0df25') │
|
||||
└──────────────────────────────┴──────────────┴────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
```
|
||||
|
||||
## Related content
|
||||
- Blog: [Generating random data in ClickHouse](https://clickhouse.com/blog/generating-random-test-distribution-data-for-clickhouse)
|
||||
|
74
docs/en/sql-reference/table-functions/mongodb.md
Normal file
@ -0,0 +1,74 @@
|
||||
---
|
||||
slug: /en/sql-reference/table-functions/mongodb
|
||||
sidebar_position: 42
|
||||
sidebar_label: mongodb
|
||||
---
|
||||
|
||||
# mongodb
|
||||
|
||||
Allows `SELECT` queries to be performed on data that is stored on a remote MongoDB server.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
mongodb(host:port, database, collection, user, password, structure [, options])
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `host:port` — MongoDB server address.
|
||||
|
||||
- `database` — Remote database name.
|
||||
|
||||
- `collection` — Remote collection name.
|
||||
|
||||
- `user` — MongoDB user.
|
||||
|
||||
- `password` — User password.
|
||||
|
||||
- `structure` - The schema for the ClickHouse table returned from this function.
|
||||
|
||||
- `options` - MongoDB connection string options (optional parameter).
|
||||
|
||||
|
||||
**Returned Value**
|
||||
|
||||
A table object with the same columns as the original MongoDB table.
|
||||
|
||||
|
||||
**Examples**
|
||||
|
||||
Suppose we have a collection named `my_collection` defined in a MongoDB database named `test`, and we insert a couple of documents:
|
||||
|
||||
```javascript
|
||||
db.createUser({user:"test_user",pwd:"password",roles:[{role:"readWrite",db:"test"}]})
|
||||
|
||||
db.createCollection("my_collection")
|
||||
|
||||
db.my_collection.insertOne(
|
||||
{ log_type: "event", host: "120.5.33.9", command: "check-cpu-usage -w 75 -c 90" }
|
||||
)
|
||||
|
||||
db.my_collection.insertOne(
|
||||
{ log_type: "event", host: "120.5.33.4", command: "system-check"}
|
||||
)
|
||||
```
|
||||
|
||||
Let's query the collection using the `mongodb` table function:
|
||||
|
||||
```sql
|
||||
SELECT * FROM mongodb(
|
||||
'127.0.0.1:27017',
|
||||
'test',
|
||||
'my_collection',
|
||||
'test_user',
|
||||
'password',
|
||||
'log_type String, host String, command String',
|
||||
'connectTimeoutMS=10000'
|
||||
)
|
||||
```
|
||||
|
||||
**See Also**
|
||||
|
||||
- [The `MongoDB` table engine](../../engines/table-engines/integrations/mongodb.md)
|
||||
- [Using MongoDB as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources/#mongodb)
|
@ -131,3 +131,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32)
|
||||
|
||||
- [The PostgreSQL table engine](../../engines/table-engines/integrations/postgresql.md)
|
||||
- [Using PostgreSQL as a dictionary source](../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-sources.md#dicts-external_dicts_dict_sources-postgresql)
|
||||
|
||||
## Related content
|
||||
- Blog: [ClickHouse and PostgreSQL - a match made in data heaven - part 1](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres)
|
||||
|
@ -590,5 +590,6 @@ ORDER BY
|
||||
|
||||
## Related Content
|
||||
|
||||
- [Window and array functions for Git commit sequences](https://clickhouse.com/blog/clickhouse-window-array-functions-git-commits)
|
||||
- [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3)
|
||||
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
|
||||
- Blog: [Window and array functions for Git commit sequences](https://clickhouse.com/blog/clickhouse-window-array-functions-git-commits)
|
||||
- Blog: [Getting Data Into ClickHouse - Part 3 - Using S3](https://clickhouse.com/blog/getting-data-into-clickhouse-part-3-s3)
|
||||
|
@ -248,10 +248,8 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
|
||||
- `Keys` — массив столбцов, используемых индексом.
|
||||
- `Condition` — строка с используемым условием.
|
||||
- `Description` — индекс (на данный момент используется только для индекса `Skip`).
|
||||
- `Initial Parts` — количество кусков до применения индекса.
|
||||
- `Selected Parts` — количество кусков после применения индекса.
|
||||
- `Initial Granules` — количество гранул до применения индекса.
|
||||
- `Selected Granulesis` — количество гранул после применения индекса.
|
||||
- `Parts` — количество кусков до/после применения индекса.
|
||||
- `Granules` — количество гранул до/после применения индекса.
|
||||
|
||||
Пример:
|
||||
|
||||
@ -262,46 +260,36 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
|
||||
"Type": "MinMax",
|
||||
"Keys": ["y"],
|
||||
"Condition": "(y in [1, +inf))",
|
||||
"Initial Parts": 5,
|
||||
"Selected Parts": 4,
|
||||
"Initial Granules": 12,
|
||||
"Selected Granules": 11
|
||||
"Parts": 5/4,
|
||||
"Granules": 12/11
|
||||
},
|
||||
{
|
||||
"Type": "Partition",
|
||||
"Keys": ["y", "bitAnd(z, 3)"],
|
||||
"Condition": "and((bitAnd(z, 3) not in [1, 1]), and((y in [1, +inf)), (bitAnd(z, 3) not in [1, 1])))",
|
||||
"Initial Parts": 4,
|
||||
"Selected Parts": 3,
|
||||
"Initial Granules": 11,
|
||||
"Selected Granules": 10
|
||||
"Parts": 4/3,
|
||||
"Granules": 11/10
|
||||
},
|
||||
{
|
||||
"Type": "PrimaryKey",
|
||||
"Keys": ["x", "y"],
|
||||
"Condition": "and((x in [11, +inf)), (y in [1, +inf)))",
|
||||
"Initial Parts": 3,
|
||||
"Selected Parts": 2,
|
||||
"Initial Granules": 10,
|
||||
"Selected Granules": 6
|
||||
"Parts": 3/2,
|
||||
"Granules": 10/6
|
||||
},
|
||||
{
|
||||
"Type": "Skip",
|
||||
"Name": "t_minmax",
|
||||
"Description": "minmax GRANULARITY 2",
|
||||
"Initial Parts": 2,
|
||||
"Selected Parts": 1,
|
||||
"Initial Granules": 6,
|
||||
"Selected Granules": 2
|
||||
"Parts": 2/1,
|
||||
"Granules": 6/2
|
||||
},
|
||||
{
|
||||
"Type": "Skip",
|
||||
"Name": "t_set",
|
||||
"Description": "set GRANULARITY 2",
|
||||
"Initial Parts": 1,
|
||||
"Selected Parts": 1,
|
||||
"Initial Granules": 2,
|
||||
"Selected Granules": 1
|
||||
"": 1/1,
|
||||
"Granules": 2/1
|
||||
}
|
||||
]
|
||||
```
|
||||
|
@ -973,7 +973,7 @@ void Client::processOptions(const OptionsDescription & options_description,
|
||||
if (external_tables.back().file == "-")
|
||||
++number_of_external_tables_with_stdin_source;
|
||||
if (number_of_external_tables_with_stdin_source > 1)
|
||||
throw Exception("Two or more external tables has stdin (-) set as --file field", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Two or more external tables has stdin (-) set as --file field");
|
||||
}
|
||||
catch (const Exception & e)
|
||||
{
|
||||
@ -1026,7 +1026,7 @@ void Client::processOptions(const OptionsDescription & options_description,
|
||||
}
|
||||
|
||||
if (options.count("config-file") && options.count("config"))
|
||||
throw Exception("Two or more configuration files referenced in arguments", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Two or more configuration files referenced in arguments");
|
||||
|
||||
if (options.count("config"))
|
||||
config().setString("config-file", options["config"].as<std::string>());
|
||||
@ -1217,14 +1217,14 @@ void Client::readArguments(
|
||||
/// param_name value
|
||||
++arg_num;
|
||||
if (arg_num >= argc)
|
||||
throw Exception("Parameter requires value", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter requires value");
|
||||
arg = argv[arg_num];
|
||||
query_parameters.emplace(String(param_continuation), String(arg));
|
||||
}
|
||||
else
|
||||
{
|
||||
if (equal_pos == 0)
|
||||
throw Exception("Parameter name cannot be empty", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter name cannot be empty");
|
||||
|
||||
/// param_name=value
|
||||
query_parameters.emplace(param_continuation.substr(0, equal_pos), param_continuation.substr(equal_pos + 1));
|
||||
@ -1238,7 +1238,7 @@ void Client::readArguments(
|
||||
{
|
||||
++arg_num;
|
||||
if (arg_num >= argc)
|
||||
throw Exception("Host argument requires value", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Host argument requires value");
|
||||
arg = argv[arg_num];
|
||||
host_arg = "--host=";
|
||||
host_arg.append(arg);
|
||||
@ -1270,7 +1270,7 @@ void Client::readArguments(
|
||||
port_arg.push_back('=');
|
||||
++arg_num;
|
||||
if (arg_num >= argc)
|
||||
throw Exception("Port argument requires value", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Port argument requires value");
|
||||
arg = argv[arg_num];
|
||||
port_arg.append(arg);
|
||||
}
|
||||
|
@ -46,7 +46,7 @@ void checkAndWriteHeader(DB::ReadBuffer & in, DB::WriteBuffer & out)
|
||||
UInt32 size_compressed = unalignedLoad<UInt32>(&header[1]);
|
||||
|
||||
if (size_compressed > DBMS_MAX_COMPRESSED_SIZE)
|
||||
throw DB::Exception("Too large size_compressed. Most likely corrupted data.", DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED);
|
||||
throw DB::Exception(DB::ErrorCodes::TOO_LARGE_SIZE_COMPRESSED, "Too large size_compressed. Most likely corrupted data.");
|
||||
|
||||
UInt32 size_decompressed = unalignedLoad<UInt32>(&header[5]);
|
||||
|
||||
@ -113,10 +113,10 @@ int mainEntryClickHouseCompressor(int argc, char ** argv)
|
||||
codecs = options["codec"].as<std::vector<std::string>>();
|
||||
|
||||
if ((use_lz4hc || use_zstd || use_deflate_qpl || use_none) && !codecs.empty())
|
||||
throw Exception("Wrong options, codec flags like --zstd and --codec options are mutually exclusive", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, codec flags like --zstd and --codec options are mutually exclusive");
|
||||
|
||||
if (!codecs.empty() && options.count("level"))
|
||||
throw Exception("Wrong options, --level is not compatible with --codec list", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Wrong options, --level is not compatible with --codec list");
|
||||
|
||||
std::string method_family = "LZ4";
|
||||
|
||||
|
@ -77,7 +77,7 @@ decltype(auto) ClusterCopier::retry(T && func, UInt64 max_tries)
|
||||
std::exception_ptr exception;
|
||||
|
||||
if (max_tries == 0)
|
||||
throw Exception("Cannot perform zero retries", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot perform zero retries");
|
||||
|
||||
for (UInt64 try_number = 1; try_number <= max_tries; ++try_number)
|
||||
{
|
||||
@ -123,7 +123,7 @@ void ClusterCopier::discoverShardPartitions(const ConnectionTimeouts & timeouts,
|
||||
}
|
||||
catch (Exception & e)
|
||||
{
|
||||
throw Exception("Partition " + partition_text_quoted + " has incorrect format. " + e.displayText(), ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Partition {} has incorrect format. {}", partition_text_quoted, e.displayText());
|
||||
}
|
||||
};
|
||||
|
||||
@ -325,8 +325,8 @@ void ClusterCopier::process(const ConnectionTimeouts & timeouts)
|
||||
|
||||
if (!table_is_done)
|
||||
{
|
||||
throw Exception("Too many tries to process table " + task_table.table_id + ". Abort remaining execution",
|
||||
ErrorCodes::UNFINISHED);
|
||||
throw Exception(ErrorCodes::UNFINISHED, "Too many tries to process table {}. Abort remaining execution",
|
||||
task_table.table_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -666,7 +666,7 @@ TaskStatus ClusterCopier::tryMoveAllPiecesToDestinationTable(const TaskTable & t
|
||||
}
|
||||
|
||||
if (inject_fault)
|
||||
throw Exception("Copy fault injection is activated", ErrorCodes::UNFINISHED);
|
||||
throw Exception(ErrorCodes::UNFINISHED, "Copy fault injection is activated");
|
||||
}
|
||||
|
||||
/// Create node to signal that we finished moving
|
||||
@ -753,7 +753,7 @@ std::shared_ptr<ASTCreateQuery> rewriteCreateQueryStorage(const ASTPtr & create_
|
||||
auto res = std::make_shared<ASTCreateQuery>(create);
|
||||
|
||||
if (create.storage == nullptr || new_storage_ast == nullptr)
|
||||
throw Exception("Storage is not specified", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Storage is not specified");
|
||||
|
||||
res->setDatabase(new_table.first);
|
||||
res->setTable(new_table.second);
|
||||
@ -775,7 +775,7 @@ bool ClusterCopier::tryDropPartitionPiece(
|
||||
const CleanStateClock & clean_state_clock)
|
||||
{
|
||||
if (is_safe_mode)
|
||||
throw Exception("DROP PARTITION is prohibited in safe mode", ErrorCodes::NOT_IMPLEMENTED);
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "DROP PARTITION is prohibited in safe mode");
|
||||
|
||||
TaskTable & task_table = task_partition.task_shard.task_table;
|
||||
ShardPartitionPiece & partition_piece = task_partition.pieces[current_piece_number];
|
||||
@ -944,7 +944,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
|
||||
for (const String & partition_name : task_table.ordered_partition_names)
|
||||
{
|
||||
if (!task_table.cluster_partitions.contains(partition_name))
|
||||
throw Exception("There are no expected partition " + partition_name + ". It is a bug", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no expected partition {}. It is a bug", partition_name);
|
||||
|
||||
ClusterPartition & cluster_partition = task_table.cluster_partitions[partition_name];
|
||||
|
||||
@ -1006,7 +1006,7 @@ bool ClusterCopier::tryProcessTable(const ConnectionTimeouts & timeouts, TaskTab
|
||||
/// Previously when we discovered that shard does not contain current partition, we skipped it.
|
||||
/// At this moment partition have to be present.
|
||||
if (it_shard_partition == shard->partition_tasks.end())
|
||||
throw Exception("There are no such partition in a shard. This is a bug.", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no such partition in a shard. This is a bug.");
|
||||
auto & partition = it_shard_partition->second;
|
||||
|
||||
expected_shards.emplace_back(shard);
|
||||
@ -1587,7 +1587,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
|
||||
auto cancel_check = [&] ()
|
||||
{
|
||||
if (zookeeper->expired())
|
||||
throw Exception("ZooKeeper session is expired, cancel INSERT SELECT", ErrorCodes::UNFINISHED);
|
||||
throw Exception(ErrorCodes::UNFINISHED, "ZooKeeper session is expired, cancel INSERT SELECT");
|
||||
|
||||
if (!future_is_dirty_checker.valid())
|
||||
future_is_dirty_checker = zookeeper->asyncExists(piece_is_dirty_flag_path);
|
||||
@ -1603,7 +1603,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
|
||||
LogicalClock dirt_discovery_epoch (status.stat.mzxid);
|
||||
if (dirt_discovery_epoch == clean_state_clock.discovery_zxid)
|
||||
return false;
|
||||
throw Exception("Partition is dirty, cancel INSERT SELECT", ErrorCodes::UNFINISHED);
|
||||
throw Exception(ErrorCodes::UNFINISHED, "Partition is dirty, cancel INSERT SELECT");
|
||||
}
|
||||
}
|
||||
|
||||
@ -1646,7 +1646,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
|
||||
future_is_dirty_checker.get();
|
||||
|
||||
if (inject_fault)
|
||||
throw Exception("Copy fault injection is activated", ErrorCodes::UNFINISHED);
|
||||
throw Exception(ErrorCodes::UNFINISHED, "Copy fault injection is activated");
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
|
@ -90,9 +90,7 @@ ASTPtr extractPartitionKey(const ASTPtr & storage_ast)
|
||||
|
||||
if (!endsWith(engine.name, "MergeTree"))
|
||||
{
|
||||
throw Exception(
|
||||
"Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str);
|
||||
}
|
||||
|
||||
if (isExtendedDefinitionStorage(storage_ast))
|
||||
@ -109,14 +107,13 @@ ASTPtr extractPartitionKey(const ASTPtr & storage_ast)
|
||||
size_t min_args = is_replicated ? 3 : 1;
|
||||
|
||||
if (!engine.arguments)
|
||||
throw Exception("Expected arguments in " + storage_str, ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected arguments in {}", storage_str);
|
||||
|
||||
ASTPtr arguments_ast = engine.arguments->clone();
|
||||
ASTs & arguments = arguments_ast->children;
|
||||
|
||||
if (arguments.size() < min_args)
|
||||
throw Exception("Expected at least " + toString(min_args) + " arguments in " + storage_str,
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expected at least {} arguments in {}", min_args, storage_str);
|
||||
|
||||
ASTPtr & month_arg = is_replicated ? arguments[2] : arguments[1];
|
||||
return makeASTFunction("toYYYYMM", month_arg->clone());
|
||||
@ -132,14 +129,12 @@ ASTPtr extractPrimaryKey(const ASTPtr & storage_ast)
|
||||
|
||||
if (!endsWith(engine.name, "MergeTree"))
|
||||
{
|
||||
throw Exception("Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str);
|
||||
}
|
||||
|
||||
if (!isExtendedDefinitionStorage(storage_ast))
|
||||
{
|
||||
throw Exception("Is not extended deginition storage " + storage_str + " Will be fixed later.",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Is not extended deginition storage {} Will be fixed later.", storage_str);
|
||||
}
|
||||
|
||||
if (storage.primary_key)
|
||||
@ -158,20 +153,18 @@ ASTPtr extractOrderBy(const ASTPtr & storage_ast)
|
||||
|
||||
if (!endsWith(engine.name, "MergeTree"))
|
||||
{
|
||||
throw Exception("Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str);
|
||||
}
|
||||
|
||||
if (!isExtendedDefinitionStorage(storage_ast))
|
||||
{
|
||||
throw Exception("Is not extended deginition storage " + storage_str + " Will be fixed later.",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Is not extended deginition storage {} Will be fixed later.", storage_str);
|
||||
}
|
||||
|
||||
if (storage.order_by)
|
||||
return storage.order_by->clone();
|
||||
|
||||
throw Exception("ORDER BY cannot be empty", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "ORDER BY cannot be empty");
|
||||
}
|
||||
|
||||
/// Wraps only identifiers with backticks.
|
||||
@ -191,7 +184,7 @@ std::string wrapIdentifiersWithBackticks(const ASTPtr & root)
|
||||
return boost::algorithm::join(function_arguments, ", ");
|
||||
}
|
||||
|
||||
throw Exception("Primary key could be represented only as columns or functions from columns.", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key could be represented only as columns or functions from columns.");
|
||||
}
|
||||
|
||||
|
||||
@ -210,9 +203,9 @@ Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast)
|
||||
size_t sorting_key_size = sorting_key_expr_list->children.size();
|
||||
|
||||
if (primary_key_size > sorting_key_size)
|
||||
throw Exception("Primary key must be a prefix of the sorting key, but its length: "
|
||||
+ toString(primary_key_size) + " is greater than the sorting key length: " + toString(sorting_key_size),
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key must be a prefix of the sorting key, but its length: "
|
||||
"{} is greater than the sorting key length: {}",
|
||||
primary_key_size, sorting_key_size);
|
||||
|
||||
Names primary_key_columns;
|
||||
NameSet primary_key_columns_set;
|
||||
@ -228,12 +221,12 @@ Names extractPrimaryKeyColumnNames(const ASTPtr & storage_ast)
|
||||
{
|
||||
String pk_column = primary_key_expr_list->children[i]->getColumnName();
|
||||
if (pk_column != sorting_key_column)
|
||||
throw Exception("Primary key must be a prefix of the sorting key, but the column in the position "
|
||||
+ toString(i) + " is " + sorting_key_column +", not " + pk_column,
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||
"Primary key must be a prefix of the sorting key, "
|
||||
"but the column in the position {} is {}, not {}", i, sorting_key_column, pk_column);
|
||||
|
||||
if (!primary_key_columns_set.emplace(pk_column).second)
|
||||
throw Exception("Primary key contains duplicate columns", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Primary key contains duplicate columns");
|
||||
|
||||
primary_key_columns.push_back(wrapIdentifiersWithBackticks(primary_key_expr_list->children[i]));
|
||||
}
|
||||
@ -250,9 +243,7 @@ bool isReplicatedTableEngine(const ASTPtr & storage_ast)
|
||||
if (!endsWith(engine.name, "MergeTree"))
|
||||
{
|
||||
String storage_str = queryToString(storage_ast);
|
||||
throw Exception(
|
||||
"Unsupported engine was specified in " + storage_str + ", only *MergeTree engines are supported",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unsupported engine was specified in {}, only *MergeTree engines are supported", storage_str);
|
||||
}
|
||||
|
||||
return startsWith(engine.name, "Replicated");
|
||||
|
@ -119,7 +119,7 @@ struct TaskStateWithOwner
|
||||
rb >> state >> "\n" >> escape >> res.owner;
|
||||
|
||||
if (state >= static_cast<int>(TaskState::Unknown))
|
||||
throw Exception("Unknown state " + data, ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown state {}", data);
|
||||
|
||||
res.state = static_cast<TaskState>(state);
|
||||
return res;
|
||||
|
@ -19,7 +19,7 @@ void DB::TaskCluster::loadTasks(const Poco::Util::AbstractConfiguration & config
|
||||
|
||||
clusters_prefix = prefix + "remote_servers";
|
||||
if (!config.has(clusters_prefix))
|
||||
throw Exception("You should specify list of clusters in " + clusters_prefix, ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "You should specify list of clusters in {}", clusters_prefix);
|
||||
|
||||
Poco::Util::AbstractConfiguration::Keys tables_keys;
|
||||
config.keys(prefix + "tables", tables_keys);
|
||||
|
@ -102,7 +102,7 @@ TaskTable::TaskTable(TaskCluster & parent, const Poco::Util::AbstractConfigurati
|
||||
for (const String &key : keys)
|
||||
{
|
||||
if (!startsWith(key, "partition"))
|
||||
throw Exception("Unknown key " + key + " in " + enabled_partitions_prefix, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
|
||||
throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "Unknown key {} in {}", key, enabled_partitions_prefix);
|
||||
|
||||
enabled_partitions.emplace_back(config.getString(enabled_partitions_prefix + "." + key));
|
||||
}
|
||||
@ -213,8 +213,7 @@ ClusterPartition & TaskTable::getClusterPartition(const String & partition_name)
|
||||
{
|
||||
auto it = cluster_partitions.find(partition_name);
|
||||
if (it == cluster_partitions.end())
|
||||
throw Exception("There are no cluster partition " + partition_name + " in " + table_id,
|
||||
ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "There are no cluster partition {} in {}", partition_name, table_id);
|
||||
return it->second;
|
||||
}
|
||||
|
||||
|
@ -44,7 +44,7 @@ public:
|
||||
if (command_arguments.size() != 2)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name_from = config.getString("diskFrom", config.getString("disk", "default"));
|
||||
|
@ -33,7 +33,7 @@ public:
|
||||
if (command_arguments.size() != 2)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -40,7 +40,7 @@ public:
|
||||
if (command_arguments.size() != 1)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -32,7 +32,7 @@ public:
|
||||
if (!command_arguments.empty())
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
for (const auto & [disk_name, _] : global_context->getDisksMap())
|
||||
|
@ -41,7 +41,7 @@ public:
|
||||
if (command_arguments.size() != 1)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -32,7 +32,7 @@ public:
|
||||
if (command_arguments.size() != 2)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -43,7 +43,7 @@ public:
|
||||
if (command_arguments.size() != 1)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -32,7 +32,7 @@ public:
|
||||
if (command_arguments.size() != 1)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -44,7 +44,7 @@ public:
|
||||
if (command_arguments.size() != 1)
|
||||
{
|
||||
printHelpMessage();
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
String disk_name = config.getString("disk", "default");
|
||||
|
@ -119,7 +119,7 @@ void DisksApp::init(std::vector<String> & common_arguments)
|
||||
{
|
||||
std::cerr << "Unknown command name: " << command_name << "\n";
|
||||
printHelpMessage(options_description);
|
||||
throw DB::Exception("Bad Arguments", DB::ErrorCodes::BAD_ARGUMENTS);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
processOptions();
|
||||
|
@ -1160,7 +1160,7 @@ void processLog(const Options & options)
|
||||
/// Will run multiple processes in parallel
|
||||
size_t num_threads = options.threads;
|
||||
if (num_threads == 0)
|
||||
throw Exception("num-threads cannot be zero", ErrorCodes::INCORRECT_DATA);
|
||||
throw Exception(ErrorCodes::INCORRECT_DATA, "num-threads cannot be zero");
|
||||
|
||||
std::vector<std::unique_ptr<ShellCommand>> show_commands(num_threads);
|
||||
for (size_t i = 0; i < num_commits && i < num_threads; ++i)
|
||||
|
@ -484,8 +484,7 @@ try
|
||||
config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC), true), server_pool, socket));
|
||||
#else
|
||||
UNUSED(port);
|
||||
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
|
||||
ErrorCodes::SUPPORT_IS_DISABLED};
|
||||
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
|
||||
#endif
|
||||
});
|
||||
|
||||
|
@ -169,7 +169,7 @@ std::vector<PODArray<char>> placeStringColumns(const ColumnRawPtrs & columns, si
|
||||
else if (const auto * column_fixed_string = typeid_cast<const ColumnFixedString *>(column))
|
||||
data.push_back(placeFixedStringColumn(*column_fixed_string, buffer + i, size));
|
||||
else
|
||||
throw Exception("Cannot place string column.", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot place string column.");
|
||||
}
|
||||
|
||||
return data;
|
||||
|
@ -32,7 +32,7 @@ ExternalDictionaryLibraryHandler::ExternalDictionaryLibraryHandler(
|
||||
if (lib_new)
|
||||
lib_data = lib_new(&settings_holder->strings, ExternalDictionaryLibraryAPI::log);
|
||||
else
|
||||
throw Exception("Method extDict_libNew failed", ErrorCodes::EXTERNAL_LIBRARY_ERROR);
|
||||
throw Exception(ErrorCodes::EXTERNAL_LIBRARY_ERROR, "Method extDict_libNew failed");
|
||||
}
|
||||
|
||||
|
||||
@ -173,22 +173,21 @@ Block ExternalDictionaryLibraryHandler::loadKeys(const Columns & key_columns)
|
||||
Block ExternalDictionaryLibraryHandler::dataToBlock(ExternalDictionaryLibraryAPI::RawClickHouseLibraryTable data)
|
||||
{
|
||||
if (!data)
|
||||
throw Exception("LibraryDictionarySource: No data returned", ErrorCodes::EXTERNAL_LIBRARY_ERROR);
|
||||
throw Exception(ErrorCodes::EXTERNAL_LIBRARY_ERROR, "LibraryDictionarySource: No data returned");
|
||||
|
||||
const auto * columns_received = static_cast<const ExternalDictionaryLibraryAPI::Table *>(data);
|
||||
if (columns_received->error_code)
|
||||
throw Exception(
|
||||
"LibraryDictionarySource: Returned error: " + std::to_string(columns_received->error_code) + " " + (columns_received->error_string ? columns_received->error_string : ""),
|
||||
ErrorCodes::EXTERNAL_LIBRARY_ERROR);
|
||||
throw Exception(ErrorCodes::EXTERNAL_LIBRARY_ERROR, "LibraryDictionarySource: Returned error: {} {}",
|
||||
std::to_string(columns_received->error_code), (columns_received->error_string ? columns_received->error_string : ""));
|
||||
|
||||
MutableColumns columns = sample_block.cloneEmptyColumns();
|
||||
|
||||
for (size_t col_n = 0; col_n < columns_received->size; ++col_n)
|
||||
{
|
||||
if (columns.size() != columns_received->data[col_n].size)
|
||||
throw Exception(
|
||||
"LibraryDictionarySource: Returned unexpected number of columns: " + std::to_string(columns_received->data[col_n].size) + ", must be " + std::to_string(columns.size()),
|
||||
ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH);
|
||||
throw Exception(ErrorCodes::SIZES_OF_COLUMNS_DOESNT_MATCH, "LibraryDictionarySource: "
|
||||
"Returned unexpected number of columns: {}, must be {}",
|
||||
columns_received->data[col_n].size, columns.size());
|
||||
|
||||
for (size_t row_n = 0; row_n < columns_received->data[col_n].size; ++row_n)
|
||||
{
|
||||
|
@ -359,7 +359,7 @@ void LocalServer::setupUsers()
|
||||
if (users_config)
|
||||
global_context->setUsersConfig(users_config);
|
||||
else
|
||||
throw Exception("Can't load config for users", ErrorCodes::CANNOT_LOAD_CONFIG);
|
||||
throw Exception(ErrorCodes::CANNOT_LOAD_CONFIG, "Can't load config for users");
|
||||
}
|
||||
|
||||
void LocalServer::connect()
|
||||
@ -489,7 +489,7 @@ void LocalServer::processConfig()
|
||||
if (is_interactive && !delayed_interactive)
|
||||
{
|
||||
if (config().has("query") && config().has("queries-file"))
|
||||
throw Exception("Specify either `query` or `queries-file` option", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Specify either `query` or `queries-file` option");
|
||||
|
||||
if (config().has("multiquery"))
|
||||
is_multiquery = true;
|
||||
|
@ -880,7 +880,7 @@ public:
|
||||
}
|
||||
|
||||
if (!it)
|
||||
throw Exception("Logical error in markov model", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Logical error in markov model");
|
||||
|
||||
size_t offset_from_begin_of_string = pos - data;
|
||||
size_t determinator_sliding_window_size = params.determinator_sliding_window_size;
|
||||
@ -1139,7 +1139,7 @@ public:
|
||||
if (const auto * type = typeid_cast<const DataTypeNullable *>(&data_type))
|
||||
return std::make_unique<NullableModel>(get(*type->getNestedType(), seed, markov_model_params));
|
||||
|
||||
throw Exception("Unsupported data type", ErrorCodes::NOT_IMPLEMENTED);
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Unsupported data type");
|
||||
}
|
||||
};
|
||||
|
||||
@ -1384,7 +1384,7 @@ try
|
||||
UInt8 version = 0;
|
||||
readBinary(version, model_in);
|
||||
if (version != 0)
|
||||
throw Exception("Unknown version of the model file", ErrorCodes::UNKNOWN_FORMAT_VERSION);
|
||||
throw Exception(ErrorCodes::UNKNOWN_FORMAT_VERSION, "Unknown version of the model file");
|
||||
|
||||
readBinary(source_rows, model_in);
|
||||
|
||||
@ -1392,14 +1392,14 @@ try
|
||||
size_t header_size = 0;
|
||||
readBinary(header_size, model_in);
|
||||
if (header_size != data_types.size())
|
||||
throw Exception("The saved model was created for different number of columns", ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS);
|
||||
throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "The saved model was created for different number of columns");
|
||||
|
||||
for (size_t i = 0; i < header_size; ++i)
|
||||
{
|
||||
String type;
|
||||
readBinary(type, model_in);
|
||||
if (type != data_types[i])
|
||||
throw Exception("The saved model was created for different types of columns", ErrorCodes::TYPE_MISMATCH);
|
||||
throw Exception(ErrorCodes::TYPE_MISMATCH, "The saved model was created for different types of columns");
|
||||
}
|
||||
|
||||
obfuscator.deserialize(model_in);
|
||||
|
@ -181,7 +181,7 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ
|
||||
}
|
||||
|
||||
if (columns.empty())
|
||||
throw Exception("Columns definition was not returned", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Columns definition was not returned");
|
||||
|
||||
WriteBufferFromHTTPServerResponse out(
|
||||
response,
|
||||
|
@ -163,7 +163,7 @@ void ODBCSource::insertValue(
|
||||
break;
|
||||
}
|
||||
default:
|
||||
throw Exception("Unsupported value type", ErrorCodes::UNKNOWN_TYPE);
|
||||
throw Exception(ErrorCodes::UNKNOWN_TYPE, "Unsupported value type");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -151,7 +151,7 @@ public:
|
||||
auto connection_available = pool->tryBorrowObject(connection, []() { return nullptr; }, ODBC_POOL_WAIT_TIMEOUT);
|
||||
|
||||
if (!connection_available)
|
||||
throw Exception("Unable to fetch connection within the timeout", ErrorCodes::NO_FREE_CONNECTION);
|
||||
throw Exception(ErrorCodes::NO_FREE_CONNECTION, "Unable to fetch connection within the timeout");
|
||||
|
||||
try
|
||||
{
|
||||
|
@ -44,7 +44,8 @@ IdentifierQuotingStyle getQuotingStyle(nanodbc::ConnectionHolderPtr connection)
|
||||
else if (identifier_quote[0] == '"')
|
||||
return IdentifierQuotingStyle::DoubleQuotes;
|
||||
else
|
||||
throw Exception("Can not map quote identifier '" + identifier_quote + "' to IdentifierQuotingStyle value", ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||
"Can not map quote identifier '{}' to IdentifierQuotingStyle value", identifier_quote);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -38,10 +38,10 @@ std::string validateODBCConnectionString(const std::string & connection_string)
|
||||
static constexpr size_t MAX_CONNECTION_STRING_SIZE = 1000;
|
||||
|
||||
if (connection_string.empty())
|
||||
throw Exception("ODBC connection string cannot be empty", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string cannot be empty");
|
||||
|
||||
if (connection_string.size() >= MAX_CONNECTION_STRING_SIZE)
|
||||
throw Exception("ODBC connection string is too long", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string is too long");
|
||||
|
||||
const char * pos = connection_string.data();
|
||||
const char * end = pos + connection_string.size();
|
||||
@ -51,7 +51,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
|
||||
while (pos < end && isWhitespaceASCII(*pos))
|
||||
{
|
||||
if (*pos != ' ')
|
||||
throw Exception("ODBC connection string parameter contains unusual whitespace character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter contains unusual whitespace character");
|
||||
++pos;
|
||||
}
|
||||
};
|
||||
@ -63,7 +63,8 @@ std::string validateODBCConnectionString(const std::string & connection_string)
|
||||
if (pos < end && isValidIdentifierBegin(*pos))
|
||||
++pos;
|
||||
else
|
||||
throw Exception("ODBC connection string parameter name doesn't begin with valid identifier character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING,
|
||||
"ODBC connection string parameter name doesn't begin with valid identifier character");
|
||||
|
||||
/// Additionally allow dash and dot symbols in names.
|
||||
/// Strictly speaking, the name with that characters should be escaped.
|
||||
@ -83,7 +84,8 @@ std::string validateODBCConnectionString(const std::string & connection_string)
|
||||
{
|
||||
signed char c = *pos;
|
||||
if (c < 32 || strchr("[]{}(),;?*=!@'\"", c) != nullptr)
|
||||
throw Exception("ODBC connection string parameter value is unescaped and contains illegal character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING,
|
||||
"ODBC connection string parameter value is unescaped and contains illegal character");
|
||||
++pos;
|
||||
}
|
||||
|
||||
@ -97,7 +99,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
|
||||
if (pos < end && *pos == '{')
|
||||
++pos;
|
||||
else
|
||||
throw Exception("ODBC connection string parameter value doesn't begin with opening curly brace", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter value doesn't begin with opening curly brace");
|
||||
|
||||
while (pos < end)
|
||||
{
|
||||
@ -109,13 +111,13 @@ std::string validateODBCConnectionString(const std::string & connection_string)
|
||||
}
|
||||
|
||||
if (*pos == 0)
|
||||
throw Exception("ODBC connection string parameter value contains ASCII NUL character", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter value contains ASCII NUL character");
|
||||
|
||||
res += *pos;
|
||||
++pos;
|
||||
}
|
||||
|
||||
throw Exception("ODBC connection string parameter is escaped but there is no closing curly brace", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter is escaped but there is no closing curly brace");
|
||||
};
|
||||
|
||||
auto read_value = [&]
|
||||
@ -139,25 +141,25 @@ std::string validateODBCConnectionString(const std::string & connection_string)
|
||||
|
||||
Poco::toUpperInPlace(name);
|
||||
if (name == "FILEDSN" || name == "SAVEFILE" || name == "DRIVER")
|
||||
throw Exception("ODBC connection string has forbidden parameter", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string has forbidden parameter");
|
||||
|
||||
if (pos >= end)
|
||||
throw Exception("ODBC connection string parameter doesn't have value", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter doesn't have value");
|
||||
|
||||
if (*pos == '=')
|
||||
++pos;
|
||||
else
|
||||
throw Exception("ODBC connection string parameter doesn't have value", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string parameter doesn't have value");
|
||||
|
||||
skip_whitespaces();
|
||||
std::string value = read_value();
|
||||
skip_whitespaces();
|
||||
|
||||
if (name.size() > MAX_ELEMENT_SIZE || value.size() > MAX_ELEMENT_SIZE)
|
||||
throw Exception("ODBC connection string has too long keyword or value", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string has too long keyword or value");
|
||||
|
||||
if (!parameters.emplace(name, value).second)
|
||||
throw Exception("Duplicate parameter found in ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "Duplicate parameter found in ODBC connection string");
|
||||
|
||||
if (pos >= end)
|
||||
break;
|
||||
@ -165,7 +167,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
|
||||
if (*pos == ';')
|
||||
++pos;
|
||||
else
|
||||
throw Exception("Unexpected character found after parameter value in ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
|
||||
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "Unexpected character found after parameter value in ODBC connection string");
|
||||
}
|
||||
|
||||
/// Reconstruct the connection string.
@@ -173,12 +175,12 @@ std::string validateODBCConnectionString(const std::string & connection_string)
auto it = parameters.find("DSN");

if (parameters.end() == it)
throw Exception("DSN parameter is mandatory for ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "DSN parameter is mandatory for ODBC connection string");

std::string dsn = it->second;

if (dsn.empty())
throw Exception("DSN parameter cannot be empty in ODBC connection string", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "DSN parameter cannot be empty in ODBC connection string");

parameters.erase(it);

@@ -241,7 +243,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
write_element(elem.first, elem.second);

if (reconstructed_connection_string.size() >= MAX_CONNECTION_STRING_SIZE)
throw Exception("ODBC connection string is too long", ErrorCodes::BAD_ODBC_CONNECTION_STRING);
throw Exception(ErrorCodes::BAD_ODBC_CONNECTION_STRING, "ODBC connection string is too long");

return reconstructed_connection_string;
}
|
||||
|
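For reference, the checks visible in the validateODBCConnectionString hunks above (a mandatory, non-empty DSN, the forbidden FILEDSN/SAVEFILE/DRIVER parameters, curly-brace escaping of values, and the overall length limit) imply usage roughly like the following sketch; the sample connection strings are illustrative and not taken from this diff:

// Accepted: DSN is present and non-empty; the braces escape the ';' inside the value.
std::string normalized = validateODBCConnectionString("DSN=my_dsn;UID=default;PWD={p;w}");

// Rejected with BAD_ODBC_CONNECTION_STRING: DRIVER is a forbidden parameter,
// and a connection string without any DSN parameter is refused as well.
// validateODBCConnectionString("DRIVER={ODBC Driver};SERVER=localhost");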
@ -257,7 +257,7 @@ static std::string getCanonicalPath(std::string && path)
|
||||
{
|
||||
Poco::trimInPlace(path);
|
||||
if (path.empty())
|
||||
throw Exception("path configuration parameter is empty", ErrorCodes::INVALID_CONFIG_PARAMETER);
|
||||
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "path configuration parameter is empty");
|
||||
if (path.back() != '/')
|
||||
path += '/';
|
||||
return std::move(path);
|
||||
@ -1116,7 +1116,7 @@ try
|
||||
#endif
|
||||
|
||||
if (config().has("interserver_http_port") && config().has("interserver_https_port"))
|
||||
throw Exception("Both http and https interserver ports are specified", ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG);
|
||||
throw Exception(ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG, "Both http and https interserver ports are specified");
|
||||
|
||||
static const auto interserver_tags =
|
||||
{
|
||||
@ -1141,7 +1141,7 @@ try
|
||||
int port = parse<int>(port_str);
|
||||
|
||||
if (port < 0 || port > 0xFFFF)
|
||||
throw Exception("Out of range '" + String(port_tag) + "': " + toString(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND);
|
||||
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Out of range '{}': {}", String(port_tag), port);
|
||||
|
||||
global_context->setInterserverIOAddress(this_host, port);
|
||||
global_context->setInterserverScheme(scheme);
|
||||
@ -1419,8 +1419,7 @@ try
|
||||
global_context->getSettingsRef().send_timeout.totalSeconds(), true), server_pool, socket));
|
||||
#else
|
||||
UNUSED(port);
|
||||
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
|
||||
ErrorCodes::SUPPORT_IS_DISABLED};
|
||||
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
|
||||
#endif
|
||||
});
|
||||
}
|
||||
@ -1465,7 +1464,7 @@ try
|
||||
size_t max_cache_size = static_cast<size_t>(memory_amount * cache_size_to_ram_max_ratio);
|
||||
|
||||
/// Size of cache for uncompressed blocks. Zero means disabled.
|
||||
String uncompressed_cache_policy = config().getString("uncompressed_cache_policy", "");
|
||||
String uncompressed_cache_policy = config().getString("uncompressed_cache_policy", "SLRU");
|
||||
LOG_INFO(log, "Uncompressed cache policy name {}", uncompressed_cache_policy);
|
||||
size_t uncompressed_cache_size = config().getUInt64("uncompressed_cache_size", 0);
|
||||
if (uncompressed_cache_size > max_cache_size)
|
||||
@ -1491,7 +1490,7 @@ try
|
||||
|
||||
/// Size of cache for marks (index of MergeTree family of tables).
|
||||
size_t mark_cache_size = config().getUInt64("mark_cache_size", 5368709120);
|
||||
String mark_cache_policy = config().getString("mark_cache_policy", "");
|
||||
String mark_cache_policy = config().getString("mark_cache_policy", "SLRU");
|
||||
if (!mark_cache_size)
|
||||
LOG_ERROR(log, "Too low mark cache size will lead to severe performance degradation.");
|
||||
if (mark_cache_size > max_cache_size)
|
||||
@ -1517,6 +1516,15 @@ try
|
||||
if (mmap_cache_size)
global_context->setMMappedFileCache(mmap_cache_size);

/// A cache for query results.
size_t query_result_cache_size = config().getUInt64("query_result_cache.size", 1_GiB);
if (query_result_cache_size)
global_context->setQueryResultCache(
query_result_cache_size,
config().getUInt64("query_result_cache.max_entries", 1024),
config().getUInt64("query_result_cache.max_entry_size", 1_MiB),
config().getUInt64("query_result_cache.max_entry_records", 30'000'000));

#if USE_EMBEDDED_COMPILER
/// 128 MB
constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 128;
|
||||
@ -1740,14 +1748,15 @@ try
|
||||
std::lock_guard lock(servers_lock);
|
||||
createServers(config(), listen_hosts, interserver_listen_hosts, listen_try, server_pool, async_metrics, servers);
|
||||
if (servers.empty())
|
||||
throw Exception(
|
||||
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)",
|
||||
ErrorCodes::NO_ELEMENTS_IN_CONFIG);
|
||||
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
|
||||
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' "
|
||||
"to configuration file.)");
|
||||
}
|
||||
|
||||
if (servers.empty())
|
||||
throw Exception("No servers started (add valid listen_host and 'tcp_port' or 'http_port' to configuration file.)",
|
||||
ErrorCodes::NO_ELEMENTS_IN_CONFIG);
|
||||
throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
|
||||
"No servers started (add valid listen_host and 'tcp_port' or 'http_port' "
|
||||
"to configuration file.)");
|
||||
|
||||
#if USE_SSL
|
||||
CertificateReloader::instance().tryLoad(config());
|
||||
@ -1807,7 +1816,7 @@ try
|
||||
String ddl_zookeeper_path = config().getString("distributed_ddl.path", "/clickhouse/task_queue/ddl/");
|
||||
int pool_size = config().getInt("distributed_ddl.pool_size", 1);
|
||||
if (pool_size < 1)
|
||||
throw Exception("distributed_ddl.pool_size should be greater then 0", ErrorCodes::ARGUMENT_OUT_OF_BOUND);
|
||||
throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "distributed_ddl.pool_size should be greater then 0");
|
||||
global_context->setDDLWorker(std::make_unique<DDLWorker>(pool_size, ddl_zookeeper_path, global_context, &config(),
|
||||
"distributed_ddl", "DDLWorker",
|
||||
&CurrentMetrics::MaxDDLEntryID, &CurrentMetrics::MaxPushedDDLEntryID));
|
||||
@ -1936,8 +1945,7 @@ std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
|
||||
#if USE_SSL
|
||||
return TCPServerConnectionFactory::Ptr(new TLSHandlerFactory(*this, conf_name));
|
||||
#else
|
||||
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
|
||||
ErrorCodes::SUPPORT_IS_DISABLED};
|
||||
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
|
||||
#endif
|
||||
|
||||
if (type == "proxy1")
|
||||
@ -2104,8 +2112,7 @@ void Server::createServers(
|
||||
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params));
|
||||
#else
|
||||
UNUSED(port);
|
||||
throw Exception{"HTTPS protocol is disabled because Poco library was built without NetSSL support.",
|
||||
ErrorCodes::SUPPORT_IS_DISABLED};
|
||||
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "HTTPS protocol is disabled because Poco library was built without NetSSL support.");
|
||||
#endif
|
||||
});
|
||||
|
||||
@ -2167,8 +2174,7 @@ void Server::createServers(
|
||||
new Poco::Net::TCPServerParams));
|
||||
#else
|
||||
UNUSED(port);
|
||||
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
|
||||
ErrorCodes::SUPPORT_IS_DISABLED};
|
||||
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
|
||||
#endif
|
||||
});
|
||||
|
||||
@ -2273,8 +2279,7 @@ void Server::createServers(
|
||||
http_params));
|
||||
#else
|
||||
UNUSED(port);
|
||||
throw Exception{"SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.",
|
||||
ErrorCodes::SUPPORT_IS_DISABLED};
|
||||
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "SSL support for TCP protocol is disabled because Poco library was built without NetSSL support.");
|
||||
#endif
|
||||
});
|
||||
}
|
||||
|
@@ -1453,6 +1453,14 @@
</rocksdb>
-->

<!-- Configuration for the query result cache -->
<!-- <query_result_cache> -->
<!-- <size>1073741824</size> -->
<!-- <max_entries>1024</max_entries> -->
<!-- <max_entry_size>1048576</max_entry_size> -->
<!-- <max_entry_records>30000000</max_entry_records> -->
<!-- </query_result_cache> -->

<!-- Uncomment if enable merge tree metadata cache -->
<!--merge_tree_metadata_cache>
<lru_cache_size>268435456</lru_cache_size>
|
||||
|
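The commented-out block above documents the same keys that the Server.cpp hunk earlier in this diff reads at startup; a condensed sketch of that mapping, with the defaults used when a key is absent (sizes are in bytes, and a size of 0 leaves the cache disabled):

// Mirrors the Server.cpp hunk above: 1 GiB total size, 1024 entries,
// 1 MiB per entry and 30,000,000 records per entry unless overridden in config.xml.
size_t query_result_cache_size = config().getUInt64("query_result_cache.size", 1_GiB);
if (query_result_cache_size)
    global_context->setQueryResultCache(
        query_result_cache_size,
        config().getUInt64("query_result_cache.max_entries", 1024),
        config().getUInt64("query_result_cache.max_entry_size", 1_MiB),
        config().getUInt64("query_result_cache.max_entry_records", 30'000'000));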
@ -65,7 +65,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
|
||||
}
|
||||
|
||||
if (gid == 0 && getgid() != 0)
|
||||
throw Exception("Group has id 0, but dropping privileges to gid 0 does not make sense", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Group has id 0, but dropping privileges to gid 0 does not make sense");
|
||||
|
||||
if (0 != setgid(gid))
|
||||
throwFromErrno(fmt::format("Cannot do 'setgid' to user ({})", arg_gid), ErrorCodes::SYSTEM_ERROR);
|
||||
@ -90,7 +90,7 @@ void setUserAndGroup(std::string arg_uid, std::string arg_gid)
|
||||
}
|
||||
|
||||
if (uid == 0 && getuid() != 0)
|
||||
throw Exception("User has id 0, but dropping privileges to uid 0 does not make sense", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "User has id 0, but dropping privileges to uid 0 does not make sense");
|
||||
|
||||
if (0 != setuid(uid))
|
||||
throwFromErrno(fmt::format("Cannot do 'setuid' to user ({})", arg_uid), ErrorCodes::SYSTEM_ERROR);
|
||||
|
@ -289,7 +289,7 @@ namespace
|
||||
}
|
||||
|
||||
default:
|
||||
throw Exception("Unknown type: " + toString(entity_type), ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown type: {}", toString(entity_type));
|
||||
}
|
||||
}
|
||||
return res;
|
||||
|
@ -126,10 +126,10 @@ public:
|
||||
std::lock_guard lock{mutex};
|
||||
if (!registered_prefixes.empty())
|
||||
{
|
||||
throw Exception(
|
||||
"Setting " + String{setting_name} + " is neither a builtin setting nor started with the prefix '"
|
||||
+ boost::algorithm::join(registered_prefixes, "' or '") + "' registered for user-defined settings",
|
||||
ErrorCodes::UNKNOWN_SETTING);
|
||||
throw Exception(ErrorCodes::UNKNOWN_SETTING,
|
||||
"Setting {} is neither a builtin setting nor started with the prefix '{}"
|
||||
"' registered for user-defined settings",
|
||||
String{setting_name}, boost::algorithm::join(registered_prefixes, "' or '"));
|
||||
}
|
||||
else
|
||||
BaseSettingsHelpers::throwSettingNotFound(setting_name);
|
||||
@ -450,7 +450,7 @@ void AccessControl::addStoragesFromUserDirectoriesConfig(
|
||||
addReplicatedStorage(name, zookeeper_path, get_zookeeper_function, allow_backup);
|
||||
}
|
||||
else
|
||||
throw Exception("Unknown storage type '" + type + "' at " + prefix + " in config", ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG);
|
||||
throw Exception(ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG, "Unknown storage type '{}' at {} in config", type, prefix);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -80,53 +80,53 @@ AccessEntityPtr deserializeAccessEntityImpl(const String & definition)
|
||||
if (auto * create_user_query = query->as<ASTCreateUserQuery>())
|
||||
{
|
||||
if (res)
|
||||
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
|
||||
res = user = std::make_unique<User>();
|
||||
InterpreterCreateUserQuery::updateUserFromQuery(*user, *create_user_query, /* allow_no_password = */ true, /* allow_plaintext_password = */ true);
|
||||
}
|
||||
else if (auto * create_role_query = query->as<ASTCreateRoleQuery>())
|
||||
{
|
||||
if (res)
|
||||
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
|
||||
res = role = std::make_unique<Role>();
|
||||
InterpreterCreateRoleQuery::updateRoleFromQuery(*role, *create_role_query);
|
||||
}
|
||||
else if (auto * create_policy_query = query->as<ASTCreateRowPolicyQuery>())
|
||||
{
|
||||
if (res)
|
||||
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
|
||||
res = policy = std::make_unique<RowPolicy>();
|
||||
InterpreterCreateRowPolicyQuery::updateRowPolicyFromQuery(*policy, *create_policy_query);
|
||||
}
|
||||
else if (auto * create_quota_query = query->as<ASTCreateQuotaQuery>())
|
||||
{
|
||||
if (res)
|
||||
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
|
||||
res = quota = std::make_unique<Quota>();
|
||||
InterpreterCreateQuotaQuery::updateQuotaFromQuery(*quota, *create_quota_query);
|
||||
}
|
||||
else if (auto * create_profile_query = query->as<ASTCreateSettingsProfileQuery>())
|
||||
{
|
||||
if (res)
|
||||
throw Exception("Two access entities attached in the same file", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "Two access entities attached in the same file");
|
||||
res = profile = std::make_unique<SettingsProfile>();
|
||||
InterpreterCreateSettingsProfileQuery::updateSettingsProfileFromQuery(*profile, *create_profile_query);
|
||||
}
|
||||
else if (auto * grant_query = query->as<ASTGrantQuery>())
|
||||
{
|
||||
if (!user && !role)
|
||||
throw Exception("A user or role should be attached before grant", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "A user or role should be attached before grant");
|
||||
if (user)
|
||||
InterpreterGrantQuery::updateUserFromQuery(*user, *grant_query);
|
||||
else
|
||||
InterpreterGrantQuery::updateRoleFromQuery(*role, *grant_query);
|
||||
}
|
||||
else
|
||||
throw Exception("No interpreter found for query " + query->getID(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "No interpreter found for query {}", query->getID());
|
||||
}
|
||||
|
||||
if (!res)
|
||||
throw Exception("No access entities attached", ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
|
||||
throw Exception(ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION, "No access entities attached");
|
||||
|
||||
return res;
|
||||
}
|
||||
|
@ -797,7 +797,7 @@ template <bool with_grant_option>
|
||||
void AccessRights::grantImpl(const AccessRightsElement & element)
|
||||
{
|
||||
if (element.is_partial_revoke)
|
||||
throw Exception("A partial revoke should be revoked, not granted", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "A partial revoke should be revoked, not granted");
|
||||
if constexpr (with_grant_option)
|
||||
{
|
||||
grantImplHelper<true>(element);
|
||||
|
@ -176,7 +176,7 @@ bool Authentication::areCredentialsValid(const Credentials & credentials, const
|
||||
if ([[maybe_unused]] const auto * always_allow_credentials = typeid_cast<const AlwaysAllowCredentials *>(&credentials))
|
||||
return true;
|
||||
|
||||
throw Exception("areCredentialsValid(): authentication type " + toString(auth_data.getType()) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "areCredentialsValid(): authentication type {} not supported", toString(auth_data.getType()));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -83,7 +83,7 @@ const AccessEntityTypeInfo & AccessEntityTypeInfo::get(AccessEntityType type_)
|
||||
}
|
||||
case AccessEntityType::MAX: break;
|
||||
}
|
||||
throw Exception("Unknown type: " + std::to_string(static_cast<size_t>(type_)), ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown type: {}", static_cast<size_t>(type_));
|
||||
}
|
||||
|
||||
AccessEntityType AccessEntityTypeInfo::parseType(const String & name_)
|
||||
|
@ -44,7 +44,7 @@ namespace
|
||||
boost::to_upper(uppercased_keyword);
|
||||
it = keyword_to_flags_map.find(uppercased_keyword);
|
||||
if (it == keyword_to_flags_map.end())
|
||||
throw Exception("Unknown access type: " + String(keyword), ErrorCodes::UNKNOWN_ACCESS_TYPE);
|
||||
throw Exception(ErrorCodes::UNKNOWN_ACCESS_TYPE, "Unknown access type: {}", String(keyword));
|
||||
}
|
||||
return it->second;
|
||||
}
|
||||
@ -179,7 +179,7 @@ namespace
|
||||
else
|
||||
{
|
||||
if (nodes.contains(keyword))
|
||||
throw Exception(keyword + " declared twice", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "{} declared twice", keyword);
|
||||
node = std::make_unique<Node>(keyword, node_type);
|
||||
nodes[node->keyword] = node.get();
|
||||
}
|
||||
@ -225,9 +225,9 @@ namespace
|
||||
# undef MAKE_ACCESS_FLAGS_NODE
|
||||
|
||||
if (!owned_nodes.contains("NONE"))
|
||||
throw Exception("'NONE' not declared", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "'NONE' not declared");
|
||||
if (!owned_nodes.contains("ALL"))
|
||||
throw Exception("'ALL' not declared", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "'ALL' not declared");
|
||||
|
||||
all_node = std::move(owned_nodes["ALL"]);
|
||||
none_node = std::move(owned_nodes["NONE"]);
|
||||
@ -238,9 +238,9 @@ namespace
|
||||
{
|
||||
const auto & unused_node = *(owned_nodes.begin()->second);
|
||||
if (unused_node.node_type == UNKNOWN)
|
||||
throw Exception("Parent group '" + unused_node.keyword + "' not found", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Parent group '{}' not found", unused_node.keyword);
|
||||
else
|
||||
throw Exception("Access type '" + unused_node.keyword + "' should have parent group", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Access type '{}' should have parent group", unused_node.keyword);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -142,6 +142,7 @@ enum class AccessType
|
||||
M(SYSTEM_DROP_MARK_CACHE, "SYSTEM DROP MARK, DROP MARK CACHE, DROP MARKS", GLOBAL, SYSTEM_DROP_CACHE) \
|
||||
M(SYSTEM_DROP_UNCOMPRESSED_CACHE, "SYSTEM DROP UNCOMPRESSED, DROP UNCOMPRESSED CACHE, DROP UNCOMPRESSED", GLOBAL, SYSTEM_DROP_CACHE) \
|
||||
M(SYSTEM_DROP_MMAP_CACHE, "SYSTEM DROP MMAP, DROP MMAP CACHE, DROP MMAP", GLOBAL, SYSTEM_DROP_CACHE) \
|
||||
M(SYSTEM_DROP_QUERY_RESULT_CACHE, "SYSTEM DROP QUERY RESULT, DROP QUERY RESULT CACHE, DROP QUERY RESULT", GLOBAL, SYSTEM_DROP_CACHE) \
|
||||
M(SYSTEM_DROP_COMPILED_EXPRESSION_CACHE, "SYSTEM DROP COMPILED EXPRESSION, DROP COMPILED EXPRESSION CACHE, DROP COMPILED EXPRESSIONS", GLOBAL, SYSTEM_DROP_CACHE) \
|
||||
M(SYSTEM_DROP_FILESYSTEM_CACHE, "SYSTEM DROP FILESYSTEM CACHE, DROP FILESYSTEM CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
|
||||
M(SYSTEM_DROP_SCHEMA_CACHE, "SYSTEM DROP SCHEMA CACHE, DROP SCHEMA CACHE", GLOBAL, SYSTEM_DROP_CACHE) \
|
||||
|
@ -67,7 +67,7 @@ const AuthenticationTypeInfo & AuthenticationTypeInfo::get(AuthenticationType ty
|
||||
case AuthenticationType::MAX:
|
||||
break;
|
||||
}
|
||||
throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type_)), ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown authentication type: {}", static_cast<int>(type_));
|
||||
}
|
||||
|
||||
|
||||
@ -119,19 +119,19 @@ void AuthenticationData::setPassword(const String & password_)
|
||||
case AuthenticationType::LDAP:
|
||||
case AuthenticationType::KERBEROS:
|
||||
case AuthenticationType::SSL_CERTIFICATE:
|
||||
throw Exception("Cannot specify password for authentication type " + toString(type), ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot specify password for authentication type {}", toString(type));
|
||||
|
||||
case AuthenticationType::MAX:
|
||||
break;
|
||||
}
|
||||
throw Exception("setPassword(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setPassword(): authentication type {} not supported", toString(type));
|
||||
}
|
||||
|
||||
|
||||
String AuthenticationData::getPassword() const
|
||||
{
|
||||
if (type != AuthenticationType::PLAINTEXT_PASSWORD)
|
||||
throw Exception("Cannot decode the password", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot decode the password");
|
||||
return String(password_hash.data(), password_hash.data() + password_hash.size());
|
||||
}
|
||||
|
||||
@ -157,7 +157,7 @@ void AuthenticationData::setPasswordHashHex(const String & hash)
|
||||
String AuthenticationData::getPasswordHashHex() const
|
||||
{
|
||||
if (type == AuthenticationType::LDAP || type == AuthenticationType::KERBEROS || type == AuthenticationType::SSL_CERTIFICATE)
|
||||
throw Exception("Cannot get password hex hash for authentication type " + toString(type), ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot get password hex hash for authentication type {}", toString(type));
|
||||
|
||||
String hex;
|
||||
hex.resize(password_hash.size() * 2);
|
||||
@ -179,10 +179,9 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
|
||||
case AuthenticationType::SHA256_PASSWORD:
|
||||
{
|
||||
if (hash.size() != 32)
|
||||
throw Exception(
|
||||
"Password hash for the 'SHA256_PASSWORD' authentication type has length " + std::to_string(hash.size())
|
||||
+ " but must be exactly 32 bytes.",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||
"Password hash for the 'SHA256_PASSWORD' authentication type has length {} "
|
||||
"but must be exactly 32 bytes.", hash.size());
|
||||
password_hash = hash;
|
||||
return;
|
||||
}
|
||||
@ -190,10 +189,9 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
|
||||
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
|
||||
{
|
||||
if (hash.size() != 20)
|
||||
throw Exception(
|
||||
"Password hash for the 'DOUBLE_SHA1_PASSWORD' authentication type has length " + std::to_string(hash.size())
|
||||
+ " but must be exactly 20 bytes.",
|
||||
ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||
"Password hash for the 'DOUBLE_SHA1_PASSWORD' authentication type has length {} "
|
||||
"but must be exactly 20 bytes.", hash.size());
|
||||
password_hash = hash;
|
||||
return;
|
||||
}
|
||||
@ -202,18 +200,18 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
|
||||
case AuthenticationType::LDAP:
|
||||
case AuthenticationType::KERBEROS:
|
||||
case AuthenticationType::SSL_CERTIFICATE:
|
||||
throw Exception("Cannot specify password binary hash for authentication type " + toString(type), ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot specify password binary hash for authentication type {}", toString(type));
|
||||
|
||||
case AuthenticationType::MAX:
|
||||
break;
|
||||
}
|
||||
throw Exception("setPasswordHashBinary(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setPasswordHashBinary(): authentication type {} not supported", toString(type));
|
||||
}
|
||||
|
||||
void AuthenticationData::setSalt(String salt_)
|
||||
{
|
||||
if (type != AuthenticationType::SHA256_PASSWORD)
|
||||
throw Exception("setSalt(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setSalt(): authentication type {} not supported", toString(type));
|
||||
salt = std::move(salt_);
|
||||
}
|
||||
|
||||
@ -225,7 +223,7 @@ String AuthenticationData::getSalt() const
|
||||
void AuthenticationData::setSSLCertificateCommonNames(boost::container::flat_set<String> common_names_)
|
||||
{
|
||||
if (common_names_.empty())
|
||||
throw Exception("The 'SSL CERTIFICATE' authentication type requires a non-empty list of common names.", ErrorCodes::BAD_ARGUMENTS);
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The 'SSL CERTIFICATE' authentication type requires a non-empty list of common names.");
|
||||
ssl_certificate_common_names = std::move(common_names_);
|
||||
}
|
||||
|
||||
|
@ -113,7 +113,7 @@ const QuotaTypeInfo & QuotaTypeInfo::get(QuotaType type)
|
||||
}
|
||||
case QuotaType::MAX: break;
|
||||
}
|
||||
throw Exception("Unexpected quota type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected quota type: {}", static_cast<int>(type));
|
||||
}
|
||||
|
||||
String toString(QuotaKeyType type)
|
||||
@ -187,7 +187,7 @@ const QuotaKeyTypeInfo & QuotaKeyTypeInfo::get(QuotaKeyType type)
|
||||
}
|
||||
case QuotaKeyType::MAX: break;
|
||||
}
|
||||
throw Exception("Unexpected quota key type: " + std::to_string(static_cast<int>(type)), ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected quota key type: {}", static_cast<int>(type));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -75,7 +75,7 @@ const RowPolicyFilterTypeInfo & RowPolicyFilterTypeInfo::get(RowPolicyFilterType
|
||||
#endif
|
||||
case RowPolicyFilterType::MAX: break;
|
||||
}
|
||||
throw Exception("Unknown type: " + std::to_string(static_cast<size_t>(type_)), ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown type: {}", static_cast<size_t>(type_));
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -470,7 +470,7 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
/// If the current user has been dropped we always throw an exception (even if `throw_if_denied` is false)
/// because dropping of the current user is considered as a situation which is exceptional enough to stop
/// query execution.
throw Exception(getUserName() + ": User has been dropped", ErrorCodes::UNKNOWN_USER);
throw Exception(ErrorCodes::UNKNOWN_USER, "{}: User has been dropped", getUserName());
}

if (is_full_access)
@@ -790,7 +790,7 @@ void ContextAccess::checkGranteeIsAllowed(const UUID & grantee_id, const IAccess

auto current_user = getUser();
if (!current_user->grantees.match(grantee_id))
throw Exception(grantee.formatTypeWithName() + " is not allowed as grantee", ErrorCodes::ACCESS_DENIED);
throw Exception(ErrorCodes::ACCESS_DENIED, "{} is not allowed as grantee", grantee.formatTypeWithName());
}

void ContextAccess::checkGranteesAreAllowed(const std::vector<UUID> & grantee_ids) const
|
||||
|
@ -29,7 +29,7 @@ bool Credentials::isReady() const
|
||||
|
||||
void Credentials::throwNotReady()
|
||||
{
|
||||
throw Exception("Credentials are not ready", ErrorCodes::LOGICAL_ERROR);
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Credentials are not ready");
|
||||
}
|
||||
|
||||
AlwaysAllowCredentials::AlwaysAllowCredentials()
|
||||
|
@ -172,7 +172,8 @@ DiskAccessStorage::DiskAccessStorage(const String & storage_name_, const String
|
||||
std::filesystem::create_directories(directory_path, create_dir_error_code);
|
||||
|
||||
if (!std::filesystem::exists(directory_path) || !std::filesystem::is_directory(directory_path) || create_dir_error_code)
|
||||
throw Exception("Couldn't create directory " + directory_path + " reason: '" + create_dir_error_code.message() + "'", ErrorCodes::DIRECTORY_DOESNT_EXIST);
|
||||
throw Exception(ErrorCodes::DIRECTORY_DOESNT_EXIST, "Couldn't create directory {} reason: '{}'",
|
||||
directory_path, create_dir_error_code.message());
|
||||
|
||||
bool should_rebuild_lists = std::filesystem::exists(getNeedRebuildListsMarkFilePath(directory_path));
|
||||
if (!should_rebuild_lists)
|
||||
@ -722,7 +723,7 @@ void DiskAccessStorage::deleteAccessEntityOnDisk(const UUID & id) const
|
||||
{
|
||||
auto file_path = getEntityFilePath(directory_path, id);
|
||||
if (!std::filesystem::remove(file_path))
|
||||
throw Exception("Couldn't delete " + file_path, ErrorCodes::FILE_DOESNT_EXIST);
|
||||
throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "Couldn't delete {}", file_path);
|
||||
}
|
||||
|
||||
|
||||
|
@ -29,11 +29,11 @@ struct EnabledQuota::Impl
|
||||
std::chrono::system_clock::time_point end_of_interval)
|
||||
{
|
||||
const auto & type_info = QuotaTypeInfo::get(quota_type);
|
||||
throw Exception(
|
||||
"Quota for user " + backQuote(user_name) + " for " + to_string(duration) + " has been exceeded: "
|
||||
+ type_info.valueToStringWithName(used) + "/" + type_info.valueToString(max) + ". "
|
||||
+ "Interval will end at " + to_string(end_of_interval) + ". " + "Name of quota template: " + backQuote(quota_name),
|
||||
ErrorCodes::QUOTA_EXCEEDED);
|
||||
throw Exception(ErrorCodes::QUOTA_EXCEEDED, "Quota for user {} for {} has been exceeded: {}/{}. "
|
||||
"Interval will end at {}. Name of quota template: {}",
|
||||
backQuote(user_name), to_string(duration),
|
||||
type_info.valueToStringWithName(used),
|
||||
type_info.valueToString(max), to_string(end_of_interval), backQuote(quota_name));
|
||||
}
|
||||
|
||||
|
||||
|
@ -47,15 +47,16 @@ void parseLDAPSearchParams(LDAPClient::SearchParams & params, const Poco::Util::
else if (scope == "subtree") params.scope = LDAPClient::SearchParams::Scope::SUBTREE;
else if (scope == "children") params.scope = LDAPClient::SearchParams::Scope::CHILDREN;
else
throw Exception("Invalid value for 'scope' field of LDAP search parameters in '" + prefix +
"' section, must be one of 'base', 'one_level', 'subtree', or 'children'", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Invalid value for 'scope' field of LDAP search parameters "
"in '{}' section, must be one of 'base', 'one_level', 'subtree', or 'children'", prefix);
}
}

void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConfiguration & config, const String & name)
{
if (name.empty())
throw Exception("LDAP server name cannot be empty", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "LDAP server name cannot be empty");

const String ldap_server_config = "ldap_servers." + name;

@ -77,17 +78,17 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
const bool has_search_limit = config.has(ldap_server_config + ".search_limit");

if (!has_host)
throw Exception("Missing 'host' entry", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Missing 'host' entry");

params.host = config.getString(ldap_server_config + ".host");

if (params.host.empty())
throw Exception("Empty 'host' entry", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Empty 'host' entry");

if (has_bind_dn)
{
if (has_auth_dn_prefix || has_auth_dn_suffix)
throw Exception("Deprecated 'auth_dn_prefix' and 'auth_dn_suffix' entries cannot be used with 'bind_dn' entry", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Deprecated 'auth_dn_prefix' and 'auth_dn_suffix' entries cannot be used with 'bind_dn' entry");

params.bind_dn = config.getString(ldap_server_config + ".bind_dn");
}

@ -141,7 +142,9 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
else if (tls_minimum_protocol_version_lc_str == "tls1.2")
params.tls_minimum_protocol_version = LDAPClient::Params::TLSProtocolVersion::TLS1_2; //-V1048
else
throw Exception("Bad value for 'tls_minimum_protocol_version' entry, allowed values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Bad value for 'tls_minimum_protocol_version' entry, allowed values are: "
"'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'");
}

if (has_tls_require_cert)

@ -158,7 +161,9 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
else if (tls_require_cert_lc_str == "demand")
params.tls_require_cert = LDAPClient::Params::TLSRequireCert::DEMAND; //-V1048
else
throw Exception("Bad value for 'tls_require_cert' entry, allowed values are: 'never', 'allow', 'try', 'demand'", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Bad value for 'tls_require_cert' entry, allowed values are: "
"'never', 'allow', 'try', 'demand'");
}

if (has_tls_cert_file)

@ -180,7 +185,7 @@ void parseLDAPServer(LDAPClient::Params & params, const Poco::Util::AbstractConf
{
UInt32 port = config.getUInt(ldap_server_config + ".port");
if (port > 65535)
throw Exception("Bad value for 'port' entry", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad value for 'port' entry");

params.port = port;
}

@ -212,13 +217,13 @@ void parseKerberosParams(GSSAcceptorContext::Params & params, const Poco::Util::
}

if (reealm_key_count > 0 && principal_keys_count > 0)
throw Exception("Realm and principal name cannot be specified simultaneously", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Realm and principal name cannot be specified simultaneously");

if (reealm_key_count > 1)
throw Exception("Multiple realm sections are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple realm sections are not allowed");

if (principal_keys_count > 1)
throw Exception("Multiple principal sections are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple principal sections are not allowed");

params.realm = config.getString("kerberos.realm", "");
params.principal = config.getString("kerberos.principal", "");

@ -274,10 +279,10 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur
}

if (ldap_servers_key_count > 1)
throw Exception("Multiple ldap_servers sections are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple ldap_servers sections are not allowed");

if (kerberos_keys_count > 1)
throw Exception("Multiple kerberos sections are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple kerberos sections are not allowed");

Poco::Util::AbstractConfiguration::Keys ldap_server_names;
config.keys("ldap_servers", ldap_server_names);

@ -291,7 +296,7 @@ void ExternalAuthenticators::setConfiguration(const Poco::Util::AbstractConfigur
ldap_server_name.resize(bracket_pos);

if (ldap_client_params_blueprint.contains(ldap_server_name))
throw Exception("Multiple LDAP servers with the same name are not allowed", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Multiple LDAP servers with the same name are not allowed");

LDAPClient::Params ldap_client_params_tmp;
parseLDAPServer(ldap_client_params_tmp, config, ldap_server_name);

@ -346,7 +351,7 @@ bool ExternalAuthenticators::checkLDAPCredentials(const String & server, const B
// Retrieve the server parameters.
const auto pit = ldap_client_params_blueprint.find(server);
if (pit == ldap_client_params_blueprint.end())
throw Exception("LDAP server '" + server + "' is not configured", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "LDAP server '{}' is not configured", server);

params = pit->second;
params->user = credentials.getUserName();

@ -461,7 +466,7 @@ bool ExternalAuthenticators::checkKerberosCredentials(const String & realm, cons
std::scoped_lock lock(mutex);

if (!kerberos_params.has_value())
throw Exception("Kerberos is not enabled", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Kerberos is not enabled");

if (!credentials.isReady())
return false;

@ -480,7 +485,7 @@ GSSAcceptorContext::Params ExternalAuthenticators::getKerberosParams() const
std::scoped_lock lock(mutex);

if (!kerberos_params.has_value())
throw Exception("Kerberos is not enabled", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Kerberos is not enabled");

return kerberos_params.value();
}
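The new call sites above pass the error code first, then a format string and its arguments, for example throw Exception(ErrorCodes::BAD_ARGUMENTS, "LDAP server name cannot be empty"). The sketch below shows how a constructor with that shape can be declared, assuming the fmt library (version 8 or newer for fmt::format_string); CodedException and the error-code value are hypothetical stand-ins, not ClickHouse's actual Exception class.

#include <fmt/format.h>
#include <stdexcept>
#include <string>
#include <utility>

// Hypothetical exception type: error code first, then a compile-time checked
// format string and its arguments, mirroring the call shape in the diff.
class CodedException : public std::runtime_error
{
public:
    template <typename... Args>
    CodedException(int code_, fmt::format_string<Args...> format, Args &&... args)
        : std::runtime_error(fmt::format(format, std::forward<Args>(args)...)), code(code_)
    {
    }

    const int code;
};

namespace ErrorCodes { constexpr int BAD_ARGUMENTS = 36; } // illustrative value only

int main()
{
    try
    {
        const std::string name; // pretend this came from the configuration
        if (name.empty())
            throw CodedException(ErrorCodes::BAD_ARGUMENTS, "LDAP server name cannot be empty");
    }
    catch (const CodedException & e)
    {
        fmt::print("code {}: {}\n", e.code, e.what());
    }
    return 0;
}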
@ -265,16 +265,16 @@ void GSSAcceptorContext::initHandles()
if (!params.keytab.empty())
{
if (!std::filesystem::exists(params.keytab))
throw Exception("Keytab file not found", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Keytab file not found");

if (krb5_gss_register_acceptor_identity(params.keytab.c_str()))
throw Exception("Failed to register keytab file", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Failed to register keytab file");
}

if (!params.principal.empty())
{
if (!params.realm.empty())
throw Exception("Realm and principal name cannot be specified simultaneously", ErrorCodes::BAD_ARGUMENTS);
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Realm and principal name cannot be specified simultaneously");

gss_buffer_desc acceptor_name_buf;
acceptor_name_buf.length = params.principal.size();

@ -305,7 +305,7 @@ void GSSAcceptorContext::initHandles()
if (GSS_ERROR(major_status))
{
const auto messages = extractStatusMessages(major_status, minor_status, GSS_C_NO_OID);
throw Exception("gss_import_name() failed" + (messages.empty() ? "" : ": " + messages), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_import_name() failed{}", (messages.empty() ? "" : ": " + messages));
}

minor_status = 0;

@ -323,7 +323,7 @@ void GSSAcceptorContext::initHandles()
if (GSS_ERROR(major_status))
{
const auto messages = extractStatusMessages(major_status, minor_status, GSS_C_NO_OID);
throw Exception("gss_acquire_cred() failed" + (messages.empty() ? "" : ": " + messages), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_acquire_cred() failed{}", (messages.empty() ? "" : ": " + messages));
}
}
}

@ -387,21 +387,26 @@ String GSSAcceptorContext::processToken(const String & input_token, Poco::Logger
if (major_status == GSS_S_COMPLETE)
{
if (!params.mechanism.empty() && !equalMechanisms(params.mechanism, mech_type))
throw Exception("gss_accept_sec_context() succeeded, but: the authentication mechanism is not what was expected", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR,
"gss_accept_sec_context() succeeded, but: "
"the authentication mechanism is not what was expected");

if (flags & GSS_C_ANON_FLAG)
throw Exception("gss_accept_sec_context() succeeded, but: the initiator does not wish to be authenticated", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_accept_sec_context() succeeded, but: the initiator does not wish to be authenticated");

std::tie(user_name, realm) = extractNameAndRealm(initiator_name);

if (user_name.empty())
throw Exception("gss_accept_sec_context() succeeded, but: the initiator name cannot be extracted", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_accept_sec_context() succeeded, but: the initiator name cannot be extracted");

if (realm.empty())
throw Exception("gss_accept_sec_context() succeeded, but: the initiator realm cannot be extracted", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_accept_sec_context() succeeded, but: the initiator realm cannot be extracted");

if (!params.realm.empty() && params.realm != realm)
throw Exception("gss_accept_sec_context() succeeded, but: the initiator realm is not what was expected (expected: " + params.realm + ", actual: " + realm + ")", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR,
"gss_accept_sec_context() succeeded, but: "
"the initiator realm is not what was expected (expected: {}, actual: {})",
params.realm, realm);

output_token = bufferToString(output_token_buf);

@ -420,7 +425,7 @@ String GSSAcceptorContext::processToken(const String & input_token, Poco::Logger
else
{
const auto messages = extractStatusMessages(major_status, minor_status, mech_type);
throw Exception("gss_accept_sec_context() failed" + (messages.empty() ? "" : ": " + messages), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "gss_accept_sec_context() failed{}", (messages.empty() ? "" : ": " + messages));
}
}
catch (...)

@ -452,7 +457,7 @@ void GSSAcceptorContext::initHandles()

String GSSAcceptorContext::processToken(const String &, Poco::Logger *)
{
throw Exception("ClickHouse was built without GSS-API/Kerberos support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
throw Exception(ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME, "ClickHouse was built without GSS-API/Kerberos support");
}

#endif // USE_KRB5
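Several of the GSS-related messages above append an optional detail: "failed" plus ": <messages>" only when extractStatusMessages returned something, and the new calls keep that by passing the pre-built suffix into a single {} placeholder. A minimal sketch of that pattern follows; buildGssError is a hypothetical helper, not part of any real GSS-API or ClickHouse interface, and the detail text is invented.

#include <fmt/format.h>
#include <iostream>
#include <string>

// Demonstrates the optional-suffix pattern: the {} receives either an empty
// string or ": <detail>", so the message reads naturally in both cases.
std::string buildGssError(const std::string & call, const std::string & messages)
{
    return fmt::format("{} failed{}", call, messages.empty() ? "" : ": " + messages);
}

int main()
{
    std::cout << buildGssError("gss_import_name()", "") << '\n';
    std::cout << buildGssError("gss_acquire_cred()", "example detail text") << '\n';
    return 0;
}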
@ -630,79 +630,70 @@ Poco::Logger * IAccessStorage::getLogger() const

void IAccessStorage::throwNotFound(const UUID & id) const
{
throw Exception(outputID(id) + " not found in " + getStorageName(), ErrorCodes::ACCESS_ENTITY_NOT_FOUND);
throw Exception(ErrorCodes::ACCESS_ENTITY_NOT_FOUND, "{} not found in {}", outputID(id), getStorageName());
}


void IAccessStorage::throwNotFound(AccessEntityType type, const String & name) const
{
int error_code = AccessEntityTypeInfo::get(type).not_found_error_code;
throw Exception("There is no " + formatEntityTypeWithName(type, name) + " in " + getStorageName(), error_code);
throw Exception(error_code, "There is no {} in {}", formatEntityTypeWithName(type, name), getStorageName());
}


void IAccessStorage::throwBadCast(const UUID & id, AccessEntityType type, const String & name, AccessEntityType required_type)
{
throw Exception(
outputID(id) + ": " + formatEntityTypeWithName(type, name) + " expected to be of type " + toString(required_type),
ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR, "{}: {} expected to be of type {}", outputID(id),
formatEntityTypeWithName(type, name), toString(required_type));
}


void IAccessStorage::throwIDCollisionCannotInsert(const UUID & id, AccessEntityType type, const String & name, AccessEntityType existing_type, const String & existing_name) const
{
throw Exception(
formatEntityTypeWithName(type, name) + ": cannot insert because the " + outputID(id) + " is already used by "
+ formatEntityTypeWithName(existing_type, existing_name) + " in " + getStorageName(),
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
throw Exception(ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS, "{}: "
"cannot insert because the {} is already used by {} in {}", formatEntityTypeWithName(type, name),
outputID(id), formatEntityTypeWithName(existing_type, existing_name), getStorageName());
}


void IAccessStorage::throwNameCollisionCannotInsert(AccessEntityType type, const String & name) const
{
throw Exception(
formatEntityTypeWithName(type, name) + ": cannot insert because " + formatEntityTypeWithName(type, name) + " already exists in "
+ getStorageName(),
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
throw Exception(ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS, "{}: cannot insert because {} already exists in {}",
formatEntityTypeWithName(type, name), formatEntityTypeWithName(type, name), getStorageName());
}


void IAccessStorage::throwNameCollisionCannotRename(AccessEntityType type, const String & old_name, const String & new_name) const
{
throw Exception(
formatEntityTypeWithName(type, old_name) + ": cannot rename to " + backQuote(new_name) + " because "
+ formatEntityTypeWithName(type, new_name) + " already exists in " + getStorageName(),
ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS);
throw Exception(ErrorCodes::ACCESS_ENTITY_ALREADY_EXISTS, "{}: cannot rename to {} because {} already exists in {}",
formatEntityTypeWithName(type, old_name), backQuote(new_name), formatEntityTypeWithName(type, new_name), getStorageName());
}


void IAccessStorage::throwReadonlyCannotInsert(AccessEntityType type, const String & name) const
{
throw Exception(
"Cannot insert " + formatEntityTypeWithName(type, name) + " to " + getStorageName() + " because this storage is readonly",
ErrorCodes::ACCESS_STORAGE_READONLY);
throw Exception(ErrorCodes::ACCESS_STORAGE_READONLY, "Cannot insert {} to {} because this storage is readonly",
formatEntityTypeWithName(type, name), getStorageName());
}


void IAccessStorage::throwReadonlyCannotUpdate(AccessEntityType type, const String & name) const
{
throw Exception(
"Cannot update " + formatEntityTypeWithName(type, name) + " in " + getStorageName() + " because this storage is readonly",
ErrorCodes::ACCESS_STORAGE_READONLY);
throw Exception(ErrorCodes::ACCESS_STORAGE_READONLY, "Cannot update {} in {} because this storage is readonly",
formatEntityTypeWithName(type, name), getStorageName());
}


void IAccessStorage::throwReadonlyCannotRemove(AccessEntityType type, const String & name) const
{
throw Exception(
"Cannot remove " + formatEntityTypeWithName(type, name) + " from " + getStorageName() + " because this storage is readonly",
ErrorCodes::ACCESS_STORAGE_READONLY);
throw Exception(ErrorCodes::ACCESS_STORAGE_READONLY, "Cannot remove {} from {} because this storage is readonly",
formatEntityTypeWithName(type, name), getStorageName());
}


void IAccessStorage::throwAddressNotAllowed(const Poco::Net::IPAddress & address)
{
throw Exception("Connections from " + address.toString() + " are not allowed", ErrorCodes::IP_ADDRESS_NOT_ALLOWED);
throw Exception(ErrorCodes::IP_ADDRESS_NOT_ALLOWED, "Connections from {} are not allowed", address.toString());
}

void IAccessStorage::throwAuthenticationTypeNotAllowed(AuthenticationType auth_type)

@ -715,7 +706,7 @@ void IAccessStorage::throwAuthenticationTypeNotAllowed(AuthenticationType auth_t

void IAccessStorage::throwInvalidCredentials()
{
throw Exception("Invalid credentials", ErrorCodes::WRONG_PASSWORD);
throw Exception(ErrorCodes::WRONG_PASSWORD, "Invalid credentials");
}

void IAccessStorage::throwBackupNotAllowed() const
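The hunks above all touch small "throw helper" functions: each named helper owns one message format, so call sites elsewhere in the storage code stay one line long. The sketch below illustrates that idiom; the helper name mirrors the style used above, but std::runtime_error replaces the real Exception type and the entity and storage names are made up for the demo.

#include <fmt/format.h>
#include <iostream>
#include <stdexcept>
#include <string>

// One helper per error condition; [[noreturn]] documents that it always throws.
[[noreturn]] void throwReadonlyCannotInsert(const std::string & entity, const std::string & storage)
{
    throw std::runtime_error(
        fmt::format("Cannot insert {} to {} because this storage is readonly", entity, storage));
}

int main()
{
    try
    {
        throwReadonlyCannotInsert("user `alice`", "example_storage");
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << '\n';
    }
    return 0;
}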
@ -71,7 +71,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
const char *deftype = nullptr;

if (!std::filesystem::exists(keytab_file))
throw Exception("Keytab file does not exist", ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Keytab file does not exist");

ret = krb5_init_context(&k5.ctx);
if (ret)

@ -81,7 +81,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
{
ret = krb5_cc_resolve(k5.ctx, cache_name.c_str(), &k5.out_cc);
if (ret)
throw Exception("Error in resolving cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in resolving cache{}", fmtError(ret));
LOG_TRACE(log,"Resolved cache");
}
else

@ -89,7 +89,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Resolve the default cache and get its type and default principal (if it is initialized).
ret = krb5_cc_default(k5.ctx, &defcache);
if (ret)
throw Exception("Error while getting default cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while getting default cache{}", fmtError(ret));
LOG_TRACE(log,"Resolved default cache");
deftype = krb5_cc_get_type(k5.ctx, defcache);
if (krb5_cc_get_principal(k5.ctx, defcache, &defcache_princ) != 0)

@ -99,7 +99,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Use the specified principal name.
ret = krb5_parse_name_flags(k5.ctx, principal.c_str(), 0, &k5.me);
if (ret)
throw Exception("Error when parsing principal name " + principal + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when parsing principal name {}", principal + fmtError(ret));

// Cache related commands
if (k5.out_cc == nullptr && krb5_cc_support_switch(k5.ctx, deftype))

@ -107,7 +107,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Use an existing cache for the client principal if we can.
ret = krb5_cc_cache_match(k5.ctx, k5.me, &k5.out_cc);
if (ret && ret != KRB5_CC_NOTFOUND)
throw Exception("Error while searching for cache for " + principal + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while searching for cache for {}", principal + fmtError(ret));
if (0 == ret)
{
LOG_TRACE(log,"Using default cache: {}", krb5_cc_get_name(k5.ctx, k5.out_cc));

@ -118,7 +118,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Create a new cache to avoid overwriting the initialized default cache.
ret = krb5_cc_new_unique(k5.ctx, deftype, nullptr, &k5.out_cc);
if (ret)
throw Exception("Error while generating new cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while generating new cache{}", fmtError(ret));
LOG_TRACE(log,"Using default cache: {}", krb5_cc_get_name(k5.ctx, k5.out_cc));
k5.switch_to_cache = 1;
}

@ -134,24 +134,24 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co

ret = krb5_unparse_name(k5.ctx, k5.me, &k5.name);
if (ret)
throw Exception("Error when unparsing name" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when unparsing name{}", fmtError(ret));
LOG_TRACE(log,"Using principal: {}", k5.name);

// Allocate a new initial credential options structure.
ret = krb5_get_init_creds_opt_alloc(k5.ctx, &options);
if (ret)
throw Exception("Error in options allocation" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in options allocation{}", fmtError(ret));

// Resolve keytab
ret = krb5_kt_resolve(k5.ctx, keytab_file.c_str(), &keytab);
if (ret)
throw Exception("Error in resolving keytab "+keytab_file + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in resolving keytab {}{}", keytab_file, fmtError(ret));
LOG_TRACE(log,"Using keytab: {}", keytab_file);

// Set an output credential cache in initial credential options.
ret = krb5_get_init_creds_opt_set_out_ccache(k5.ctx, options, k5.out_cc);
if (ret)
throw Exception("Error in setting output credential cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in setting output credential cache{}", fmtError(ret));

// Action: init or renew
LOG_TRACE(log,"Trying to renew credentials");

@ -165,7 +165,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Request KDC for an initial credentials using keytab.
ret = krb5_get_init_creds_keytab(k5.ctx, &my_creds, k5.me, keytab, 0, nullptr, options);
if (ret)
throw Exception("Error in getting initial credentials" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in getting initial credentials{}", fmtError(ret));
else
LOG_TRACE(log,"Got initial credentials");
}

@ -175,7 +175,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Initialize a credential cache. Destroy any existing contents of cache and initialize it for the default principal.
ret = krb5_cc_initialize(k5.ctx, k5.out_cc, k5.me);
if (ret)
throw Exception("Error when initializing cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error when initializing cache{}", fmtError(ret));
LOG_TRACE(log,"Initialized cache");
// Store credentials in a credential cache.
ret = krb5_cc_store_cred(k5.ctx, k5.out_cc, &my_creds);

@ -189,7 +189,7 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
// Make a credential cache the primary cache for its collection.
ret = krb5_cc_switch(k5.ctx, k5.out_cc);
if (ret)
throw Exception("Error while switching to new cache" + fmtError(ret), ErrorCodes::KERBEROS_ERROR);
throw Exception(ErrorCodes::KERBEROS_ERROR, "Error while switching to new cache{}", fmtError(ret));
}

LOG_TRACE(log,"Authenticated to Kerberos v5");
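The KerberosInit hunks above repeat the same shape: call a krb5 function, keep its return code in ret, and throw a formatted error when it is non-zero. The standalone sketch below factors that check into a helper; a plain int stands in for krb5_error_code so the example builds without the MIT Kerberos headers, and fakeKrb5Call with its return values is invented purely for the demo.

#include <fmt/format.h>
#include <iostream>
#include <stdexcept>
#include <string>

// Throws when a (simulated) krb5-style return code is non-zero.
void checkKrb5(int ret, const std::string & what)
{
    if (ret)
        throw std::runtime_error(fmt::format("{}, error code {}", what, ret));
}

// Stand-in for a real krb5 call; returns 0 on success, non-zero on failure.
int fakeKrb5Call(bool fail)
{
    return fail ? -1 : 0;
}

int main()
{
    try
    {
        checkKrb5(fakeKrb5Call(false), "Error in resolving cache");
        checkKrb5(fakeKrb5Call(true), "Error in getting initial credentials");
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << '\n';
    }
    return 0;
}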
Some files were not shown because too many files have changed in this diff.