Merge branch 'master' into flaky_check_ordinary

Commit b3deae3593 by Alexander Tokmakov, 2022-08-12 18:11:39 +03:00, committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
371 changed files with 5092 additions and 2927 deletions

View File

@ -23,6 +23,7 @@ if (COMPILER_CLANG)
no_warning(zero-length-array)
no_warning(c++98-compat-pedantic)
no_warning(c++98-compat)
no_warning(c++20-compat) # Use constinit in C++20 without warnings
no_warning(conversion)
no_warning(ctad-maybe-unsupported) # clang 9+, linux-only
no_warning(disabled-macro-expansion)

contrib/arrow vendored (2 changed lines)

@ -1 +1 @@
Subproject commit efdcd015cfdee1b6aa349c9ca227ca12c3d697f5
Subproject commit 450a5638704386356f8e520080468fc9bc8bcaf8

View File

@ -104,6 +104,7 @@ if [ -n "$MAKE_DEB" ]; then
fi
mv ./programs/clickhouse* /output
[ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output
mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds
find . -name '*.so' -print -exec mv '{}' /output \;
find . -name '*.so.*' -print -exec mv '{}' /output \;

View File

@ -69,6 +69,8 @@ function download
wget_with_retry "$BINARY_URL_TO_DOWNLOAD"
chmod +x clickhouse
# clickhouse may be compressed - run once to decompress
./clickhouse ||:
ln -s ./clickhouse ./clickhouse-server
ln -s ./clickhouse ./clickhouse-client

View File

@ -58,6 +58,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
--tcp_port 19000 --tcp_port_secure 19440 --http_port 18123 --https_port 18443 --interserver_http_port 19009 --tcp_with_proxy_port 19010 \
--mysql_port 19004 --postgresql_port 19005 \
--keeper_server.tcp_port 19181 --keeper_server.server_id 2 \
--prometheus.port 19988 \
--macros.replica r2 # It doesn't work :(
mkdir -p /var/run/clickhouse-server2
@ -69,6 +70,7 @@ if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]
--tcp_port 29000 --tcp_port_secure 29440 --http_port 28123 --https_port 28443 --interserver_http_port 29009 --tcp_with_proxy_port 29010 \
--mysql_port 29004 --postgresql_port 29005 \
--keeper_server.tcp_port 29181 --keeper_server.server_id 3 \
--prometheus.port 29988 \
--macros.shard s2 # It doesn't work :(
MAX_RUN_TIME=$((MAX_RUN_TIME < 9000 ? MAX_RUN_TIME : 9000)) # min(MAX_RUN_TIME, 2.5 hours)

View File

@ -86,7 +86,7 @@ def process_test_log(log_path):
test_end = True
test_results = [
(test[0], test[1], test[2], "".join(test[3]))[:4096] for test in test_results
(test[0], test[1], test[2], "".join(test[3])[:4096]) for test in test_results
]
return (
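(Editor's note, not part of the diff: the fix above moves the `[:4096]` truncation from the tuple onto the joined message string. A minimal Python sketch with made-up test data shows the difference.)

```python
# Hypothetical test entry: (name, status, time, list of message chunks).
test = ("00001_select", "OK", "1.23", ["x" * 3000, "y" * 3000])

# Old expression: slices the 4-element tuple, which changes nothing here.
old_result = (test[0], test[1], test[2], "".join(test[3]))[:4096]
# New expression: truncates the concatenated message to 4096 characters.
new_result = (test[0], test[1], test[2], "".join(test[3])[:4096])

assert len(old_result[3]) == 6000  # message kept in full
assert len(new_result[3]) == 4096  # message actually truncated
```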

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.3.1.1262-prestable FIXME as compared to v22.2.1.2139-prestable
### ClickHouse release v22.3.1.1262-prestable (92ab33f560e) FIXME as compared to v22.2.1.2139-prestable (75366fc95e5)
#### Backward Incompatible Change
* Improved how the toDatetime function handles overflows: when the date string is very large, it is converted to 1970. [#32898](https://github.com/ClickHouse/ClickHouse/pull/32898) ([HaiBo Li](https://github.com/marising)).

View File

@ -0,0 +1,30 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.10.22-lts (25886f517d4) FIXME as compared to v22.3.9.19-lts (7976930b82e)
#### Bug Fix
* Backported in [#39761](https://github.com/ClickHouse/ClickHouse/issues/39761): Fix seeking while reading from encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39206](https://github.com/ClickHouse/ClickHouse/issues/39206): Fix reading of sparse columns from `MergeTree` tables that store their data in S3. [#37978](https://github.com/ClickHouse/ClickHouse/pull/37978) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#39381](https://github.com/ClickHouse/ClickHouse/issues/39381): Fixed error `Not found column Type in block` in selects with `PREWHERE` and read-in-order optimizations. [#39157](https://github.com/ClickHouse/ClickHouse/pull/39157) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#39588](https://github.com/ClickHouse/ClickHouse/issues/39588): Fix data race and possible heap-buffer-overflow in Avro format. Closes [#39094](https://github.com/ClickHouse/ClickHouse/issues/39094) Closes [#33652](https://github.com/ClickHouse/ClickHouse/issues/33652). [#39498](https://github.com/ClickHouse/ClickHouse/pull/39498) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#39610](https://github.com/ClickHouse/ClickHouse/issues/39610): Fix bug with maxsplit argument for splitByChar, which was not working correctly. [#39552](https://github.com/ClickHouse/ClickHouse/pull/39552) ([filimonov](https://github.com/filimonov)).
* Backported in [#39834](https://github.com/ClickHouse/ClickHouse/issues/39834): Fix `CANNOT_READ_ALL_DATA` exception with `local_filesystem_read_method=pread_threadpool`. This bug affected only Linux kernel version 5.9 and 5.10 according to [man](https://manpages.debian.org/testing/manpages-dev/preadv2.2.en.html#BUGS). [#39800](https://github.com/ClickHouse/ClickHouse/pull/39800) ([Anton Popov](https://github.com/CurtizJ)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
* Backported in [#39238](https://github.com/ClickHouse/ClickHouse/issues/39238): Fix performance regression of scalar query optimization. [#35986](https://github.com/ClickHouse/ClickHouse/pull/35986) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#39531](https://github.com/ClickHouse/ClickHouse/issues/39531): Fix some issues with async reads from remote filesystem which happened when reading low cardinality. [#36763](https://github.com/ClickHouse/ClickHouse/pull/36763) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Replace MemoryTrackerBlockerInThread to LockMemoryExceptionInThread [#39619](https://github.com/ClickHouse/ClickHouse/pull/39619) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Change mysql-odbc url [#39702](https://github.com/ClickHouse/ClickHouse/pull/39702) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,20 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.11.12-lts (137c5f72657) FIXME as compared to v22.3.10.22-lts (25886f517d4)
#### Build/Testing/Packaging Improvement
* Backported in [#39881](https://github.com/ClickHouse/ClickHouse/issues/39881): Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39336](https://github.com/ClickHouse/ClickHouse/issues/39336): Fix `parallel_view_processing=1` with `optimize_trivial_insert_select=1`. Fix `max_insert_threads` while pushing to views. [#38731](https://github.com/ClickHouse/ClickHouse/pull/38731) ([Azat Khuzhin](https://github.com/azat)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Backport [#39687](https://github.com/ClickHouse/ClickHouse/issues/39687) to 22.3: Fix seeking while reading from encrypted disk"'. [#40052](https://github.com/ClickHouse/ClickHouse/pull/40052) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

View File

@ -5,5 +5,5 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.3.2.2-lts FIXME as compared to v22.3.1.1262-prestable
### ClickHouse release v22.3.2.2-lts (89a621679c6) FIXME as compared to v22.3.1.1262-prestable (92ab33f560e)

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.3.3.44-lts FIXME as compared to v22.3.2.2-lts
### ClickHouse release v22.3.3.44-lts (abb756d3ca2) FIXME as compared to v22.3.2.2-lts (89a621679c6)
#### Bug Fix
* Backported in [#35928](https://github.com/ClickHouse/ClickHouse/issues/35928): Added settings `input_format_ipv4_default_on_conversion_error`, `input_format_ipv6_default_on_conversion_error` to allow insert of invalid ip address values as default into tables. Closes [#35726](https://github.com/ClickHouse/ClickHouse/issues/35726). [#35733](https://github.com/ClickHouse/ClickHouse/pull/35733) ([Maksim Kita](https://github.com/kitaisreal)).

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.3.4.20-lts FIXME as compared to v22.3.3.44-lts
### ClickHouse release v22.3.4.20-lts (ecbaf001f49) FIXME as compared to v22.3.3.44-lts (abb756d3ca2)
#### Build/Testing/Packaging Improvement
* - Add `_le_` method for ClickHouseVersion - Fix auto_version for existing tag - docker_server now support getting version from tags - Add python unit tests to backport workflow. [#36028](https://github.com/ClickHouse/ClickHouse/pull/36028) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.3.5.5-lts FIXME as compared to v22.3.4.20-lts
### ClickHouse release v22.3.5.5-lts (438b4a81f77) FIXME as compared to v22.3.4.20-lts (ecbaf001f49)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.3.6.5-lts FIXME as compared to v22.3.5.5-lts
### ClickHouse release v22.3.6.5-lts (3e44e824cff) FIXME as compared to v22.3.5.5-lts (438b4a81f77)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.3.7.28-lts FIXME as compared to v22.3.6.5-lts
### ClickHouse release v22.3.7.28-lts (420bdfa2751) FIXME as compared to v22.3.6.5-lts (3e44e824cff)
#### Bug Fix (user-visible misbehavior in official stable or prestable release)

View File

@ -0,0 +1,32 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.8.39-lts (6bcf982f58b) FIXME as compared to v22.3.7.28-lts (420bdfa2751)
#### Build/Testing/Packaging Improvement
* Backported in [#38826](https://github.com/ClickHouse/ClickHouse/issues/38826): - Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#38453](https://github.com/ClickHouse/ClickHouse/issues/38453): Fix bug with nested short-circuit functions that led to execution of arguments even if condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#38710](https://github.com/ClickHouse/ClickHouse/issues/38710): Fix incorrect result of distributed queries with `DISTINCT` and `LIMIT`. Fixes [#38282](https://github.com/ClickHouse/ClickHouse/issues/38282). [#38371](https://github.com/ClickHouse/ClickHouse/pull/38371) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#38689](https://github.com/ClickHouse/ClickHouse/issues/38689): Now it's possible to start a clickhouse-server and attach/detach tables even for tables with the incorrect values of IPv4/IPv6 representation. Proper fix for issue [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#38590](https://github.com/ClickHouse/ClickHouse/pull/38590) ([alesapin](https://github.com/alesapin)).
* Backported in [#38776](https://github.com/ClickHouse/ClickHouse/issues/38776): `rankCorr` function will work correctly if some arguments are NaNs. This closes [#38396](https://github.com/ClickHouse/ClickHouse/issues/38396). [#38722](https://github.com/ClickHouse/ClickHouse/pull/38722) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#38780](https://github.com/ClickHouse/ClickHouse/issues/38780): Fix use-after-free for Map combinator that leads to incorrect result. [#38748](https://github.com/ClickHouse/ClickHouse/pull/38748) ([Azat Khuzhin](https://github.com/azat)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
* Backported in [#36818](https://github.com/ClickHouse/ClickHouse/issues/36818): Fix projection analysis which might lead to wrong query result when IN subquery is used. This fixes [#35336](https://github.com/ClickHouse/ClickHouse/issues/35336). [#35631](https://github.com/ClickHouse/ClickHouse/pull/35631) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#38467](https://github.com/ClickHouse/ClickHouse/issues/38467): - Fix potential error with literals in `WHERE` for join queries. Close [#36279](https://github.com/ClickHouse/ClickHouse/issues/36279). [#36542](https://github.com/ClickHouse/ClickHouse/pull/36542) ([Vladimir C](https://github.com/vdimir)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Try to fix some trash [#37303](https://github.com/ClickHouse/ClickHouse/pull/37303) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update docker-compose to try get rid of v1 errors [#38394](https://github.com/ClickHouse/ClickHouse/pull/38394) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Trying backport useful features for CI [#38510](https://github.com/ClickHouse/ClickHouse/pull/38510) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix backports diff [#38703](https://github.com/ClickHouse/ClickHouse/pull/38703) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,24 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.3.9.19-lts (7976930b82e) FIXME as compared to v22.3.8.39-lts (6bcf982f58b)
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39097](https://github.com/ClickHouse/ClickHouse/issues/39097): Any allocations inside OvercommitTracker may lead to deadlock. Logging was not very informative so it's easier just to remove logging. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794). [#39030](https://github.com/ClickHouse/ClickHouse/pull/39030) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#39080](https://github.com/ClickHouse/ClickHouse/issues/39080): Fix bug in filesystem cache that could happen in some corner case which coincided with cache capacity hitting the limit. Closes [#39066](https://github.com/ClickHouse/ClickHouse/issues/39066). [#39070](https://github.com/ClickHouse/ClickHouse/pull/39070) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#39149](https://github.com/ClickHouse/ClickHouse/issues/39149): Fix error `Block structure mismatch` which could happen for INSERT into table with attached MATERIALIZED VIEW and enabled setting `extremes = 1`. Closes [#29759](https://github.com/ClickHouse/ClickHouse/issues/29759) and [#38729](https://github.com/ClickHouse/ClickHouse/issues/38729). [#39125](https://github.com/ClickHouse/ClickHouse/pull/39125) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#39372](https://github.com/ClickHouse/ClickHouse/issues/39372): Declare RabbitMQ queue without default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)).
* Backported in [#39379](https://github.com/ClickHouse/ClickHouse/issues/39379): Fix segmentation fault in MaterializedPostgreSQL database engine, which could happen if some exception occurred at replication initialisation. Closes [#36939](https://github.com/ClickHouse/ClickHouse/issues/36939). [#39272](https://github.com/ClickHouse/ClickHouse/pull/39272) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#39351](https://github.com/ClickHouse/ClickHouse/issues/39351): Fix incorrect query for fetching PostgreSQL tables in the PostgreSQL database engine. Closes [#33502](https://github.com/ClickHouse/ClickHouse/issues/33502). [#39283](https://github.com/ClickHouse/ClickHouse/pull/39283) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Reproduce and a little bit better fix for LC dict right offset. [#36856](https://github.com/ClickHouse/ClickHouse/pull/36856) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Retry docker buildx commands with progressive sleep in between [#38898](https://github.com/ClickHouse/ClickHouse/pull/38898) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add docker_server.py running to backport and release CIs [#39011](https://github.com/ClickHouse/ClickHouse/pull/39011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.4.1.2305-prestable FIXME as compared to v22.3.1.1262-prestable
### ClickHouse release v22.4.1.2305-prestable (77a82cc090d) FIXME as compared to v22.3.1.1262-prestable (92ab33f560e)
#### Backward Incompatible Change
* Function `yandexConsistentHash` (consistent hashing algorithm by Konstantin "kostik" Oblakov) is renamed to `kostikConsistentHash`. The old name is left as an alias for compatibility. Although this change is backward compatible, we may remove the alias in subsequent releases, that's why it's recommended to update the usages of this function in your apps. [#35553](https://github.com/ClickHouse/ClickHouse/pull/35553) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@ -68,7 +68,7 @@ sidebar_label: 2022
* For lts releases packages will be pushed to both lts and stable repos. [#35382](https://github.com/ClickHouse/ClickHouse/pull/35382) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Support uuid for postgres engines. Closes [#35384](https://github.com/ClickHouse/ClickHouse/issues/35384). [#35403](https://github.com/ClickHouse/ClickHouse/pull/35403) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add arguments `--user`, `--password`, `--host`, `--port` for clickhouse-diagnostics. [#35422](https://github.com/ClickHouse/ClickHouse/pull/35422) ([李扬](https://github.com/taiyang-li)).
* Fix INSERT INTO table FROM INFILE not displaying a progress bar. [#35429](https://github.com/ClickHouse/ClickHouse/pull/35429) ([xiedeyantu](https://github.com/xiedeyantu)).
* Fix INSERT INTO table FROM INFILE not displaying a progress bar. [#35429](https://github.com/ClickHouse/ClickHouse/pull/35429) ([chen](https://github.com/xiedeyantu)).
* Allow server to bind to low-numbered ports (e.g. 443). ClickHouse installation script will set `cap_net_bind_service` to the binary file. [#35451](https://github.com/ClickHouse/ClickHouse/pull/35451) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add settings `input_format_orc_case_insensitive_column_matching`, `input_format_arrow_case_insensitive_column_matching`, and `input_format_parquet_case_insensitive_column_matching` which allows ClickHouse to use case insensitive matching of columns while reading data from ORC, Arrow or Parquet files. [#35459](https://github.com/ClickHouse/ClickHouse/pull/35459) ([Antonio Andelic](https://github.com/antonio2368)).
* - Add explicit table info to the scan node of query plan and pipeline. [#35460](https://github.com/ClickHouse/ClickHouse/pull/35460) ([何李夫](https://github.com/helifu)).
@ -106,7 +106,7 @@ sidebar_label: 2022
* ASTPartition::formatImpl should output ALL while executing ALTER TABLE t DETACH PARTITION ALL. [#35987](https://github.com/ClickHouse/ClickHouse/pull/35987) ([awakeljw](https://github.com/awakeljw)).
* `clickhouse-keeper` starts answering 4-letter commands before getting the quorum. [#35992](https://github.com/ClickHouse/ClickHouse/pull/35992) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix wrong assertion in replxx which happens when navigating back the history when the first line of input is a newline. Mark as improvement because it only affects debug build. This fixes [#34511](https://github.com/ClickHouse/ClickHouse/issues/34511). [#36007](https://github.com/ClickHouse/ClickHouse/pull/36007) ([Amos Bird](https://github.com/amosbird)).
* If someone writes DEFAULT NULL in table definition, make data type Nullable. [#35887](https://github.com/ClickHouse/ClickHouse/issues/35887). [#36058](https://github.com/ClickHouse/ClickHouse/pull/36058) ([xiedeyantu](https://github.com/xiedeyantu)).
* If someone writes DEFAULT NULL in table definition, make data type Nullable. [#35887](https://github.com/ClickHouse/ClickHouse/issues/35887). [#36058](https://github.com/ClickHouse/ClickHouse/pull/36058) ([chen](https://github.com/xiedeyantu)).
* Added `thread_id` and `query_id` columns to `system.zookeeper_log` table. [#36074](https://github.com/ClickHouse/ClickHouse/pull/36074) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Auto assign numbers for Enum elements. [#36101](https://github.com/ClickHouse/ClickHouse/pull/36101) ([awakeljw](https://github.com/awakeljw)).
* Reset thread name in `ThreadPool` to `ThreadPoolIdle` after job is done. This is to avoid displaying the old thread name for idle threads. This closes [#36114](https://github.com/ClickHouse/ClickHouse/issues/36114). [#36115](https://github.com/ClickHouse/ClickHouse/pull/36115) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@ -331,7 +331,7 @@ sidebar_label: 2022
* ci: replace directory system log tables artifacts with tsv [#35773](https://github.com/ClickHouse/ClickHouse/pull/35773) ([Azat Khuzhin](https://github.com/azat)).
* One more try to resurrect build hash [#35774](https://github.com/ClickHouse/ClickHouse/pull/35774) ([alesapin](https://github.com/alesapin)).
* Refactoring QueryPipeline [#35789](https://github.com/ClickHouse/ClickHouse/pull/35789) ([Amos Bird](https://github.com/amosbird)).
* Delete duplicate code [#35798](https://github.com/ClickHouse/ClickHouse/pull/35798) ([xiedeyantu](https://github.com/xiedeyantu)).
* Delete duplicate code [#35798](https://github.com/ClickHouse/ClickHouse/pull/35798) ([chen](https://github.com/xiedeyantu)).
* remove unused variable [#35800](https://github.com/ClickHouse/ClickHouse/pull/35800) ([flynn](https://github.com/ucasfl)).
* Make `SortDescription::column_name` always non-empty [#35805](https://github.com/ClickHouse/ClickHouse/pull/35805) ([Nikita Taranov](https://github.com/nickitat)).
* Fix latest_error referenced before assignment [#35807](https://github.com/ClickHouse/ClickHouse/pull/35807) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@ -417,7 +417,7 @@ sidebar_label: 2022
* Revert reverting "Fix crash in ParallelReadBuffer" [#36212](https://github.com/ClickHouse/ClickHouse/pull/36212) ([Kruglov Pavel](https://github.com/Avogar)).
* Make stateless tests with s3 always green [#36214](https://github.com/ClickHouse/ClickHouse/pull/36214) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add Tyler Hannan to contributors [#36216](https://github.com/ClickHouse/ClickHouse/pull/36216) ([Tyler Hannan](https://github.com/tylerhannan)).
* Fix the repeated call of func to get the table when drop table [#36248](https://github.com/ClickHouse/ClickHouse/pull/36248) ([xiedeyantu](https://github.com/xiedeyantu)).
* Fix the repeated call of func to get the table when drop table [#36248](https://github.com/ClickHouse/ClickHouse/pull/36248) ([chen](https://github.com/xiedeyantu)).
* Split test 01675_data_type_coroutine into 2 tests to prevent possible timeouts [#36250](https://github.com/ClickHouse/ClickHouse/pull/36250) ([Kruglov Pavel](https://github.com/Avogar)).
* Merge TRUSTED_CONTRIBUTORS in lambda and import in check [#36252](https://github.com/ClickHouse/ClickHouse/pull/36252) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix exception "File segment can be completed only by downloader" in tests [#36253](https://github.com/ClickHouse/ClickHouse/pull/36253) ([Kseniia Sumarokova](https://github.com/kssenii)).

View File

@ -5,5 +5,5 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.4.2.1-stable FIXME as compared to v22.4.1.2305-prestable
### ClickHouse release v22.4.2.1-stable (b34ebdc36ae) FIXME as compared to v22.4.1.2305-prestable (77a82cc090d)

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.4.3.3-stable FIXME as compared to v22.4.2.1-stable
### ClickHouse release v22.4.3.3-stable (def956d6299) FIXME as compared to v22.4.2.1-stable (b34ebdc36ae)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.4.4.7-stable FIXME as compared to v22.4.3.3-stable
### ClickHouse release v22.4.4.7-stable (ba44414f9b3) FIXME as compared to v22.4.3.3-stable (def956d6299)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.4.5.9-stable FIXME as compared to v22.4.4.7-stable
### ClickHouse release v22.4.5.9-stable (059ef6cadcd) FIXME as compared to v22.4.4.7-stable (ba44414f9b3)
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

View File

@ -0,0 +1,44 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.4.6.53-stable (0625731c940) FIXME as compared to v22.4.5.9-stable (059ef6cadcd)
#### New Feature
* Backported in [#38714](https://github.com/ClickHouse/ClickHouse/issues/38714): SALT is allowed for CREATE USER <user> IDENTIFIED WITH sha256_hash. [#37377](https://github.com/ClickHouse/ClickHouse/pull/37377) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
#### Build/Testing/Packaging Improvement
* Backported in [#38828](https://github.com/ClickHouse/ClickHouse/issues/38828): - Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#37717](https://github.com/ClickHouse/ClickHouse/issues/37717): Fix unexpected errors with a clash of constant strings in aggregate function, prewhere and join. Close [#36891](https://github.com/ClickHouse/ClickHouse/issues/36891). [#37336](https://github.com/ClickHouse/ClickHouse/pull/37336) ([Vladimir C](https://github.com/vdimir)).
* Backported in [#37512](https://github.com/ClickHouse/ClickHouse/issues/37512): Fix logical error in normalizeUTF8 functions. Closes [#37298](https://github.com/ClickHouse/ClickHouse/issues/37298). [#37443](https://github.com/ClickHouse/ClickHouse/pull/37443) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#37941](https://github.com/ClickHouse/ClickHouse/issues/37941): Fix setting cast_ipv4_ipv6_default_on_conversion_error for internal cast function. Closes [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#37761](https://github.com/ClickHouse/ClickHouse/pull/37761) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#38452](https://github.com/ClickHouse/ClickHouse/issues/38452): Fix bug with nested short-circuit functions that led to execution of arguments even if condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#38711](https://github.com/ClickHouse/ClickHouse/issues/38711): Fix incorrect result of distributed queries with `DISTINCT` and `LIMIT`. Fixes [#38282](https://github.com/ClickHouse/ClickHouse/issues/38282). [#38371](https://github.com/ClickHouse/ClickHouse/pull/38371) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#38593](https://github.com/ClickHouse/ClickHouse/issues/38593): Fix parts removal (will be left forever if they had not been removed on server shutdown) after incorrect server shutdown. [#38486](https://github.com/ClickHouse/ClickHouse/pull/38486) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#38596](https://github.com/ClickHouse/ClickHouse/issues/38596): Fix table creation to avoid replication issues with pre-22.4 replicas. [#38541](https://github.com/ClickHouse/ClickHouse/pull/38541) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#38686](https://github.com/ClickHouse/ClickHouse/issues/38686): Now it's possible to start a clickhouse-server and attach/detach tables even for tables with the incorrect values of IPv4/IPv6 representation. Proper fix for issue [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#38590](https://github.com/ClickHouse/ClickHouse/pull/38590) ([alesapin](https://github.com/alesapin)).
* Backported in [#38663](https://github.com/ClickHouse/ClickHouse/issues/38663): Adapt some more nodes to avoid issues with pre-22.4 replicas. [#38627](https://github.com/ClickHouse/ClickHouse/pull/38627) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#38777](https://github.com/ClickHouse/ClickHouse/issues/38777): `rankCorr` function will work correctly if some arguments are NaNs. This closes [#38396](https://github.com/ClickHouse/ClickHouse/issues/38396). [#38722](https://github.com/ClickHouse/ClickHouse/pull/38722) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#38781](https://github.com/ClickHouse/ClickHouse/issues/38781): Fix use-after-free for Map combinator that leads to incorrect result. [#38748](https://github.com/ClickHouse/ClickHouse/pull/38748) ([Azat Khuzhin](https://github.com/azat)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
* Backported in [#37456](https://github.com/ClickHouse/ClickHouse/issues/37456): Server might fail to start if it cannot resolve hostname of external ClickHouse dictionary. It's fixed. Fixes [#36451](https://github.com/ClickHouse/ClickHouse/issues/36451). [#36463](https://github.com/ClickHouse/ClickHouse/pull/36463) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#38468](https://github.com/ClickHouse/ClickHouse/issues/38468): - Fix potential error with literals in `WHERE` for join queries. Close [#36279](https://github.com/ClickHouse/ClickHouse/issues/36279). [#36542](https://github.com/ClickHouse/ClickHouse/pull/36542) ([Vladimir C](https://github.com/vdimir)).
* Backported in [#37363](https://github.com/ClickHouse/ClickHouse/issues/37363): Fixed problem with infs in `quantileTDigest`. Fixes [#32107](https://github.com/ClickHouse/ClickHouse/issues/32107). [#37021](https://github.com/ClickHouse/ClickHouse/pull/37021) ([Vladimir Chebotarev](https://github.com/excitoon)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Integration tests [#36866](https://github.com/ClickHouse/ClickHouse/pull/36866) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Try to fix some trash [#37303](https://github.com/ClickHouse/ClickHouse/pull/37303) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update protobuf files for kafka and rabbitmq [fix integration tests] [#37884](https://github.com/ClickHouse/ClickHouse/pull/37884) ([Nikita Taranov](https://github.com/nickitat)).
* Try fix `test_grpc_protocol/test.py::test_progress` [#37908](https://github.com/ClickHouse/ClickHouse/pull/37908) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update docker-compose to try get rid of v1 errors [#38394](https://github.com/ClickHouse/ClickHouse/pull/38394) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix backports diff [#38703](https://github.com/ClickHouse/ClickHouse/pull/38703) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -5,10 +5,10 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.5.1.2079-stable FIXME as compared to v22.4.1.2305-prestable
### ClickHouse release v22.5.1.2079-stable (df0cb062098) FIXME as compared to v22.4.1.2305-prestable (77a82cc090d)
#### Backward Incompatible Change
* Updated the BoringSSL module to the official FIPS compliant version. This makes ClickHouse FIPS compliant. [#35914](https://github.com/ClickHouse/ClickHouse/pull/35914) ([Meena-Renganathan](https://github.com/Meena-Renganathan)).
* Updated the BoringSSL module to the official FIPS compliant version. This makes ClickHouse FIPS compliant. [#35914](https://github.com/ClickHouse/ClickHouse/pull/35914) ([Deleted user](https://github.com/ghost)).
* Now, background merges, mutations and `OPTIMIZE` will not increment `SelectedRows` and `SelectedBytes` metrics. They (still) will increment `MergedRows` and `MergedUncompressedBytes` as it was before. [#37040](https://github.com/ClickHouse/ClickHouse/pull/37040) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
#### New Feature
@ -20,7 +20,7 @@ sidebar_label: 2022
* Parse collations in CREATE TABLE, throw exception or ignore. closes [#35892](https://github.com/ClickHouse/ClickHouse/issues/35892). [#36271](https://github.com/ClickHouse/ClickHouse/pull/36271) ([yuuch](https://github.com/yuuch)).
* Add aliases JSONLines and NDJSON for JSONEachRow. Closes [#36303](https://github.com/ClickHouse/ClickHouse/issues/36303). [#36327](https://github.com/ClickHouse/ClickHouse/pull/36327) ([flynn](https://github.com/ucasfl)).
* Set parts_to_delay_insert and parts_to_throw_insert as query-level settings. If they are defined, they can override table-level settings. [#36371](https://github.com/ClickHouse/ClickHouse/pull/36371) ([Memo](https://github.com/Joeywzr)).
* Temporary tables can now show total rows and total bytes. [#36401](https://github.com/ClickHouse/ClickHouse/issues/36401). [#36439](https://github.com/ClickHouse/ClickHouse/pull/36439) ([xiedeyantu](https://github.com/xiedeyantu)).
* Temporary tables can now show total rows and total bytes. [#36401](https://github.com/ClickHouse/ClickHouse/issues/36401). [#36439](https://github.com/ClickHouse/ClickHouse/pull/36439) ([chen](https://github.com/xiedeyantu)).
* Added new hash function - wyHash64. [#36467](https://github.com/ClickHouse/ClickHouse/pull/36467) ([olevino](https://github.com/olevino)).
* Window function nth_value was added. [#36601](https://github.com/ClickHouse/ClickHouse/pull/36601) ([Nikolay](https://github.com/ndchikin)).
* Add MySQLDump input format. It reads all data from INSERT queries belonging to one table in dump. If there are more than one table, by default it reads data from the first one. [#36667](https://github.com/ClickHouse/ClickHouse/pull/36667) ([Kruglov Pavel](https://github.com/Avogar)).
@ -212,7 +212,7 @@ sidebar_label: 2022
* Update version after release [#36502](https://github.com/ClickHouse/ClickHouse/pull/36502) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Followup on [#36172](https://github.com/ClickHouse/ClickHouse/issues/36172) password hash salt feature [#36510](https://github.com/ClickHouse/ClickHouse/pull/36510) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Update version_date.tsv after v22.4.2.1-stable [#36533](https://github.com/ClickHouse/ClickHouse/pull/36533) ([github-actions[bot]](https://github.com/apps/github-actions)).
* Fix the log to print the 'from' path [#36535](https://github.com/ClickHouse/ClickHouse/pull/36535) ([xiedeyantu](https://github.com/xiedeyantu)).
* Fix the log to print the 'from' path [#36535](https://github.com/ClickHouse/ClickHouse/pull/36535) ([chen](https://github.com/xiedeyantu)).
* Add function bin tests for Int/UInt128/UInt256 [#36537](https://github.com/ClickHouse/ClickHouse/pull/36537) ([Memo](https://github.com/Joeywzr)).
* Fix 01161_all_system_tables [#36539](https://github.com/ClickHouse/ClickHouse/pull/36539) ([Antonio Andelic](https://github.com/antonio2368)).
* Update PULL_REQUEST_TEMPLATE.md [#36543](https://github.com/ClickHouse/ClickHouse/pull/36543) ([Ivan Blinkov](https://github.com/blinkov)).

View File

@ -0,0 +1,40 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.5.2.53-stable (5fd600fda9e) FIXME as compared to v22.5.1.2079-stable (df0cb062098)
#### New Feature
* Backported in [#38713](https://github.com/ClickHouse/ClickHouse/issues/38713): SALT is allowed for CREATE USER <user> IDENTIFIED WITH sha256_hash. [#37377](https://github.com/ClickHouse/ClickHouse/pull/37377) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
#### Build/Testing/Packaging Improvement
* Backported in [#38827](https://github.com/ClickHouse/ClickHouse/issues/38827): - Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#37716](https://github.com/ClickHouse/ClickHouse/issues/37716): Fix unexpected errors with a clash of constant strings in aggregate function, prewhere and join. Close [#36891](https://github.com/ClickHouse/ClickHouse/issues/36891). [#37336](https://github.com/ClickHouse/ClickHouse/pull/37336) ([Vladimir C](https://github.com/vdimir)).
* Backported in [#37408](https://github.com/ClickHouse/ClickHouse/issues/37408): Throw an exception when GROUPING SETS used with ROLLUP or CUBE. [#37367](https://github.com/ClickHouse/ClickHouse/pull/37367) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#37513](https://github.com/ClickHouse/ClickHouse/issues/37513): Fix logical error in normalizeUTF8 functions. Closes [#37298](https://github.com/ClickHouse/ClickHouse/issues/37298). [#37443](https://github.com/ClickHouse/ClickHouse/pull/37443) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#37942](https://github.com/ClickHouse/ClickHouse/issues/37942): Fix setting cast_ipv4_ipv6_default_on_conversion_error for internal cast function. Closes [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#37761](https://github.com/ClickHouse/ClickHouse/pull/37761) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#38451](https://github.com/ClickHouse/ClickHouse/issues/38451): Fix bug with nested short-circuit functions that led to execution of arguments even if condition is false. Closes [#38040](https://github.com/ClickHouse/ClickHouse/issues/38040). [#38173](https://github.com/ClickHouse/ClickHouse/pull/38173) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#38544](https://github.com/ClickHouse/ClickHouse/issues/38544): Do not allow recursive usage of OvercommitTracker during logging. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794) cc @tavplubix @davenger. [#38246](https://github.com/ClickHouse/ClickHouse/pull/38246) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#38708](https://github.com/ClickHouse/ClickHouse/issues/38708): Fix incorrect result of distributed queries with `DISTINCT` and `LIMIT`. Fixes [#38282](https://github.com/ClickHouse/ClickHouse/issues/38282). [#38371](https://github.com/ClickHouse/ClickHouse/pull/38371) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#38595](https://github.com/ClickHouse/ClickHouse/issues/38595): Fix parts removal (will be left forever if they had not been removed on server shutdown) after incorrect server shutdown. [#38486](https://github.com/ClickHouse/ClickHouse/pull/38486) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#38598](https://github.com/ClickHouse/ClickHouse/issues/38598): Fix table creation to avoid replication issues with pre-22.4 replicas. [#38541](https://github.com/ClickHouse/ClickHouse/pull/38541) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#38688](https://github.com/ClickHouse/ClickHouse/issues/38688): Now it's possible to start a clickhouse-server and attach/detach tables even for tables with the incorrect values of IPv4/IPv6 representation. Proper fix for issue [#35156](https://github.com/ClickHouse/ClickHouse/issues/35156). [#38590](https://github.com/ClickHouse/ClickHouse/pull/38590) ([alesapin](https://github.com/alesapin)).
* Backported in [#38664](https://github.com/ClickHouse/ClickHouse/issues/38664): Adapt some more nodes to avoid issues with pre-22.4 replicas. [#38627](https://github.com/ClickHouse/ClickHouse/pull/38627) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#38779](https://github.com/ClickHouse/ClickHouse/issues/38779): `rankCorr` function will work correctly if some arguments are NaNs. This closes [#38396](https://github.com/ClickHouse/ClickHouse/issues/38396). [#38722](https://github.com/ClickHouse/ClickHouse/pull/38722) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#38783](https://github.com/ClickHouse/ClickHouse/issues/38783): Fix use-after-free for Map combinator that leads to incorrect result. [#38748](https://github.com/ClickHouse/ClickHouse/pull/38748) ([Azat Khuzhin](https://github.com/azat)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Try to fix some trash [#37303](https://github.com/ClickHouse/ClickHouse/pull/37303) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update protobuf files for kafka and rabbitmq [fix integration tests] [#37884](https://github.com/ClickHouse/ClickHouse/pull/37884) ([Nikita Taranov](https://github.com/nickitat)).
* Try fix `test_grpc_protocol/test.py::test_progress` [#37908](https://github.com/ClickHouse/ClickHouse/pull/37908) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Try to fix BC check [#38178](https://github.com/ClickHouse/ClickHouse/pull/38178) ([Kruglov Pavel](https://github.com/Avogar)).
* Update docker-compose to try get rid of v1 errors [#38394](https://github.com/ClickHouse/ClickHouse/pull/38394) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix backports diff [#38703](https://github.com/ClickHouse/ClickHouse/pull/38703) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,24 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.5.3.21-stable (e03724efec5) FIXME as compared to v22.5.2.53-stable (5fd600fda9e)
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#38241](https://github.com/ClickHouse/ClickHouse/issues/38241): Fix possible crash in `Distributed` async insert in case of removing a replica from config. [#38029](https://github.com/ClickHouse/ClickHouse/pull/38029) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#39098](https://github.com/ClickHouse/ClickHouse/issues/39098): Any allocations inside OvercommitTracker may lead to deadlock. Logging was not very informative so it's easier just to remove logging. Fixes [#37794](https://github.com/ClickHouse/ClickHouse/issues/37794). [#39030](https://github.com/ClickHouse/ClickHouse/pull/39030) ([Dmitry Novik](https://github.com/novikd)).
* Backported in [#39078](https://github.com/ClickHouse/ClickHouse/issues/39078): Fix bug in filesystem cache that could happen in some corner case which coincided with cache capacity hitting the limit. Closes [#39066](https://github.com/ClickHouse/ClickHouse/issues/39066). [#39070](https://github.com/ClickHouse/ClickHouse/pull/39070) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#39152](https://github.com/ClickHouse/ClickHouse/issues/39152): Fix error `Block structure mismatch` which could happen for INSERT into table with attached MATERIALIZED VIEW and enabled setting `extremes = 1`. Closes [#29759](https://github.com/ClickHouse/ClickHouse/issues/29759) and [#38729](https://github.com/ClickHouse/ClickHouse/issues/38729). [#39125](https://github.com/ClickHouse/ClickHouse/pull/39125) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#39274](https://github.com/ClickHouse/ClickHouse/issues/39274): Fixed error `Not found column Type in block` in selects with `PREWHERE` and read-in-order optimizations. [#39157](https://github.com/ClickHouse/ClickHouse/pull/39157) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#39369](https://github.com/ClickHouse/ClickHouse/issues/39369): Declare RabbitMQ queue without default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)).
* Backported in [#39350](https://github.com/ClickHouse/ClickHouse/issues/39350): Fix incorrect query for fetching PostgreSQL tables in the PostgreSQL database engine. Closes [#33502](https://github.com/ClickHouse/ClickHouse/issues/33502). [#39283](https://github.com/ClickHouse/ClickHouse/pull/39283) ([Kseniia Sumarokova](https://github.com/kssenii)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Retry docker buildx commands with progressive sleep in between [#38898](https://github.com/ClickHouse/ClickHouse/pull/38898) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add docker_server.py running to backport and release CIs [#39011](https://github.com/ClickHouse/ClickHouse/pull/39011) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,29 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.5.4.19-stable (c893bba830e) FIXME as compared to v22.5.3.21-stable (e03724efec5)
#### Bug Fix
* Backported in [#39748](https://github.com/ClickHouse/ClickHouse/issues/39748): Fix seeking while reading from encrypted disk. This PR fixes [#38381](https://github.com/ClickHouse/ClickHouse/issues/38381). [#39687](https://github.com/ClickHouse/ClickHouse/pull/39687) ([Vitaly Baranov](https://github.com/vitlibar)).
#### Build/Testing/Packaging Improvement
* Backported in [#39882](https://github.com/ClickHouse/ClickHouse/issues/39882): Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#39209](https://github.com/ClickHouse/ClickHouse/issues/39209): Fix reading of sparse columns from `MergeTree` tables that store their data in S3. [#37978](https://github.com/ClickHouse/ClickHouse/pull/37978) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#39589](https://github.com/ClickHouse/ClickHouse/issues/39589): Fix data race and possible heap-buffer-overflow in Avro format. Closes [#39094](https://github.com/ClickHouse/ClickHouse/issues/39094) Closes [#33652](https://github.com/ClickHouse/ClickHouse/issues/33652). [#39498](https://github.com/ClickHouse/ClickHouse/pull/39498) ([Kruglov Pavel](https://github.com/Avogar)).
* Backported in [#39611](https://github.com/ClickHouse/ClickHouse/issues/39611): Fix bug with maxsplit argument for splitByChar, which was not working correctly. [#39552](https://github.com/ClickHouse/ClickHouse/pull/39552) ([filimonov](https://github.com/filimonov)).
* Backported in [#39790](https://github.com/ClickHouse/ClickHouse/issues/39790): Fix wrong index analysis with tuples and operator `IN`, which could lead to wrong query result. [#39752](https://github.com/ClickHouse/ClickHouse/pull/39752) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#39835](https://github.com/ClickHouse/ClickHouse/issues/39835): Fix `CANNOT_READ_ALL_DATA` exception with `local_filesystem_read_method=pread_threadpool`. This bug affected only Linux kernel version 5.9 and 5.10 according to [man](https://manpages.debian.org/testing/manpages-dev/preadv2.2.en.html#BUGS). [#39800](https://github.com/ClickHouse/ClickHouse/pull/39800) ([Anton Popov](https://github.com/CurtizJ)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Fix reading from s3 in some corner cases [#38239](https://github.com/ClickHouse/ClickHouse/pull/38239) ([Anton Popov](https://github.com/CurtizJ)).
* Replace MemoryTrackerBlockerInThread to LockMemoryExceptionInThread [#39619](https://github.com/ClickHouse/ClickHouse/pull/39619) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Change mysql-odbc url [#39702](https://github.com/ClickHouse/ClickHouse/pull/39702) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.6.1.1985-stable FIXME as compared to v22.5.1.2079-stable
### ClickHouse release v22.6.1.1985-stable (7000c4e0033) FIXME as compared to v22.5.1.2079-stable (df0cb062098)
#### Backward Incompatible Change
* Changes how settings using `seconds` as type are parsed to support floating point values (for example: `max_execution_time=0.5`). Infinity or NaN values will throw an exception. [#37187](https://github.com/ClickHouse/ClickHouse/pull/37187) ([Raúl Marín](https://github.com/Algunenano)).
@ -78,7 +78,7 @@ sidebar_label: 2022
* Allow to use String type instead of Binary in Arrow/Parquet/ORC formats. This PR introduces 3 new settings for it: `output_format_arrow_string_as_string`, `output_format_parquet_string_as_string`, `output_format_orc_string_as_string`. Default value for all settings is `false`. [#37327](https://github.com/ClickHouse/ClickHouse/pull/37327) ([Kruglov Pavel](https://github.com/Avogar)).
* Apply setting `input_format_max_rows_to_read_for_schema_inference` for all read rows in total from all files in globs. Previously setting `input_format_max_rows_to_read_for_schema_inference` was applied for each file in glob separately and in case of huge number of nulls we could read first `input_format_max_rows_to_read_for_schema_inference` rows from each file and get nothing. Also increase default value for this setting to 25000. [#37332](https://github.com/ClickHouse/ClickHouse/pull/37332) ([Kruglov Pavel](https://github.com/Avogar)).
* allows providing `NULL`/`NOT NULL` right after type in column declaration. [#37337](https://github.com/ClickHouse/ClickHouse/pull/37337) ([Igor Nikonov](https://github.com/devcrafter)).
* Optimize getting a read buffer for file segments in the PARTIALLY_DOWNLOADED state. [#37338](https://github.com/ClickHouse/ClickHouse/pull/37338) ([xiedeyantu](https://github.com/xiedeyantu)).
* Optimize getting a read buffer for file segments in the PARTIALLY_DOWNLOADED state. [#37338](https://github.com/ClickHouse/ClickHouse/pull/37338) ([chen](https://github.com/xiedeyantu)).
* Allow to prune the list of files via virtual columns such as `_file` and `_path` when reading from S3. This is for [#37174](https://github.com/ClickHouse/ClickHouse/issues/37174) , [#23494](https://github.com/ClickHouse/ClickHouse/issues/23494). [#37356](https://github.com/ClickHouse/ClickHouse/pull/37356) ([Amos Bird](https://github.com/amosbird)).
* Try to improve short circuit functions processing to fix problems with stress tests. [#37384](https://github.com/ClickHouse/ClickHouse/pull/37384) ([Kruglov Pavel](https://github.com/Avogar)).
* Closes [#37395](https://github.com/ClickHouse/ClickHouse/issues/37395). [#37415](https://github.com/ClickHouse/ClickHouse/pull/37415) ([Memo](https://github.com/Joeywzr)).
@ -117,7 +117,7 @@ sidebar_label: 2022
* Remove recursive submodules, because we don't need them and they can be confusing. Add style check to prevent recursive submodules. This closes [#32821](https://github.com/ClickHouse/ClickHouse/issues/32821). [#37616](https://github.com/ClickHouse/ClickHouse/pull/37616) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add docs spellcheck to CI. [#37790](https://github.com/ClickHouse/ClickHouse/pull/37790) ([Vladimir C](https://github.com/vdimir)).
* Fix overly aggressive stripping which removed the embedded hash required for checking the consistency of the executable. [#37993](https://github.com/ClickHouse/ClickHouse/pull/37993) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix macOS build compressor failure. [#38007](https://github.com/ClickHouse/ClickHouse/pull/38007) ([xiedeyantu](https://github.com/xiedeyantu)).
* Fix macOS build compressor failure. [#38007](https://github.com/ClickHouse/ClickHouse/pull/38007) ([chen](https://github.com/xiedeyantu)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
@ -166,7 +166,7 @@ sidebar_label: 2022
* Fix possible incorrect result of `SELECT ... WITH FILL` in the case when `ORDER BY` should be applied after `WITH FILL` result (e.g. for outer query). Incorrect result was caused by optimization for `ORDER BY` expressions ([#35623](https://github.com/ClickHouse/ClickHouse/issues/35623)). Closes [#37904](https://github.com/ClickHouse/ClickHouse/issues/37904). [#37959](https://github.com/ClickHouse/ClickHouse/pull/37959) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Add missing default columns when pushing to the target table in WindowView, fix [#37815](https://github.com/ClickHouse/ClickHouse/issues/37815). [#37965](https://github.com/ClickHouse/ClickHouse/pull/37965) ([vxider](https://github.com/Vxider)).
* Fixed a stack overflow issue that would cause compilation to fail. [#37996](https://github.com/ClickHouse/ClickHouse/pull/37996) ([Han Shukai](https://github.com/KinderRiven)).
* When enable_filesystem_query_cache_limit is enabled, throw `Reserved cache size exceeds the remaining cache size`. [#38004](https://github.com/ClickHouse/ClickHouse/pull/38004) ([xiedeyantu](https://github.com/xiedeyantu)).
* When enable_filesystem_query_cache_limit is enabled, throw `Reserved cache size exceeds the remaining cache size`. [#38004](https://github.com/ClickHouse/ClickHouse/pull/38004) ([chen](https://github.com/xiedeyantu)).
* Query, containing ORDER BY ... WITH FILL, can generate extra rows when multiple WITH FILL columns are present. [#38074](https://github.com/ClickHouse/ClickHouse/pull/38074) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.6.2.12-stable FIXME as compared to v22.6.1.1985-stable
### ClickHouse release v22.6.2.12-stable (1fc97f10cbf) FIXME as compared to v22.6.1.1985-stable (7000c4e0033)
#### Improvement
* Backported in [#38484](https://github.com/ClickHouse/ClickHouse/issues/38484): Improve the stability for hive storage integration test. Move the data prepare step into test.py. [#38260](https://github.com/ClickHouse/ClickHouse/pull/38260) ([lgbo](https://github.com/lgbo-ustc)).

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.6.3.35-stable FIXME as compared to v22.6.2.12-stable
### ClickHouse release v22.6.3.35-stable (d5566f2f2dd) FIXME as compared to v22.6.2.12-stable (1fc97f10cbf)
#### Bug Fix
* Backported in [#38812](https://github.com/ClickHouse/ClickHouse/issues/38812): Fix crash when executing GRANT ALL ON *.* with ON CLUSTER. It was broken in https://github.com/ClickHouse/ClickHouse/pull/35767. This closes [#38618](https://github.com/ClickHouse/ClickHouse/issues/38618). [#38674](https://github.com/ClickHouse/ClickHouse/pull/38674) ([Vitaly Baranov](https://github.com/vitlibar)).

View File

@ -5,7 +5,7 @@ sidebar_label: 2022
# 2022 Changelog
### ClickHouse release v22.6.4.35-stable FIXME as compared to v22.6.3.35-stable
### ClickHouse release v22.6.4.35-stable (b9202cae6f4) FIXME as compared to v22.6.3.35-stable (d5566f2f2dd)
#### Build/Testing/Packaging Improvement
* Backported in [#38822](https://github.com/ClickHouse/ClickHouse/issues/38822): - Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,18 @@
---
sidebar_position: 1
sidebar_label: 2022
---
# 2022 Changelog
### ClickHouse release v22.7.3.5-stable (e140b8b5f3a) FIXME as compared to v22.7.2.15-stable (f843089624e)
#### Build/Testing/Packaging Improvement
* Backported in [#39884](https://github.com/ClickHouse/ClickHouse/issues/39884): Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#39884](https://github.com/ClickHouse/ClickHouse/issues/39884): Former packages used to install systemd.service file to `/etc`. The files there are marked as `conf` and are not cleaned out, and not updated automatically. This PR cleans them out. [#39323](https://github.com/ClickHouse/ClickHouse/pull/39323) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
* Backported in [#40045](https://github.com/ClickHouse/ClickHouse/issues/40045): Fix big memory usage during fetches. Fixes [#39915](https://github.com/ClickHouse/ClickHouse/issues/39915). [#39990](https://github.com/ClickHouse/ClickHouse/pull/39990) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#40045](https://github.com/ClickHouse/ClickHouse/issues/40045): Fix big memory usage during fetches. Fixes [#39915](https://github.com/ClickHouse/ClickHouse/issues/39915). [#39990](https://github.com/ClickHouse/ClickHouse/pull/39990) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

View File

@ -5,7 +5,7 @@ sidebar_label: Date32
# Date32
A date. Supports the same date range as [Datetime64](../../sql-reference/data-types/datetime64.md). Stored in four bytes as the number of days since 1925-01-01. Allows storing values till 2283-11-11.
A date. Supports the same date range as [Datetime64](../../sql-reference/data-types/datetime64.md). Stored in four bytes as the number of days since 1900-01-01. Allows storing values till 2299-12-31.
**Examples**
@ -36,5 +36,5 @@ SELECT * FROM new;
- [toDate32](../../sql-reference/functions/type-conversion-functions.md#todate32)
- [toDate32OrZero](../../sql-reference/functions/type-conversion-functions.md#todate32-or-zero)
- [toDate32OrNull](../../sql-reference/functions/type-conversion-functions.md#todate32-or-null)
- [toDate32OrNull](../../sql-reference/functions/type-conversion-functions.md#todate32-or-null)

View File

@ -18,7 +18,7 @@ DateTime64(precision, [timezone])
Internally, stores data as a number of ticks since epoch start (1970-01-01 00:00:00 UTC) as Int64. The tick resolution is determined by the precision parameter. Additionally, the `DateTime64` type can store time zone that is the same for the entire column, that affects how the values of the `DateTime64` type values are displayed in text format and how the values specified as strings are parsed (2020-01-01 05:00:01.000). The time zone is not stored in the rows of the table (or in resultset), but is stored in the column metadata. See details in [DateTime](../../sql-reference/data-types/datetime.md).
Supported range of values: \[1925-01-01 00:00:00, 2283-11-11 23:59:59.99999999\] (Note: The precision of the maximum value is 8).
Supported range of values: \[1900-01-01 00:00:00, 2299-12-31 23:59:59.99999999\] (Note: The precision of the maximum value is 8).
## Examples

View File

@ -55,9 +55,9 @@ Differs from intDiv in that it returns zero when dividing by zero or when
## modulo(a, b), a % b operator
Calculates the remainder after division.
If arguments are floating-point numbers, they are pre-converted to integers by dropping the decimal portion.
The remainder is taken in the same sense as in C++. Truncated division is used for negative numbers.
Calculates the remainder when dividing `a` by `b`.
The result type is an integer if both inputs are integers. If one of the inputs is a floating-point number, the result is a floating-point number.
The remainder is computed like in C++. Truncated division is used for negative numbers.
An exception is thrown when dividing by zero or when dividing a minimal negative number by minus one.
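For example, a minimal illustration of the truncated-division behavior described above (integer arguments; output is indicative):

``` sql
SELECT modulo(-7, 3) AS a, modulo(7, -3) AS b;
```

``` text
┌──a─┬─b─┐
│ -1 │ 1 │
└────┴───┘
```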
## moduloOrZero(a, b)

View File

@ -266,8 +266,8 @@ Result:
└────────────────┘
```
:::note
The return type `toStartOf*` functions described below is `Date` or `DateTime`. Though these functions can take `DateTime64` as an argument, passing them a `DateTime64` that is out of the normal range (years 1925 - 2283) will give an incorrect result.
:::note
The return type `toStartOf*` functions described below is `Date` or `DateTime`. Though these functions can take `DateTime64` as an argument, passing them a `DateTime64` that is out of the normal range (years 1900 - 2299) will give an incorrect result.
:::
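For example, with a value inside the normal range (an illustrative query; the sample value is arbitrary):

``` sql
SELECT toStartOfMonth(toDateTime64('2020-01-21 10:20:30', 3)) AS res, toTypeName(res) AS type;
```

``` text
┌────────res─┬─type─┐
│ 2020-01-01 │ Date │
└────────────┴──────┘
```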
## toStartOfYear
@ -291,7 +291,7 @@ Returns the date.
Rounds down a date or date with time to the first day of the month.
Returns the date.
:::note
:::note
The behavior of parsing incorrect dates is implementation specific. ClickHouse may return zero date, throw an exception or do “natural” overflow.
:::

View File

@ -218,23 +218,23 @@ SELECT toDate32('1955-01-01') AS value, toTypeName(value);
2. The value is outside the range:
``` sql
SELECT toDate32('1924-01-01') AS value, toTypeName(value);
SELECT toDate32('1899-01-01') AS value, toTypeName(value);
```
``` text
┌──────value─┬─toTypeName(toDate32('1925-01-01'))─┐
│ 1925-01-01 │ Date32 │
┌──────value─┬─toTypeName(toDate32('1899-01-01'))─┐
│ 1900-01-01 │ Date32 │
└────────────┴────────────────────────────────────┘
```
3. With `Date`-type argument:
``` sql
SELECT toDate32(toDate('1924-01-01')) AS value, toTypeName(value);
SELECT toDate32(toDate('1899-01-01')) AS value, toTypeName(value);
```
``` text
┌──────value─┬─toTypeName(toDate32(toDate('1924-01-01')))─┐
┌──────value─┬─toTypeName(toDate32(toDate('1899-01-01')))─┐
│ 1970-01-01 │ Date32 │
└────────────┴────────────────────────────────────────────┘
```
@ -248,14 +248,14 @@ The same as [toDate32](#todate32) but returns the min value of [Date32](../../sq
Query:
``` sql
SELECT toDate32OrZero('1924-01-01'), toDate32OrZero('');
SELECT toDate32OrZero('1899-01-01'), toDate32OrZero('');
```
Result:
``` text
┌─toDate32OrZero('1924-01-01')─┬─toDate32OrZero('')─┐
│ 1925-01-01 │ 1925-01-01 │
┌─toDate32OrZero('1899-01-01')─┬─toDate32OrZero('')─┐
│ 1900-01-01 │ 1900-01-01 │
└──────────────────────────────┴────────────────────┘
```
@ -1218,18 +1218,30 @@ Result:
└────────────────────────────┴────────────────────────────────┘
```
## parseDateTime64BestEffortUS
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort), except that this function prefers US date format (`MM/DD/YYYY` etc.) in case of ambiguity.
## parseDateTime64BestEffortOrNull
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort) except that it returns `NULL` when it encounters a date format that cannot be processed.
## parseDateTime64BestEffortOrZero
Same as for [parseDateTime64BestEffort](#parsedatetimebesteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed.
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort) except that it returns zero date or zero date time when it encounters a date format that cannot be processed.
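For example (illustrative; the zero value is shown assuming the UTC time zone):

``` sql
SELECT parseDateTime64BestEffortOrNull('not a date') AS as_null, parseDateTime64BestEffortOrZero('not a date') AS as_zero;
```

``` text
┌─as_null─┬─────────────────as_zero─┐
│    ᴺᵁᴸᴸ │ 1970-01-01 00:00:00.000 │
└─────────┴─────────────────────────┘
```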
## parseDateTime64BestEffortUSOrNull
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort), except that this function prefers US date format (`MM/DD/YYYY` etc.) in case of ambiguity and returns `NULL` when it encounters a date format that cannot be processed.
## parseDateTime64BestEffortUSOrZero
Same as for [parseDateTime64BestEffort](#parsedatetime64besteffort), except that this function prefers US date format (`MM/DD/YYYY` etc.) in case of ambiguity and returns zero date or zero date time when it encounters a date format that cannot be processed.
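For example, the ambiguous `02/10/2021` is read as February 10 by the US variants (illustrative output):

``` sql
SELECT parseDateTime64BestEffortUS('02/10/2021 12:00:00.000') AS res;
```

``` text
┌─────────────────────res─┐
│ 2021-02-10 12:00:00.000 │
└─────────────────────────┘
```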
## toLowCardinality
Converts input parameter to the [LowCardianlity](../../sql-reference/data-types/lowcardinality.md) version of same data type.
Converts input parameter to the [LowCardinality](../../sql-reference/data-types/lowcardinality.md) version of same data type.
To convert data from the `LowCardinality` data type use the [CAST](#type_conversion_function-cast) function. For example, `CAST(x as String)`.
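A minimal illustration, assuming a plain `String` literal as input:

``` sql
SELECT toLowCardinality('1') AS val, toTypeName(val) AS type;
```

``` text
┌─val─┬─type───────────────────┐
│ 1   │ LowCardinality(String) │
└─────┴────────────────────────┘
```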

View File

@ -9,13 +9,13 @@ Table functions are methods for constructing tables.
You can use table functions in:
- [FROM](../../sql-reference/statements/select/from.md) clause of the `SELECT` query.
- [FROM](../../sql-reference/statements/select/from.md) clause of the `SELECT` query.
The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes.
The method for creating a temporary table that is available only in the current query. The table is deleted when the query finishes.
- [CREATE TABLE AS table_function()](../../sql-reference/statements/create/table.md) query.
It's one of the methods of creating a table.
It's one of the methods of creating a table.
- [INSERT INTO TABLE FUNCTION](../../sql-reference/statements/insert-into.md#inserting-into-table-function) query.
@ -38,4 +38,3 @@ You cant use table functions if the [allow_ddl](../../operations/settings/per
| [s3](../../sql-reference/table-functions/s3.md) | Creates a [S3](../../engines/table-engines/integrations/s3.md)-engine table. |
| [sqlite](../../sql-reference/table-functions/sqlite.md) | Creates a [sqlite](../../engines/table-engines/integrations/sqlite.md)-engine table. |
[Original article](https://clickhouse.com/docs/en/sql-reference/table-functions/) <!--hide-->

View File

@ -8,6 +8,7 @@ sidebar_label: Сборка на Mac OS X
:::info "Вам не нужно собирать ClickHouse самостоятельно"
Вы можете установить предварительно собранный ClickHouse, как описано в [Быстром старте](https://clickhouse.com/#quick-start).
Следуйте инструкциям по установке для `macOS (Intel)` или `macOS (Apple Silicon)`.
:::
Сборка должна запускаться с x86_64 (Intel) на macOS версии 10.15 (Catalina) и выше в последней версии компилятора Xcode's native AppleClang, Homebrew's vanilla Clang или в GCC-компиляторах.
@ -90,6 +91,7 @@ $ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/
:::info "Note"
Вам понадобится команда `sudo`.
:::
1. Создайте файл `/Library/LaunchDaemons/limit.maxfiles.plist` и поместите в него следующее:

View File

@ -49,6 +49,7 @@ PostgreSQL массивы конвертируются в массивы ClickHo
:::info "Внимание"
Будьте внимательны, в PostgreSQL массивы, созданные как `type_name[]`, являются многомерными и могут содержать в себе разное количество измерений в разных строках одной таблицы. Внутри ClickHouse допустимы только многомерные массивы с одинаковым кол-вом измерений во всех строках таблицы.
:::
Поддерживает несколько реплик, которые должны быть перечислены через `|`. Например:

View File

@ -40,6 +40,7 @@ ORDER BY (CounterID, StartDate, intHash32(UserID));
:::info "Info"
Не рекомендуется делать слишком гранулированное партиционирование то есть задавать партиции по столбцу, в котором будет слишком большой разброс значений (речь идет о порядке более тысячи партиций). Это приведет к скоплению большого числа файлов и файловых дескрипторов в системе, что может значительно снизить производительность запросов `SELECT`.
:::
Чтобы получить набор кусков и партиций таблицы, можно воспользоваться системной таблицей [system.parts](../../../engines/table-engines/mergetree-family/custom-partitioning-key.md#system_tables-parts). В качестве примера рассмотрим таблицу `visits`, в которой задано партиционирование по месяцам. Выполним `SELECT` для таблицы `system.parts`:
@ -80,6 +81,7 @@ WHERE table = 'visits'
:::info "Info"
Названия кусков для таблиц старого типа образуются следующим образом: `20190117_20190123_2_2_0` (минимальная дата _ максимальная дата _ номер минимального блока _ номер максимального блока _ уровень).
:::
Как видно из примера выше, таблица содержит несколько отдельных кусков для одной и той же партиции (например, куски `201901_1_3_1` и `201901_1_9_2` принадлежат партиции `201901`). Это означает, что эти куски еще не были объединены в файловой системе они хранятся отдельно. После того как будет выполнено автоматическое слияние данных (выполняется примерно спустя 10 минут после вставки данных), исходные куски будут объединены в один более крупный кусок и помечены как неактивные.

View File

@ -14,3 +14,4 @@ sidebar_position: 10
:::info "Забавный факт"
Спустя годы после того, как ClickHouse получил свое название, принцип комбинирования двух слов, каждое из которых имеет подходящий смысл, был признан лучшим способом назвать базу данных в [исследовании Andy Pavlo](https://www.cs.cmu.edu/~pavlo/blog/2020/03/on-naming-a-database-management-system.html), Associate Professor of Databases в Carnegie Mellon University. ClickHouse разделил награду "за лучшее название СУБД" с Postgres.
:::

View File

@ -20,5 +20,6 @@ sidebar_label: Общие вопросы
:::info "Если вы не нашли то, что искали:"
Загляните в другие категории F.A.Q. или поищите в остальных разделах документации, ориентируясь по оглавлению слева.
:::
[Original article](https://clickhouse.com/docs/ru/faq/general/) <!--hide-->

View File

@ -60,3 +60,4 @@ sidebar_position: 8
- Ориентируйтесь на показатели, собранные при работе с реальными данными.
- Проверяйте производительность в процессе CI.
- Измеряйте и анализируйте всё, что только возможно.
:::

View File

@ -15,5 +15,6 @@ sidebar_label: Интеграция
:::info "Если вы не нашли то, что искали"
Загляните в другие подразделы F.A.Q. или поищите в остальных разделах документации, ориентируйтесь по оглавлению слева.
:::
[Original article](https://clickhouse.com/docs/ru/faq/integration/)

View File

@ -14,5 +14,6 @@ sidebar_label: Операции
:::info "Если вы не нашли то, что искали"
Загляните в другие подразделы F.A.Q. или поищите в остальных разделах документации, ориентируйтесь по оглавлению слева.
:::
[Original article](https://clickhouse.com/docs/en/faq/operations/)

View File

@ -293,6 +293,7 @@ $ clickhouse-client --query "SELECT COUNT(*) FROM datasets.trips_mergetree"
:::info "Info"
Если вы собираетесь выполнять запросы, приведенные ниже, то к имени таблицы
нужно добавить имя базы, `datasets.trips_mergetree`.
:::
## Результаты на одном сервере {#rezultaty-na-odnom-servere}

View File

@ -157,6 +157,7 @@ $ clickhouse-client --query "SELECT COUNT(*) FROM datasets.ontime"
:::info "Info"
Если вы собираетесь выполнять запросы, приведенные ниже, то к имени таблицы
нужно добавить имя базы, `datasets.ontime`.
:::
## Запросы: {#zaprosy}

View File

@ -99,6 +99,7 @@ ClickHouse предоставляет возможность аутентифи
:::info ""
Ещё раз отметим, что кроме `users.xml`, необходимо также включить Kerberos в `config.xml`.
:::
### Настройка Kerberos через SQL {#enabling-kerberos-using-sql}

View File

@ -174,6 +174,7 @@ ClickHouse проверяет условия для `min_part_size` и `min_part
:::info "Примечание"
Жесткое ограничение настраивается с помощью системных инструментов.
:::
**Пример**
@ -706,6 +707,7 @@ ClickHouse поддерживает динамическое изменение
:::info "Примечание"
Параметры этих настроек могут быть изменены во время выполнения запросов и вступят в силу немедленно. Запросы, которые уже запущены, выполнятся без изменений.
:::
Возможные значения:
@ -726,6 +728,7 @@ ClickHouse поддерживает динамическое изменение
:::info "Примечание"
Параметры этих настроек могут быть изменены во время выполнения запросов и вступят в силу немедленно. Запросы, которые уже запущены, выполнятся без изменений.
:::
Возможные значения:
@ -746,6 +749,7 @@ ClickHouse поддерживает динамическое изменение
:::info "Примечание"
Параметры этих настроек могут быть изменены во время выполнения запросов и вступят в силу немедленно. Запросы, которые уже запущены, выполнятся без изменений.
:::
Возможные значения:

View File

@ -30,6 +30,7 @@
:::info "Замечание"
Даже если `parts_to_do = 0`, для реплицированной таблицы возможна ситуация, когда мутация ещё не завершена из-за долго выполняющейся операции `INSERT`, которая добавляет данные, которые нужно будет мутировать.
:::
Если во время мутации какого-либо куска возникли проблемы, заполняются следующие столбцы:

View File

@ -8,6 +8,7 @@ sidebar_position: 141
:::info "Примечание"
Чтобы эта функция работала должным образом, исходные данные должны быть отсортированы. В [материализованном представлении](../../../sql-reference/statements/create/view.md#materialized) вместо нее рекомендуется использовать [deltaSumTimestamp](../../../sql-reference/aggregate-functions/reference/deltasumtimestamp.md#agg_functions-deltasumtimestamp).
:::
**Синтаксис**

View File

@ -20,6 +20,7 @@ intervalLengthSum(start, end)
:::info "Примечание"
Аргументы должны быть одного типа. В противном случае ClickHouse сгенерирует исключение.
:::
**Возвращаемое значение**

View File

@ -5,7 +5,7 @@ sidebar_label: Date32
# Date32 {#data_type-datetime32}
Дата. Поддерживается такой же диапазон дат, как для типа [Datetime64](../../sql-reference/data-types/datetime64.md). Значение хранится в четырех байтах и соответствует числу дней с 1925-01-01 по 2283-11-11.
Дата. Поддерживается такой же диапазон дат, как для типа [Datetime64](../../sql-reference/data-types/datetime64.md). Значение хранится в четырех байтах и соответствует числу дней с 1900-01-01 по 2299-12-31.
**Пример**
@ -36,5 +36,5 @@ SELECT * FROM new;
- [toDate32](../../sql-reference/functions/type-conversion-functions.md#todate32)
- [toDate32OrZero](../../sql-reference/functions/type-conversion-functions.md#todate32-or-zero)
- [toDate32OrNull](../../sql-reference/functions/type-conversion-functions.md#todate32-or-null)
- [toDate32OrNull](../../sql-reference/functions/type-conversion-functions.md#todate32-or-null)

View File

@ -18,7 +18,7 @@ DateTime64(precision, [timezone])
Данные хранятся в виде количества ‘тиков’, прошедших с момента начала эпохи (1970-01-01 00:00:00 UTC), в Int64. Размер тика определяется параметром precision. Дополнительно, тип `DateTime64` позволяет хранить часовой пояс, единый для всей колонки, который влияет на то, как будут отображаться значения типа `DateTime64` в текстовом виде и как будут парситься значения заданные в виде строк (2020-01-01 05:00:01.000). Часовой пояс не хранится в строках таблицы (выборки), а хранится в метаданных колонки. Подробнее см. [DateTime](datetime.md).
Диапазон значений: \[1925-01-01 00:00:00, 2283-11-11 23:59:59.99999999\] (Примечание: Точность максимального значения составляет 8).
Диапазон значений: \[1900-01-01 00:00:00, 2299-12-31 23:59:59.99999999\] (Примечание: Точность максимального значения составляет 8).
## Примеры {#examples}

View File

@ -26,6 +26,7 @@ sidebar_label: Nullable
:::info "Info"
Почти всегда использование `Nullable` снижает производительность, учитывайте это при проектировании своих баз.
:::
## Поиск NULL {#finding-null}

View File

@ -464,6 +464,7 @@ SOURCE(ODBC(
:::info "Примечание"
Поля `table` и `query` не могут быть использованы вместе. Также обязательно должен быть один из источников данных: `table` или `query`.
:::
ClickHouse получает от ODBC-драйвера информацию о квотировании и квотирует настройки в запросах к драйверу, поэтому имя таблицы нужно указывать в соответствии с регистром имени таблицы в базе данных.
@ -543,6 +544,7 @@ SOURCE(MYSQL(
:::info "Примечание"
Поля `table` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `table` или `query`.
Явный параметр `secure` отсутствует. Автоматически поддержана работа в обоих случаях: когда установка SSL-соединения необходима и когда нет.
:::
MySQL можно подключить на локальном хосте через сокеты, для этого необходимо задать `host` и `socket`.
@ -633,6 +635,7 @@ SOURCE(CLICKHOUSE(
:::info "Примечание"
Поля `table` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `table` или `query`.
:::
### MongoDB {#dicts-external_dicts_dict_sources-mongodb}
@ -748,6 +751,7 @@ SOURCE(REDIS(
:::info "Примечание"
Поля `column_family` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `column_family` или `query`.
:::
### PostgreSQL {#dicts-external_dicts_dict_sources-postgresql}
@ -804,3 +808,4 @@ SOURCE(POSTGRESQL(
:::info "Примечание"
Поля `table` или `where` не могут быть использованы вместе с полем `query`. Также обязательно должен быть один из источников данных: `table` или `query`.
:::

View File

@ -56,7 +56,7 @@ SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0
## modulo(a, b), оператор a % b {#modulo}
Вычисляет остаток от деления.
Если аргументы - числа с плавающей запятой, то они предварительно преобразуются в целые числа, путём отбрасывания дробной части.
Тип результата - целое число, если оба аргумента - целые числа. Если один из аргументов является числом с плавающей точкой, результатом будет число с плавающей точкой.
Берётся остаток в том же смысле, как это делается в C++. По факту, для отрицательных чисел, используется truncated division.
При делении на ноль или при делении минимального отрицательного числа на минус единицу, кидается исключение.

View File

@ -57,7 +57,7 @@ toTimezone(value, timezone)
**Аргументы**
- `value` — время или дата с временем. [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — часовой пояс для возвращаемого значения. [String](../../sql-reference/data-types/string.md). Этот аргумент является константой, потому что `toTimezone` изменяет часовой пояс столбца (часовой пояс является атрибутом типов `DateTime*`).
- `timezone` — часовой пояс для возвращаемого значения. [String](../../sql-reference/data-types/string.md). Этот аргумент является константой, потому что `toTimezone` изменяет часовой пояс столбца (часовой пояс является атрибутом типов `DateTime*`).
**Возвращаемое значение**
@ -267,7 +267,7 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp;
```
:::note "Attention"
`Date` или `DateTime` это возвращаемый тип функций `toStartOf*`, который описан ниже. Несмотря на то, что эти функции могут принимать `DateTime64` в качестве аргумента, если переданное значение типа `DateTime64` выходит за пределы нормального диапазона (с 1925 по 2283 год), то это даст неверный результат.
`Date` или `DateTime` это возвращаемый тип функций `toStartOf*`, который описан ниже. Несмотря на то, что эти функции могут принимать `DateTime64` в качестве аргумента, если переданное значение типа `DateTime64` выходит за пределы нормального диапазона (с 1900 по 2299 год), то это даст неверный результат.
:::
## toStartOfYear {#tostartofyear}

View File

@ -86,6 +86,7 @@ geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precisi
:::info "Замечание"
Все передаваемые координаты должны быть одного и того же типа: либо `Float32`, либо `Float64`.
:::
**Возвращаемые значения**
@ -96,6 +97,7 @@ geohashesInBox(longitude_min, latitude_min, longitude_max, latitude_max, precisi
:::info "Замечание"
Если возвращаемый массив содержит свыше 10 000 000 элементов, функция сгенерирует исключение.
:::
**Пример**

View File

@ -209,7 +209,7 @@ SELECT toDate32('1955-01-01') AS value, toTypeName(value);
```
``` text
┌──────value─┬─toTypeName(toDate32('1925-01-01'))─┐
┌──────value─┬─toTypeName(toDate32('1955-01-01'))─┐
│ 1955-01-01 │ Date32 │
└────────────┴────────────────────────────────────┘
```
@ -217,23 +217,23 @@ SELECT toDate32('1955-01-01') AS value, toTypeName(value);
2. Значение выходит за границы диапазона:
``` sql
SELECT toDate32('1924-01-01') AS value, toTypeName(value);
SELECT toDate32('1899-01-01') AS value, toTypeName(value);
```
``` text
┌──────value─┬─toTypeName(toDate32('1925-01-01'))─┐
│ 1925-01-01 │ Date32 │
┌──────value─┬─toTypeName(toDate32('1899-01-01'))─┐
│ 1900-01-01 │ Date32 │
└────────────┴────────────────────────────────────┘
```
3. С аргументом типа `Date`:
``` sql
SELECT toDate32(toDate('1924-01-01')) AS value, toTypeName(value);
SELECT toDate32(toDate('1899-01-01')) AS value, toTypeName(value);
```
``` text
┌──────value─┬─toTypeName(toDate32(toDate('1924-01-01')))─┐
┌──────value─┬─toTypeName(toDate32(toDate('1899-01-01')))─┐
│ 1970-01-01 │ Date32 │
└────────────┴────────────────────────────────────────────┘
```
@ -247,14 +247,14 @@ SELECT toDate32(toDate('1924-01-01')) AS value, toTypeName(value);
Запрос:
``` sql
SELECT toDate32OrZero('1924-01-01'), toDate32OrZero('');
SELECT toDate32OrZero('1899-01-01'), toDate32OrZero('');
```
Результат:
``` text
┌─toDate32OrZero('1924-01-01')─┬─toDate32OrZero('')─┐
│ 1925-01-01 │ 1925-01-01 │
┌─toDate32OrZero('1899-01-01')─┬─toDate32OrZero('')─┐
│ 1900-01-01 │ 1900-01-01 │
└──────────────────────────────┴────────────────────┘
```
@ -1140,17 +1140,29 @@ FORMAT PrettyCompactMonoBlock;
└────────────────────────────┴────────────────────────────────┘
```
## parseDateTime64BestEffortOrNull {#parsedatetime32besteffortornull}
## parseDateTime64BestEffortUS {#parsedatetime64besteffortus}
Работает аналогично функции [parseDateTime64BestEffort](#parsedatetime64besteffort), но разница состоит в том, что она предполагает американский формат даты (`MM/DD/YYYY` etc.) в случае неоднозначности.
## parseDateTime64BestEffortOrNull {#parsedatetime64besteffortornull}
Работает аналогично функции [parseDateTime64BestEffort](#parsedatetime64besteffort), но возвращает `NULL`, если формат даты не может быть обработан.
## parseDateTime64BestEffortOrZero {#parsedatetime64besteffortorzero}
Работает аналогично функции [parseDateTime64BestEffort](#parsedatetimebesteffort), но возвращает нулевую дату и время, если формат даты не может быть обработан.
Работает аналогично функции [parseDateTime64BestEffort](#parsedatetime64besteffort), но возвращает нулевую дату и время, если формат даты не может быть обработан.
## parseDateTime64BestEffortUSOrNull {#parsedatetime64besteffortusornull}
Работает аналогично функции [parseDateTime64BestEffort](#parsedatetime64besteffort), но разница состоит в том, что она предполагает американский формат даты (`MM/DD/YYYY` etc.) в случае неоднозначности и возвращает `NULL`, если формат даты не может быть обработан.
## parseDateTime64BestEffortUSOrZero {#parsedatetime64besteffortusorzero}
Работает аналогично функции [parseDateTime64BestEffort](#parsedatetime64besteffort), но разница состоит в том, что она предполагает американский формат даты (`MM/DD/YYYY` etc.) в случае неоднозначности и возвращает нулевую дату и время, если формат даты не может быть обработан.
## toLowCardinality {#tolowcardinality}
Преобразует входные данные в версию [LowCardianlity](../data-types/lowcardinality.md) того же типа данных.
Преобразует входные данные в версию [LowCardinality](../data-types/lowcardinality.md) того же типа данных.
Чтобы преобразовать данные из типа `LowCardinality`, используйте функцию [CAST](#type_conversion_function-cast). Например, `CAST(x as String)`.
@ -1197,6 +1209,7 @@ SELECT toLowCardinality('1');
:::info "Примечание"
Возвращаемое значение — это временная метка в UTC, а не в часовом поясе `DateTime64`.
:::
**Синтаксис**

View File

@ -25,7 +25,7 @@ ALTER TABLE [db].name [ON CLUSTER cluster] ADD|DROP|CLEAR|COMMENT|MODIFY COLUMN
- [CONSTRAINT](../../../sql-reference/statements/alter/constraint.md)
- [TTL](../../../sql-reference/statements/alter/ttl.md)
:::note
:::note
Запрос `ALTER TABLE` поддерживается только для таблиц типа `*MergeTree`, а также `Merge` и `Distributed`. Запрос имеет несколько вариантов.
:::
Следующие запросы `ALTER` управляют представлениями:
@ -77,5 +77,6 @@ ALTER TABLE [db.]table MATERIALIZE INDEX name IN PARTITION partition_name
:::info "Примечание"
Для всех запросов `ALTER` при `replication_alter_partitions_sync = 2` и неактивности некоторых реплик больше времени, заданного настройкой `replication_wait_for_inactive_replica_timeout`, генерируется исключение `UNFINISHED`.
:::
Для запросов `ALTER TABLE ... UPDATE|DELETE` синхронность выполнения определяется настройкой [mutations_sync](../../../operations/settings/settings.md#mutations_sync).

View File

@ -56,6 +56,7 @@ CREATE USER [IF NOT EXISTS | OR REPLACE] name1 [ON CLUSTER cluster_name1]
:::info "Внимание"
ClickHouse трактует конструкцию `user_name@'address'` как имя пользователя целиком. То есть технически вы можете создать несколько пользователей с одинаковыми `user_name`, но разными частями конструкции после `@`, но лучше так не делать.
:::
## Секция GRANTEES {#grantees}

View File

@ -88,6 +88,7 @@ LIVE-представления работают по тому же принци
- `LIVE VIEW` не обновляется, если в исходном запросе используются несколько таблиц.
В случаях, когда `LIVE VIEW` не обновляется автоматически, чтобы обновлять его принудительно с заданной периодичностью, используйте [WITH REFRESH](#live-view-with-refresh).
:::
### Отслеживание изменений LIVE-представлений {#live-view-monitoring}

View File

@ -30,6 +30,7 @@ ClickHouse не оповещает клиента. Чтобы включить
:::info "Примечание"
Если значение настройки `replication_alter_partitions_sync` равно `2` и некоторые реплики не активны больше времени, заданного настройкой `replication_wait_for_inactive_replica_timeout`, то генерируется исключение `UNFINISHED`.
:::
## Выражение BY {#by-expression}

View File

@ -368,6 +368,7 @@ SHOW ACCESS
:::info "Note"
По запросу `SHOW CLUSTER name` вы получите содержимое таблицы system.clusters для этого кластера.
:::
### Синтаксис {#show-cluster-syntax}

View File

@ -19,3 +19,4 @@ TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
:::info "Примечание"
Если значение настройки `replication_alter_partitions_sync` равно `2` и некоторые реплики не активны больше времени, заданного настройкой `replication_wait_for_inactive_replica_timeout`, то генерируется исключение `UNFINISHED`.
:::

View File

@ -104,3 +104,4 @@ WATCH lv EVENTS LIMIT 1;
:::info "Примечание"
При отслеживании [LIVE VIEW](./create/view.md#live-view) через интерфейс HTTP следует использовать формат [JSONEachRowWithProgress](../../interfaces/formats.md#jsoneachrowwithprogress). Постоянные сообщения об изменениях будут добавлены в поток вывода для поддержания активности долговременного HTTP-соединения до тех пор, пока результат запроса изменяется. Промежуток времени между сообщениями об изменениях управляется настройкой [live_view_heartbeat_interval](./create/view.md#live-view-settings).
:::

View File

@ -28,6 +28,7 @@ postgresql('host:port', 'database', 'table', 'user', 'password'[, `schema`])
:::info "Примечание"
В запросах `INSERT` для того чтобы отличить табличную функцию `postgresql(...)` от таблицы со списком имен столбцов вы должны указывать ключевые слова `FUNCTION` или `TABLE FUNCTION`. См. примеры ниже.
:::
## Особенности реализации {#implementation-details}
@ -43,6 +44,7 @@ PostgreSQL массивы конвертируются в массивы ClickHo
:::info "Примечание"
Будьте внимательны, в PostgreSQL массивы, созданные как `type_name[]`, являются многомерными и могут содержать в себе разное количество измерений в разных строках одной таблицы. Внутри ClickHouse допустимы только многомерные массивы с одинаковым кол-вом измерений во всех строках таблицы.
:::
Поддерживает несколько реплик, которые должны быть перечислены через `|`. Например:

View File

@ -19,7 +19,7 @@ DateTime64(precision, [timezone])
在内部此类型以Int64类型将数据存储为自Linux纪元开始(1970-01-01 00:00:00UTC)的时间刻度数ticks。时间刻度的分辨率由precision参数确定。此外`DateTime64` 类型可以像存储其他数据列一样存储时区信息,时区会影响 `DateTime64` 类型的值如何以文本格式显示,以及如何解析以字符串形式指定的时间数据 (2020-01-01 05:00:01.000)。时区不存储在表的行中也不在resultset中而是存储在列的元数据中。详细信息请参考 [DateTime](datetime.md) 数据类型.
值的范围: \[1925-01-01 00:00:00, 2283-11-11 23:59:59.99999999\] (注意: 最大值的精度是8)。
值的范围: \[1900-01-01 00:00:00, 2299-12-31 23:59:59.99999999\] (注意: 最大值的精度是8)。
## 示例 {#examples}

View File

@ -54,7 +54,7 @@ SELECT toTypeName(0), toTypeName(0 + 0), toTypeName(0 + 0 + 0), toTypeName(0 + 0
## modulo(a, b), a % b operator {#modulo}
计算除法后的余数。
如果参数是浮点数,则通过删除小数部分将它们预转换为整数。
如果两个输入都是整数,结果类型是整数。如果其中一个输入是浮点数,则结果是浮点数。
其余部分与C++中的含义相同。截断除法用于负数。
除以零或将最小负数除以-1时抛出异常。

View File

@ -263,8 +263,8 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp
└────────────────┘
```
:::注意
下面描述的返回类型 `toStartOf` 函数是 `Date``DateTime`。尽管这些函数可以将 `DateTime64` 作为参数但将超出正常范围1925年-2283年)的 `DateTime64` 传递给它们会给出不正确的结果。
:::注意
下面描述的返回类型 `toStartOf` 函数是 `Date``DateTime`。尽管这些函数可以将 `DateTime64` 作为参数但将超出正常范围1900年-2299年)的 `DateTime64` 传递给它们会给出不正确的结果。
:::
## toStartOfYear {#tostartofyear}
@ -1221,4 +1221,4 @@ SELECT fromModifiedJulianDayOrNull(58849);
└────────────────────────────────────┘
```
[Original article](https://clickhouse.com/docs/en/query_language/functions/date_time_functions/) <!--hide-->
[Original article](https://clickhouse.com/docs/en/query_language/functions/date_time_functions/) <!--hide-->

View File

@ -512,7 +512,7 @@ SELECT parseDateTimeBestEffort('10 20:19')
## toLowCardinality {#tolowcardinality}
把输入值转换为[LowCardianlity](../data-types/lowcardinality.md)的相同类型的数据。
把输入值转换为[LowCardinality](../data-types/lowcardinality.md)的相同类型的数据。
如果要把`LowCardinality`类型的数据转换为其他类型,使用[CAST](#type_conversion_function-cast)函数。比如:`CAST(x as String)`。

View File

@ -133,6 +133,7 @@ std::vector<String> Client::loadWarningMessages()
std::vector<String> messages;
connection->sendQuery(connection_parameters.timeouts,
"SELECT * FROM viewIfPermitted(SELECT message FROM system.warnings ELSE null('message String'))",
{} /* query_parameters */,
"" /* query_id */,
QueryProcessingStage::Complete,
&global_context->getSettingsRef(),

View File

@ -26,13 +26,13 @@ std::unique_ptr<HTTPRequestHandler> LibraryBridgeHandlerFactory::createRequestHa
if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_GET)
{
if (uri.getPath() == "/extdict_ping")
return std::make_unique<LibraryBridgeExistsHandler>(keep_alive_timeout, getContext());
return std::make_unique<ExternalDictionaryLibraryBridgeExistsHandler>(keep_alive_timeout, getContext());
}
if (request.getMethod() == Poco::Net::HTTPRequest::HTTP_POST)
{
if (uri.getPath() == "/extdict_request")
return std::make_unique<LibraryBridgeRequestHandler>(keep_alive_timeout, getContext());
return std::make_unique<ExternalDictionaryLibraryBridgeRequestHandler>(keep_alive_timeout, getContext());
}
return nullptr;

View File

@ -5,6 +5,7 @@
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <Common/BridgeProtocolVersion.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Poco/Net/HTMLForm.h>
@ -78,18 +79,39 @@ static void writeData(Block data, OutputFormatPtr format)
executor.execute();
}
LibraryBridgeRequestHandler::LibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_)
ExternalDictionaryLibraryBridgeRequestHandler::ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_)
: WithContext(context_)
, log(&Poco::Logger::get("LibraryBridgeRequestHandler"))
, log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeRequestHandler"))
, keep_alive_timeout(keep_alive_timeout_)
{
}
void LibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
{
LOG_TRACE(log, "Request URI: {}", request.getURI());
HTMLForm params(getContext()->getSettingsRef(), request);
size_t version;
if (!params.has("version"))
version = 0; /// assumed version for too old servers which do not send a version
else
{
String version_str = params.get("version");
if (!tryParse(version, version_str))
{
processError(response, "Unable to parse 'version' string in request URL: '" + version_str + "' Check if the server and library-bridge have the same version.");
return;
}
}
if (version != LIBRARY_BRIDGE_PROTOCOL_VERSION)
{
/// backwards compatibility is considered unnecessary for now, just let the user know that the server and the bridge must be upgraded together
processError(response, "Server and library-bridge have different versions: '" + std::to_string(version) + "' vs. '" + std::to_string(LIBRARY_BRIDGE_PROTOCOL_VERSION) + "'");
return;
}
if (!params.has("method"))
{
processError(response, "No 'method' in request URL");
@ -340,14 +362,14 @@ void LibraryBridgeRequestHandler::handleRequest(HTTPServerRequest & request, HTT
}
}
LibraryBridgeExistsHandler::LibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_)
ExternalDictionaryLibraryBridgeExistsHandler::ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_)
: WithContext(context_)
, keep_alive_timeout(keep_alive_timeout_)
, log(&Poco::Logger::get("LibraryBridgeExistsHandler"))
, log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeExistsHandler"))
{
}
void LibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
void ExternalDictionaryLibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response)
{
try
{
@ -361,6 +383,7 @@ void LibraryBridgeExistsHandler::handleRequest(HTTPServerRequest & request, HTTP
}
std::string dictionary_id = params.get("dictionary_id");
auto library_handler = ExternalDictionaryLibraryHandlerFactory::instance().get(dictionary_id);
String res = library_handler ? "1" : "0";

View File

@ -9,7 +9,6 @@
namespace DB
{
/// Handler for requests to Library Dictionary Source, returns response in RowBinary format.
/// When a library dictionary source is created, it sends 'extDict_libNew' request to library bridge (which is started on first
/// request to it, if it was not yet started). On this request a new sharedLibraryHandler is added to a
@ -17,10 +16,10 @@ namespace DB
/// names of dictionary attributes, sample block to parse block of null values, block of null values. Everything is
/// passed in binary format and is urlencoded. When dictionary is cloned, a new handler is created.
/// Each handler is unique to dictionary.
class LibraryBridgeRequestHandler : public HTTPRequestHandler, WithContext
class ExternalDictionaryLibraryBridgeRequestHandler : public HTTPRequestHandler, WithContext
{
public:
LibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_);
ExternalDictionaryLibraryBridgeRequestHandler(size_t keep_alive_timeout_, ContextPtr context_);
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;
@ -32,10 +31,10 @@ private:
};
class LibraryBridgeExistsHandler : public HTTPRequestHandler, WithContext
class ExternalDictionaryLibraryBridgeExistsHandler : public HTTPRequestHandler, WithContext
{
public:
LibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_);
ExternalDictionaryLibraryBridgeExistsHandler(size_t keep_alive_timeout_, ContextPtr context_);
void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response) override;

View File

@ -5,6 +5,7 @@
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeNullable.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Parsers/ParserQueryWithOutput.h>
#include <Parsers/parseQuery.h>
@ -14,6 +15,7 @@
#include <Poco/NumberParser.h>
#include <Common/logger_useful.h>
#include <base/scope_guard.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/quoteString.h>
#include "getIdentifierQuote.h"
#include "validateODBCConnectionString.h"
@ -80,6 +82,27 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ
LOG_WARNING(log, fmt::runtime(message));
};
size_t version;
if (!params.has("version"))
version = 0; /// assumed version for too old servers which do not send a version
else
{
String version_str = params.get("version");
if (!tryParse(version, version_str))
{
process_error("Unable to parse 'version' string in request URL: '" + version_str + "' Check if the server and library-bridge have the same version.");
return;
}
}
if (version != XDBC_BRIDGE_PROTOCOL_VERSION)
{
/// backwards compatibility is considered unnecessary for now, just let the user know that the server and the bridge must be upgraded together
process_error("Server and library-bridge have different versions: '" + std::to_string(version) + "' vs. '" + std::to_string(LIBRARY_BRIDGE_PROTOCOL_VERSION) + "'");
return;
}
if (!params.has("table"))
{
process_error("No 'table' param in request URL");

View File

@ -5,11 +5,13 @@
#include <DataTypes/DataTypeFactory.h>
#include <Server/HTTP/HTMLForm.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Parsers/ParserQueryWithOutput.h>
#include <Parsers/parseQuery.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/logger_useful.h>
#include <base/scope_guard.h>
#include "getIdentifierQuote.h"
@ -32,6 +34,27 @@ void IdentifierQuoteHandler::handleRequest(HTTPServerRequest & request, HTTPServ
LOG_WARNING(log, fmt::runtime(message));
};
size_t version;
if (!params.has("version"))
version = 0; /// assumed version for too old servers which do not send a version
else
{
String version_str = params.get("version");
if (!tryParse(version, version_str))
{
process_error("Unable to parse 'version' string in request URL: '" + version_str + "' Check if the server and library-bridge have the same version.");
return;
}
}
if (version != XDBC_BRIDGE_PROTOCOL_VERSION)
{
/// backwards compatibility is considered unnecessary for now, just let the user know that the server and the bridge must be upgraded together
process_error("Server and library-bridge have different versions: '" + std::to_string(version) + "' vs. '" + std::to_string(LIBRARY_BRIDGE_PROTOCOL_VERSION) + "'");
return;
}
if (!params.has("connection_string"))
{
process_error("No 'connection_string' in request URL");

View File

@ -17,6 +17,7 @@
#include <QueryPipeline/QueryPipeline.h>
#include <Processors/Executors/CompletedPipelineExecutor.h>
#include <Processors/Formats/IInputFormat.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/logger_useful.h>
#include <Server/HTTP/HTMLForm.h>
#include <Common/config.h>
@ -55,6 +56,28 @@ void ODBCHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse
HTMLForm params(getContext()->getSettingsRef(), request);
LOG_TRACE(log, "Request URI: {}", request.getURI());
size_t version;
if (!params.has("version"))
version = 0; /// assumed version for too old servers which do not send a version
else
{
String version_str = params.get("version");
if (!tryParse(version, version_str))
{
processError(response, "Unable to parse 'version' string in request URL: '" + version_str + "' Check if the server and library-bridge have the same version.");
return;
}
}
if (version != XDBC_BRIDGE_PROTOCOL_VERSION)
{
/// backwards compatibility is considered unnecessary for now, just let the user know that the server and the bridge must be upgraded together
processError(response, "Server and library-bridge have different versions: '" + std::to_string(version) + "' vs. '" + std::to_string(LIBRARY_BRIDGE_PROTOCOL_VERSION) + "'");
return;
}
if (mode == "read")
params.read(request.getStream());

View File

@ -4,9 +4,11 @@
#include <Server/HTTP/HTMLForm.h>
#include <Server/HTTP/WriteBufferFromHTTPServerResponse.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/logger_useful.h>
#include "validateODBCConnectionString.h"
#include "ODBCPooledConnectionFactory.h"
@ -40,6 +42,28 @@ void SchemaAllowedHandler::handleRequest(HTTPServerRequest & request, HTTPServer
LOG_WARNING(log, fmt::runtime(message));
};
size_t version;
if (!params.has("version"))
version = 0; /// assumed version for too old servers which do not send a version
else
{
String version_str = params.get("version");
if (!tryParse(version, version_str))
{
process_error("Unable to parse 'version' string in request URL: '" + version_str + "' Check if the server and library-bridge have the same version.");
return;
}
}
if (version != XDBC_BRIDGE_PROTOCOL_VERSION)
{
/// backwards compatibility is considered unnecessary for now, just let the user know that the server and the bridge must be upgraded together
process_error("Server and library-bridge have different versions: '" + std::to_string(version) + "' vs. '" + std::to_string(LIBRARY_BRIDGE_PROTOCOL_VERSION) + "'");
return;
}
if (!params.has("connection_string"))
{
process_error("No 'connection_string' in request URL");

View File

@ -60,6 +60,14 @@
</logger>
</levels>
-->
<!-- Structured log formatting:
You can specify log format(for now, JSON only). In that case, the console log will be printed
in specified format like JSON.
For example, as below:
{"date_time":"1650918987.180175","thread_name":"#1","thread_id":"254545","level":"Trace","query_id":"","logger_name":"BaseDaemon","message":"Received signal 2","source_file":"../base/daemon/BaseDaemon.cpp; virtual void SignalListener::run()","source_line":"192"}
To enable JSON logging support, just uncomment <formatting> tag below.
-->
<!-- <formatting>json</formatting> -->
</logger>
<!-- Add headers to response in options request. OPTIONS method is used in CORS preflight requests. -->

View File

@ -31,16 +31,10 @@ ExternalDictionaryLibraryBridgeHelper::ExternalDictionaryLibraryBridgeHelper(
const Block & sample_block_,
const Field & dictionary_id_,
const LibraryInitData & library_data_)
: IBridgeHelper(context_->getGlobalContext())
, log(&Poco::Logger::get("ExternalDictionaryLibraryBridgeHelper"))
: LibraryBridgeHelper(context_->getGlobalContext())
, sample_block(sample_block_)
, config(context_->getConfigRef())
, http_timeout(context_->getGlobalContext()->getSettingsRef().http_receive_timeout.value)
, library_data(library_data_)
, dictionary_id(dictionary_id_)
, bridge_host(config.getString("library_bridge.host", DEFAULT_HOST))
, bridge_port(config.getUInt("library_bridge.port", DEFAULT_PORT))
, http_timeouts(ConnectionTimeouts::getHTTPTimeouts(context_))
{
}
@ -65,28 +59,13 @@ Poco::URI ExternalDictionaryLibraryBridgeHelper::getMainURI() const
Poco::URI ExternalDictionaryLibraryBridgeHelper::createRequestURI(const String & method) const
{
auto uri = getMainURI();
uri.addQueryParameter("version", std::to_string(LIBRARY_BRIDGE_PROTOCOL_VERSION));
uri.addQueryParameter("dictionary_id", toString(dictionary_id));
uri.addQueryParameter("method", method);
return uri;
}
Poco::URI ExternalDictionaryLibraryBridgeHelper::createBaseURI() const
{
Poco::URI uri;
uri.setHost(bridge_host);
uri.setPort(bridge_port);
uri.setScheme("http");
return uri;
}
void ExternalDictionaryLibraryBridgeHelper::startBridge(std::unique_ptr<ShellCommand> cmd) const
{
getContext()->addBridgeCommand(std::move(cmd));
}
bool ExternalDictionaryLibraryBridgeHelper::bridgeHandShake()
{
String result;
@ -225,6 +204,14 @@ QueryPipeline ExternalDictionaryLibraryBridgeHelper::loadAll()
}
static String getDictIdsString(const std::vector<UInt64> & ids)
{
WriteBufferFromOwnString out;
writeVectorBinary(ids, out);
return out.str();
}
QueryPipeline ExternalDictionaryLibraryBridgeHelper::loadIds(const std::vector<uint64_t> & ids)
{
startBridgeSync();
@ -283,13 +270,4 @@ QueryPipeline ExternalDictionaryLibraryBridgeHelper::loadBase(const Poco::URI &
return QueryPipeline(std::move(source));
}
String ExternalDictionaryLibraryBridgeHelper::getDictIdsString(const std::vector<UInt64> & ids)
{
WriteBufferFromOwnString out;
writeVectorBinary(ids, out);
return out.str();
}
}

View File

@ -2,10 +2,9 @@
#include <Interpreters/Context.h>
#include <IO/ReadWriteBufferFromHTTP.h>
#include <Poco/Logger.h>
#include <Poco/Net/HTTPRequest.h>
#include <Poco/URI.h>
#include <BridgeHelper/IBridgeHelper.h>
#include <BridgeHelper/LibraryBridgeHelper.h>
#include <QueryPipeline/QueryPipeline.h>
@ -14,7 +13,8 @@ namespace DB
class Pipe;
class ExternalDictionaryLibraryBridgeHelper : public IBridgeHelper
// Class to access the external dictionary part of the clickhouse-library-bridge.
class ExternalDictionaryLibraryBridgeHelper : public LibraryBridgeHelper
{
public:
@ -25,7 +25,6 @@ public:
String dict_attributes;
};
static constexpr inline size_t DEFAULT_PORT = 9012;
static constexpr inline auto PING_HANDLER = "/extdict_ping";
static constexpr inline auto MAIN_HANDLER = "/extdict_request";
@ -56,26 +55,6 @@ protected:
bool bridgeHandShake() override;
void startBridge(std::unique_ptr<ShellCommand> cmd) const override;
String serviceAlias() const override { return "clickhouse-library-bridge"; }
String serviceFileName() const override { return serviceAlias(); }
size_t getDefaultPort() const override { return DEFAULT_PORT; }
bool startBridgeManually() const override { return false; }
String configPrefix() const override { return "library_bridge"; }
const Poco::Util::AbstractConfiguration & getConfig() const override { return config; }
Poco::Logger * getLog() const override { return log; }
Poco::Timespan getHTTPTimeout() const override { return http_timeout; }
Poco::URI createBaseURI() const override;
QueryPipeline loadBase(const Poco::URI & uri, ReadWriteBufferFromHTTP::OutStreamCallback out_stream_callback = {});
bool executeRequest(const Poco::URI & uri, ReadWriteBufferFromHTTP::OutStreamCallback out_stream_callback = {}) const;
@ -94,20 +73,10 @@ private:
Poco::URI createRequestURI(const String & method) const;
static String getDictIdsString(const std::vector<UInt64> & ids);
Poco::Logger * log;
const Block sample_block;
const Poco::Util::AbstractConfiguration & config;
const Poco::Timespan http_timeout;
LibraryInitData library_data;
Field dictionary_id;
std::string bridge_host;
size_t bridge_port;
bool library_initialized = false;
ConnectionTimeouts http_timeouts;
Poco::Net::HTTPBasicCredentials credentials{};
};
}

View File

@ -0,0 +1,34 @@
#include "LibraryBridgeHelper.h"
namespace DB
{
LibraryBridgeHelper::LibraryBridgeHelper(ContextPtr context_)
: IBridgeHelper(context_)
, config(context_->getConfigRef())
, log(&Poco::Logger::get("LibraryBridgeHelper"))
, http_timeout(context_->getGlobalContext()->getSettingsRef().http_receive_timeout.value)
, bridge_host(config.getString("library_bridge.host", DEFAULT_HOST))
, bridge_port(config.getUInt("library_bridge.port", DEFAULT_PORT))
, http_timeouts(ConnectionTimeouts::getHTTPTimeouts(context_))
{
}
void LibraryBridgeHelper::startBridge(std::unique_ptr<ShellCommand> cmd) const
{
getContext()->addBridgeCommand(std::move(cmd));
}
Poco::URI LibraryBridgeHelper::createBaseURI() const
{
Poco::URI uri;
uri.setHost(bridge_host);
uri.setPort(bridge_port);
uri.setScheme("http");
return uri;
}
}

View File

@ -0,0 +1,51 @@
#pragma once
#include <Interpreters/Context.h>
#include <IO/ReadWriteBufferFromHTTP.h>
#include <Poco/Logger.h>
#include <Poco/Net/HTTPRequest.h>
#include <Poco/URI.h>
#include <BridgeHelper/IBridgeHelper.h>
#include <Common/BridgeProtocolVersion.h>
namespace DB
{
// Common base class to access the clickhouse-library-bridge.
class LibraryBridgeHelper : public IBridgeHelper
{
protected:
explicit LibraryBridgeHelper(ContextPtr context_);
void startBridge(std::unique_ptr<ShellCommand> cmd) const override;
String serviceAlias() const override { return "clickhouse-library-bridge"; }
String serviceFileName() const override { return serviceAlias(); }
size_t getDefaultPort() const override { return DEFAULT_PORT; }
bool startBridgeManually() const override { return false; }
String configPrefix() const override { return "library_bridge"; }
const Poco::Util::AbstractConfiguration & getConfig() const override { return config; }
Poco::Logger * getLog() const override { return log; }
Poco::Timespan getHTTPTimeout() const override { return http_timeout; }
Poco::URI createBaseURI() const override;
static constexpr inline size_t DEFAULT_PORT = 9012;
const Poco::Util::AbstractConfiguration & config;
Poco::Logger * log;
const Poco::Timespan http_timeout;
std::string bridge_host;
size_t bridge_port;
ConnectionTimeouts http_timeouts;
Poco::Net::HTTPBasicCredentials credentials{};
};
}

View File

@ -9,6 +9,7 @@
#include <Poco/Net/HTTPRequest.h>
#include <Poco/URI.h>
#include <Poco/Util/AbstractConfiguration.h>
#include <Common/BridgeProtocolVersion.h>
#include <Common/ShellCommand.h>
#include <Common/logger_useful.h>
#include <IO/ConnectionTimeoutsContext.h>
@ -86,6 +87,7 @@ protected:
{
auto uri = createBaseURI();
uri.setPath(MAIN_HANDLER);
uri.addQueryParameter("version", std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION));
return uri;
}
@ -163,6 +165,7 @@ protected:
{
auto uri = createBaseURI();
uri.setPath(COL_INFO_HANDLER);
uri.addQueryParameter("version", std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION));
return uri;
}
@ -184,6 +187,7 @@ protected:
auto uri = createBaseURI();
uri.setPath(SCHEMA_ALLOWED_HANDLER);
uri.addQueryParameter("version", std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION));
uri.addQueryParameter("connection_string", getConnectionString());
ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(getContext()), credentials);
@ -204,6 +208,7 @@ protected:
auto uri = createBaseURI();
uri.setPath(IDENTIFIER_QUOTE_HANDLER);
uri.addQueryParameter("version", std::to_string(XDBC_BRIDGE_PROTOCOL_VERSION));
uri.addQueryParameter("connection_string", getConnectionString());
ReadWriteBufferFromHTTP buf(uri, Poco::Net::HTTPRequest::HTTP_POST, {}, ConnectionTimeouts::getHTTPTimeouts(getContext()), credentials);

View File

@ -740,8 +740,10 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
/// Rewrite query only when we have query parameters.
/// Note that if query is rewritten, comments in query are lost.
/// But the user often wants to see comments in server logs, query log, processlist, etc.
/// For recent versions of the server query parameters will be transferred by network and applied on the server side.
auto query = query_to_execute;
if (!query_parameters.empty())
if (!query_parameters.empty()
&& connection->getServerRevision(connection_parameters.timeouts) < DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS)
{
/// Replace ASTQueryParameter with ASTLiteral for prepared statements.
ReplaceQueryParameterVisitor visitor(query_parameters);
@ -762,6 +764,7 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa
connection->sendQuery(
connection_parameters.timeouts,
query,
query_parameters,
global_context->getCurrentQueryId(),
query_processing_stage,
&global_context->getSettingsRef(),
@ -1087,7 +1090,8 @@ bool ClientBase::receiveSampleBlock(Block & out, ColumnsDescription & columns_de
void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr parsed_query)
{
auto query = query_to_execute;
if (!query_parameters.empty())
if (!query_parameters.empty()
&& connection->getServerRevision(connection_parameters.timeouts) < DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS)
{
/// Replace ASTQueryParameter with ASTLiteral for prepared statements.
ReplaceQueryParameterVisitor visitor(query_parameters);
@ -1114,6 +1118,7 @@ void ClientBase::processInsertQuery(const String & query_to_execute, ASTPtr pars
connection->sendQuery(
connection_parameters.timeouts,
query,
query_parameters,
global_context->getCurrentQueryId(),
query_processing_stage,
&global_context->getSettingsRef(),

View File

@ -477,6 +477,7 @@ TablesStatusResponse Connection::getTablesStatus(const ConnectionTimeouts & time
void Connection::sendQuery(
const ConnectionTimeouts & timeouts,
const String & query,
const NameToNameMap & query_parameters,
const String & query_id_,
UInt64 stage,
const Settings * settings,
@ -569,6 +570,14 @@ void Connection::sendQuery(
writeStringBinary(query, *out);
if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS)
{
Settings params;
for (const auto & [name, value] : query_parameters)
params.set(name, value);
params.write(*out, SettingsWriteFormat::STRINGS_WITH_FLAGS);
}
maybe_compressed_in.reset();
maybe_compressed_out.reset();
block_in.reset();

View File

@ -97,6 +97,7 @@ public:
void sendQuery(
const ConnectionTimeouts & timeouts,
const String & query,
const NameToNameMap& query_parameters,
const String & query_id_/* = "" */,
UInt64 stage/* = QueryProcessingStage::Complete */,
const Settings * settings/* = nullptr */,

View File

@ -183,7 +183,7 @@ void HedgedConnections::sendQuery(
modified_settings.parallel_replica_offset = fd_to_replica_location[replica.packet_receiver->getFileDescriptor()].offset;
}
replica.connection->sendQuery(timeouts, query, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
replica.connection->sendQuery(timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
replica.change_replica_timeout.setRelative(timeouts.receive_data_timeout);
replica.packet_receiver->setReceiveTimeout(hedged_connections_factory.getConnectionTimeouts().receive_timeout);
};

View File

@ -86,6 +86,7 @@ public:
virtual void sendQuery(
const ConnectionTimeouts & timeouts,
const String & query,
const NameToNameMap & query_parameters,
const String & query_id_,
UInt64 stage,
const Settings * settings,

View File

@ -75,6 +75,7 @@ void LocalConnection::sendProfileEvents()
void LocalConnection::sendQuery(
const ConnectionTimeouts &,
const String & query,
const NameToNameMap & query_parameters,
const String & query_id,
UInt64 stage,
const Settings *,
@ -82,6 +83,9 @@ void LocalConnection::sendQuery(
bool,
std::function<void(const Progress &)> process_progress_callback)
{
if (!query_parameters.empty())
throw Exception(ErrorCodes::LOGICAL_ERROR, "clickhouse local does not support query parameters");
/// Suggestion comes without client_info.
if (client_info)
query_context = session.makeQueryContext(*client_info);

View File

@ -94,6 +94,7 @@ public:
void sendQuery(
const ConnectionTimeouts & timeouts,
const String & query,
const NameToNameMap & query_parameters,
const String & query_id/* = "" */,
UInt64 stage/* = QueryProcessingStage::Complete */,
const Settings * settings/* = nullptr */,

View File

@ -160,15 +160,15 @@ void MultiplexedConnections::sendQuery(
if (enable_sample_offset_parallel_processing)
modified_settings.parallel_replica_offset = i;
replica_states[i].connection->sendQuery(timeouts, query, query_id,
stage, &modified_settings, &client_info, with_pending_data, {});
replica_states[i].connection->sendQuery(
timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
}
}
else
{
/// Use single replica.
replica_states[0].connection->sendQuery(timeouts, query, query_id,
stage, &modified_settings, &client_info, with_pending_data, {});
replica_states[0].connection->sendQuery(
timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
}
sent_query = true;

View File

@ -138,7 +138,8 @@ void Suggest::load(ContextPtr context, const ConnectionParameters & connection_p
void Suggest::fetch(IServerConnection & connection, const ConnectionTimeouts & timeouts, const std::string & query)
{
connection.sendQuery(timeouts, query, "" /* query_id */, QueryProcessingStage::Complete, nullptr, nullptr, false, {});
connection.sendQuery(
timeouts, query, {} /* query_parameters */, "" /* query_id */, QueryProcessingStage::Complete, nullptr, nullptr, false, {});
while (true)
{

View File

@ -56,7 +56,9 @@ ColumnArray::ColumnArray(MutableColumnPtr && nested_column, MutableColumnPtr &&
/// This will also prevent possible overflow in offset.
if (data->size() != last_offset)
throw Exception("offsets_column has data inconsistent with nested_column", ErrorCodes::LOGICAL_ERROR);
throw Exception(ErrorCodes::LOGICAL_ERROR,
"offsets_column has data inconsistent with nested_column. Data size: {}, last offset: {}",
data->size(), last_offset);
}
/** NOTE
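
The reworked check above follows the usual pattern of formatting the offending values into the exception message instead of throwing a bare string. A standalone approximation with the fmt library (which ClickHouse's exception formatting uses under the hood), with made-up sizes:

#include <fmt/format.h>
#include <cstddef>
#include <stdexcept>

int main()
{
    const std::size_t data_size = 7, last_offset = 9;   // deliberately inconsistent
    if (data_size != last_offset)
        throw std::runtime_error(fmt::format(
            "offsets_column has data inconsistent with nested_column. Data size: {}, last offset: {}",
            data_size, last_offset));
}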

View File

@ -0,0 +1,14 @@
#pragma once
#include <cstddef>
namespace DB
{
// Version of protocol between clickhouse-server and clickhouse-library-bridge. Increment if you change it in a non-compatible way.
static constexpr size_t LIBRARY_BRIDGE_PROTOCOL_VERSION = 1;
// Version of protocol between clickhouse-server and clickhouse-xdbc-bridge. Increment if you change it in a non-compatible way.
static constexpr size_t XDBC_BRIDGE_PROTOCOL_VERSION = 1;
}
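
These constants give clickhouse-server and its bridge binaries a single place to agree on wire compatibility. A hypothetical version check built on them — the request parameter and the accept/reject helper below are illustrative, not the actual bridge handshake:

#include <cstddef>
#include <iostream>
#include <string>

static constexpr size_t LIBRARY_BRIDGE_PROTOCOL_VERSION = 1;

// Hypothetical handler: reject requests whose declared protocol version differs from ours.
static bool acceptRequest(const std::string & version_param)
{
    size_t client_version = 0;
    try
    {
        client_version = std::stoul(version_param);
    }
    catch (...)
    {
        return false;   // missing or malformed version parameter
    }
    return client_version == LIBRARY_BRIDGE_PROTOCOL_VERSION;
}

int main()
{
    std::cout << std::boolalpha
              << acceptRequest("1") << '\n'    // true: versions match
              << acceptRequest("2") << '\n';   // false: incompatible bridge/client pair
}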

View File

@ -6,11 +6,9 @@
#include <Common/Arena.h>
#include <Common/LRUCache.h>
#include <Common/assert_cast.h>
#include "Columns/IColumn.h"
#include <base/unaligned.h>
#include <Columns/ColumnString.h>
#include <Columns/ColumnConst.h>
#include <Columns/ColumnFixedString.h>
#include <Columns/ColumnLowCardinality.h>
@ -85,11 +83,8 @@ struct HashMethodString
HashMethodString(const ColumnRawPtrs & key_columns, const Sizes & /*key_sizes*/, const HashMethodContextPtr &)
{
const IColumn * column = key_columns[0];
if (isColumnConst(*column))
column = &assert_cast<const ColumnConst &>(*column).getDataColumn();
const ColumnString & column_string = assert_cast<const ColumnString &>(*column);
const IColumn & column = *key_columns[0];
const ColumnString & column_string = assert_cast<const ColumnString &>(column);
offsets = column_string.getOffsets().data();
chars = column_string.getChars().data();
}

View File

@ -43,13 +43,6 @@ ProfileEvents::Counters & CurrentThread::getProfileEvents()
return current_thread ? current_thread->performance_counters : ProfileEvents::global_counters;
}
MemoryTracker * CurrentThread::getMemoryTracker()
{
if (unlikely(!current_thread))
return nullptr;
return &current_thread->memory_tracker;
}
void CurrentThread::updateProgressIn(const Progress & value)
{
if (unlikely(!current_thread))

Some files were not shown because too many files have changed in this diff.