Merge branch 'master' into dev-seriesPeriodDetect

Bhavna Jindal 2023-12-08 10:26:39 -05:00 committed by GitHub
commit 1f58b0f150
91 changed files with 1978 additions and 144 deletions

.gitmodules vendored

@ -357,3 +357,6 @@
[submodule "contrib/pocketfft"]
path = contrib/pocketfft
url = https://github.com/mreineck/pocketfft.git
[submodule "contrib/sqids-cpp"]
path = contrib/sqids-cpp
url = https://github.com/sqids/sqids-cpp.git


@ -1,5 +1,5 @@
### Table of Contents
-**[ClickHouse release v23.11, 2023-12-05](#2311)**<br/>
+**[ClickHouse release v23.11, 2023-12-06](#2311)**<br/>
**[ClickHouse release v23.10, 2023-11-02](#2310)**<br/>
**[ClickHouse release v23.9, 2023-09-28](#239)**<br/>
**[ClickHouse release v23.8 LTS, 2023-08-31](#238)**<br/>
@ -14,7 +14,7 @@
# 2023 Changelog
### <a id="2311"></a> ClickHouse release 23.11, 2023-12-05
### <a id="2311"></a> ClickHouse release 23.11, 2023-12-06
#### Backward Incompatible Change
* The default ClickHouse server configuration file has enabled `access_management` (user manipulation by SQL queries) and `named_collection_control` (manipulation of named collection by SQL queries) for the `default` user by default. This closes [#56482](https://github.com/ClickHouse/ClickHouse/issues/56482). [#56619](https://github.com/ClickHouse/ClickHouse/pull/56619) ([Alexey Milovidov](https://github.com/alexey-milovidov)).


@ -13,6 +13,7 @@ The following versions of ClickHouse server are currently being supported with s
| Version | Supported |
|:-|:-|
+| 23.11 | ✔️ |
| 23.10 | ✔️ |
| 23.9 | ✔️ |
| 23.8 | ✔️ |


@ -2,11 +2,11 @@
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54480)
+SET(VERSION_REVISION 54481)
SET(VERSION_MAJOR 23)
-SET(VERSION_MINOR 11)
+SET(VERSION_MINOR 12)
SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 13adae0e42fd48de600486fc5d4b64d39f80c43e)
-SET(VERSION_DESCRIBE v23.11.1.1-testing)
-SET(VERSION_STRING 23.11.1.1)
+SET(VERSION_GITHASH 05bc8ef1e02b9c7332f08091775b255d191341bf)
+SET(VERSION_DESCRIBE v23.12.1.1-testing)
+SET(VERSION_STRING 23.12.1.1)
# end of autochange


@ -156,6 +156,7 @@ add_contrib (nuraft-cmake NuRaft)
add_contrib (fast_float-cmake fast_float)
add_contrib (datasketches-cpp-cmake datasketches-cpp)
add_contrib (incbin-cmake incbin)
+add_contrib (sqids-cpp-cmake sqids-cpp)
option(ENABLE_NLP "Enable NLP functions support" ${ENABLE_LIBRARIES})
if (ENABLE_NLP)

contrib/sqids-cpp vendored Submodule

@ -0,0 +1 @@
Subproject commit 3756e537d4d48cc0dd4176801fe19f99601439b0


@ -0,0 +1,14 @@
option(ENABLE_SQIDS "Enable sqids support" ${ENABLE_LIBRARIES})
if ((NOT ENABLE_SQIDS))
    message (STATUS "Not using sqids")
    return()
endif()
set (SQIDS_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/sqids-cpp")
set (SQIDS_INCLUDE_DIR "${SQIDS_SOURCE_DIR}/include")
add_library(_sqids INTERFACE)
target_include_directories(_sqids SYSTEM INTERFACE ${SQIDS_INCLUDE_DIR})
add_library(ch_contrib::sqids ALIAS _sqids)
target_compile_definitions(_sqids INTERFACE ENABLE_SQIDS)


@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.10.5.20"
ARG VERSION="23.11.1.2711"
ARG PACKAGES="clickhouse-keeper"
# user/group precreated explicitly with fixed uid/gid on purpose.


@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.10.5.20"
ARG VERSION="23.11.1.2711"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# user/group precreated explicitly with fixed uid/gid on purpose.


@ -30,7 +30,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.10.5.20"
ARG VERSION="23.11.1.2711"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
# set non-empty deb_location_url url to create a docker image


@ -1,7 +1,7 @@
# docker build -t clickhouse/integration-helper .
# Helper docker container to run iptables without sudo
-FROM alpine
+FROM alpine:3.18
RUN apk add --no-cache -U iproute2 \
&& for bin in iptables iptables-restore iptables-save; \
do ln -sf xtables-nft-multi "/sbin/$bin"; \


@ -0,0 +1,525 @@
---
sidebar_position: 1
sidebar_label: 2023
---
# 2023 Changelog
### ClickHouse release v23.11.1.2711-stable (05bc8ef1e02) FIXME as compared to v23.10.1.1976-stable (13adae0e42f)
#### Backward Incompatible Change
* Formatters `%l`/`%k`/`%c` in function `parseDateTime()` are now able to parse hours/months without leading zeros, e.g. `select parseDateTime('2023-11-26 8:14', '%F %k:%i')` now works. Set `parsedatetime_parse_without_leading_zeros = 0` to restore the previous behavior, which required two digits. Function `formatDateTime` is now also able to print hours/months without leading zeros. This is controlled by the setting `formatdatetime_format_without_leading_zeros`, which is off by default so as not to break existing use cases. [#55872](https://github.com/ClickHouse/ClickHouse/pull/55872) ([Azat Khuzhin](https://github.com/azat)).
* You can no longer use the aggregate function `avgWeighted` with arguments of type `Decimal`. Workaround: convert arguments to `Float64`. This closes [#43928](https://github.com/ClickHouse/ClickHouse/issues/43928). This closes [#31768](https://github.com/ClickHouse/ClickHouse/issues/31768). This closes [#56435](https://github.com/ClickHouse/ClickHouse/issues/56435). If you have used this function inside materialized views or projections with `Decimal` arguments, contact support@clickhouse.com. Fixed error in aggregate function `sumMap` and made it slower around 1.5..2 times. It does not matter because the function is garbage anyway. This closes [#54955](https://github.com/ClickHouse/ClickHouse/issues/54955). This closes [#53134](https://github.com/ClickHouse/ClickHouse/issues/53134). This closes [#55148](https://github.com/ClickHouse/ClickHouse/issues/55148). Fix a bug in function `groupArraySample` - it used the same random seed in case more than one aggregate state is generated in a query. [#56350](https://github.com/ClickHouse/ClickHouse/pull/56350) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The default ClickHouse server configuration file has enabled `access_management` (user manipulation by SQL queries) and `named_collection_control` (manipulation of named collection by SQL queries) for the `default` user by default. This closes [#56482](https://github.com/ClickHouse/ClickHouse/issues/56482). [#56619](https://github.com/ClickHouse/ClickHouse/pull/56619) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Multiple improvements for RESPECT/IGNORE NULLS. [#57189](https://github.com/ClickHouse/ClickHouse/pull/57189) ([Raúl Marín](https://github.com/Algunenano)).
* Remove optimization optimize_move_functions_out_of_any. [#57190](https://github.com/ClickHouse/ClickHouse/pull/57190) ([Raúl Marín](https://github.com/Algunenano)).
#### New Feature
* Added server setting `async_load_databases` for asynchronous loading of databases and tables. Speeds up the server start time. Applies to databases with the Ordinary, Atomic, and Replicated engines; their tables load metadata asynchronously. A query to a table increases the priority of its load job and waits for it to be done. Added table `system.async_loader`. [#49351](https://github.com/ClickHouse/ClickHouse/pull/49351) ([Sergei Trifonov](https://github.com/serxa)).
* 1. Add function `extractPlainRanges` to `KeyCondition`. 2. Add some useful functions to `Range`. 3. Add `PlainRanges`, which represents a series of ordered, non-overlapping ranges. 4. Add `NumbersRangedSource`, which can accurately return user-selected numbers. [#50909](https://github.com/ClickHouse/ClickHouse/pull/50909) ([JackyWoo](https://github.com/JackyWoo)).
* Add system table `blob_storage_log`. [#52918](https://github.com/ClickHouse/ClickHouse/pull/52918) ([vdimir](https://github.com/vdimir)).
* Use statistics to order PREWHERE conditions better. [#53240](https://github.com/ClickHouse/ClickHouse/pull/53240) ([Han Fei](https://github.com/hanfei1991)).
* Added a new aggregation function `groupArraySorted(n)(value)` which returns an array with the first n values of the field `value`, sorted (see the example after this list). [#53562](https://github.com/ClickHouse/ClickHouse/pull/53562) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Added support for compression in the Keeper protocol. It can be enabled in ClickHouse with the `use_compression` flag inside the `zookeeper` section. Resolves [#49507](https://github.com/ClickHouse/ClickHouse/issues/49507). [#54957](https://github.com/ClickHouse/ClickHouse/pull/54957) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Add ClickHouse setting to disable tunneling for HTTPS requests over HTTP proxy. [#55033](https://github.com/ClickHouse/ClickHouse/pull/55033) ([Arthur Passos](https://github.com/arthurpassos)).
* Introduce the feature `storage_metadata_write_full_object_key`. If it is set to `true`, metadata files are written in the new format VERSION_FULL_OBJECT_KEY, in which ClickHouse stores the full remote object key in the metadata file. [#55566](https://github.com/ClickHouse/ClickHouse/pull/55566) ([Sema Checherinda](https://github.com/CheSema)).
* Add new settings and syntax to protect named collections' fields from being overridden. This is meant to prevent a malicious user from obtaining unauthorized access to secrets. [#55782](https://github.com/ClickHouse/ClickHouse/pull/55782) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Add `hostname` column to all system log tables. [#55894](https://github.com/ClickHouse/ClickHouse/pull/55894) ([Bharat Nallan](https://github.com/bharatnc)).
* Add `CHECK ALL TABLES` query. [#56022](https://github.com/ClickHouse/ClickHouse/pull/56022) ([vdimir](https://github.com/vdimir)).
* Added function `fromDaysSinceYearZero()` which is similar to MySQL's `FROM_DAYS`. E.g. `SELECT fromDaysSinceYearZero(739136)` returns `2023-09-08`. [#56088](https://github.com/ClickHouse/ClickHouse/pull/56088) ([Joanna Hulboj](https://github.com/jh0x)).
* Implemented a series period detection method using FFT from the pocketfft library. [#56171](https://github.com/ClickHouse/ClickHouse/pull/56171) ([Bhavna Jindal](https://github.com/bhavnajindal)).
* Add an external Python tool to view backups and to extract information from them without using ClickHouse. [#56268](https://github.com/ClickHouse/ClickHouse/pull/56268) ([Vitaly Baranov](https://github.com/vitlibar)).
* ... [#56275](https://github.com/ClickHouse/ClickHouse/pull/56275) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Implemented a new setting called `preferred_projection_name`. If it is set to a non-empty string, the specified projection is used if possible. [#56309](https://github.com/ClickHouse/ClickHouse/pull/56309) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* S3 adaptive timeouts: the first attempt is made with low send and receive timeouts. [#56314](https://github.com/ClickHouse/ClickHouse/pull/56314) ([Sema Checherinda](https://github.com/CheSema)).
* Add 4-letter command for yielding/resigning leadership (https://github.com/ClickHouse/ClickHouse/issues/56352). [#56354](https://github.com/ClickHouse/ClickHouse/pull/56354) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Added a new SQL function, "arrayRandomSample(arr, k)" which returns a sample of k elements from the input array. Similar functionality could previously be achieved only with less convenient syntax, e.g. "SELECT arrayReduce('groupArraySample(3)', range(10))". [#56416](https://github.com/ClickHouse/ClickHouse/pull/56416) ([Robert Schulze](https://github.com/rschu1ze)).
* Added support for `float16` type data to use in `.npy` files. Closes [#56344](https://github.com/ClickHouse/ClickHouse/issues/56344). [#56424](https://github.com/ClickHouse/ClickHouse/pull/56424) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Added system view `information_schema.statistics` for better compatibility with Tableau Online. [#56425](https://github.com/ClickHouse/ClickHouse/pull/56425) ([Serge Klochkov](https://github.com/slvrtrn)).
* Add function `getClientHTTPHeader` for fetching header values set in the HTTP request. [#56488](https://github.com/ClickHouse/ClickHouse/pull/56488) ([凌涛](https://github.com/lingtaolf)).
* Add a new table function named `fuzzJSON` with rows containing perturbed versions of the source JSON string with random variations. [#56490](https://github.com/ClickHouse/ClickHouse/pull/56490) ([Julia Kartseva](https://github.com/jkartseva)).
* Add `system.symbols` table useful for introspection of the binary. [#56548](https://github.com/ClickHouse/ClickHouse/pull/56548) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add 4-letter command for yielding/resigning leadership. [#56620](https://github.com/ClickHouse/ClickHouse/pull/56620) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Configurable dashboards. Queries for charts are now loaded using a query, which by default uses a new `system.dashboards` table. [#56771](https://github.com/ClickHouse/ClickHouse/pull/56771) ([Sergei Trifonov](https://github.com/serxa)).
* Introduce `fileCluster` table function. [#56868](https://github.com/ClickHouse/ClickHouse/pull/56868) ([Andrey Zvonov](https://github.com/zvonand)).
* Add a `_size` virtual column with the file size in bytes to the `s3`/`file`/`hdfs`/`url`/`azureBlobStorage` engines (see the example after this list). [#57126](https://github.com/ClickHouse/ClickHouse/pull/57126) ([Kruglov Pavel](https://github.com/Avogar)).
* Expose the number of errors that occurred on a server since the last restart via the Prometheus endpoint. [#57209](https://github.com/ClickHouse/ClickHouse/pull/57209) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Added a new SQL function `sqid` to generate Sqids (https://sqids.org/), example: `SELECT sqid(125, 126)`. [#57442](https://github.com/ClickHouse/ClickHouse/pull/57442) ([awakeljw](https://github.com/awakeljw)).
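
A minimal illustration of the new `groupArraySorted` aggregate function referenced above (the input is purely illustrative):

```sql
-- Returns the first 3 values of the column, sorted: [0, 1, 2]
SELECT groupArraySorted(3)(number) FROM numbers(10);
```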
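A sketch of querying the new `_size` virtual column referenced above (the file glob and structure are hypothetical):

```sql
-- Lists each matched file together with its size in bytes
SELECT _file, _size
FROM file('data_*.csv', 'CSV', 'id UInt64, value String');
```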
#### Performance Improvement
* Support window functions parallel evaluation. Fixes [#34688](https://github.com/ClickHouse/ClickHouse/issues/34688). [#39631](https://github.com/ClickHouse/ClickHouse/pull/39631) ([Dmitry Novik](https://github.com/novikd)).
* Increase the default value of `max_concurrent_queries` from 100 to 1000. This makes sense when there is a large number of connecting clients, which are slowly sending or receiving data, so the server is not limited by CPU, or when the number of CPU cores is larger than 100. Also, enable the concurrency control by default, and set the desired number of query processing threads in total as twice the number of CPU cores. It improves performance in scenarios with a very large number of concurrent queries. [#46927](https://github.com/ClickHouse/ClickHouse/pull/46927) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed filtering by `IN(...)` condition for `Merge` table engine. [#54905](https://github.com/ClickHouse/ClickHouse/pull/54905) ([Nikita Taranov](https://github.com/nickitat)).
* An improvement for the case when the cache is full and there are big reads. [#55158](https://github.com/ClickHouse/ClickHouse/pull/55158) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add the ability to disable checksums for S3 to avoid excessive reading of the input file (this behavior can be enabled with `s3_disable_checksum=true`; see the example after this list). [#55559](https://github.com/ClickHouse/ClickHouse/pull/55559) ([Azat Khuzhin](https://github.com/azat)).
* Now we read synchronously from remote tables when the data is in the page cache (like we do for local tables). It is faster, doesn't require synchronisation inside the thread pool, doesn't hesitate to do `seek`-s on the local fs, and reduces CPU wait. [#55841](https://github.com/ClickHouse/ClickHouse/pull/55841) ([Nikita Taranov](https://github.com/nickitat)).
* ... This follows [#55929](https://github.com/ClickHouse/ClickHouse/issues/55929); it brings about a 30% speedup by reducing the reserved memory and the number of `resize` calls. [#55957](https://github.com/ClickHouse/ClickHouse/pull/55957) ([lgbo](https://github.com/lgbo-ustc)).
* The performance experiments of **OnTime** on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) show that this change could bring the improvements of **7.4%, 5.9%, 4.7%, 3.0%, and 4.6%** to the QPS of the query Q2, Q3, Q4, Q5 and Q6 respectively while having no impact on others. [#56079](https://github.com/ClickHouse/ClickHouse/pull/56079) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Limit the number of threads busy inside the query profiler. If there are more - they will skip profiling. [#56105](https://github.com/ClickHouse/ClickHouse/pull/56105) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* WindowTransform: decrease the number of virtual function calls. [#56120](https://github.com/ClickHouse/ClickHouse/pull/56120) ([Maksim Kita](https://github.com/kitaisreal)).
* Allow recursive tuple field pruning in ORC to speed up scanning. [#56122](https://github.com/ClickHouse/ClickHouse/pull/56122) ([李扬](https://github.com/taiyang-li)).
* Provide `countRows` support for the Npy data format. Now, with the setting `optimize_count_from_files=1`, queries like `select count() from file(data.npy)` work much faster thanks to caching of the results. [#56304](https://github.com/ClickHouse/ClickHouse/pull/56304) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Queries with aggregation and a large number of streams use less memory during the plan's construction. [#57074](https://github.com/ClickHouse/ClickHouse/pull/57074) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve performance of executing queries for use cases with many users. [#57106](https://github.com/ClickHouse/ClickHouse/pull/57106) ([Andrej Hoos](https://github.com/adikus)).
* Trivial improvement on array join, reuse some intermediate results. [#57183](https://github.com/ClickHouse/ClickHouse/pull/57183) ([李扬](https://github.com/taiyang-li)).
* Fixed cases when stack unwinding was slow. [#57221](https://github.com/ClickHouse/ClickHouse/pull/57221) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Now we use default read pool for reading from external storage when `max_streams = 1`. It is beneficial when read prefetches are enabled. [#57334](https://github.com/ClickHouse/ClickHouse/pull/57334) ([Nikita Taranov](https://github.com/nickitat)).
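
A sketch of the S3 checksum toggle referenced above (the bucket URL is hypothetical):

```sql
-- Skips checksum calculation to avoid re-reading the input file
SELECT count()
FROM s3('https://mybucket.s3.amazonaws.com/data/*.parquet', 'Parquet')
SETTINGS s3_disable_checksum = 1;
```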
#### Improvement
* Engine `Merge` filters the records according to the row policies of the underlying tables. [#50209](https://github.com/ClickHouse/ClickHouse/pull/50209) ([Ilya Golshtein](https://github.com/ilejn)).
* Add a setting `max_execution_time_leaf` to limit the execution time on a shard for a distributed query, and `timeout_overflow_mode_leaf` to control the behaviour if the timeout happens (see the example after this list). [#51823](https://github.com/ClickHouse/ClickHouse/pull/51823) ([Duc Canh Le](https://github.com/canhld94)).
* Fix possible postgresql logical replication conversion_error when using MaterializedPostgreSQL. [#53721](https://github.com/ClickHouse/ClickHouse/pull/53721) ([takakawa](https://github.com/takakawa)).
* Set `background_fetches_pool_size` to 16 and `background_schedule_pool_size` to 512, which is better for production usage with frequent small insertions. [#54327](https://github.com/ClickHouse/ClickHouse/pull/54327) ([Denny Crane](https://github.com/den-crane)).
* While reading data from a CSV file, if a line ends with '\r' not followed by '\n', the following exception was thrown: ``` Cannot parse CSV format: found \r (CR) not followed by \n (LF). Line must end by \n (LF) or \r\n (CR LF) or \n\r.: ``` In ClickHouse the CSV end-of-line must be \n, \r\n, or \n\r, so a \r must be followed by \n; but in some situations the CSV input data is abnormal, as above, with \r at the end of a line. [#54340](https://github.com/ClickHouse/ClickHouse/pull/54340) ([KevinyhZou](https://github.com/KevinyhZou)).
* Update arrow library to release-13.0.0 that supports new encodings. Closes [#44505](https://github.com/ClickHouse/ClickHouse/issues/44505). [#54800](https://github.com/ClickHouse/ClickHouse/pull/54800) ([Kruglov Pavel](https://github.com/Avogar)).
* Improve performance of ON CLUSTER queries by removing heavy system calls to get all network interfaces when looking for the local IP address in the DDL entry hosts list. [#54909](https://github.com/ClickHouse/ClickHouse/pull/54909) ([Duc Canh Le](https://github.com/canhld94)).
* Keeper improvement: improve memory-usage during startup by delaying log preprocessing. [#55660](https://github.com/ClickHouse/ClickHouse/pull/55660) ([Antonio Andelic](https://github.com/antonio2368)).
* Fixed accounting of memory allocated before attaching thread to a query or a user. [#56089](https://github.com/ClickHouse/ClickHouse/pull/56089) ([Nikita Taranov](https://github.com/nickitat)).
* ClickHouse Keeper reports its running availability zone at the `/keeper/availability-zone` path when running in an AWS environment. [#56104](https://github.com/ClickHouse/ClickHouse/pull/56104) ([Jianfei Hu](https://github.com/incfly)).
* Add support for LARGE_LIST with Arrow. [#56118](https://github.com/ClickHouse/ClickHouse/pull/56118) ([edef](https://github.com/edef1c)).
* Improved performance of glob matching for `file` and `hdfs` storages. [#56141](https://github.com/ClickHouse/ClickHouse/pull/56141) ([Andrey Zvonov](https://github.com/zvonand)).
* Allow manual compaction of `EmbeddedRocksDB` via `OPTIMIZE` query. [#56225](https://github.com/ClickHouse/ClickHouse/pull/56225) ([Azat Khuzhin](https://github.com/azat)).
* Posting lists in inverted indexes are now compressed which reduces their size by 10-30%. [#56226](https://github.com/ClickHouse/ClickHouse/pull/56226) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Add ability to specify BlockBasedTableOptions for EmbeddedRocksDB. [#56264](https://github.com/ClickHouse/ClickHouse/pull/56264) ([Azat Khuzhin](https://github.com/azat)).
* `SHOW COLUMNS` now displays MySQL's equivalent data type name when the connection was made through the MySQL protocol. Previously, this was the case when setting `use_mysql_types_in_show_columns = 1`. The setting is retained but made obsolete. [#56277](https://github.com/ClickHouse/ClickHouse/pull/56277) ([Robert Schulze](https://github.com/rschu1ze)).
* Fixed possible `The local set of parts of table doesn't look like the set of parts in ZooKeeper` error if server was restarted just after `TRUNCATE` or `DROP PARTITION`. [#56282](https://github.com/ClickHouse/ClickHouse/pull/56282) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Parallelise `BackupEntriesCollector`. [#56312](https://github.com/ClickHouse/ClickHouse/pull/56312) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fixed handling of non-const query strings in functions `formatQuery()`/`formatQuerySingleLine()`. Also added `OrNull` variants of both functions that return NULL when a query cannot be parsed, instead of throwing an exception (see the example after this list). [#56327](https://github.com/ClickHouse/ClickHouse/pull/56327) ([Robert Schulze](https://github.com/rschu1ze)).
* Support creating and materializing an index in the same ALTER query; also support modifying TTL and materializing TTL in the same query. Closes [#55651](https://github.com/ClickHouse/ClickHouse/issues/55651). [#56331](https://github.com/ClickHouse/ClickHouse/pull/56331) ([flynn](https://github.com/ucasfl)).
* Enable adding new disk to storage configuration without restart. [#56367](https://github.com/ClickHouse/ClickHouse/pull/56367) ([Duc Canh Le](https://github.com/canhld94)).
* Allow backup of materialized view with dropped inner table instead of failing the backup. [#56387](https://github.com/ClickHouse/ClickHouse/pull/56387) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Queries to `system.replicas` initiate requests to ZooKeeper when certain columns are queried. When there are thousands of tables, these requests might produce a considerable load on ZooKeeper. If there are multiple simultaneous queries to `system.replicas`, they make the same requests multiple times. The change is to "deduplicate" requests from concurrent queries. [#56420](https://github.com/ClickHouse/ClickHouse/pull/56420) ([Alexander Gololobov](https://github.com/davenger)).
* Add transition from reading key to reading quoted key when double quotes are found. [#56423](https://github.com/ClickHouse/ClickHouse/pull/56423) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix the conversion of queries into MySQL-compatible queries. [#56456](https://github.com/ClickHouse/ClickHouse/pull/56456) ([flynn](https://github.com/ucasfl)).
* Add support for backing up and restoring tables using KeeperMap engine. [#56460](https://github.com/ClickHouse/ClickHouse/pull/56460) ([Antonio Andelic](https://github.com/antonio2368)).
* A 404 response for CompleteMultipartUpload is now rechecked: the operation could have completed on the server even if the client got a timeout or another network error, in which case the next retry of CompleteMultipartUpload receives a 404 response. If the object key exists, the operation is considered successful. [#56475](https://github.com/ClickHouse/ClickHouse/pull/56475) ([Sema Checherinda](https://github.com/CheSema)).
* Enable the HTTP OPTIONS method by default - it simplifies requesting ClickHouse from a web browser. [#56483](https://github.com/ClickHouse/ClickHouse/pull/56483) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The value for `dns_max_consecutive_failures` was changed by mistake in [#46550](https://github.com/ClickHouse/ClickHouse/issues/46550) - this is reverted and adjusted to a better value. Also, increased the HTTP keep-alive timeout to a reasonable value from production. [#56485](https://github.com/ClickHouse/ClickHouse/pull/56485) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Load base backups lazily (a base backup won't be loaded until it's needed). Also add some log message and profile events for backups. [#56516](https://github.com/ClickHouse/ClickHouse/pull/56516) ([Vitaly Baranov](https://github.com/vitlibar)).
* Setting `query_cache_store_results_of_queries_with_nondeterministic_functions` (with values `false` or `true`) was marked obsolete. It was replaced by setting `query_cache_nondeterministic_function_handling`, a three-valued enum that controls how the query cache handles queries with non-deterministic functions: a) throw an exception (default behavior), b) save the non-deterministic query result regardless, or c) ignore, i.e. don't throw an exception and don't cache the result (see the example after this list). [#56519](https://github.com/ClickHouse/ClickHouse/pull/56519) ([Robert Schulze](https://github.com/rschu1ze)).
* Rewrite equality with `is null` check in JOIN ON section. *Analyzer only*. [#56538](https://github.com/ClickHouse/ClickHouse/pull/56538) ([vdimir](https://github.com/vdimir)).
* Function `concat` now supports arbitrary argument types (instead of only String and FixedString arguments). This makes it behave more similarly to MySQL's `concat` implementation. For example, `SELECT concat('ab', 42)` now returns `ab42`. [#56540](https://github.com/ClickHouse/ClickHouse/pull/56540) ([Serge Klochkov](https://github.com/slvrtrn)).
* Allow getting the cache configuration from the 'named_collection' section in the config or from a SQL-created named collection. [#56541](https://github.com/ClickHouse/ClickHouse/pull/56541) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update `query_masking_rules` when reloading the config ([#56449](https://github.com/ClickHouse/ClickHouse/issues/56449)). [#56573](https://github.com/ClickHouse/ClickHouse/pull/56573) ([Mikhail Koviazin](https://github.com/mkmkme)).
* Make `removeOutdatedTables()` less aggressive with unsuccessful PostgreSQL connections. [#56609](https://github.com/ClickHouse/ClickHouse/pull/56609) ([jsc0218](https://github.com/jsc0218)).
* Fix the case where connecting to PostgreSQL took too much time when the URL is not right, which made the relevant query get stuck until it was cancelled. [#56648](https://github.com/ClickHouse/ClickHouse/pull/56648) ([jsc0218](https://github.com/jsc0218)).
* ClickHouse keeper reports its running availability zone at `/keeper/availability-zone` path. This can be configured via `<availability_zone><value>us-west-1a</value></availability_zone>`. [#56715](https://github.com/ClickHouse/ClickHouse/pull/56715) ([Jianfei Hu](https://github.com/incfly)).
* Do not allow tables on different replicas to have different aggregate functions in SimpleAggregateFunction columns. [#56724](https://github.com/ClickHouse/ClickHouse/pull/56724) ([Duc Canh Le](https://github.com/canhld94)).
* Add support for the [well-known Protobuf types](https://protobuf.dev/reference/protobuf/google.protobuf/) in the Protobuf format. [#56741](https://github.com/ClickHouse/ClickHouse/pull/56741) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Keeper improvement: disable compressed logs by default in Keeper. [#56763](https://github.com/ClickHouse/ClickHouse/pull/56763) ([Antonio Andelic](https://github.com/antonio2368)).
* Add config setting `wait_dictionaries_load_at_startup`. [#56782](https://github.com/ClickHouse/ClickHouse/pull/56782) ([Vitaly Baranov](https://github.com/vitlibar)).
* There was a potential vulnerability in previous ClickHouse versions: if a user has connected and unsuccessfully tried to authenticate with the "interserver secret" method, the server didn't terminate the connection immediately but continued to receive and ignore the leftover packets from the client. While these packets are ignored, they are still parsed, and if they use a compression method with another known vulnerability, it will lead to exploitation of it without authentication. This issue was found with [ClickHouse Bug Bounty Program](https://github.com/ClickHouse/ClickHouse/issues/38986) by https://twitter.com/malacupa. [#56794](https://github.com/ClickHouse/ClickHouse/pull/56794) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fetching a part now waits until that part is fully committed on the remote replica. It is better not to send a part in the PreActive state; in the case of zero-copy replication this is a mandatory restriction. [#56808](https://github.com/ClickHouse/ClickHouse/pull/56808) ([Sema Checherinda](https://github.com/CheSema)).
* Implement user-level setting `alter_move_to_space_execute_async` which allows executing queries `ALTER TABLE ... MOVE PARTITION|PART TO DISK|VOLUME` asynchronously (see the example after this list). The size of the pool for background executions is controlled by `background_move_pool_size`. The default behavior is synchronous execution. Fixes [#47643](https://github.com/ClickHouse/ClickHouse/issues/47643). [#56809](https://github.com/ClickHouse/ClickHouse/pull/56809) ([alesapin](https://github.com/alesapin)).
* Allow filtering by engine when scanning `system.tables`, avoiding an unnecessary (potentially time-consuming) connection. [#56813](https://github.com/ClickHouse/ClickHouse/pull/56813) ([jsc0218](https://github.com/jsc0218)).
* Show `total_bytes` and `total_rows` in system tables for RocksDB storage. [#56816](https://github.com/ClickHouse/ClickHouse/pull/56816) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Allow basic commands in ALTER for TEMPORARY tables. [#56892](https://github.com/ClickHouse/ClickHouse/pull/56892) ([Sergey](https://github.com/icuken)).
* LZ4 compression: buffer the compressed block in the rare case when the output buffer's capacity is not enough for writing the compressed block directly to it. [#56938](https://github.com/ClickHouse/ClickHouse/pull/56938) ([Sema Checherinda](https://github.com/CheSema)).
* Add metrics for the number of queued jobs, which is useful for the IO thread pool. [#56958](https://github.com/ClickHouse/ClickHouse/pull/56958) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a setting for the PostgreSQL table engine in the config file, add a check for the setting, and add documentation for it. [#56959](https://github.com/ClickHouse/ClickHouse/pull/56959) ([Peignon Melvyn](https://github.com/melvynator)).
* Run the interpreter with the `only_analyze` flag in the `getSampleBlock` method. [#56972](https://github.com/ClickHouse/ClickHouse/pull/56972) ([Mikhail Artemenko](https://github.com/Michicosun)).
* Add a new `MergeTree` setting `add_implicit_sign_column_constraint_for_collapsing_engine` (disabled by default). When enabled, it adds an implicit CHECK constraint for `CollapsingMergeTree` tables that restricts the value of the `Sign` column to be only -1 or 1 (see the example after this list). [#56701](https://github.com/ClickHouse/ClickHouse/issues/56701). [#56986](https://github.com/ClickHouse/ClickHouse/pull/56986) ([Kevin Mingtarja](https://github.com/kevinmingtarja)).
* Function `concat()` can now be called with a single argument, e.g., `SELECT concat('abc')`. This makes its behavior more consistent with MySQL's concat implementation. [#57000](https://github.com/ClickHouse/ClickHouse/pull/57000) ([Serge Klochkov](https://github.com/slvrtrn)).
* Signs all `x-amz-*` headers as required by AWS S3 docs. [#57001](https://github.com/ClickHouse/ClickHouse/pull/57001) ([Arthur Passos](https://github.com/arthurpassos)).
* Function `fromDaysSinceYearZero` (alias: `FROM_DAYS`) can now be used with unsigned and signed integer types (previously, the argument had to be an unsigned integer). This improves compatibility with 3rd-party tools such as Tableau Online. [#57002](https://github.com/ClickHouse/ClickHouse/pull/57002) ([Serge Klochkov](https://github.com/slvrtrn)).
* Add `system.s3queue_log` to the default config. [#57036](https://github.com/ClickHouse/ClickHouse/pull/57036) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Change the default for `wait_dictionaries_load_at_startup` to true, and use this setting only if `dictionaries_lazy_load` is false. [#57133](https://github.com/ClickHouse/ClickHouse/pull/57133) ([Vitaly Baranov](https://github.com/vitlibar)).
* Check dictionary source type on creation even if `dictionaries_lazy_load` is enabled. [#57134](https://github.com/ClickHouse/ClickHouse/pull/57134) ([Vitaly Baranov](https://github.com/vitlibar)).
* Plan-level optimizations can now be enabled/disabled individually. Previously, it was only possible to disable them all. The setting which previously did that (`query_plan_enable_optimizations`) is retained and can still be used to disable all optimizations. [#57152](https://github.com/ClickHouse/ClickHouse/pull/57152) ([Robert Schulze](https://github.com/rschu1ze)).
* The server's exit code will correspond to the exception code. For example, if the server cannot start due to memory limit, it will exit with the code 241 = MEMORY_LIMIT_EXCEEDED. In previous versions, the exit code for exceptions was always 70 = Poco::Util::ExitCode::EXIT_SOFTWARE. [#57153](https://github.com/ClickHouse/ClickHouse/pull/57153) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not demangle and symbolize stack frames from the `__functional` C++ header. [#57201](https://github.com/ClickHouse/ClickHouse/pull/57201) ([Mike Kot](https://github.com/myrrc)).
* It is now possible to refer to ALIAS column in index (non-primary-key) definitions (issue [#55650](https://github.com/ClickHouse/ClickHouse/issues/55650)). Example: `CREATE TABLE tab(col UInt32, col_alias ALIAS col + 1, INDEX idx (col_alias) TYPE minmax) ENGINE = MergeTree ORDER BY col;`. [#57220](https://github.com/ClickHouse/ClickHouse/pull/57220) ([flynn](https://github.com/ucasfl)).
* HTTP server page `/dashboard` now supports charts with multiple lines. [#57236](https://github.com/ClickHouse/ClickHouse/pull/57236) ([Sergei Trifonov](https://github.com/serxa)).
* Allow using suffixes (K, M, G, T, E) with the amount of memory to be used (see the example after this list). Closes [#56879](https://github.com/ClickHouse/ClickHouse/issues/56879). [#57273](https://github.com/ClickHouse/ClickHouse/pull/57273) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Bumped Intel QPL (used by codec `DEFLATE_QPL`) from v1.2.0 to v1.3.1. Also fixed a bug for the case of BOF (Block On Fault) = 0: page faults are now handled by falling back to the SW path. [#57291](https://github.com/ClickHouse/ClickHouse/pull/57291) ([jasperzhu](https://github.com/jinjunzh)).
* Make ALTER of materialized views non-experimental and deprecate the `allow_experimental_alter_materialized_view_structure` setting. Fixes [#15206](https://github.com/ClickHouse/ClickHouse/issues/15206). [#57311](https://github.com/ClickHouse/ClickHouse/pull/57311) ([alesapin](https://github.com/alesapin)).
* Increase default `replicated_deduplication_window` of MergeTree settings from 100 to 1k. [#57335](https://github.com/ClickHouse/ClickHouse/pull/57335) ([sichenzhao](https://github.com/sichenzhao)).
* Use the `INCONSISTENT_METADATA_FOR_BACKUP` error less often; if possible, prefer to continue scanning instead of stopping and restarting the scan for backup from the beginning. [#57385](https://github.com/ClickHouse/ClickHouse/pull/57385) ([Vitaly Baranov](https://github.com/vitlibar)).
* Introduce the limit for the maximum number of table projections (default 25). [#57491](https://github.com/ClickHouse/ClickHouse/pull/57491) ([Julia Kartseva](https://github.com/jkartseva)).
* Enable `async_block_ids_cache` by default for `async_inserts` deduplication. [#57513](https://github.com/ClickHouse/ClickHouse/pull/57513) ([alesapin](https://github.com/alesapin)).
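
A sketch of the leaf-level timeout referenced above, assuming `timeout_overflow_mode_leaf` accepts the same values (`throw`, `break`) as `timeout_overflow_mode`; the table name is hypothetical:

```sql
-- Limit execution time on each shard to 10 seconds; return partial results on timeout
SELECT count()
FROM distributed_table
SETTINGS max_execution_time_leaf = 10, timeout_overflow_mode_leaf = 'break';
```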
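A sketch of the `OrNull` variants referenced above:

```sql
-- The first call pretty-prints a valid query;
-- the second returns NULL instead of throwing, because the input cannot be parsed
SELECT formatQuerySingleLine('select   1,2'), formatQueryOrNull('SELEC 1');
```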
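A sketch of the replacement query-cache setting referenced above, assuming the enum values are named after the described behaviors (`'throw'`, `'save'`, `'ignore'`):

```sql
-- Cache the result even though now() is non-deterministic
SELECT now()
SETTINGS use_query_cache = 1,
         query_cache_nondeterministic_function_handling = 'save';
```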
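A sketch of the asynchronous part move referenced above (the table, partition, and disk names are hypothetical):

```sql
SET alter_move_to_space_execute_async = 1;

-- Returns immediately; the move runs in the background move pool
ALTER TABLE events MOVE PARTITION 202311 TO DISK 's3_cold';
```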
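A sketch of the implicit `Sign` constraint referenced above; with the setting enabled, inserting a sign value other than -1 or 1 should violate the implicit CHECK constraint:

```sql
CREATE TABLE collapsing_example
(
    key   UInt64,
    value String,
    sign  Int8
)
ENGINE = CollapsingMergeTree(sign)
ORDER BY key
SETTINGS add_implicit_sign_column_constraint_for_collapsing_engine = 1;
```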
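A sketch of the suffix syntax referenced above, assuming it applies to memory-related settings such as `max_memory_usage`:

```sql
-- '10G' instead of spelling out 10737418240 bytes
SET max_memory_usage = '10G';
```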
#### Build/Testing/Packaging Improvement
* Enable temporary_data_in_cache in s3 tests in CI. [#48425](https://github.com/ClickHouse/ClickHouse/pull/48425) ([vdimir](https://github.com/vdimir)).
* Run sqllogic test. [#56078](https://github.com/ClickHouse/ClickHouse/pull/56078) ([Han Fei](https://github.com/hanfei1991)).
* Add a new build option `SANITIZE_COVERAGE`. If it is enabled, the code is instrumented to track the coverage. The collected information is available inside ClickHouse with: (1) a new function `coverage` that returns an array of unique addresses in the code found after the previous coverage reset; (2) `SYSTEM RESET COVERAGE` query that resets the accumulated data. This allows us to compare the coverage of different tests, including differential code coverage. Continuation of [#20539](https://github.com/ClickHouse/ClickHouse/issues/20539). [#56102](https://github.com/ClickHouse/ClickHouse/pull/56102) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* In [#54043](https://github.com/ClickHouse/ClickHouse/issues/54043) the setup plan started to appear in the logs. It should be only in `runner_get_all_tests.log`. Also, send the failed infrastructure event to the CI db. [#56214](https://github.com/ClickHouse/ClickHouse/pull/56214) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Some of the stack frames might not be resolved when collecting stacks. In such cases the raw address might be helpful. [#56267](https://github.com/ClickHouse/ClickHouse/pull/56267) ([Alexander Gololobov](https://github.com/davenger)).
* Add an option to disable libssh. [#56333](https://github.com/ClickHouse/ClickHouse/pull/56333) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add automatic check that there are no large translation units. [#56559](https://github.com/ClickHouse/ClickHouse/pull/56559) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Lower the size of the single-binary distribution. This closes [#55181](https://github.com/ClickHouse/ClickHouse/issues/55181). [#56617](https://github.com/ClickHouse/ClickHouse/pull/56617) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make `clickhouse-local` and `clickhouse-client` available under short names (`ch`, `chl`, `chc`) for usability. [#56634](https://github.com/ClickHouse/ClickHouse/pull/56634) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Information about the sizes of every translation unit and binary file after each build will be sent to the CI database in ClickHouse Cloud. This closes [#56107](https://github.com/ClickHouse/ClickHouse/issues/56107). [#56636](https://github.com/ClickHouse/ClickHouse/pull/56636) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Certain files of "Apache Arrow" library (which we use only for non-essential things like parsing the arrow format) were rebuilt all the time regardless of the build cache. This is fixed. [#56657](https://github.com/ClickHouse/ClickHouse/pull/56657) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Avoid recompiling translation units depending on the autogenerated source file about version. [#56660](https://github.com/ClickHouse/ClickHouse/pull/56660) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not fetch changed submodules in the builder container. [#56689](https://github.com/ClickHouse/ClickHouse/pull/56689) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Tracing data of the linker invocations will be sent to the CI database in ClickHouse Cloud. [#56725](https://github.com/ClickHouse/ClickHouse/pull/56725) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Use DWARF 5 debug symbols for the clickhouse binary (was DWARF 4 previously). [#56770](https://github.com/ClickHouse/ClickHouse/pull/56770) ([Michael Kolupaev](https://github.com/al13n321)).
* Optimized build size further by removing unused code from external libraries. [#56786](https://github.com/ClickHouse/ClickHouse/pull/56786) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Set a memory usage limit for the client (`1G`) to address problems like this: https://s3.amazonaws.com/clickhouse-test-reports/0/f1bf3f1fc39f520871ec878d815e515e12fd3e7b/fuzzer_astfuzzertsan/report.html. [#56873](https://github.com/ClickHouse/ClickHouse/pull/56873) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* There was an attempt to have the proper listing in [#44311](https://github.com/ClickHouse/ClickHouse/issues/44311), but the fix itself was in the wrong place, so it's still broken. See an [example](https://github.com/ClickHouse/ClickHouse/actions/runs/6897342568/job/18781001022#step:8:25). [#56989](https://github.com/ClickHouse/ClickHouse/pull/56989) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fixed the memory leak in the integration test of the postgres dictionary. The case of a network partition was not correctly handled at the time the code was pulled into the repo years ago. [#57231](https://github.com/ClickHouse/ClickHouse/pull/57231) ([jsc0218](https://github.com/jsc0218)).
* Fix a test filename typo. [#57272](https://github.com/ClickHouse/ClickHouse/pull/57272) ([jsc0218](https://github.com/jsc0218)).
* Fix issue caught in https://github.com/docker-library/official-images/pull/15846. [#57571](https://github.com/ClickHouse/ClickHouse/pull/57571) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Fix analyzer - insertion from select with subquery referencing insertion table should process only insertion block. [#50857](https://github.com/ClickHouse/ClickHouse/pull/50857) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Setting `JoinAlgorithm` now respects the specified order [#51745](https://github.com/ClickHouse/ClickHouse/pull/51745) ([vdimir](https://github.com/vdimir)).
* Keeper `reconfig`: add timeout before yielding/taking leadership [#53481](https://github.com/ClickHouse/ClickHouse/pull/53481) ([Mike Kot](https://github.com/myrrc)).
* Fix incorrect header in grace hash join and filter pushdown [#53922](https://github.com/ClickHouse/ClickHouse/pull/53922) ([vdimir](https://github.com/vdimir)).
* Fix select from system tables when the table is based on a table function. [#55540](https://github.com/ClickHouse/ClickHouse/pull/55540) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* RFC: Fix "Cannot find column X in source stream" for Distributed queries with LIMIT BY [#55836](https://github.com/ClickHouse/ClickHouse/pull/55836) ([Azat Khuzhin](https://github.com/azat)).
* Fix 'Cannot read from file:' while running client in a background [#55976](https://github.com/ClickHouse/ClickHouse/pull/55976) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix clickhouse-local exit on bad send_logs_level setting [#55994](https://github.com/ClickHouse/ClickHouse/pull/55994) ([Kruglov Pavel](https://github.com/Avogar)).
* Bug fix explain ast with parameterized view [#56004](https://github.com/ClickHouse/ClickHouse/pull/56004) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix a crash during table loading on startup [#56232](https://github.com/ClickHouse/ClickHouse/pull/56232) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix ClickHouse-sourced dictionaries with an explicit query [#56236](https://github.com/ClickHouse/ClickHouse/pull/56236) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix segfault in signal handler for Keeper [#56266](https://github.com/ClickHouse/ClickHouse/pull/56266) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix incomplete query result for UNION in view() function. [#56274](https://github.com/ClickHouse/ClickHouse/pull/56274) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix inconsistency of "cast('0' as DateTime64(3))" and "cast('0' as Nullable(DateTime64(3)))" [#56286](https://github.com/ClickHouse/ClickHouse/pull/56286) ([李扬](https://github.com/taiyang-li)).
* Fix rare race condition related to Memory allocation failure [#56303](https://github.com/ClickHouse/ClickHouse/pull/56303) ([alesapin](https://github.com/alesapin)).
* Fix restore from backup with `flatten_nested` and `data_type_default_nullable` [#56306](https://github.com/ClickHouse/ClickHouse/pull/56306) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix crash in case of adding a column with type Object(JSON) [#56307](https://github.com/ClickHouse/ClickHouse/pull/56307) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix crash in filterPushDown [#56380](https://github.com/ClickHouse/ClickHouse/pull/56380) ([vdimir](https://github.com/vdimir)).
* Fix restore from backup with mat view and dropped source table [#56383](https://github.com/ClickHouse/ClickHouse/pull/56383) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix segfault during Kerberos initialization [#56401](https://github.com/ClickHouse/ClickHouse/pull/56401) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix buffer overflow in T64 [#56434](https://github.com/ClickHouse/ClickHouse/pull/56434) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix nullable primary key in final (2) [#56452](https://github.com/ClickHouse/ClickHouse/pull/56452) ([Amos Bird](https://github.com/amosbird)).
* Fix ON CLUSTER queries without database on initial node [#56484](https://github.com/ClickHouse/ClickHouse/pull/56484) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix startup failure due to TTL dependency [#56489](https://github.com/ClickHouse/ClickHouse/pull/56489) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix ALTER COMMENT queries ON CLUSTER [#56491](https://github.com/ClickHouse/ClickHouse/pull/56491) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix ALTER COLUMN with ALIAS [#56493](https://github.com/ClickHouse/ClickHouse/pull/56493) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix empty NAMED COLLECTIONs [#56494](https://github.com/ClickHouse/ClickHouse/pull/56494) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix two cases of projection analysis. [#56502](https://github.com/ClickHouse/ClickHouse/pull/56502) ([Amos Bird](https://github.com/amosbird)).
* Fix handling of aliases in query cache [#56545](https://github.com/ClickHouse/ClickHouse/pull/56545) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix conversion from `Nullable(Enum)` to `Nullable(String)` [#56644](https://github.com/ClickHouse/ClickHouse/pull/56644) ([Nikolay Degterinsky](https://github.com/evillique)).
* More reliable log handling in Keeper [#56670](https://github.com/ClickHouse/ClickHouse/pull/56670) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix configuration merge for nodes with substitution attributes [#56694](https://github.com/ClickHouse/ClickHouse/pull/56694) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Fix duplicate usage of table function input(). [#56695](https://github.com/ClickHouse/ClickHouse/pull/56695) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix: RabbitMQ OpenSSL dynamic loading issue [#56703](https://github.com/ClickHouse/ClickHouse/pull/56703) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix crash in GCD codec in case when zeros present in data [#56704](https://github.com/ClickHouse/ClickHouse/pull/56704) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix 'mutex lock failed: Invalid argument' in clickhouse-local during insert into function [#56710](https://github.com/ClickHouse/ClickHouse/pull/56710) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix Date text parsing in optimistic path [#56765](https://github.com/ClickHouse/ClickHouse/pull/56765) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix crash in FPC codec [#56795](https://github.com/ClickHouse/ClickHouse/pull/56795) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* DatabaseReplicated: fix DDL query timeout after recovering a replica [#56796](https://github.com/ClickHouse/ClickHouse/pull/56796) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix incorrect nullable columns reporting in MySQL binary protocol [#56799](https://github.com/ClickHouse/ClickHouse/pull/56799) ([Serge Klochkov](https://github.com/slvrtrn)).
* Support Iceberg metadata files for metastore tables [#56810](https://github.com/ClickHouse/ClickHouse/pull/56810) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix TSAN report under transform [#56817](https://github.com/ClickHouse/ClickHouse/pull/56817) ([Raúl Marín](https://github.com/Algunenano)).
* Fix SET query and SETTINGS formatting [#56825](https://github.com/ClickHouse/ClickHouse/pull/56825) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix failure to start due to table dependency in joinGet [#56828](https://github.com/ClickHouse/ClickHouse/pull/56828) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix flattening existing Nested columns during ADD COLUMN [#56830](https://github.com/ClickHouse/ClickHouse/pull/56830) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix allow cr end of line for csv [#56901](https://github.com/ClickHouse/ClickHouse/pull/56901) ([KevinyhZou](https://github.com/KevinyhZou)).
* Fix `tryBase64Decode()` with invalid input [#56913](https://github.com/ClickHouse/ClickHouse/pull/56913) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix generating deep nested columns in CapnProto/Protobuf schemas [#56941](https://github.com/ClickHouse/ClickHouse/pull/56941) ([Kruglov Pavel](https://github.com/Avogar)).
* Prevent incompatible ALTER of projection columns [#56948](https://github.com/ClickHouse/ClickHouse/pull/56948) ([Amos Bird](https://github.com/amosbird)).
* Fix sqlite file path validation [#56984](https://github.com/ClickHouse/ClickHouse/pull/56984) ([San](https://github.com/santrancisco)).
* S3Queue: fix metadata reference increment [#56990](https://github.com/ClickHouse/ClickHouse/pull/56990) ([Kseniia Sumarokova](https://github.com/kssenii)).
* S3Queue minor fix [#56999](https://github.com/ClickHouse/ClickHouse/pull/56999) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix file path validation for DatabaseFileSystem [#57029](https://github.com/ClickHouse/ClickHouse/pull/57029) ([San](https://github.com/santrancisco)).
* Fix `fuzzBits` with `ARRAY JOIN` [#57033](https://github.com/ClickHouse/ClickHouse/pull/57033) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix Nullptr dereference in partial merge join with joined_subquery_re… [#57048](https://github.com/ClickHouse/ClickHouse/pull/57048) ([vdimir](https://github.com/vdimir)).
* Fix race condition in RemoteSource [#57052](https://github.com/ClickHouse/ClickHouse/pull/57052) ([Raúl Marín](https://github.com/Algunenano)).
* Implement `bitHammingDistance` for big integers (see the example after this list) [#57073](https://github.com/ClickHouse/ClickHouse/pull/57073) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* S3-style links bug fix [#57075](https://github.com/ClickHouse/ClickHouse/pull/57075) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Fix JSON_QUERY function with multiple numeric paths [#57096](https://github.com/ClickHouse/ClickHouse/pull/57096) ([KevinyhZou](https://github.com/KevinyhZou)).
* Fix buffer overflow in Gorilla codec [#57107](https://github.com/ClickHouse/ClickHouse/pull/57107) ([Nikolay Degterinsky](https://github.com/evillique)).
* Close interserver connection on any exception before authentication [#57142](https://github.com/ClickHouse/ClickHouse/pull/57142) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix segfault after ALTER UPDATE with Nullable MATERIALIZED column [#57147](https://github.com/ClickHouse/ClickHouse/pull/57147) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix incorrect JOIN plan optimization with partially materialized normal projection [#57196](https://github.com/ClickHouse/ClickHouse/pull/57196) ([Amos Bird](https://github.com/amosbird)).
* Ignore comments when comparing column descriptions [#57259](https://github.com/ClickHouse/ClickHouse/pull/57259) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix `ReadonlyReplica` metric for all cases [#57267](https://github.com/ClickHouse/ClickHouse/pull/57267) ([Antonio Andelic](https://github.com/antonio2368)).
* Background merges correctly use temporary data storage in the cache [#57275](https://github.com/ClickHouse/ClickHouse/pull/57275) ([vdimir](https://github.com/vdimir)).
* Keeper fix for changelog and snapshots [#57299](https://github.com/ClickHouse/ClickHouse/pull/57299) ([Antonio Andelic](https://github.com/antonio2368)).
* Ignore finished ON CLUSTER tasks if hostname changed [#57339](https://github.com/ClickHouse/ClickHouse/pull/57339) ([Alexander Tokmakov](https://github.com/tavplubix)).
* MergeTree mutations reuse source part index granularity [#57352](https://github.com/ClickHouse/ClickHouse/pull/57352) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix function jsonMergePatch for partially const columns [#57379](https://github.com/ClickHouse/ClickHouse/pull/57379) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix ubsan error in `Arena` [#57407](https://github.com/ClickHouse/ClickHouse/pull/57407) ([Nikita Taranov](https://github.com/nickitat)).
* fs cache: add limit for background download [#57424](https://github.com/ClickHouse/ClickHouse/pull/57424) ([Kseniia Sumarokova](https://github.com/kssenii)).
* bugfix: correctly parse SYSTEM STOP LISTEN TCP SECURE [#57483](https://github.com/ClickHouse/ClickHouse/pull/57483) ([joelynch](https://github.com/joelynch)).
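
A small illustration of `bitHammingDistance` on big integers, as referenced above:

```sql
-- 1 and 3 differ in exactly one bit, so the distance is 1
SELECT bitHammingDistance(CAST('1', 'UInt256'), CAST('3', 'UInt256'));
```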
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Add function `arrayRandomSample()`"'. [#56399](https://github.com/ClickHouse/ClickHouse/pull/56399) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Update README.md'. [#56549](https://github.com/ClickHouse/ClickHouse/pull/56549) ([Tyler Hannan](https://github.com/tylerhannan)).
* NO CL ENTRY: 'Revert "FunctionSleep exception message fix"'. [#56591](https://github.com/ClickHouse/ClickHouse/pull/56591) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Inserting only non-duplicate chunks in MV"'. [#56598](https://github.com/ClickHouse/ClickHouse/pull/56598) ([Maksim Kita](https://github.com/kitaisreal)).
* NO CL ENTRY: 'Add new header for README with updated logo'. [#56607](https://github.com/ClickHouse/ClickHouse/pull/56607) ([Justin de Guzman](https://github.com/justindeguzman)).
* NO CL ENTRY: 'Revert "Add /keeper/availability-zone node to allow server load balancing within AZ."'. [#56610](https://github.com/ClickHouse/ClickHouse/pull/56610) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Add 4-letter command for yielding/resigning leadership"'. [#56611](https://github.com/ClickHouse/ClickHouse/pull/56611) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'fix(docs): correct default value for output_format_parquet_compression_method to 'lz4''. [#56614](https://github.com/ClickHouse/ClickHouse/pull/56614) ([james-seymour-cubiko](https://github.com/james-seymour-cubiko)).
* NO CL ENTRY: 'Update except.md'. [#56651](https://github.com/ClickHouse/ClickHouse/pull/56651) ([rondo_1895](https://github.com/yangguang1991)).
* NO CL ENTRY: 'Revert "Add a setting max_execution_time_leaf to limit the execution time on shard for distributed query"'. [#56702](https://github.com/ClickHouse/ClickHouse/pull/56702) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Better except for SSL authentication failure"'. [#56844](https://github.com/ClickHouse/ClickHouse/pull/56844) ([Antonio Andelic](https://github.com/antonio2368)).
* NO CL ENTRY: 'Revert "s3 adaptive timeouts"'. [#56992](https://github.com/ClickHouse/ClickHouse/pull/56992) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Revert "s3 adaptive timeouts""'. [#56994](https://github.com/ClickHouse/ClickHouse/pull/56994) ([Sema Checherinda](https://github.com/CheSema)).
* NO CL ENTRY: 'Revert "Resubmit 01600_parts_types_metrics test (possibly without flakiness)"'. [#57163](https://github.com/ClickHouse/ClickHouse/pull/57163) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Mark select() as harmful function"'. [#57195](https://github.com/ClickHouse/ClickHouse/pull/57195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Update Sentry"'. [#57229](https://github.com/ClickHouse/ClickHouse/pull/57229) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Add debugging info for 01600_parts_types_metrics on failures"'. [#57232](https://github.com/ClickHouse/ClickHouse/pull/57232) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Update date-time-functions.md"'. [#57329](https://github.com/ClickHouse/ClickHouse/pull/57329) ([Denny Crane](https://github.com/den-crane)).
* NO CL ENTRY: 'Revert "add function getClientHTTPHeader"'. [#57510](https://github.com/ClickHouse/ClickHouse/pull/57510) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Add `sqid()` function"'. [#57511](https://github.com/ClickHouse/ClickHouse/pull/57511) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Add new aggregation function groupArraySorted()"'. [#57519](https://github.com/ClickHouse/ClickHouse/pull/57519) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Implemented series period detect method using pocketfft lib"'. [#57536](https://github.com/ClickHouse/ClickHouse/pull/57536) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Support use alias column in indices"'. [#57537](https://github.com/ClickHouse/ClickHouse/pull/57537) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Remove useless "install" from CMake (step 1) [#36589](https://github.com/ClickHouse/ClickHouse/pull/36589) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Analyzer support 'is not distinct from' in join on section [#54068](https://github.com/ClickHouse/ClickHouse/pull/54068) ([vdimir](https://github.com/vdimir)).
* Refactor merge join transform [#55007](https://github.com/ClickHouse/ClickHouse/pull/55007) ([Alex Cheng](https://github.com/Alex-Cheng)).
* Add function jaccardIndex back with better performance [#55126](https://github.com/ClickHouse/ClickHouse/pull/55126) ([vdimir](https://github.com/vdimir)).
* Use more thread pools in BACKUP/RESTORE to avoid its hanging in tests [#55216](https://github.com/ClickHouse/ClickHouse/pull/55216) ([Vitaly Baranov](https://github.com/vitlibar)).
* Parallel replicas: progress bar [#55574](https://github.com/ClickHouse/ClickHouse/pull/55574) ([Igor Nikonov](https://github.com/devcrafter)).
* Analyzer: Fix result type after IfConstantConditionPass [#55951](https://github.com/ClickHouse/ClickHouse/pull/55951) ([Dmitry Novik](https://github.com/novikd)).
* RemoteSource: remove unnecessary flag [#55980](https://github.com/ClickHouse/ClickHouse/pull/55980) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix `REPLICA_ALREADY_EXISTS` for ReplicatedMergeTree [#56000](https://github.com/ClickHouse/ClickHouse/pull/56000) ([Nikolay Degterinsky](https://github.com/evillique)).
* Rework [#52159](https://github.com/ClickHouse/ClickHouse/issues/52159) to avoid coredump generation [#56039](https://github.com/ClickHouse/ClickHouse/pull/56039) ([Raúl Marín](https://github.com/Algunenano)).
* Bump gRPC to v1.47.5 [#56059](https://github.com/ClickHouse/ClickHouse/pull/56059) ([Robert Schulze](https://github.com/rschu1ze)).
* See what happens if we use less different docker images in integration tests [#56082](https://github.com/ClickHouse/ClickHouse/pull/56082) ([Raúl Marín](https://github.com/Algunenano)).
* Add missing zookeeper retries in StorageReplicatedMergeTree::backupData [#56131](https://github.com/ClickHouse/ClickHouse/pull/56131) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Better process broken parts on table start for replicated tables [#56142](https://github.com/ClickHouse/ClickHouse/pull/56142) ([alesapin](https://github.com/alesapin)).
* Add more details to "Data after merge is not byte-identical to data on another replicas" [#56164](https://github.com/ClickHouse/ClickHouse/pull/56164) ([Azat Khuzhin](https://github.com/azat)).
* Revert "Revert "Fix output/input of Arrow dictionary column"" [#56167](https://github.com/ClickHouse/ClickHouse/pull/56167) ([Kruglov Pavel](https://github.com/Avogar)).
* Add a log message for DatabaseReplicated [#56215](https://github.com/ClickHouse/ClickHouse/pull/56215) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Correct aggregate function cross tab accessors to be endianness-independent. [#56223](https://github.com/ClickHouse/ClickHouse/pull/56223) ([Austin Kothig](https://github.com/kothiga)).
* Fix client suggestions for user without grants [#56234](https://github.com/ClickHouse/ClickHouse/pull/56234) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix link to failed check report in status commit [#56243](https://github.com/ClickHouse/ClickHouse/pull/56243) ([vdimir](https://github.com/vdimir)).
* Analyzer: fix 01019_alter_materialized_view_consistent [#56246](https://github.com/ClickHouse/ClickHouse/pull/56246) ([vdimir](https://github.com/vdimir)).
* Properly process aliases for aggregation-by-partition optimization. [#56254](https://github.com/ClickHouse/ClickHouse/pull/56254) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* deltalake: Do not raise errors when processing add and remove actions [#56260](https://github.com/ClickHouse/ClickHouse/pull/56260) ([joelynch](https://github.com/joelynch)).
* Fix rare logical error in Replicated database [#56272](https://github.com/ClickHouse/ClickHouse/pull/56272) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update version_date.tsv and changelogs after v23.10.1.1976-stable [#56278](https://github.com/ClickHouse/ClickHouse/pull/56278) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Add assertion that `SizePredictor` is set if `preferred_block_size_bytes` is set [#56302](https://github.com/ClickHouse/ClickHouse/pull/56302) ([Nikita Taranov](https://github.com/nickitat)).
* Implement digest helpers for different objects [#56305](https://github.com/ClickHouse/ClickHouse/pull/56305) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Removed stale events from README [#56311](https://github.com/ClickHouse/ClickHouse/pull/56311) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix more tests with analyzer. [#56315](https://github.com/ClickHouse/ClickHouse/pull/56315) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Change some exception codes [#56316](https://github.com/ClickHouse/ClickHouse/pull/56316) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix using table shared id during backup and improve logs. [#56339](https://github.com/ClickHouse/ClickHouse/pull/56339) ([Vitaly Baranov](https://github.com/vitlibar)).
* Print info while decompressing the binary [#56360](https://github.com/ClickHouse/ClickHouse/pull/56360) ([Antonio Andelic](https://github.com/antonio2368)).
* remove unstable test test_heavy_insert_select_check_memory [#56369](https://github.com/ClickHouse/ClickHouse/pull/56369) ([Sema Checherinda](https://github.com/CheSema)).
* Update test_storage_s3_queue/test.py [#56370](https://github.com/ClickHouse/ClickHouse/pull/56370) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update 02735_system_zookeeper_connection.sql [#56374](https://github.com/ClickHouse/ClickHouse/pull/56374) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Cleanup convenience functions in IDataType [#56375](https://github.com/ClickHouse/ClickHouse/pull/56375) ([Robert Schulze](https://github.com/rschu1ze)).
* Update test_storage_s3_queue [#56376](https://github.com/ClickHouse/ClickHouse/pull/56376) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Minor improvements for S3Queue [#56377](https://github.com/ClickHouse/ClickHouse/pull/56377) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add obsolete setting back [#56382](https://github.com/ClickHouse/ClickHouse/pull/56382) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Rewrite jobs to use callable workflow [#56385](https://github.com/ClickHouse/ClickHouse/pull/56385) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update stress.py [#56388](https://github.com/ClickHouse/ClickHouse/pull/56388) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix rocksdb with analyzer. [#56391](https://github.com/ClickHouse/ClickHouse/pull/56391) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Option to check particular file with utils/check-style/check-doc-aspell [#56394](https://github.com/ClickHouse/ClickHouse/pull/56394) ([vdimir](https://github.com/vdimir)).
* Add a metric for suspicious parts in ZooKeeper [#56395](https://github.com/ClickHouse/ClickHouse/pull/56395) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix 02404_memory_bound_merging with analyzer. [#56419](https://github.com/ClickHouse/ClickHouse/pull/56419) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* move storage_metadata_write_full_object_key setting to the server scope [#56421](https://github.com/ClickHouse/ClickHouse/pull/56421) ([Sema Checherinda](https://github.com/CheSema)).
* Make autoscaling more responsive [#56422](https://github.com/ClickHouse/ClickHouse/pull/56422) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix `test_attach_without_fetching` [#56429](https://github.com/ClickHouse/ClickHouse/pull/56429) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Use `pcg` + `randomSeed()` instead of `std::mt19937`/`std::random_device` [#56430](https://github.com/ClickHouse/ClickHouse/pull/56430) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix test `02725_database_hdfs.sh` [#56457](https://github.com/ClickHouse/ClickHouse/pull/56457) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update the AMI receipt [#56459](https://github.com/ClickHouse/ClickHouse/pull/56459) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Make IMergeTreeDataPart::getState() inlinable [#56461](https://github.com/ClickHouse/ClickHouse/pull/56461) ([Alexander Gololobov](https://github.com/davenger)).
* Update version_date.tsv and changelogs after v23.10.2.13-stable [#56467](https://github.com/ClickHouse/ClickHouse/pull/56467) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.9.4.11-stable [#56468](https://github.com/ClickHouse/ClickHouse/pull/56468) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.8.6.16-lts [#56469](https://github.com/ClickHouse/ClickHouse/pull/56469) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.3.16.7-lts [#56470](https://github.com/ClickHouse/ClickHouse/pull/56470) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Disable randomization of allow_experimental_block_number_column flag [#56474](https://github.com/ClickHouse/ClickHouse/pull/56474) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Parallel clone sparse/shallow submodules [#56479](https://github.com/ClickHouse/ClickHouse/pull/56479) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix default port for Replicated database cluster [#56486](https://github.com/ClickHouse/ClickHouse/pull/56486) ([Nikolay Degterinsky](https://github.com/evillique)).
* Updated compression to LZ4 [#56497](https://github.com/ClickHouse/ClickHouse/pull/56497) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Analyzer remove unused projection columns [#56499](https://github.com/ClickHouse/ClickHouse/pull/56499) ([Maksim Kita](https://github.com/kitaisreal)).
* FunctionSleep exception message fix [#56500](https://github.com/ClickHouse/ClickHouse/pull/56500) ([Maksim Kita](https://github.com/kitaisreal)).
* Continue rewriting workflows to reusable tests [#56501](https://github.com/ClickHouse/ClickHouse/pull/56501) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Analyzer special functions projection names fix [#56514](https://github.com/ClickHouse/ClickHouse/pull/56514) ([Maksim Kita](https://github.com/kitaisreal)).
* CTE invalid query analysis add test [#56517](https://github.com/ClickHouse/ClickHouse/pull/56517) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix compilation of BackupsWorker.cpp [#56518](https://github.com/ClickHouse/ClickHouse/pull/56518) ([Vitaly Baranov](https://github.com/vitlibar)).
* Analyzer MoveFunctionsOutOfAnyPass refactoring [#56520](https://github.com/ClickHouse/ClickHouse/pull/56520) ([Maksim Kita](https://github.com/kitaisreal)).
* Analyzer support EXPLAIN ESTIMATE [#56522](https://github.com/ClickHouse/ClickHouse/pull/56522) ([Maksim Kita](https://github.com/kitaisreal)).
* Analyzer log used row policies [#56531](https://github.com/ClickHouse/ClickHouse/pull/56531) ([Maksim Kita](https://github.com/kitaisreal)).
* Analyzer ORDER BY read in order query plan add test [#56532](https://github.com/ClickHouse/ClickHouse/pull/56532) ([Maksim Kita](https://github.com/kitaisreal)).
* ReplicatedMergeTree: check shutdown flags in retry loops [#56533](https://github.com/ClickHouse/ClickHouse/pull/56533) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix race between REPLACE_RANGE and GET_PART (set actual part name when fetching) [#56536](https://github.com/ClickHouse/ClickHouse/pull/56536) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Bump gRPC to v1.54.3 [#56543](https://github.com/ClickHouse/ClickHouse/pull/56543) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix flaky LDAP integration tests [#56544](https://github.com/ClickHouse/ClickHouse/pull/56544) ([Julian Maicher](https://github.com/jmaicher)).
* Remove useless using [#56546](https://github.com/ClickHouse/ClickHouse/pull/56546) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better warning message [#56547](https://github.com/ClickHouse/ClickHouse/pull/56547) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Allow `chassert` to guide the static analyzer [#56552](https://github.com/ClickHouse/ClickHouse/pull/56552) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove C++ templates [#56556](https://github.com/ClickHouse/ClickHouse/pull/56556) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `test_keeper_four_word_command/test.py::test_cmd_crst` [#56570](https://github.com/ClickHouse/ClickHouse/pull/56570) ([Antonio Andelic](https://github.com/antonio2368)).
* Delete unnecessary file from tests [#56572](https://github.com/ClickHouse/ClickHouse/pull/56572) ([vdimir](https://github.com/vdimir)).
* Analyzer: fix logical error with set in array join [#56587](https://github.com/ClickHouse/ClickHouse/pull/56587) ([vdimir](https://github.com/vdimir)).
* hide VERSION_INLINE_DATA under feature flag [#56594](https://github.com/ClickHouse/ClickHouse/pull/56594) ([Sema Checherinda](https://github.com/CheSema)).
* Fix 02554_fix_grouping_sets_predicate_push_down with analyzer. [#56595](https://github.com/ClickHouse/ClickHouse/pull/56595) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add "FunctionSleep exception message fix" again [#56597](https://github.com/ClickHouse/ClickHouse/pull/56597) ([Raúl Marín](https://github.com/Algunenano)).
* Update version_date.tsv and changelogs after v23.10.3.5-stable [#56606](https://github.com/ClickHouse/ClickHouse/pull/56606) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Remove bad test [#56612](https://github.com/ClickHouse/ClickHouse/pull/56612) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Symbolize `trace_log` for exporting [#56613](https://github.com/ClickHouse/ClickHouse/pull/56613) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add indices to exported system logs [#56615](https://github.com/ClickHouse/ClickHouse/pull/56615) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove dependencies [#56616](https://github.com/ClickHouse/ClickHouse/pull/56616) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* WIP: Add test describing MV deduplication issues [#56621](https://github.com/ClickHouse/ClickHouse/pull/56621) ([Jordi Villar](https://github.com/jrdi)).
* Add test for ROW POLICY ON CLUSTER [#56623](https://github.com/ClickHouse/ClickHouse/pull/56623) ([Nikolay Degterinsky](https://github.com/evillique)).
* Enable --secure flag for clickhouse-client for hostnames pointing to clickhouse cloud [#56638](https://github.com/ClickHouse/ClickHouse/pull/56638) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Continue with work from [#56621](https://github.com/ClickHouse/ClickHouse/issues/56621) [#56641](https://github.com/ClickHouse/ClickHouse/pull/56641) ([Jordi Villar](https://github.com/jrdi)).
* Switch to SSL port for clickhouse-client for hostnames pointing to clickhouse cloud [#56649](https://github.com/ClickHouse/ClickHouse/pull/56649) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Remove garbage from libssh [#56654](https://github.com/ClickHouse/ClickHouse/pull/56654) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Delete a file [#56655](https://github.com/ClickHouse/ClickHouse/pull/56655) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Delete a file (2) [#56656](https://github.com/ClickHouse/ClickHouse/pull/56656) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove some entries from `analyzer_tech_debt.txt` [#56658](https://github.com/ClickHouse/ClickHouse/pull/56658) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Miscellaneous [#56662](https://github.com/ClickHouse/ClickHouse/pull/56662) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Bump gRPC to v1.55.4 and protobuf to v22.5 [#56664](https://github.com/ClickHouse/ClickHouse/pull/56664) ([Robert Schulze](https://github.com/rschu1ze)).
* Small refactoring of AST hash calculation (follow-up to [#56545](https://github.com/ClickHouse/ClickHouse/issues/56545)) [#56665](https://github.com/ClickHouse/ClickHouse/pull/56665) ([Robert Schulze](https://github.com/rschu1ze)).
* Analyzer: filtering by virtual columns for StorageS3 [#56668](https://github.com/ClickHouse/ClickHouse/pull/56668) ([vdimir](https://github.com/vdimir)).
* Add back flaky tests to analyzer_tech_debt.txt [#56669](https://github.com/ClickHouse/ClickHouse/pull/56669) ([Raúl Marín](https://github.com/Algunenano)).
* gRPC: remove build dependency on systemd [#56671](https://github.com/ClickHouse/ClickHouse/pull/56671) ([Raúl Marín](https://github.com/Algunenano)).
* Remove unused code [#56677](https://github.com/ClickHouse/ClickHouse/pull/56677) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix missing argument for style_check.py in master workflow [#56691](https://github.com/ClickHouse/ClickHouse/pull/56691) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix unexpected parts handling [#56693](https://github.com/ClickHouse/ClickHouse/pull/56693) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Revert "Revert "Add a setting max_execution_time_leaf to limit the execution time on shard for distributed query"" [#56707](https://github.com/ClickHouse/ClickHouse/pull/56707) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix use_structure_from_insertion_table_in_table_functions with new Analyzer [#56708](https://github.com/ClickHouse/ClickHouse/pull/56708) ([Kruglov Pavel](https://github.com/Avogar)).
* Disable settings randomisation for `02896_memory_accounting_for_user.sh` [#56709](https://github.com/ClickHouse/ClickHouse/pull/56709) ([Nikita Taranov](https://github.com/nickitat)).
* Light autogenerated file [#56720](https://github.com/ClickHouse/ClickHouse/pull/56720) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Less CMake checks [#56721](https://github.com/ClickHouse/ClickHouse/pull/56721) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove orphan header files [#56722](https://github.com/ClickHouse/ClickHouse/pull/56722) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Try to fix hang in 01104_distributed_numbers_test [#56764](https://github.com/ClickHouse/ClickHouse/pull/56764) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Test RabbitMQ with secure connection [#56767](https://github.com/ClickHouse/ClickHouse/pull/56767) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix flaky test_replicated_merge_tree_encryption_codec. [#56768](https://github.com/ClickHouse/ClickHouse/pull/56768) ([Vitaly Baranov](https://github.com/vitlibar)).
* fix typo in ClickHouseDictionarySource [#56776](https://github.com/ClickHouse/ClickHouse/pull/56776) ([Mikhail Koviazin](https://github.com/mkmkme)).
* Fix pygithub [#56778](https://github.com/ClickHouse/ClickHouse/pull/56778) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add test for avoided recursion [#56785](https://github.com/ClickHouse/ClickHouse/pull/56785) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix randomization of Keeper configs in stress tests [#56788](https://github.com/ClickHouse/ClickHouse/pull/56788) ([Antonio Andelic](https://github.com/antonio2368)).
* Try fix `No user in current context, it's a bug` [#56789](https://github.com/ClickHouse/ClickHouse/pull/56789) ([Antonio Andelic](https://github.com/antonio2368)).
* Update avg_weighted.xml [#56797](https://github.com/ClickHouse/ClickHouse/pull/56797) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Better except for SSL authentication failure [#56811](https://github.com/ClickHouse/ClickHouse/pull/56811) ([Nikolay Degterinsky](https://github.com/evillique)).
* More stable `test_keeper_reconfig_replace_leader` [#56835](https://github.com/ClickHouse/ClickHouse/pull/56835) ([Antonio Andelic](https://github.com/antonio2368)).
* Add cancellation hook for moving background operation [#56846](https://github.com/ClickHouse/ClickHouse/pull/56846) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Updated comment in universal.sh [#56852](https://github.com/ClickHouse/ClickHouse/pull/56852) ([Robert Schulze](https://github.com/rschu1ze)).
* Bump gRPC to v1.59 and protobuf to v24.4 [#56853](https://github.com/ClickHouse/ClickHouse/pull/56853) ([Robert Schulze](https://github.com/rschu1ze)).
* Better exception messages [#56854](https://github.com/ClickHouse/ClickHouse/pull/56854) ([Antonio Andelic](https://github.com/antonio2368)).
* Sparse checkout: Use `--remote` for `git submodule update` [#56857](https://github.com/ClickHouse/ClickHouse/pull/56857) ([Aleksandr Musorin](https://github.com/AVMusorin)).
* Fix `test_keeper_broken_logs` [#56858](https://github.com/ClickHouse/ClickHouse/pull/56858) ([Antonio Andelic](https://github.com/antonio2368)).
* CMake: Small cleanup in cpu_features.cmake [#56861](https://github.com/ClickHouse/ClickHouse/pull/56861) ([Robert Schulze](https://github.com/rschu1ze)).
* Planner support transactions [#56867](https://github.com/ClickHouse/ClickHouse/pull/56867) ([Maksim Kita](https://github.com/kitaisreal)).
* Improve diagnostics in test 02908_many_requests_to_system_replicas [#56869](https://github.com/ClickHouse/ClickHouse/pull/56869) ([Alexander Gololobov](https://github.com/davenger)).
* Update 01052_window_view_proc_tumble_to_now.sh [#56870](https://github.com/ClickHouse/ClickHouse/pull/56870) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Call cache check a bit more often [#56872](https://github.com/ClickHouse/ClickHouse/pull/56872) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update test_storage_s3_queue/test.py [#56874](https://github.com/ClickHouse/ClickHouse/pull/56874) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix perf tests report when there are no tests [#56881](https://github.com/ClickHouse/ClickHouse/pull/56881) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Remove ctest [#56894](https://github.com/ClickHouse/ClickHouse/pull/56894) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Simpler CMake [#56898](https://github.com/ClickHouse/ClickHouse/pull/56898) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* test for [#56790](https://github.com/ClickHouse/ClickHouse/issues/56790) [#56899](https://github.com/ClickHouse/ClickHouse/pull/56899) ([Denny Crane](https://github.com/den-crane)).
* Allow delegate disk to handle retries for createDirectories [#56905](https://github.com/ClickHouse/ClickHouse/pull/56905) ([Alexander Gololobov](https://github.com/davenger)).
* Update version_date.tsv and changelogs after v23.10.4.25-stable [#56906](https://github.com/ClickHouse/ClickHouse/pull/56906) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.3.17.13-lts [#56907](https://github.com/ClickHouse/ClickHouse/pull/56907) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.8.7.24-lts [#56908](https://github.com/ClickHouse/ClickHouse/pull/56908) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.9.5.29-stable [#56909](https://github.com/ClickHouse/ClickHouse/pull/56909) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Remove outdated instructions [#56911](https://github.com/ClickHouse/ClickHouse/pull/56911) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Avoid dependencies with no fixed versions [#56914](https://github.com/ClickHouse/ClickHouse/pull/56914) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix race on zk_log initialization [#56915](https://github.com/ClickHouse/ClickHouse/pull/56915) ([Alexander Gololobov](https://github.com/davenger)).
* Check what will happen if I remove some lines [#56916](https://github.com/ClickHouse/ClickHouse/pull/56916) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update fasttest [#56919](https://github.com/ClickHouse/ClickHouse/pull/56919) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make some tests independent of macro settings [#56927](https://github.com/ClickHouse/ClickHouse/pull/56927) ([Raúl Marín](https://github.com/Algunenano)).
* Fix flaky 02494_query_cache_events [#56935](https://github.com/ClickHouse/ClickHouse/pull/56935) ([Robert Schulze](https://github.com/rschu1ze)).
* Add CachedReadBufferReadFromCache{Hits,Misses} profile events [#56936](https://github.com/ClickHouse/ClickHouse/pull/56936) ([Jordi Villar](https://github.com/jrdi)).
* Send fatal logs by default in clickhouse-local [#56956](https://github.com/ClickHouse/ClickHouse/pull/56956) ([Nikolay Degterinsky](https://github.com/evillique)).
* Resubmit: Better except for SSL authentication [#56957](https://github.com/ClickHouse/ClickHouse/pull/56957) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix `test_keeper_auth` [#56960](https://github.com/ClickHouse/ClickHouse/pull/56960) ([Antonio Andelic](https://github.com/antonio2368)).
* Fewer concurrent requests in 02908_many_requests_to_system_replicas [#56968](https://github.com/ClickHouse/ClickHouse/pull/56968) ([Alexander Gololobov](https://github.com/davenger)).
* Own CMake for GRPC [#56971](https://github.com/ClickHouse/ClickHouse/pull/56971) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix build in Backups/BackupIO_S3.cpp [#56974](https://github.com/ClickHouse/ClickHouse/pull/56974) ([Robert Schulze](https://github.com/rschu1ze)).
* Add exclude for tryBase64Decode to backward compat test (follow-up to [#56913](https://github.com/ClickHouse/ClickHouse/issues/56913)) [#56975](https://github.com/ClickHouse/ClickHouse/pull/56975) ([Robert Schulze](https://github.com/rschu1ze)).
* Prefer sccache to ccache by default [#56980](https://github.com/ClickHouse/ClickHouse/pull/56980) ([Igor Nikonov](https://github.com/devcrafter)).
* update 02003_memory_limit_in_client.sh [#56981](https://github.com/ClickHouse/ClickHouse/pull/56981) ([Bharat Nallan](https://github.com/bharatnc)).
* Make check for the limited cmake dependencies the part of sparse checkout [#56991](https://github.com/ClickHouse/ClickHouse/pull/56991) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix flaky and slow tests. [#56993](https://github.com/ClickHouse/ClickHouse/pull/56993) ([Amos Bird](https://github.com/amosbird)).
* Fix dropping tables in test_create_or_drop_tables_during_backup [#57007](https://github.com/ClickHouse/ClickHouse/pull/57007) ([Vitaly Baranov](https://github.com/vitlibar)).
* Enable Analyzer in Stress and Fuzz tests [#57008](https://github.com/ClickHouse/ClickHouse/pull/57008) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Run CI for PRs with missing documentation [#57018](https://github.com/ClickHouse/ClickHouse/pull/57018) ([Michael Kolupaev](https://github.com/al13n321)).
* test_s3_engine_heavy_write_check_mem: turn test off [#57025](https://github.com/ClickHouse/ClickHouse/pull/57025) ([Sema Checherinda](https://github.com/CheSema)).
* NamedCollections: make exception message more informative. [#57031](https://github.com/ClickHouse/ClickHouse/pull/57031) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Avoid returning biggest resolution when fpr > 0.283 [#57034](https://github.com/ClickHouse/ClickHouse/pull/57034) ([Jordi Villar](https://github.com/jrdi)).
* Fix: suppress TSAN in RabbitMQ test [#57040](https://github.com/ClickHouse/ClickHouse/pull/57040) ([Igor Nikonov](https://github.com/devcrafter)).
* Small Keeper fixes [#57047](https://github.com/ClickHouse/ClickHouse/pull/57047) ([Antonio Andelic](https://github.com/antonio2368)).
* Parallel replicas: cleanup, narrow dependency [#57054](https://github.com/ClickHouse/ClickHouse/pull/57054) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix gRPC build on macOS [#57061](https://github.com/ClickHouse/ClickHouse/pull/57061) ([Robert Schulze](https://github.com/rschu1ze)).
* Better comment for ITransformingStep::transformPipeline [#57062](https://github.com/ClickHouse/ClickHouse/pull/57062) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix `Duplicate set` for StorageSet with analyzer. [#57063](https://github.com/ClickHouse/ClickHouse/pull/57063) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Better metadata path [#57083](https://github.com/ClickHouse/ClickHouse/pull/57083) ([Nikolay Degterinsky](https://github.com/evillique)).
* Analyzer fuzzer 3 (aggregate_functions_null_for_empty for projections) [#57099](https://github.com/ClickHouse/ClickHouse/pull/57099) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Update numbers.md [#57100](https://github.com/ClickHouse/ClickHouse/pull/57100) ([konruvikt](https://github.com/konruvikt)).
* Fix FunctionNode::toASTImpl [#57102](https://github.com/ClickHouse/ClickHouse/pull/57102) ([vdimir](https://github.com/vdimir)).
* Analyzer fuzzer 5 [#57103](https://github.com/ClickHouse/ClickHouse/pull/57103) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Cancel PipelineExecutor properly in case of exception in spawnThreads [#57104](https://github.com/ClickHouse/ClickHouse/pull/57104) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow HashedDictionary/FunctionsConversion as large TU [#57108](https://github.com/ClickHouse/ClickHouse/pull/57108) ([Azat Khuzhin](https://github.com/azat)).
* Disable checksums for builds with fuzzer [#57122](https://github.com/ClickHouse/ClickHouse/pull/57122) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Analyzer: Fix logical error in LogicalExpressionOptimizerVisitor [#57123](https://github.com/ClickHouse/ClickHouse/pull/57123) ([vdimir](https://github.com/vdimir)).
* Split HashedDictionary CU [#57124](https://github.com/ClickHouse/ClickHouse/pull/57124) ([Azat Khuzhin](https://github.com/azat)).
* Cancel executor in ~CreatingSetsTransform [#57125](https://github.com/ClickHouse/ClickHouse/pull/57125) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix system.*_log in artifacts on CI [#57128](https://github.com/ClickHouse/ClickHouse/pull/57128) ([Azat Khuzhin](https://github.com/azat)).
* Fix something in ReplicatedMergeTree [#57129](https://github.com/ClickHouse/ClickHouse/pull/57129) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not symbolize traces for debug/sanitizer builds for sending to cloud [#57130](https://github.com/ClickHouse/ClickHouse/pull/57130) ([Azat Khuzhin](https://github.com/azat)).
* Resubmit 01600_parts_types_metrics test (possibly without flakiness) [#57131](https://github.com/ClickHouse/ClickHouse/pull/57131) ([Azat Khuzhin](https://github.com/azat)).
* Follow up to [#56541](https://github.com/ClickHouse/ClickHouse/issues/56541) [#57141](https://github.com/ClickHouse/ClickHouse/pull/57141) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Allow to disable reorder-functions-after-sorting optimization [#57144](https://github.com/ClickHouse/ClickHouse/pull/57144) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix bad test `00002_log_and_exception_messages_formatting` [#57145](https://github.com/ClickHouse/ClickHouse/pull/57145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test test_replicated_merge_tree_encryption_codec/test.py::test_different_keys [#57146](https://github.com/ClickHouse/ClickHouse/pull/57146) ([Vitaly Baranov](https://github.com/vitlibar)).
* Remove partial results from build matrix for stress tests [#57150](https://github.com/ClickHouse/ClickHouse/pull/57150) ([Azat Khuzhin](https://github.com/azat)).
* Minor changes in test_check_table [#57154](https://github.com/ClickHouse/ClickHouse/pull/57154) ([vdimir](https://github.com/vdimir)).
* Fix 02903_rmt_retriable_merge_exception flakiness for replicated database [#57155](https://github.com/ClickHouse/ClickHouse/pull/57155) ([Azat Khuzhin](https://github.com/azat)).
* Mark select() as harmful function [#57156](https://github.com/ClickHouse/ClickHouse/pull/57156) ([Igor Nikonov](https://github.com/devcrafter)).
* Improve the cherry-pick PR description [#57167](https://github.com/ClickHouse/ClickHouse/pull/57167) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add debugging info for 01600_parts_types_metrics on failures [#57170](https://github.com/ClickHouse/ClickHouse/pull/57170) ([Azat Khuzhin](https://github.com/azat)).
* Tiny improvement security [#57171](https://github.com/ClickHouse/ClickHouse/pull/57171) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update blob_storage_log.md [#57187](https://github.com/ClickHouse/ClickHouse/pull/57187) ([vdimir](https://github.com/vdimir)).
* [RFC] Set log_comment to the file name while processing files in client [#57191](https://github.com/ClickHouse/ClickHouse/pull/57191) ([Azat Khuzhin](https://github.com/azat)).
* Add test for [#5323](https://github.com/ClickHouse/ClickHouse/issues/5323) [#57192](https://github.com/ClickHouse/ClickHouse/pull/57192) ([Raúl Marín](https://github.com/Algunenano)).
* Analyzer fuzzer 6 (arrayJoin) [#57198](https://github.com/ClickHouse/ClickHouse/pull/57198) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add test for [#47366](https://github.com/ClickHouse/ClickHouse/issues/47366) [#57200](https://github.com/ClickHouse/ClickHouse/pull/57200) ([Raúl Marín](https://github.com/Algunenano)).
* Add test for [#51321](https://github.com/ClickHouse/ClickHouse/issues/51321) [#57202](https://github.com/ClickHouse/ClickHouse/pull/57202) ([Raúl Marín](https://github.com/Algunenano)).
* Fix possible crash (in Rust) of fuzzy finder in client [#57204](https://github.com/ClickHouse/ClickHouse/pull/57204) ([Azat Khuzhin](https://github.com/azat)).
* fix zero-copy locks leaking [#57205](https://github.com/ClickHouse/ClickHouse/pull/57205) ([Sema Checherinda](https://github.com/CheSema)).
* Fix test_distributed_storage_configuration flakiness [#57206](https://github.com/ClickHouse/ClickHouse/pull/57206) ([Azat Khuzhin](https://github.com/azat)).
* Update Sentry [#57222](https://github.com/ClickHouse/ClickHouse/pull/57222) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version_date.tsv and changelogs after v23.10.5.20-stable [#57223](https://github.com/ClickHouse/ClickHouse/pull/57223) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.9.6.20-stable [#57224](https://github.com/ClickHouse/ClickHouse/pull/57224) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.3.18.15-lts [#57225](https://github.com/ClickHouse/ClickHouse/pull/57225) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.8.8.20-lts [#57226](https://github.com/ClickHouse/ClickHouse/pull/57226) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Change cursor style for overwrite mode (INS) to blinking in client [#57227](https://github.com/ClickHouse/ClickHouse/pull/57227) ([Azat Khuzhin](https://github.com/azat)).
* Remove test `01280_ttl_where_group_by` [#57230](https://github.com/ClickHouse/ClickHouse/pull/57230) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix docs [#57234](https://github.com/ClickHouse/ClickHouse/pull/57234) ([Nikolay Degterinsky](https://github.com/evillique)).
* Remove addBatchSinglePlaceFromInterval [#57258](https://github.com/ClickHouse/ClickHouse/pull/57258) ([Raúl Marín](https://github.com/Algunenano)).
* Add some additional groups to CI [#57260](https://github.com/ClickHouse/ClickHouse/pull/57260) ([alesapin](https://github.com/alesapin)).
* Analyzer: fix result type of aggregate function with NULL [#57265](https://github.com/ClickHouse/ClickHouse/pull/57265) ([vdimir](https://github.com/vdimir)).
* Ignore memory exception in Keeper asio workers [#57268](https://github.com/ClickHouse/ClickHouse/pull/57268) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix code reports [#57301](https://github.com/ClickHouse/ClickHouse/pull/57301) ([Raúl Marín](https://github.com/Algunenano)).
* Follow up recommendations from [#57167](https://github.com/ClickHouse/ClickHouse/issues/57167) [#57302](https://github.com/ClickHouse/ClickHouse/pull/57302) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add back flaky tests to analyzer_tech_debt.txt [#57307](https://github.com/ClickHouse/ClickHouse/pull/57307) ([Raúl Marín](https://github.com/Algunenano)).
* Lower level for annoying S3 log [#57312](https://github.com/ClickHouse/ClickHouse/pull/57312) ([Antonio Andelic](https://github.com/antonio2368)).
* Add regression test for skim (Rust) crash on pasting certain input [#57313](https://github.com/ClickHouse/ClickHouse/pull/57313) ([Azat Khuzhin](https://github.com/azat)).
* Remove unused Strings from MergeTreeData [#57318](https://github.com/ClickHouse/ClickHouse/pull/57318) ([Mikhail Koviazin](https://github.com/mkmkme)).
* Address 02668_ulid_decoding flakiness [#57320](https://github.com/ClickHouse/ClickHouse/pull/57320) ([Raúl Marín](https://github.com/Algunenano)).
* DiskWeb fix [#57322](https://github.com/ClickHouse/ClickHouse/pull/57322) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Update README.md [#57325](https://github.com/ClickHouse/ClickHouse/pull/57325) ([Tyler Hannan](https://github.com/tylerhannan)).
* Add information about new _size virtual column in file/s3/url/hdfs/azure table functions [#57328](https://github.com/ClickHouse/ClickHouse/pull/57328) ([Kruglov Pavel](https://github.com/Avogar)).
* Follow-up to [#56490](https://github.com/ClickHouse/ClickHouse/issues/56490): Fix build with `cmake -DENABLE_LIBRARIES=0` [#57330](https://github.com/ClickHouse/ClickHouse/pull/57330) ([Robert Schulze](https://github.com/rschu1ze)).
* Mark a setting obsolete [#57336](https://github.com/ClickHouse/ClickHouse/pull/57336) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Always renew ZK client in `WithRetries` [#57357](https://github.com/ClickHouse/ClickHouse/pull/57357) ([Antonio Andelic](https://github.com/antonio2368)).
* Shutdown disks after tables [#57358](https://github.com/ClickHouse/ClickHouse/pull/57358) ([Alexander Gololobov](https://github.com/davenger)).
* Update DDLTask.cpp [#57369](https://github.com/ClickHouse/ClickHouse/pull/57369) ([Alexander Tokmakov](https://github.com/tavplubix)).
* verbose exception messages for StorageFuzzJSON [#57372](https://github.com/ClickHouse/ClickHouse/pull/57372) ([Julia Kartseva](https://github.com/jkartseva)).
* Initialize only required disks in clickhouse-disks [#57387](https://github.com/ClickHouse/ClickHouse/pull/57387) ([Nikolay Degterinsky](https://github.com/evillique)).
* Allow wildcards in directories for partitioned write with File storage [#57391](https://github.com/ClickHouse/ClickHouse/pull/57391) ([Nikolay Degterinsky](https://github.com/evillique)).
* Add tests for 43202 [#57404](https://github.com/ClickHouse/ClickHouse/pull/57404) ([Raúl Marín](https://github.com/Algunenano)).
* Consider whole check failure in bugfix validate check as an error [#57413](https://github.com/ClickHouse/ClickHouse/pull/57413) ([vdimir](https://github.com/vdimir)).
* Change type of s3_cache in test_encrypted_disk [#57416](https://github.com/ClickHouse/ClickHouse/pull/57416) ([vdimir](https://github.com/vdimir)).
* Add extra debug information on replication consistency errors [#57419](https://github.com/ClickHouse/ClickHouse/pull/57419) ([Raúl Marín](https://github.com/Algunenano)).
* Don't print server revision in client on connect [#57435](https://github.com/ClickHouse/ClickHouse/pull/57435) ([Nikita Taranov](https://github.com/nickitat)).
* Adding Sydney Meetup [#57457](https://github.com/ClickHouse/ClickHouse/pull/57457) ([Tyler Hannan](https://github.com/tylerhannan)).
* Fix adjusting log_comment in case of multiple files passed [#57464](https://github.com/ClickHouse/ClickHouse/pull/57464) ([Azat Khuzhin](https://github.com/azat)).
* Fix flaky test 02697_stop_reading_on_first_cancel.sh [#57481](https://github.com/ClickHouse/ClickHouse/pull/57481) ([Raúl Marín](https://github.com/Algunenano)).
* Tiny refactoring around cache [#57482](https://github.com/ClickHouse/ClickHouse/pull/57482) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Decrease default value for `filesystem_prefetch_min_bytes_for_single_read_task` [#57489](https://github.com/ClickHouse/ClickHouse/pull/57489) ([Nikita Taranov](https://github.com/nickitat)).
* Remove bad test [#57494](https://github.com/ClickHouse/ClickHouse/pull/57494) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add changelog for 23.11 [#57517](https://github.com/ClickHouse/ClickHouse/pull/57517) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Simple cleanup in distributed (while dealing with parallel replicas) [#57518](https://github.com/ClickHouse/ClickHouse/pull/57518) ([Igor Nikonov](https://github.com/devcrafter)).
* Remove a feature. [#57521](https://github.com/ClickHouse/ClickHouse/pull/57521) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* `S3Queue` is production ready [#57548](https://github.com/ClickHouse/ClickHouse/pull/57548) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Revert "Merge pull request [#56724](https://github.com/ClickHouse/ClickHouse/issues/56724) from canhld94/ch_replicated_column_mismatch" [#57576](https://github.com/ClickHouse/ClickHouse/pull/57576) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

View File

@ -2647,7 +2647,7 @@ Default value: 0.
## input_format_parallel_parsing {#input-format-parallel-parsing}
Enables or disables order-preserving parallel parsing of data formats. Supported only for [TSV](../../interfaces/formats.md/#tabseparated), [TKSV](../../interfaces/formats.md/#tskv), [CSV](../../interfaces/formats.md/#csv) and [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) formats.
Enables or disables order-preserving parallel parsing of data formats. Supported only for [TSV](../../interfaces/formats.md/#tabseparated), [TSKV](../../interfaces/formats.md/#tskv), [CSV](../../interfaces/formats.md/#csv) and [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) formats.
Possible values:
@ -2658,7 +2658,7 @@ Default value: `1`.
## output_format_parallel_formatting {#output-format-parallel-formatting}
Enables or disables parallel formatting of data formats. Supported only for [TSV](../../interfaces/formats.md/#tabseparated), [TKSV](../../interfaces/formats.md/#tskv), [CSV](../../interfaces/formats.md/#csv) and [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) formats.
Enables or disables parallel formatting of data formats. Supported only for [TSV](../../interfaces/formats.md/#tabseparated), [TSKV](../../interfaces/formats.md/#tskv), [CSV](../../interfaces/formats.md/#csv) and [JSONEachRow](../../interfaces/formats.md/#jsoneachrow) formats.
Possible values:
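Both settings are plain session settings and can be toggled independently for the input and output sides. A minimal usage sketch (not part of the patch; the `events` table and `big.csv` file names are hypothetical):
```sql
-- Order-preserving parallel parsing of the incoming CSV stream.
SET input_format_parallel_parsing = 1;
-- Keep result formatting single-threaded for this session.
SET output_format_parallel_formatting = 0;

-- 'events' and 'big.csv' are hypothetical names, used only for illustration.
INSERT INTO events FROM INFILE 'big.csv' FORMAT CSV;
SELECT * FROM events FORMAT JSONEachRow;
```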

View File

@ -1776,3 +1776,33 @@ Result:
│ (('queries','database','analytical'),('oriented','processing','DBMS')) │
└────────────────────────────────────────────────────────────────────────┘
```
## sqid
Transforms numbers into a YouTube-like short URL hash called a [Sqid](https://sqids.org/).
**Syntax**
```sql
sqid(number1, ...)
```
**Arguments**
- A variable number of UInt8, UInt16, UInt32 or UInt64 numbers.
**Returned Value**
A hash id of type [String](/docs/en/sql-reference/data-types/string.md).
**Example**
```sql
SELECT sqid(1, 2, 3, 4, 5);
```
```response
┌─sqid(1, 2, 3, 4, 5)─┐
│ gXHfJ1C6dN │
└─────────────────────┘
```
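Since the function is variadic, a single argument works as well. A small sketch encoding a numeric key column into short public identifiers (output omitted, since the exact hash strings depend on the sqids alphabet):
```sql
-- Encode each number into a short, URL-friendly Sqid string.
SELECT number, sqid(number) AS short_id
FROM numbers(3);
```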

View File

@ -628,6 +628,8 @@ SELECT
formatReadableSize(filesize_bytes) AS filesize
```
Alias: `FORMAT_BYTES`.
``` text
┌─filesize_bytes─┬─filesize───┐
│ 1 │ 1.00 B │
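The alias behaves identically to the original name. A minimal sketch (the second result assumes the usual binary-prefix formatting, so 1024 bytes should render as '1.00 KiB'):
```sql
-- FORMAT_BYTES is an alias for formatReadableSize.
SELECT
    formatReadableSize(1) AS one_byte, -- '1.00 B', as in the table above
    FORMAT_BYTES(1024)    AS one_kib;  -- expected '1.00 KiB'
```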

View File

@ -45,7 +45,7 @@ $ cat /var/lib/clickhouse/user_files/test1.csv
1,"file1"
11,"file11"
$ cat /var/lib/clickhouse/user_files/test1.csv
$ cat /var/lib/clickhouse/user_files/test2.csv
2,"file2"
22,"file22"
```

View File

@ -2249,7 +2249,7 @@ SELECT * FROM test_table
## input_format_parallel_parsing {#input-format-parallel-parsing}
Enables or disables the mode in which incoming data is split into chunks that are parsed in parallel while the original order is preserved. Supported only for the [TSV](../../interfaces/formats.md#tabseparated), [TKSV](../../interfaces/formats.md#tskv), [CSV](../../interfaces/formats.md#csv) and [JSONEachRow](../../interfaces/formats.md#jsoneachrow) formats.
Enables or disables the mode in which incoming data is split into chunks that are parsed in parallel while the original order is preserved. Supported only for the [TSV](../../interfaces/formats.md#tabseparated), [TSKV](../../interfaces/formats.md#tskv), [CSV](../../interfaces/formats.md#csv) and [JSONEachRow](../../interfaces/formats.md#jsoneachrow) formats.
Possible values:
@ -2260,7 +2260,7 @@ SELECT * FROM test_table
## output_format_parallel_formatting {#output-format-parallel-formatting}
Enables or disables the mode in which outgoing data is formatted in parallel while the original order is preserved. Supported only for the [TSV](../../interfaces/formats.md#tabseparated), [TKSV](../../interfaces/formats.md#tskv), [CSV](../../interfaces/formats.md#csv) and [JSONEachRow](../../interfaces/formats.md#jsoneachrow) formats.
Enables or disables the mode in which outgoing data is formatted in parallel while the original order is preserved. Supported only for the [TSV](../../interfaces/formats.md#tabseparated), [TSKV](../../interfaces/formats.md#tskv), [CSV](../../interfaces/formats.md#csv) and [JSONEachRow](../../interfaces/formats.md#jsoneachrow) formats.
Possible values:

View File

@ -44,7 +44,7 @@ $ cat /var/lib/clickhouse/user_files/test1.csv
1,"file1"
11,"file11"
$ cat /var/lib/clickhouse/user_files/test1.csv
$ cat /var/lib/clickhouse/user_files/test2.csv
2,"file2"
22,"file22"
```

View File

@ -1203,7 +1203,7 @@ ClickHouse raises an exception
- Type: boolean
- Default value: True
Enables order-preserving parallel parsing of data formats. Supported only for the TSV, TKSV, CSV and JSONEachRow formats.
Enables order-preserving parallel parsing of data formats. Supported only for the TSV, TSKV, CSV and JSONEachRow formats.
## min_chunk_bytes_for_parallel_parsing {#min-chunk-bytes-for-parallel-parsing}

View File

@ -16,6 +16,7 @@
#include <IO/ReadHelpers.h>
#include <Common/PODArray.h>
#include <Common/assert_cast.h>
#include <base/types.h>
#include <boost/math/distributions/normal.hpp>
@ -99,31 +100,39 @@ struct LargestTriangleThreeBucketsData : public StatisticalSample<Float64, Float
}
// Find the size of each bucket
size_t single_bucket_size = this->x.size() / total_buckets;
Float64 single_bucket_size = static_cast<Float64>(this->x.size() - 2) / static_cast<Float64>(total_buckets - 2);
// Include the first data point
result.emplace_back(std::make_pair(this->x[0], this->y[0]));
for (size_t i = 1; i < total_buckets - 1; ++i) // Skip the first and last bucket
// the start index of the current bucket
size_t start_index = 1;
// the end index of the current bucket, which is also the start index of the next bucket
size_t center_index = start_index + static_cast<int>(floor(single_bucket_size));
for (size_t i = 0; i < total_buckets - 2; ++i) // Skip the first and last bucket
{
size_t start_index = i * single_bucket_size;
size_t end_index = (i + 1) * single_bucket_size;
// the end index of next bucket
size_t end_index = 1 + static_cast<int>(floor(single_bucket_size * (i + 2)));
// clamp the next bucket's end if it would run past the end of the data
if (end_index > this->x.size())
end_index = this->x.size();
// Compute the average point in the next bucket
Float64 avg_x = 0;
Float64 avg_y = 0;
for (size_t j = end_index; j < (i + 2) * single_bucket_size; ++j)
for (size_t j = center_index; j < end_index; ++j)
{
avg_x += this->x[j];
avg_y += this->y[j];
}
avg_x /= single_bucket_size;
avg_y /= single_bucket_size;
avg_x /= static_cast<Float64>(end_index - center_index);
avg_y /= static_cast<Float64>(end_index - center_index);
// Find the point in the current bucket that forms the largest triangle
size_t max_index = start_index;
Float64 max_area = 0.0;
for (size_t j = start_index; j < end_index; ++j)
for (size_t j = start_index; j < center_index; ++j)
{
Float64 area = std::abs(
0.5
@ -138,6 +147,9 @@ struct LargestTriangleThreeBucketsData : public StatisticalSample<Float64, Float
// Include the selected point
result.emplace_back(std::make_pair(this->x[max_index], this->y[max_index]));
start_index = center_index;
center_index = end_index;
}
// Include the last data point
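Read as a formula, the corrected code places the boundary of inner bucket k at index 1 + floor(k * (n - 2) / (b - 2)): the first and last points each form their own bucket, and the remaining n - 2 points are split evenly across b - 2 fractional-width buckets. A worked sketch (not from the patch): with n = 100 points and b = 10 buckets, the width is 98 / 8 = 12.25, so the inner boundaries fall at indices 1, 13, 25, 37, 50, 62, 74, 86 and 99, whereas the previous integer division n / b produced a fixed width of 10 and mishandled the trailing points whenever the sizes did not divide evenly.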

View File

@ -33,8 +33,17 @@ public:
if (function_node->getArguments().getNodes().size() != 3)
return;
auto result_type = function_node->getResultType();
function_node->resolveAsFunction(if_function_ptr->build(function_node->getArgumentColumns()));
auto if_function_value = if_function_ptr->build(function_node->getArgumentColumns());
if (!if_function_value->getResultType()->equals(*function_node->getResultType()))
{
/** We hit a corner case where the result types of `if` and `multiIf` differ.
* For example, currently `if(NULL, a, b)` returns the type of the `a` column,
* but `multiIf(NULL, a, b)` returns the supertype of `a` and `b`.
*/
return;
}
function_node->resolveAsFunction(std::move(if_function_value));
}
private:

View File

@ -6,6 +6,7 @@
#include <IO/copyData.h>
#include <algorithm>
#include <cstdlib>
#include <stdexcept>
#include <chrono>
#include <cerrno>
@ -21,6 +22,7 @@
#include <fstream>
#include <filesystem>
#include <fmt/format.h>
#include <Common/quoteString.h>
#include "config.h" // USE_SKIM
#if USE_SKIM
@ -94,7 +96,14 @@ int executeCommand(char * const argv[])
throw std::runtime_error(fmt::format("Cannot waitpid {}: {}", pid, errnoToString()));
} while (true);
return status;
if (WIFEXITED(status))
return WEXITSTATUS(status);
if (WIFSIGNALED(status))
throw std::runtime_error(fmt::format("Child process was terminated by signal {}", WTERMSIG(status)));
if (WIFSTOPPED(status))
throw std::runtime_error(fmt::format("Child process was stopped by signal {}", WSTOPSIG(status)));
throw std::runtime_error("Child process was not exited normally by unknown reason");
}
void writeRetry(int fd, const std::string & data)
@ -504,22 +513,29 @@ void ReplxxLineReader::addToHistory(const String & line)
void ReplxxLineReader::openEditor()
{
TemporaryFile editor_file("clickhouse_client_editor_XXXXXX.sql");
editor_file.write(rx.get_state().text());
editor_file.close();
char * const argv[] = {editor.data(), editor_file.getPath().data(), nullptr};
try
{
if (executeCommand(argv) == 0)
TemporaryFile editor_file("clickhouse_client_editor_XXXXXX.sql");
editor_file.write(rx.get_state().text());
editor_file.close();
char * const argv[] = {editor.data(), editor_file.getPath().data(), nullptr};
int editor_exit_code = executeCommand(argv);
if (editor_exit_code == EXIT_SUCCESS)
{
const std::string & new_query = readFile(editor_file.getPath());
rx.set_state(replxx::Replxx::State(new_query.c_str(), static_cast<int>(new_query.size())));
}
else
{
rx.print(fmt::format("Editor {} terminated unsuccessfully: {}\n", backQuoteIfNeed(editor), editor_exit_code).data());
}
}
catch (const std::runtime_error & e)
{
rx.print(e.what());
rx.print("\n");
}
if (bracketed_paste_enabled)

View File

@ -144,6 +144,7 @@ const char * errorMessage(Error code)
case Error::ZCLOSING: return "ZooKeeper is closing";
case Error::ZNOTHING: return "(not error) no server responses to process";
case Error::ZSESSIONMOVED: return "Session moved to another server, so operation is ignored";
case Error::ZNOTREADONLY: return "State-changing request is passed to read-only server";
}
UNREACHABLE();
@ -156,7 +157,8 @@ bool isHardwareError(Error zk_return_code)
|| zk_return_code == Error::ZSESSIONMOVED
|| zk_return_code == Error::ZCONNECTIONLOSS
|| zk_return_code == Error::ZMARSHALLINGERROR
|| zk_return_code == Error::ZOPERATIONTIMEOUT;
|| zk_return_code == Error::ZOPERATIONTIMEOUT
|| zk_return_code == Error::ZNOTREADONLY;
}
bool isUserError(Error zk_return_code)
@ -196,4 +198,3 @@ void MultiResponse::removeRootPath(const String & root_path)
}
}

View File

@ -109,7 +109,8 @@ enum class Error : int32_t
ZAUTHFAILED = -115, /// Client authentication failed
ZCLOSING = -116, /// ZooKeeper is closing
ZNOTHING = -117, /// (not error) no server responses to process
ZSESSIONMOVED = -118 /// Session moved to another server, so operation is ignored
ZSESSIONMOVED = -118, /// Session moved to another server, so operation is ignored
ZNOTREADONLY = -119, /// State-changing request is passed to read-only server
};
/// Network errors and similar. You should reinitialize ZooKeeper session in case of these errors
@ -445,6 +446,7 @@ enum State
CONNECTING = 1,
ASSOCIATING = 2,
CONNECTED = 3,
READONLY = 5,
NOTCONNECTED = 999
};

View File

@ -323,6 +323,9 @@ Coordination::Error ZooKeeper::tryCreate(const std::string & path, const std::st
{
Coordination::Error code = createImpl(path, data, mode, path_created);
if (code == Coordination::Error::ZNOTREADONLY && exists(path))
return Coordination::Error::ZNODEEXISTS;
if (!(code == Coordination::Error::ZOK ||
code == Coordination::Error::ZNONODE ||
code == Coordination::Error::ZNODEEXISTS ||
@ -345,6 +348,8 @@ void ZooKeeper::createIfNotExists(const std::string & path, const std::string &
if (code == Coordination::Error::ZOK || code == Coordination::Error::ZNODEEXISTS)
return;
else if (code == Coordination::Error::ZNOTREADONLY && exists(path))
return;
else
throw KeeperException::fromPath(code, path);
}

View File

@ -51,6 +51,7 @@ static constexpr int32_t KEEPER_PROTOCOL_VERSION_CONNECTION_REJECT = 42;
static constexpr int32_t CLIENT_HANDSHAKE_LENGTH = 44;
static constexpr int32_t CLIENT_HANDSHAKE_LENGTH_WITH_READONLY = 45;
static constexpr int32_t SERVER_HANDSHAKE_LENGTH = 36;
static constexpr int32_t SERVER_HANDSHAKE_LENGTH_WITH_READONLY = 37;
static constexpr int32_t PASSWORD_LENGTH = 16;
/// ZooKeeper has 1 MB node size and serialization limit by default,

View File

@ -1,3 +1,4 @@
#include "Common/ZooKeeper/ZooKeeperConstants.h"
#include <Common/ZooKeeper/ZooKeeperImpl.h>
#include <IO/Operators.h>
@ -552,12 +553,13 @@ void ZooKeeper::connect(
void ZooKeeper::sendHandshake()
{
int32_t handshake_length = 44;
int32_t handshake_length = 45;
int64_t last_zxid_seen = 0;
int32_t timeout = args.session_timeout_ms;
int64_t previous_session_id = 0; /// We don't support session restore. So previous session_id is always zero.
constexpr int32_t passwd_len = 16;
std::array<char, passwd_len> passwd {};
bool read_only = true;
write(handshake_length);
if (use_compression)
@ -568,6 +570,7 @@ void ZooKeeper::sendHandshake()
write(timeout);
write(previous_session_id);
write(passwd);
write(read_only);
flushWriteBuffer();
}
@ -577,9 +580,10 @@ void ZooKeeper::receiveHandshake()
int32_t protocol_version_read;
int32_t timeout;
std::array<char, PASSWORD_LENGTH> passwd;
bool read_only;
read(handshake_length);
if (handshake_length != SERVER_HANDSHAKE_LENGTH)
if (handshake_length != SERVER_HANDSHAKE_LENGTH && handshake_length != SERVER_HANDSHAKE_LENGTH_WITH_READONLY)
throw Exception(Error::ZMARSHALLINGERROR, "Unexpected handshake length received: {}", handshake_length);
read(protocol_version_read);
@ -607,6 +611,8 @@ void ZooKeeper::receiveHandshake()
read(session_id);
read(passwd);
if (handshake_length == SERVER_HANDSHAKE_LENGTH_WITH_READONLY)
read(read_only);
}
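For orientation, this is how the handshake-length constants decompose on the wire. A sketch based only on the constants above, assuming the standard ZooKeeper connect layout in which the password blob is serialized with a 4-byte length prefix:
// Sketch: byte breakdown behind the 44/45-byte client handshake and the
// 36/37-byte server handshake (field widths assume the usual ZooKeeper layout).
static constexpr int32_t client_handshake =
      4   // protocol_version
    + 8   // last_zxid_seen
    + 4   // timeout
    + 8   // previous_session_id
    + 20; // passwd: 4-byte length prefix + 16 bytes
static_assert(client_handshake == 44); // +1 byte for the read_only flag -> 45
static constexpr int32_t server_handshake =
      4   // protocol_version
    + 4   // timeout
    + 8   // session_id
    + 20; // passwd: 4-byte length prefix + 16 bytes
static_assert(server_handshake == 36); // +1 byte for the read_only flag -> 37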

View File

@ -27,6 +27,7 @@
#cmakedefine01 USE_H3
#cmakedefine01 USE_S2_GEOMETRY
#cmakedefine01 USE_FASTOPS
#cmakedefine01 USE_SQIDS
#cmakedefine01 USE_NLP
#cmakedefine01 USE_VECTORSCAN
#cmakedefine01 USE_LIBURING

View File

@ -43,6 +43,7 @@ struct Settings;
M(UInt64, max_requests_batch_bytes_size, 100*1024, "Max size in bytes of batch of requests that can be sent to RAFT", 0) \
M(UInt64, max_flush_batch_size, 1000, "Max size of batch of requests that can be flushed together", 0) \
M(UInt64, max_requests_quick_batch_size, 100, "Max size of batch of requests to try to get before proceeding with RAFT. Keeper will not wait for requests but take only requests that are already in queue" , 0) \
M(UInt64, max_memory_usage_soft_limit, 0, "Soft limit in bytes of keeper memory usage", 0) \
M(Bool, quorum_reads, false, "Execute read requests as writes through whole RAFT consensus with similar speed", 0) \
M(Bool, force_sync, true, "Call fsync on each change in RAFT changelog", 0) \
M(Bool, compress_logs, false, "Write compressed coordination logs in ZSTD format", 0) \

View File

@ -51,6 +51,56 @@ namespace ErrorCodes
extern const int SYSTEM_ERROR;
}
namespace
{
bool checkIfRequestIncreaseMem(const Coordination::ZooKeeperRequestPtr & request)
{
if (request->getOpNum() == Coordination::OpNum::Create
|| request->getOpNum() == Coordination::OpNum::CreateIfNotExists
|| request->getOpNum() == Coordination::OpNum::Set)
{
return true;
}
else if (request->getOpNum() == Coordination::OpNum::Multi)
{
Coordination::ZooKeeperMultiRequest & multi_req = dynamic_cast<Coordination::ZooKeeperMultiRequest &>(*request);
Int64 memory_delta = 0;
for (const auto & sub_req : multi_req.requests)
{
auto sub_zk_request = std::dynamic_pointer_cast<Coordination::ZooKeeperRequest>(sub_req);
switch (sub_zk_request->getOpNum())
{
case Coordination::OpNum::Create:
case Coordination::OpNum::CreateIfNotExists:
{
Coordination::ZooKeeperCreateRequest & create_req = dynamic_cast<Coordination::ZooKeeperCreateRequest &>(*sub_zk_request);
memory_delta += create_req.bytesSize();
break;
}
case Coordination::OpNum::Set:
{
Coordination::ZooKeeperSetRequest & set_req = dynamic_cast<Coordination::ZooKeeperSetRequest &>(*sub_zk_request);
memory_delta += set_req.bytesSize();
break;
}
case Coordination::OpNum::Remove:
{
Coordination::ZooKeeperRemoveRequest & remove_req = dynamic_cast<Coordination::ZooKeeperRemoveRequest &>(*sub_zk_request);
memory_delta -= remove_req.bytesSize();
break;
}
default:
break;
}
}
return memory_delta > 0;
}
return false;
}
}
KeeperDispatcher::KeeperDispatcher()
: responses_queue(std::numeric_limits<size_t>::max())
@ -93,6 +143,14 @@ void KeeperDispatcher::requestThread()
if (shutdown_called)
break;
Int64 mem_soft_limit = configuration_and_settings->coordination_settings->max_memory_usage_soft_limit;
if (configuration_and_settings->standalone_keeper && mem_soft_limit > 0 && total_memory_tracker.get() >= mem_soft_limit && checkIfRequestIncreaseMem(request.request))
{
LOG_TRACE(log, "Processing requests refused because of max_memory_usage_soft_limit {}, the total used memory is {}, request type is {}", mem_soft_limit, total_memory_tracker.get(), request.request->getOpNum());
addErrorResponses({request}, Coordination::Error::ZCONNECTIONLOSS);
continue;
}
KeeperStorage::RequestsForSessions current_batch;
size_t current_batch_bytes_size = 0;

View File

@ -216,7 +216,9 @@ PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList(
/// All rows must contain the same number of dimensions, so LIMIT 1 is enough. If the number of dimensions differs between rows,
/// such arrays cannot be used as a ClickHouse Array at all.
pqxx::result result{tx.exec(fmt::format("SELECT array_ndims({}) FROM {} LIMIT 1", name_and_type.name, postgres_table))};
auto dimensions = result[0][0].as<int>();
// array_ndims() may return NULL for an empty array, but we expect 0:
// https://github.com/postgres/postgres/blob/d16a0c1e2e3874cd5adfa9ee968008b6c4b1ae01/src/backend/utils/adt/arrayfuncs.c#L1658
auto dimensions = result[0][0].as<std::optional<int>>().value_or(0);
/// It is always 1d array if it is in recheck.
DataTypePtr type = assert_cast<const DataTypeArray *>(name_and_type.type.get())->getNestedType();
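In isolation, the null-safe read pattern used here looks as follows; a sketch against the libpqxx API used above, with a hypothetical table and column name:
#include <optional>
#include <pqxx/pqxx>

int readDimensions(pqxx::work & tx)
{
    // array_ndims() yields SQL NULL for an empty array; read it through
    // std::optional and fall back to 0 dimensions, as the code above does.
    pqxx::row row = tx.exec1("SELECT array_ndims(arr) FROM some_table LIMIT 1");
    return row[0].as<std::optional<int>>().value_or(0);
}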

View File

@ -79,6 +79,10 @@ if (ENABLE_NLP)
list (APPEND PRIVATE_LIBS ch_contrib::cld2)
endif()
if (TARGET ch_contrib::sqids)
list (APPEND PRIVATE_LIBS ch_contrib::sqids)
endif()
if (TARGET ch_contrib::h3)
list (APPEND PRIVATE_LIBS ch_contrib::h3)
endif()

View File

@ -0,0 +1,104 @@
#include "config.h"
#ifdef ENABLE_SQIDS
#include <Columns/ColumnString.h>
#include <Columns/ColumnsNumber.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
#include <Functions/FunctionFactory.h>
#include <Functions/IFunction.h>
#include <Functions/FunctionHelpers.h>
#include <Interpreters/Context.h>
#include <sqids/sqids.hpp>
namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
// sqid(number1, ...)
class FunctionSqid : public IFunction
{
public:
static constexpr auto name = "sqid";
String getName() const override { return name; }
size_t getNumberOfArguments() const override { return 0; }
bool isVariadic() const override { return true; }
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; }
static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionSqid>(); }
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{
if (arguments.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires at least one argument.", getName());
for (size_t i = 0; i < arguments.size(); ++i)
{
if (!checkDataTypes<
DataTypeUInt8,
DataTypeUInt16,
DataTypeUInt32,
DataTypeUInt64>(arguments[i].get()))
throw Exception(
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Argument {} for function {} must have datatype UInt*, given type: {}.",
i, getName(), arguments[i]->getName());
}
return std::make_shared<DataTypeString>();
}
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t input_rows_count) const override
{
size_t num_args = arguments.size();
auto col_res = ColumnString::create();
std::vector<UInt64> numbers(num_args);
for (size_t i = 0; i < input_rows_count; ++i)
{
for (size_t j = 0; j < num_args; ++j)
{
const ColumnWithTypeAndName & arg = arguments[j];
ColumnPtr current_column = arg.column;
numbers[j] = current_column->getUInt(i);
}
auto id = sqids.encode(numbers);
col_res->insert(id);
}
return col_res;
}
private:
sqidscxx::Sqids<> sqids;
};
REGISTER_FUNCTION(Sqid)
{
factory.registerFunction<FunctionSqid>(FunctionDocumentation{
.description=R"(
Transforms numbers into a YouTube-like short URL hash called a [Sqid](https://sqids.org/).)",
.syntax="sqid(number1, ...)",
.arguments={{"number1, ...", "Arbitrarily many UInt8, UInt16, UInt32 or UInt64 arguments"}},
.returned_value="A hash id [String](/docs/en/sql-reference/data-types/string.md).",
.examples={
{"simple",
"SELECT sqid(1, 2, 3, 4, 5);",
R"(
sqid(1, 2, 3, 4, 5)
gXHfJ1C6dN
)"
}}
});
}
}
#endif
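As a quick illustration of the library call this function wraps, the following standalone sketch uses sqids-cpp directly, mirroring the encode() call above; the printed value is taken from the documentation example:
#include <sqids/sqids.hpp>
#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    // Same call as in FunctionSqid::executeImpl, outside ClickHouse.
    sqidscxx::Sqids<> sqids;
    std::vector<uint64_t> numbers{1, 2, 3, 4, 5};
    std::cout << sqids.encode(numbers) << '\n'; // "gXHfJ1C6dN" per the docs above
}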

View File

@ -21,6 +21,7 @@ namespace
REGISTER_FUNCTION(FormatReadableSize)
{
factory.registerFunction<FunctionFormatReadable<Impl>>();
factory.registerAlias("FORMAT_BYTES", Impl::name, FunctionFactory::CaseInsensitive);
}
}

View File

@ -367,6 +367,7 @@ struct ContextSharedPart : boost::noncopyable
std::shared_ptr<Clusters> clusters TSA_GUARDED_BY(clusters_mutex);
ConfigurationPtr clusters_config TSA_GUARDED_BY(clusters_mutex); /// Stores updated configs
std::unique_ptr<ClusterDiscovery> cluster_discovery TSA_GUARDED_BY(clusters_mutex);
size_t clusters_version TSA_GUARDED_BY(clusters_mutex) = 0;
/// No lock required for async_insert_queue modified only during initialization
std::shared_ptr<AsynchronousInsertQueue> async_insert_queue;
@ -3523,6 +3524,14 @@ void Context::setClustersConfig(const ConfigurationPtr & config, bool enable_dis
shared->clusters = std::make_shared<Clusters>(*shared->clusters_config, settings, getMacros(), config_name);
else
shared->clusters->updateClusters(*shared->clusters_config, settings, config_name, old_clusters_config);
++shared->clusters_version;
}
size_t Context::getClustersVersion() const
{
std::lock_guard lock(shared->clusters_mutex);
return shared->clusters_version;
}

View File

@ -1023,6 +1023,7 @@ public:
std::shared_ptr<Cluster> getCluster(const std::string & cluster_name) const;
std::shared_ptr<Cluster> tryGetCluster(const std::string & cluster_name) const;
void setClustersConfig(const ConfigurationPtr & config, bool enable_discovery = false, const String & config_name = "remote_servers");
size_t getClustersVersion() const;
void startClusterDiscovery();

View File

@ -254,7 +254,8 @@ HashJoin::HashJoin(std::shared_ptr<TableJoin> table_join_, const Block & right_s
else if (table_join->getClauses().empty())
{
data->type = Type::EMPTY;
sample_block_with_columns_to_add = right_sample_block;
/// We might need to insert default values into the right columns, materialize them
sample_block_with_columns_to_add = materializeBlock(right_sample_block);
}
else if (table_join->oneDisjunct())
{

View File

@ -53,6 +53,7 @@ DataTypePtr getCoordinationErrorCodesEnumType()
{"ZCLOSING", static_cast<Int8>(Coordination::Error::ZCLOSING)},
{"ZNOTHING", static_cast<Int8>(Coordination::Error::ZNOTHING)},
{"ZSESSIONMOVED", static_cast<Int8>(Coordination::Error::ZSESSIONMOVED)},
{"ZNOTREADONLY", static_cast<Int8>(Coordination::Error::ZNOTREADONLY)},
});
}
@ -115,6 +116,7 @@ NamesAndTypesList ZooKeeperLogElement::getNamesAndTypes()
{"CONNECTING", static_cast<Int16>(Coordination::State::CONNECTING)},
{"ASSOCIATING", static_cast<Int16>(Coordination::State::ASSOCIATING)},
{"CONNECTED", static_cast<Int16>(Coordination::State::CONNECTED)},
{"READONLY", static_cast<Int16>(Coordination::State::READONLY)},
{"NOTCONNECTED", static_cast<Int16>(Coordination::State::NOTCONNECTED)},
});

View File

@ -985,7 +985,7 @@ JoinTreeQueryPlan buildQueryPlanForJoinNode(const QueryTreeNodePtr & join_table_
std::optional<bool> join_constant;
if (join_strictness == JoinStrictness::All)
if (join_strictness == JoinStrictness::All || join_strictness == JoinStrictness::Semi || join_strictness == JoinStrictness::Anti)
join_constant = tryExtractConstantFromJoinNode(join_table_expression);
if (join_constant)

View File

@ -199,7 +199,8 @@ PostgreSQLSource<T>::~PostgreSQLSource()
tryLogCurrentException(__PRETTY_FUNCTION__);
}
connection_holder->setBroken();
if (connection_holder)
connection_holder->setBroken();
}
}

View File

@ -349,7 +349,7 @@ Chain buildPushingToViewsChain(
for (const auto & column : header)
{
/// But skip columns which storage doesn't have.
if (inner_table_columns.hasPhysical(column.name))
if (inner_table_columns.hasNotAlias(column.name))
insert_columns.emplace_back(column.name);
}

View File

@ -121,6 +121,8 @@ void PrometheusMetricsWriter::write(WriteBuffer & wb) const
if (send_errors)
{
size_t total_count = 0;
for (size_t i = 0, end = ErrorCodes::end(); i < end; ++i)
{
const auto & error = ErrorCodes::values[i].get();
@ -136,7 +138,15 @@ void PrometheusMetricsWriter::write(WriteBuffer & wb) const
writeOutLine(wb, "# TYPE", key, "counter");
/// We are interested only in errors that happened on this server.
writeOutLine(wb, key, error.local.count);
total_count += error.local.count;
}
/// Write the total number of errors as a separate metric
std::string key{error_metrics_prefix + toString("ALL")};
writeOutLine(wb, "# HELP", key, "The total number of errors since last server restart");
writeOutLine(wb, "# TYPE", key, "counter");
writeOutLine(wb, key, total_count);
}
}
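With this change, the errors block of the Prometheus endpoint gains one aggregate counter in addition to the per-error counters. A hypothetical excerpt of the output (the value 3 is invented; the ClickHouseErrorMetric_ prefix matches what the integration test later in this commit asserts):
# HELP ClickHouseErrorMetric_ALL The total number of errors since last server restart
# TYPE ClickHouseErrorMetric_ALL counter
ClickHouseErrorMetric_ALL 3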

View File

@ -668,6 +668,12 @@ bool ColumnsDescription::hasPhysical(const String & column_name) const
it->default_desc.kind != ColumnDefaultKind::Alias && it->default_desc.kind != ColumnDefaultKind::Ephemeral;
}
bool ColumnsDescription::hasNotAlias(const String & column_name) const
{
auto it = columns.get<1>().find(column_name);
return it != columns.get<1>().end() && it->default_desc.kind != ColumnDefaultKind::Alias;
}
bool ColumnsDescription::hasAlias(const String & column_name) const
{
auto it = columns.get<1>().find(column_name);

View File

@ -182,6 +182,7 @@ public:
Names getNamesOfPhysical() const;
bool hasPhysical(const String & column_name) const;
bool hasNotAlias(const String & column_name) const;
bool hasAlias(const String & column_name) const;
bool hasColumnOrSubcolumn(GetColumnsOptions::Kind kind, const String & column_name) const;
bool hasColumnOrNested(GetColumnsOptions::Kind kind, const String & column_name) const;

View File

@ -7,7 +7,6 @@
#include <Formats/NativeReader.h>
#include <Processors/ISource.h>
#include <Interpreters/Context.h>
#include <Interpreters/Cluster.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/ConnectionTimeouts.h>
@ -55,7 +54,7 @@ namespace
{
template <typename PoolFactory>
ConnectionPoolPtrs createPoolsForAddresses(const std::string & name, PoolFactory && factory, const Cluster::ShardsInfo & shards_info, Poco::Logger * log)
ConnectionPoolPtrs createPoolsForAddresses(const Cluster::Addresses & addresses, PoolFactory && factory, Poco::Logger * log)
{
ConnectionPoolPtrs pools;
@ -76,30 +75,8 @@ ConnectionPoolPtrs createPoolsForAddresses(const std::string & name, PoolFactory
}
};
for (auto it = boost::make_split_iterator(name, boost::first_finder(",")); it != decltype(it){}; ++it)
{
const std::string & dirname = boost::copy_range<std::string>(*it);
Cluster::Address address = Cluster::Address::fromFullString(dirname);
if (address.shard_index && dirname.ends_with("_all_replicas"))
{
if (address.shard_index > shards_info.size())
{
LOG_ERROR(log, "No shard with shard_index={} ({})", address.shard_index, name);
continue;
}
const auto & shard_info = shards_info[address.shard_index - 1];
size_t replicas = shard_info.per_replica_pools.size();
for (size_t replica_index = 1; replica_index <= replicas; ++replica_index)
{
address.replica_index = static_cast<UInt32>(replica_index);
make_connection(address);
}
}
else
make_connection(address);
}
for (const auto & address : addresses)
make_connection(address);
return pools;
}
@ -254,34 +231,14 @@ void DistributedAsyncInsertDirectoryQueue::run()
}
ConnectionPoolPtr DistributedAsyncInsertDirectoryQueue::createPool(const std::string & name, const StorageDistributed & storage)
ConnectionPoolPtr DistributedAsyncInsertDirectoryQueue::createPool(const Cluster::Addresses & addresses, const StorageDistributed & storage)
{
const auto pool_factory = [&storage, &name] (const Cluster::Address & address) -> ConnectionPoolPtr
const auto pool_factory = [&storage] (const Cluster::Address & address) -> ConnectionPoolPtr
{
const auto & cluster = storage.getCluster();
const auto & shards_info = cluster->getShardsInfo();
const auto & shards_addresses = cluster->getShardsAddresses();
/// Check new format shard{shard_index}_replica{replica_index}
/// (shard_index and replica_index starts from 1).
if (address.shard_index != 0)
{
if (!address.replica_index)
throw Exception(ErrorCodes::INCORRECT_FILE_NAME,
"Wrong replica_index={} ({})", address.replica_index, name);
if (address.shard_index > shards_info.size())
throw Exception(ErrorCodes::INCORRECT_FILE_NAME,
"No shard with shard_index={} ({})", address.shard_index, name);
const auto & shard_info = shards_info[address.shard_index - 1];
if (address.replica_index > shard_info.per_replica_pools.size())
throw Exception(ErrorCodes::INCORRECT_FILE_NAME,
"No shard with replica_index={} ({})", address.replica_index, name);
return shard_info.per_replica_pools[address.replica_index - 1];
}
/// Existing connections pool have a higher priority.
for (size_t shard_index = 0; shard_index < shards_info.size(); ++shard_index)
{
@ -318,7 +275,7 @@ ConnectionPoolPtr DistributedAsyncInsertDirectoryQueue::createPool(const std::st
address.secure);
};
auto pools = createPoolsForAddresses(name, pool_factory, storage.getCluster()->getShardsInfo(), storage.log);
auto pools = createPoolsForAddresses(addresses, pool_factory, storage.log);
const auto settings = storage.getContext()->getSettings();
return pools.size() == 1 ? pools.front() : std::make_shared<ConnectionPoolWithFailover>(pools,

View File

@ -4,6 +4,7 @@
#include <Common/ConcurrentBoundedQueue.h>
#include <Client/ConnectionPool.h>
#include <IO/ReadBufferFromFile.h>
#include <Interpreters/Cluster.h>
#include <Disks/IDisk.h>
#include <atomic>
#include <mutex>
@ -56,7 +57,7 @@ public:
~DistributedAsyncInsertDirectoryQueue();
static ConnectionPoolPtr createPool(const std::string & name, const StorageDistributed & storage);
static ConnectionPoolPtr createPool(const Cluster::Addresses & addresses, const StorageDistributed & storage);
void updatePath(const std::string & new_relative_path);

View File

@ -10,6 +10,8 @@
#include <Parsers/parseQuery.h>
#include <Storages/extractKeyExpressionList.h>
#include <Storages/ReplaceAliasByExpressionVisitor.h>
#include <Core/Defines.h>
#include "Common/Exception.h"
@ -22,6 +24,11 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
}
namespace
{
using ReplaceAliasToExprVisitor = InDepthNodeVisitor<ReplaceAliasByExpressionMatcher, true>;
}
IndexDescription::IndexDescription(const IndexDescription & other)
: definition_ast(other.definition_ast ? other.definition_ast->clone() : nullptr)
, expression_list_ast(other.expression_list_ast ? other.expression_list_ast->clone() : nullptr)
@ -94,6 +101,10 @@ IndexDescription IndexDescription::getIndexFromAST(const ASTPtr & definition_ast
if (index_definition->expr)
{
expr_list = extractKeyExpressionList(index_definition->expr->clone());
ReplaceAliasToExprVisitor::Data data{columns};
ReplaceAliasToExprVisitor{data}.visit(expr_list);
result.expression_list_ast = expr_list->clone();
}
else

View File

@ -714,11 +714,9 @@ void PostgreSQLReplicationHandler::shutdownFinal()
dropReplicationSlot(tx, /* temporary */false);
});
}
catch (Exception & e)
catch (...)
{
e.addMessage("while dropping replication slot: {}", replication_slot);
LOG_ERROR(log, "Failed to drop replication slot: {}. It must be dropped manually.", replication_slot);
throw;
LOG_ERROR(log, "Failed to drop replication slot: {}. It must be dropped manually. Error: {}", replication_slot, getCurrentExceptionMessage(true));
}
}

View File

@ -0,0 +1,32 @@
#include <Storages/ReplaceAliasByExpressionVisitor.h>
#include <Parsers/ASTIdentifier.h>
#include <Parsers/ASTLiteral.h>
#include <Storages/ColumnsDescription.h>
#include <Common/typeid_cast.h>
namespace DB
{
void ReplaceAliasByExpressionMatcher::visit(ASTPtr & ast, Data & data)
{
if (auto * identifier = ast->as<ASTIdentifier>())
{
visit(*identifier, ast, data);
}
}
void ReplaceAliasByExpressionMatcher::visit(const ASTIdentifier & column, ASTPtr & ast, Data & data)
{
const auto & column_name = column.name();
if (data.columns.hasAlias(column_name))
{
/// Alias expr is saved in default expr.
if (auto col_default = data.columns.getDefault(column_name))
{
ast = col_default->expression->clone();
}
}
}
}

View File

@ -0,0 +1,40 @@
#pragma once
#include <Parsers/IAST.h>
#include <Interpreters/InDepthNodeVisitor.h>
namespace DB
{
class ASTFunction;
class ColumnsDescription;
class ASTIdentifier;
/* The Visitor is used to replace an ALIAS column by its EXPRESSION when the
 * ALIAS column is referenced in an index definition.
 *
 * For example, given the following create statement:
 * CREATE TABLE t
 * (
 *     col UInt8,
 *     col_alias ALIAS col + 1,
 *     INDEX idx (col_alias) TYPE minmax
 * ) ENGINE = MergeTree ORDER BY col;
 * we need to call the visitor to replace `col_alias` by `col + 1` when building
 * the index description from the index definition AST.
 */
class ReplaceAliasByExpressionMatcher
{
public:
struct Data
{
const ColumnsDescription & columns;
};
static void visit(ASTPtr & ast, Data &);
static void visit(const ASTIdentifier &, ASTPtr & ast, Data &);
static bool needChildVisit(const ASTPtr &, const ASTPtr &) { return true; }
};
}

View File

@ -1379,9 +1379,13 @@ DistributedAsyncInsertDirectoryQueue & StorageDistributed::getDirectoryQueue(con
std::lock_guard lock(cluster_nodes_mutex);
auto & node_data = cluster_nodes_data[key];
if (!node_data.directory_queue)
/// If the node set behind this directory has changed, the DistributedAsyncInsertDirectoryQueue must be recreated
if (!node_data.directory_queue
|| (node_data.clusters_version < getContext()->getClustersVersion() && node_data.addresses != parseAddresses(name)))
{
node_data.connection_pool = DistributedAsyncInsertDirectoryQueue::createPool(name, *this);
node_data.addresses = parseAddresses(name);
node_data.clusters_version = getContext()->getClustersVersion();
node_data.connection_pool = DistributedAsyncInsertDirectoryQueue::createPool(node_data.addresses, *this);
node_data.directory_queue = std::make_unique<DistributedAsyncInsertDirectoryQueue>(
*this, disk, relative_data_path + name,
node_data.connection_pool,
@ -1401,6 +1405,53 @@ std::vector<DistributedAsyncInsertDirectoryQueue::Status> StorageDistributed::ge
return statuses;
}
Cluster::Addresses StorageDistributed::parseAddresses(const std::string & name) const
{
Cluster::Addresses addresses;
const auto & cluster = getCluster();
const auto & shards_info = cluster->getShardsInfo();
const auto & shards_addresses = cluster->getShardsAddresses();
for (auto it = boost::make_split_iterator(name, boost::first_finder(",")); it != decltype(it){}; ++it)
{
const std::string & dirname = boost::copy_range<std::string>(*it);
Cluster::Address address = Cluster::Address::fromFullString(dirname);
/// Check new format shard{shard_index}_replica{replica_index}
/// (shard_index and replica_index starts from 1).
if (address.shard_index)
{
if (address.shard_index > shards_info.size())
{
LOG_ERROR(log, "No shard with shard_index={} ({})", address.shard_index, name);
continue;
}
const auto & replicas_addresses = shards_addresses[address.shard_index - 1];
size_t replicas = replicas_addresses.size();
if (dirname.ends_with("_all_replicas"))
{
for (const auto & replica_address : replicas_addresses)
addresses.push_back(replica_address);
continue;
}
if (address.replica_index > replicas)
{
LOG_ERROR(log, "No shard with replica_index={} ({})", address.replica_index, name);
continue;
}
addresses.push_back(replicas_addresses[address.replica_index - 1]);
}
else
addresses.push_back(address);
}
return addresses;
}
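// For reference, typical directory names this function parses; the concrete
// values below are illustrative, real names are the per-destination
// subdirectories of the Distributed table's data path:
//   "shard2_replica1"                     -> address of shard 2, replica 1
//   "shard2_all_replicas"                 -> every replica address of shard 2
//   "shard1_replica1,shard2_all_replicas" -> the union of both, split on ','
//   any other name                        -> parsed as a literal address by
//                                            Cluster::Address::fromFullString()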
std::optional<UInt64> StorageDistributed::totalBytes(const Settings &) const
{
UInt64 total_bytes = 0;

View File

@ -176,6 +176,8 @@ private:
/// Used for the INSERT into Distributed in case of distributed_foreground_insert==1, from DistributedSink.
DistributedAsyncInsertDirectoryQueue & getDirectoryQueue(const DiskPtr & disk, const std::string & name);
/// Parse the addresses corresponding to the directory name of the directory queue
Cluster::Addresses parseAddresses(const std::string & name) const;
/// Return list of metrics for all created monitors
/// (note that monitors are created lazily, i.e. until at least one INSERT executed)
@ -270,6 +272,8 @@ private:
{
std::shared_ptr<DistributedAsyncInsertDirectoryQueue> directory_queue;
ConnectionPoolPtr connection_pool;
Cluster::Addresses addresses;
size_t clusters_version;
};
std::unordered_map<std::string, ClusterNodeData> cluster_nodes_data;
mutable std::mutex cluster_nodes_mutex;

View File

@ -1869,7 +1869,7 @@ MergeTreeData::DataPartsVector StorageReplicatedMergeTree::checkPartChecksumsAnd
}
}
throw zkutil::KeeperException(e);
throw zkutil::KeeperMultiException(e, ops, responses);
}
}

View File

@ -128,6 +128,7 @@ const char * auto_contributors[] {
"Andrei Ch",
"Andrei Chulkov",
"Andrei Nekrashevich",
"Andrej Hoos",
"Andrew",
"Andrew Grigorev",
"Andrew Onyshchuk",
@ -194,6 +195,7 @@ const char * auto_contributors[] {
"Artur Beglaryan",
"Artur Filatenkov",
"Artur Malchanau",
"Arvind Pj",
"Ash Vardanian",
"AsiaKorushkina",
"Atri Sharma",
@ -253,6 +255,7 @@ const char * auto_contributors[] {
"Christian",
"Christian Clauss",
"Christoph Wurm",
"Chuan-Zheng Lee",
"Chun-Sheng, Li",
"Ciprian Hacman",
"Clayton McClure",
@ -498,6 +501,7 @@ const char * auto_contributors[] {
"Jakub Kuklis",
"James Maidment",
"James Morrison",
"James Seymour",
"JaosnHsieh",
"Jason",
"Jason Keirstead",
@ -555,6 +559,7 @@ const char * auto_contributors[] {
"Kerry Clendinning",
"Kevin Chiang",
"Kevin Michel",
"Kevin Mingtarja",
"Kevin Zhang",
"KevinyhZou",
"KinderRiven",
@ -787,6 +792,7 @@ const char * auto_contributors[] {
"Oleg Obleukhov",
"Oleg Strokachuk",
"Oleg Taizov",
"Oleg V. Kozlyuk",
"Olga Khvostikova",
"Olga Revyakina",
"OmarBazaraa",
@ -825,6 +831,7 @@ const char * auto_contributors[] {
"Pervakov Grigorii",
"Pervakov Grigory",
"Petr Vasilev",
"Philip Hallstrom",
"Philippe Ombredanne",
"PigInCloud",
"Potya",
@ -917,6 +924,7 @@ const char * auto_contributors[] {
"Sergey Kazmin",
"Sergey Kislov",
"Sergey Kononenko",
"Sergey Kviatkevich",
"Sergey Lazarev",
"Sergey Magidovich",
"Sergey Mirvoda",
@ -934,6 +942,7 @@ const char * auto_contributors[] {
"Shane Andrade",
"Sherry Wang",
"Shoh Jahon",
"Shri Bodas",
"Sichen Zhao",
"SiderZhang",
"Sidorov Pavel",
@ -1146,6 +1155,7 @@ const char * auto_contributors[] {
"[데이터플랫폼팀] 이호선",
"a.palagashvili",
"aaapetrenko",
"abakhmetev",
"abdrakhmanov",
"abel-wang",
"abidino",
@ -1280,6 +1290,7 @@ const char * auto_contributors[] {
"dongyifeng",
"ducle.canh",
"eaxdev",
"edef",
"eejoin",
"egatov",
"ekrasikov",
@ -1393,7 +1404,9 @@ const char * auto_contributors[] {
"jinjunzh",
"jiyoungyoooo",
"jkuklis",
"joelynch",
"johanngan",
"johnnymatthews",
"jsc0218",
"jthmath",
"jun won",
@ -1414,6 +1427,7 @@ const char * auto_contributors[] {
"koloshmet",
"kolsys",
"konnectr",
"konruvikt",
"koshachy",
"kothiga",
"kreuzerkrieg",
@ -1496,6 +1510,7 @@ const char * auto_contributors[] {
"mayamika",
"mehanizm",
"melin",
"melvynator",
"memo",
"meo",
"meoww-bot",
@ -1566,6 +1581,7 @@ const char * auto_contributors[] {
"pingyu",
"pkubaj",
"potya",
"pppeace",
"presto53",
"priera",
"proller",
@ -1598,9 +1614,11 @@ const char * auto_contributors[] {
"rodrigargar",
"roman",
"romanzhukov",
"rondo_1895",
"root",
"roverxu",
"ruct",
"ruslandoga",
"ryzuo",
"s-kat",
"sanjam",
@ -1623,6 +1641,7 @@ const char * auto_contributors[] {
"sichenzhao",
"simon-says",
"simpleton",
"slu",
"slvrtrn",
"snyk-bot",
"songenjie",
@ -1644,6 +1663,7 @@ const char * auto_contributors[] {
"tai",
"taichong",
"taiyang-li",
"takakawa",
"tangjiangling",
"tao jiang",
"taofengliu",
@ -1661,6 +1681,7 @@ const char * auto_contributors[] {
"tiger.yan",
"timfursov",
"tison",
"tomtana",
"topvisor",
"tpanetti",
"turbo jason",
@ -1707,6 +1728,7 @@ const char * auto_contributors[] {
"xiedeyantu",
"xieyichen",
"xinhuitian",
"xleoken",
"xlwh",
"xmy",
"xuelei",
@ -1804,6 +1826,7 @@ const char * auto_contributors[] {
"枢木",
"王智博",
"董海镔",
"袁焊忠",
"谢磊",
"贾顺名(Jarvis)",
"郭小龙",

View File

@ -2,6 +2,8 @@
#include <Parsers/ASTLiteral.h>
#include <Access/Common/AccessFlags.h>
#include <DataTypes/DataTypeArray.h>
#include <DataTypes/DataTypeString.h>
#include <DataTypes/DataTypesNumber.h>
@ -78,6 +80,7 @@ ColumnsDescription TableFunctionDictionary::getActualTableStructure(ContextPtr c
StoragePtr TableFunctionDictionary::executeImpl(
const ASTPtr &, ContextPtr context, const std::string & table_name, ColumnsDescription, bool is_insert_query) const
{
context->checkAccess(AccessType::dictGet, getDatabaseName(), table_name);
StorageID dict_id(getDatabaseName(), table_name);
auto dictionary_table_structure = getActualTableStructure(context, is_insert_query);

View File

@ -128,6 +128,9 @@ endif()
if (TARGET ch_contrib::fastops)
set(USE_FASTOPS 1)
endif()
if (TARGET ch_contrib::sqids)
set(USE_SQIDS 1)
endif()
if (TARGET ch_contrib::vectorscan)
set(USE_VECTORSCAN 1)
endif()

View File

@ -56,21 +56,20 @@ def get_scales(runner_type: str) -> Tuple[int, int]:
"returns the multipliers for scaling down and up ASG by types"
# Scaling down is quicker on the lack of running jobs than scaling up on
# queue
scale_down = 2
# The ASG should deflate almost instantly
scale_down = 1
# the style checkers generate so much noise that it scales up too quickly
# The 5 was too quick; there were complaints that 10 was
# too slow. I am trying 7 now.
# 7 still looked a bit slow, so I am trying 6
# Let's have it the same as the other ASG
# UPDATE THE COMMENT ON CHANGES
scale_up = 3
if runner_type == "style-checker":
# The ASG should deflate almost instantly
scale_down = 1
# the style checkers generate so much noise that it scales up too quickly
# The 5 was too quick; there were complaints that 10 was
# too slow. I am trying 7 now.
# 7 still looked a bit slow, so I am trying 6
# Let's have it the same as the other ASG
# UPDATE THE COMMENT ON CHANGES
## scale_down = 3
if runner_type.startswith("private-"):
scale_up = 1
elif runner_type == "limited-tester":
# The limited runners should inflate and deflate faster
scale_down = 1
scale_up = 2
return scale_down, scale_up

View File

@ -90,13 +90,25 @@ class TestSetCapacity(unittest.TestCase):
16,
),
TestCase("lower-min", 10, 5, 20, [Queue("queued", 5, "lower-min")], 10),
# scale up group with prefix private-
TestCase(
"private-increase",
1,
13,
20,
[
Queue("in_progress", 12, "private-increase"),
Queue("queued", 11, "private-increase"),
],
20,
),
# Decrease capacity
TestCase("w/reserve", 1, 13, 20, [Queue("queued", 5, "w/reserve")], 9),
TestCase("w/reserve", 1, 13, 20, [Queue("queued", 5, "w/reserve")], 5),
TestCase(
"style-checker", 1, 13, 20, [Queue("queued", 5, "style-checker")], 5
),
TestCase("w/reserve", 1, 23, 20, [Queue("queued", 17, "w/reserve")], 20),
TestCase("decrease", 1, 13, 20, [Queue("in_progress", 3, "decrease")], 8),
TestCase("w/reserve", 1, 23, 20, [Queue("queued", 17, "w/reserve")], 17),
TestCase("decrease", 1, 13, 20, [Queue("in_progress", 3, "decrease")], 3),
TestCase(
"style-checker",
1,

View File

@ -390,6 +390,7 @@ class ClickHouseCluster:
odbc_bridge_bin_path=None,
library_bridge_bin_path=None,
zookeeper_config_path=None,
keeper_config_dir=None,
custom_dockerd_host=None,
zookeeper_keyfile=None,
zookeeper_certfile=None,
@ -426,6 +427,12 @@ class ClickHouseCluster:
else p.join(HELPERS_DIR, "zookeeper_config.xml")
)
self.keeper_config_dir = (
p.join(self.base_dir, keeper_config_dir)
if keeper_config_dir
else HELPERS_DIR
)
project_name = (
pwd.getpwuid(os.getuid()).pw_name + p.basename(self.base_dir) + self.name
)
@ -2725,7 +2732,9 @@ class ClickHouseCluster:
if self.use_keeper: # TODO: remove hardcoded paths from here
for i in range(1, 4):
shutil.copy(
os.path.join(HELPERS_DIR, f"keeper_config{i}.xml"),
os.path.join(
self.keeper_config_dir, f"keeper_config{i}.xml"
),
os.path.join(
self.keeper_instance_dir_prefix + f"{i}", "config"
),

View File

@ -22,6 +22,10 @@ postgres_table_template_5 = """
CREATE TABLE IF NOT EXISTS "{}" (
key Integer NOT NULL, value UUID, PRIMARY KEY(key))
"""
postgres_table_template_6 = """
CREATE TABLE IF NOT EXISTS "{}" (
key Integer NOT NULL, value Text, PRIMARY KEY(key))
"""
def get_postgres_conn(
@ -140,7 +144,7 @@ class PostgresManager:
raise ex
def execute(self, query):
self.cursor.execute(query)
return self.cursor.execute(query)
def prepare(self):
self.conn = get_postgres_conn(ip=self.ip, port=self.port)
@ -236,12 +240,14 @@ class PostgresManager:
postgres_database="",
settings=[],
table_overrides="",
user="postgres",
password="mysecretpassword",
):
postgres_database = self.database_or_default(postgres_database)
self.created_materialized_postgres_db_list.add(materialized_database)
self.instance.query(f"DROP DATABASE IF EXISTS {materialized_database}")
create_query = f"CREATE DATABASE {materialized_database} ENGINE = MaterializedPostgreSQL('{ip}:{port}', '{postgres_database}', 'postgres', 'mysecretpassword')"
create_query = f"CREATE DATABASE {materialized_database} ENGINE = MaterializedPostgreSQL('{ip}:{port}', '{postgres_database}', '{user}', '{password}')"
if len(settings) > 0:
create_query += " SETTINGS "
for i in range(len(settings)):

View File

@ -0,0 +1,38 @@
<clickhouse>
<remote_servers>
<test_cluster>
<shard>
<replica>
<host>node1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>node3</host>
<port>9000</port>
</replica>
</shard>
</test_cluster>
<test_cluster_with_replication>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>node1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>node2</host>
<port>9000</port>
</replica>
<replica>
<host>node3</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_with_replication>
</remote_servers>
</clickhouse>

View File

@ -0,0 +1,232 @@
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance(
"node1",
main_configs=["configs/remote_servers.xml"],
)
node2 = cluster.add_instance(
"node2",
main_configs=["configs/remote_servers.xml"],
)
node3 = cluster.add_instance(
"node3",
main_configs=["configs/remote_servers.xml"],
)
config1 = """<clickhouse>
<remote_servers>
<test_cluster>
<shard>
<replica>
<host>node1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>node3</host>
<port>9000</port>
</replica>
</shard>
</test_cluster>
<test_cluster_with_replication>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>node1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>node2</host>
<port>9000</port>
</replica>
<replica>
<host>node3</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_with_replication>
</remote_servers>
</clickhouse>"""
config2 = """<clickhouse>
<remote_servers>
<test_cluster>
<shard>
<replica>
<host>node1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>node2</host>
<port>9000</port>
</replica>
</shard>
<shard>
<replica>
<host>node3</host>
<port>9000</port>
</replica>
</shard>
</test_cluster>
<test_cluster_with_replication>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>node1</host>
<port>9000</port>
</replica>
</shard>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>node3</host>
<port>9000</port>
</replica>
</shard>
</test_cluster_with_replication>
</remote_servers>
</clickhouse>
"""
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
for _, node in cluster.instances.items():
node.query(
f"""
create table dist_local (c1 Int32, c2 String) engine=MergeTree() order by c1;
create table dist (c1 Int32, c2 String) engine=Distributed(test_cluster, currentDatabase(), dist_local, c1);
create table replica_dist_local (c1 Int32, c2 String) engine=MergeTree() order by c1;
create table replica_dist (c1 Int32, c2 String) engine=Distributed(test_cluster_with_replication, currentDatabase(), replica_dist_local, c1);
"""
)
yield cluster
finally:
cluster.shutdown()
def test_distributed_async_insert(started_cluster):
node1.query("insert into dist select number,'A' from system.numbers limit 10;")
node1.query("system flush distributed dist;")
assert int(node3.query("select count() from dist_local where c2 = 'A'")) == 5
assert int(node1.query("select count() from dist_local where c2 = 'A'")) == 5
# Add node2
node1.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config2)
node1.query("SYSTEM RELOAD CONFIG;")
node2.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config2)
node2.query("SYSTEM RELOAD CONFIG;")
node3.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config2)
node3.query("SYSTEM RELOAD CONFIG;")
node1.query("insert into dist select number,'B' from system.numbers limit 12;")
node1.query("system flush distributed dist;")
assert int(node1.query("select count() from dist_local where c2 = 'B'")) == 4
assert int(node2.query("select count() from dist_local where c2 = 'B'")) == 4
assert int(node3.query("select count() from dist_local where c2 = 'B'")) == 4
# Delete node2
node1.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config1)
node1.query("SYSTEM RELOAD CONFIG;")
node2.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config1)
node2.query("SYSTEM RELOAD CONFIG;")
node3.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config1)
node3.query("SYSTEM RELOAD CONFIG;")
node1.query("insert into dist select number,'C' from system.numbers limit 10;")
node1.query("system flush distributed dist;")
assert int(node1.query("select count() from dist_local where c2 = 'C'")) == 5
assert int(node2.query("select count() from dist_local where c2 = 'C'")) == 0
assert int(node3.query("select count() from dist_local where c2 = 'C'")) == 5
def test_distributed_async_insert_with_replica(started_cluster):
node1.query(
"insert into replica_dist select number,'A' from system.numbers limit 10;"
)
node1.query("system flush distributed replica_dist;")
node2_res = int(
node2.query("select count() from replica_dist_local where c2 = 'A'")
)
node3_res = int(
node3.query("select count() from replica_dist_local where c2 = 'A'")
)
assert (
int(node1.query("select count() from replica_dist_local where c2 = 'A'")) == 5
)
assert (node2_res == 0 and node3_res == 5) or (node2_res == 5 and node3_res == 0)
# Delete node2
node1.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config2)
node1.query("SYSTEM RELOAD CONFIG;")
node2.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config2)
node2.query("SYSTEM RELOAD CONFIG;")
node3.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config2)
node3.query("SYSTEM RELOAD CONFIG;")
node1.query(
"insert into replica_dist select number,'B' from system.numbers limit 10;"
)
node1.query("system flush distributed replica_dist;")
assert (
int(node1.query("select count() from replica_dist_local where c2 = 'B'")) == 5
)
assert (
int(node2.query("select count() from replica_dist_local where c2 = 'B'")) == 0
)
assert (
int(node3.query("select count() from replica_dist_local where c2 = 'B'")) == 5
)
# Add node2
node1.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config1)
node1.query("SYSTEM RELOAD CONFIG;")
node2.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config1)
node2.query("SYSTEM RELOAD CONFIG;")
node3.replace_config("/etc/clickhouse-server/config.d/remote_servers.xml", config1)
node3.query("SYSTEM RELOAD CONFIG;")
node1.query(
"insert into replica_dist select number,'C' from system.numbers limit 10;"
)
node1.query("system flush distributed replica_dist;")
node2_res = int(
node2.query("select count() from replica_dist_local where c2 = 'C'")
)
node3_res = int(
node3.query("select count() from replica_dist_local where c2 = 'C'")
)
assert (
int(node1.query("select count() from replica_dist_local where c2 = 'C'")) == 5
)
assert (node2_res == 0 and node3_res == 5) or (node2_res == 5 and node3_res == 0)

View File

@ -0,0 +1,49 @@
<clickhouse>
<listen_try>true</listen_try>
<listen_host>::</listen_host>
<listen_host>0.0.0.0</listen_host>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
<errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
</logger>
<keeper_server>
<tcp_port>2181</tcp_port>
<availability_zone>
<value>az-zoo1</value>
</availability_zone>
<server_id>1</server_id>
<coordination_settings>
<operation_timeout_ms>10000</operation_timeout_ms>
<session_timeout_ms>15000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
<force_sync>false</force_sync>
<election_timeout_lower_bound_ms>2000</election_timeout_lower_bound_ms>
<election_timeout_upper_bound_ms>4000</election_timeout_upper_bound_ms>
<max_memory_usage_soft_limit>200000000</max_memory_usage_soft_limit>
<async_replication>1</async_replication>
</coordination_settings>
<raft_configuration>
<server>
<id>1</id>
<hostname>zoo1</hostname>
<port>9444</port>
</server>
<server>
<id>2</id>
<hostname>zoo2</hostname>
<port>9444</port>
</server>
<server>
<id>3</id>
<hostname>zoo3</hostname>
<port>9444</port>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>

View File

@ -0,0 +1,50 @@
<clickhouse>
<listen_try>true</listen_try>
<listen_host>::</listen_host>
<listen_host>0.0.0.0</listen_host>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
<errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
</logger>
<keeper_server>
<tcp_port>2181</tcp_port>
<server_id>2</server_id>
<availability_zone>
<value>az-zoo2</value>
<enable_auto_detection_on_cloud>1</enable_auto_detection_on_cloud>
</availability_zone>
<coordination_settings>
<operation_timeout_ms>10000</operation_timeout_ms>
<session_timeout_ms>15000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
<force_sync>false</force_sync>
<election_timeout_lower_bound_ms>2000</election_timeout_lower_bound_ms>
<election_timeout_upper_bound_ms>4000</election_timeout_upper_bound_ms>
<max_memory_usage_soft_limit>20000000</max_memory_usage_soft_limit>
<async_replication>1</async_replication>
</coordination_settings>
<raft_configuration>
<server>
<id>1</id>
<hostname>zoo1</hostname>
<port>9444</port>
</server>
<server>
<id>2</id>
<hostname>zoo2</hostname>
<port>9444</port>
</server>
<server>
<id>3</id>
<hostname>zoo3</hostname>
<port>9444</port>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>

View File

@ -0,0 +1,46 @@
<clickhouse>
<listen_try>true</listen_try>
<listen_host>::</listen_host>
<listen_host>0.0.0.0</listen_host>
<logger>
<level>trace</level>
<log>/var/log/clickhouse-keeper/clickhouse-keeper.log</log>
<errorlog>/var/log/clickhouse-keeper/clickhouse-keeper.err.log</errorlog>
</logger>
<keeper_server>
<tcp_port>2181</tcp_port>
<server_id>3</server_id>
<coordination_settings>
<operation_timeout_ms>10000</operation_timeout_ms>
<session_timeout_ms>15000</session_timeout_ms>
<raft_logs_level>trace</raft_logs_level>
<force_sync>false</force_sync>
<election_timeout_lower_bound_ms>2000</election_timeout_lower_bound_ms>
<election_timeout_upper_bound_ms>4000</election_timeout_upper_bound_ms>
<max_memory_usage_soft_limit>20000000</max_memory_usage_soft_limit>
<async_replication>1</async_replication>
</coordination_settings>
<raft_configuration>
<server>
<id>1</id>
<hostname>zoo1</hostname>
<port>9444</port>
</server>
<server>
<id>2</id>
<hostname>zoo2</hostname>
<port>9444</port>
</server>
<server>
<id>3</id>
<hostname>zoo3</hostname>
<port>9444</port>
</server>
</raft_configuration>
</keeper_server>
</clickhouse>

View File

@ -0,0 +1,63 @@
#!/usr/bin/env python3
import random
import string
import pytest
from helpers.cluster import ClickHouseCluster
from helpers import keeper_utils
from kazoo.client import KazooClient, KazooState
from kazoo.exceptions import ConnectionLoss
cluster = ClickHouseCluster(__file__, keeper_config_dir="configs/")
# ClickHouse itself will use the external ZooKeeper
node = cluster.add_instance(
"node",
stay_alive=True,
with_zookeeper=True,
)
def random_string(length):
return "".join(random.choices(string.ascii_lowercase + string.digits, k=length))
def get_connection_zk(nodename, timeout=30.0):
_fake_zk_instance = KazooClient(
hosts=cluster.get_instance_ip(nodename) + ":2181", timeout=timeout
)
_fake_zk_instance.start()
return _fake_zk_instance
@pytest.fixture(scope="module")
def started_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def test_soft_limit_create(started_cluster):
started_cluster.wait_zookeeper_to_start()
try:
node_zk = get_connection_zk("zoo1")
loop_time = 100000
node_zk.create("/test_soft_limit", b"abc")
for i in range(loop_time):
node_zk.create(
"/test_soft_limit/node_" + str(i), random_string(1000).encode()
)
except ConnectionLoss:
txn = node_zk.transaction()
for i in range(10):
txn.delete("/test_soft_limit/node_" + str(i))
txn.create("/test_soft_limit/node_1000001" + str(i), b"abcde")
txn.commit()
return
raise Exception("all records were inserted but no error occurred")

View File

@ -33,6 +33,7 @@ from helpers.postgres_utility import (
postgres_table_template_3,
postgres_table_template_4,
postgres_table_template_5,
postgres_table_template_6,
)
from helpers.postgres_utility import queries
@ -623,30 +624,77 @@ def test_database_with_multiple_non_default_schemas_2(started_cluster):
def test_table_override(started_cluster):
table_name = "table_override"
materialized_database = "test_database"
pg_manager.create_postgres_table(table_name, template=postgres_table_template_5)
pg_manager.create_postgres_table(table_name, template=postgres_table_template_6)
instance.query(
f"create table {table_name}(key Int32, value UUID) engine = PostgreSQL (postgres1, table={table_name})"
f"insert into postgres_database.{table_name} select number, 'test' from numbers(10)"
)
instance.query(
f"insert into {table_name} select number, generateUUIDv4() from numbers(10)"
)
table_overrides = f" TABLE OVERRIDE {table_name} (COLUMNS (key Int32, value UUID) PARTITION BY key)"
table_overrides = f" TABLE OVERRIDE {table_name} (COLUMNS (key Int32, value String) PARTITION BY key)"
pg_manager.create_materialized_db(
ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
settings=[f"materialized_postgresql_tables_list = '{table_name}'"],
materialized_database=materialized_database,
table_overrides=table_overrides,
)
assert_nested_table_is_created(instance, table_name, materialized_database)
result = instance.query(f"show create table {materialized_database}.{table_name}")
print(result)
expected = "CREATE TABLE test_database.table_override\\n(\\n `key` Int32,\\n `value` UUID,\\n `_sign` Int8() MATERIALIZED 1,\\n `_version` UInt64() MATERIALIZED 1\\n)\\nENGINE = ReplacingMergeTree(_version)\\nPARTITION BY key\\nORDER BY tuple(key)"
assert result.strip() == expected
time.sleep(5)
query = f"select * from {materialized_database}.{table_name} order by key"
expected = instance.query(f"select * from {table_name} order by key")
instance.query(f"drop table {table_name} sync")
assert_eq_with_retry(instance, query, expected)
check_tables_are_synchronized(
instance, table_name, postgres_database=pg_manager.get_default_database()
)
assert 10 == int(
instance.query(f"SELECT count() FROM {materialized_database}.{table_name}")
)
expected = "CREATE TABLE test_database.table_override\\n(\\n `key` Int32,\\n `value` String,\\n `_sign` Int8() MATERIALIZED 1,\\n `_version` UInt64() MATERIALIZED 1\\n)\\nENGINE = ReplacingMergeTree(_version)\\nPARTITION BY key\\nORDER BY tuple(key)"
assert (
expected
== instance.query(
f"show create table {materialized_database}.{table_name}"
).strip()
)
assert (
"test"
== instance.query(
f"SELECT value FROM {materialized_database}.{table_name} WHERE key = 2"
).strip()
)
conn = get_postgres_conn(
ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
database_name="postgres_database",
database=True,
auto_commit=True,
)
cursor = conn.cursor()
cursor.execute(f"SELECT count(*) FROM {table_name}")
assert 10 == cursor.fetchall()[0][0]
pg_manager.execute(f"UPDATE {table_name} SET value='kek' WHERE key=2")
cursor.execute(f"SELECT value FROM {table_name} WHERE key=2")
assert "kek" == cursor.fetchall()[0][0]
pg_manager.execute(f"DELETE FROM {table_name} WHERE key=2")
cursor.execute(f"SELECT count(*) FROM {table_name}")
assert 9 == cursor.fetchall()[0][0]
conn.close()
check_tables_are_synchronized(
instance, table_name, postgres_database=pg_manager.get_default_database()
)
assert (
""
== instance.query(
f"SELECT value FROM {materialized_database}.{table_name} WHERE key = 2"
).strip()
)
def test_materialized_view(started_cluster):
@ -810,6 +858,63 @@ def test_replica_consumer(started_cluster):
pg_manager_instance2.clear()
def test_bad_connection_options(started_cluster):
table = "test_bad_connection_options"
pg_manager.create_postgres_table(table)
instance.query(
f"INSERT INTO postgres_database.{table} SELECT number, number from numbers(0, 50)"
)
pg_manager.create_materialized_db(
ip=started_cluster.postgres_ip,
port=started_cluster.postgres_port,
settings=[
f"materialized_postgresql_tables_list = '{table}'",
"materialized_postgresql_backoff_min_ms = 100",
"materialized_postgresql_backoff_max_ms = 100",
],
user="postrges",
password="kek",
)
instance.wait_for_log_line('role "postrges" does not exist')
assert instance.contains_in_log(
"<Error> void DB::DatabaseMaterializedPostgreSQL::startSynchronization(): std::exception. Code: 1001, type: pqxx::broken_connection"
)
assert "test_database" in instance.query("SHOW DATABASES")
assert "" == instance.query("show tables from test_database").strip()
pg_manager.drop_materialized_db("test_database")
def test_failed_load_from_snapshot(started_cluster):
if instance.is_built_with_sanitizer() or instance.is_debug_build():
pytest.skip(
"Sanitizers and debug mode are skipped, because this test thrown logical error"
)
table = "failed_load"
pg_manager.create_postgres_table(
table,
template="""
CREATE TABLE IF NOT EXISTS "{}" (
key text NOT NULL, value text[], PRIMARY KEY(key))
""",
)
instance.query(
f"INSERT INTO postgres_database.{table} SELECT number, [1, 2] from numbers(0, 1000000)"
)
# Create a table with a wrong table structure
assert "Could not convert string to i" in instance.query_and_get_error(
f"""
SET allow_experimental_materialized_postgresql_table=1;
CREATE TABLE {table} (a Int32, b Int32) ENGINE=MaterializedPostgreSQL('{started_cluster.postgres_ip}:{started_cluster.postgres_port}', 'postgres_database', '{table}', 'postgres', 'mysecretpassword') ORDER BY a
"""
)
if __name__ == "__main__":
cluster.start()
input("Cluster created, press any key to destroy...")

View File

@ -77,3 +77,11 @@ def test_prometheus_endpoint(start_cluster):
metrics_dict = get_and_check_metrics(10)
assert metrics_dict["ClickHouseProfileEvents_Query"] >= prev_query_count + 3
node.query_and_get_error(
"SELECT throwIf(1, 'test', toInt16(42)) SETTINGS allow_custom_error_code_in_throwif=1"
)
metrics_dict = get_and_check_metrics(10)
assert metrics_dict["ClickHouseErrorMetric_NUMBER_OF_ARGUMENTS_DOESNT_MATCH"] >= 1
assert metrics_dict["ClickHouseErrorMetric_ALL"] >= 1

View File

@ -1,4 +1,4 @@
WITH round(exp(number), 6) AS x, x > 0xFFFFFFFFFFFFFFFF ? 0xFFFFFFFFFFFFFFFF : toUInt64(x) AS y, x > 0x7FFFFFFF ? 0x7FFFFFFF : toInt32(x) AS z
SELECT formatReadableSize(x), formatReadableSize(y), formatReadableSize(z)
SELECT FORMAT_BYTES(x), format_bytes(y), formatReadableSize(z)
FROM system.numbers
LIMIT 70;

View File

@ -54,3 +54,14 @@ SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS allow_experime
2 0
0 2
0 3
SELECT * FROM (SELECT 1 as a) as t1 INNER JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL;
SELECT * FROM (SELECT 1 as a) as t1 LEFT JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL;
1 ('',0)
SELECT * FROM (SELECT 1 as a) as t1 RIGHT JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL;
0 ('b',256)
SELECT * FROM (SELECT 1 as a) as t1 FULL JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL;
1 ('',0)
0 ('b',256)
SELECT * FROM (SELECT 1 as a) as t1 SEMI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL;
SELECT * FROM (SELECT 1 as a) as t1 ANTI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL;
1 ('',0)

View File

@ -81,6 +81,13 @@ SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS allow_experime
SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS allow_experimental_analyzer = 1;
SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 2 SETTINGS allow_experimental_analyzer = 1;
SELECT * FROM (SELECT 1 as a) as t1 INNER JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL;
SELECT * FROM (SELECT 1 as a) as t1 LEFT JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL;
SELECT * FROM (SELECT 1 as a) as t1 RIGHT JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL;
SELECT * FROM (SELECT 1 as a) as t1 FULL JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL;
SELECT * FROM (SELECT 1 as a) as t1 SEMI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL;
SELECT * FROM (SELECT 1 as a) as t1 ANTI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL;
-- { echoOff }
DROP TABLE IF EXISTS t1;

View File

@ -5,4 +5,5 @@ set allow_experimental_analyzer=1;
create table t (a UInt64, b UInt64) engine=MergeTree() order by (a);
insert into t select number % 2, number from numbers(10);
set optimize_distinct_in_order=1;
select trimBoth(explain) from (explain pipeline select distinct a from t) where explain like '%InOrder%';

View File

@ -1 +1,3 @@
SELECT sum(A) FROM (SELECT multiIf(1, 1, NULL) as A);
SELECT sum(multiIf(number = NULL, 65536, 3)) FROM numbers(3);
SELECT multiIf(NULL, 65536 :: UInt32, 3 :: Int32);

View File

@ -0,0 +1,11 @@
SELECT
sum(c),
toInt32((h - null::Nullable(DateTime)) / 3600) + 1 AS a
FROM
(
SELECT count() AS c, h
FROM ( SELECT now() AS h )
WHERE toInt32((h - null::Nullable(DateTime)) / 3600) + 1 = 1
GROUP BY h
)
GROUP BY a settings min_count_to_compile_expression = 0;

View File

@ -1,7 +1,17 @@
[]
[(1,10)]
[(1,10),(10,70)]
[(1,10),(3,15),(5,40),(10,70)]
[(0.02,0.16),(0.06,0.73),(0.08,0.88),(0.09,0),(0.11,0.31),(0.15,0.09),(0.19,0.27),(0.2,0.24),(0.21,0.46),(0.23,0.23),(0.25,0.34),(0.29,0.16),(0.36,0.99),(0.37,0.86),(0.38,0.75),(0.39,0.86),(0.41,0.63),(0.45,0.86),(0.47,0.06),(0.98,0.09)]
[('2023-01-14 00:00:00.000',35.96528042030847),('2023-03-15 00:00:00.000',98.77709508458238),('2023-06-05 00:00:00.000',8.107958052612418),('2023-09-29 00:00:00.000',80.05338096818797),('2023-12-31 00:00:00.000',98.52375935588333)]
[('2023-01-14 00:00:00.000',35.96528042030847),('2023-03-15 00:00:00.000',98.77709508458238),('2023-06-05 00:00:00.000',8.107958052612418),('2023-09-29 00:00:00.000',80.05338096818797),('2023-12-31 00:00:00.000',98.52375935588333)]
[(1,10),(3,15),(9,55),(10,70)]
[(0.02,0.16),(0.08,0.88),(0.09,0),(0.15,0.09),(0.21,0.46),(0.23,0.23),(0.29,0.16),(0.37,0.86),(0.39,0.86),(0.47,0.06),(0.48,0.57),(0.52,0.11),(0.6,0.64),(0.63,0.25),(0.7,0.25),(0.73,0.95),(0.83,0.67),(0.87,0.15),(0.91,0.62),(0.98,0.09)]
[('2023-01-14 00:00:00.000',35.96528042030847),('2023-03-15 00:00:00.000',98.77709508458238),('2023-06-05 00:00:00.000',8.107958052612418),('2023-12-18 00:00:00.000',12.832032764204616),('2023-12-31 00:00:00.000',98.52375935588333)]
[('2023-01-14 00:00:00.000',35.96528042030847),('2023-03-15 00:00:00.000',98.77709508458238),('2023-06-05 00:00:00.000',8.107958052612418),('2023-12-18 00:00:00.000',12.832032764204616),('2023-12-31 00:00:00.000',98.52375935588333)]
(9908,908) 9908 10
(9918,918) 9918 10
(9928,928) 9928 10
(9938,938) 9938 10
(9948,948) 9948 10
(9958,958) 9958 10
(9968,968) 9968 10
(9978,978) 9978 10
(9988,988) 9988 10
(9999,999) 9999 11

View File

@ -44,4 +44,20 @@ select largestTriangleThreeBuckets(5)(x, y) from largestTriangleThreeBucketsTest
select lttb(5)(x, y) from largestTriangleThreeBucketsTestDateTime64Float64;
drop table largestTriangleThreeBucketsTestDateTime64Float64;
drop table largestTriangleThreeBucketsTestDateTime64Float64;
CREATE TABLE largestTriangleTreeBucketsBucketSizeTest
(
x UInt32,
y UInt32
) ENGINE = MergeTree ORDER BY x;
INSERT INTO largestTriangleTreeBucketsBucketSizeTest (x, y) SELECT (number + 1) AS x, (x % 1000) AS y FROM numbers(9999);
SELECT
arrayJoin(lttb(1000)(x, y)) AS point,
tupleElement(point, 1) AS point_x,
point_x - neighbor(point_x, -1) AS point_x_diff_with_previous_row
FROM largestTriangleTreeBucketsBucketSizeTest LIMIT 990, 10;
DROP TABLE largestTriangleTreeBucketsBucketSizeTest;

View File

@ -0,0 +1,55 @@
Expression ((Projection + Before ORDER BY))
Filter (WHERE)
ReadFromMergeTree (02911_support_alias_column_in_indices.test1)
Indexes:
PrimaryKey
Keys:
c
Condition: (plus(c, 1) in [11, +Inf))
Parts: 1/2
Granules: 1/2
Skip
Name: i
Description: minmax GRANULARITY 1
Parts: 1/1
Granules: 1/1
Expression ((Project names + Projection))
Filter ((WHERE + Change column names to column identifiers))
ReadFromMergeTree (02911_support_alias_column_in_indices.test1)
Indexes:
PrimaryKey
Keys:
c
Condition: (_CAST(plus(c, 1), \'UInt64\') in [11, +Inf))
Parts: 1/2
Granules: 1/2
Skip
Name: i
Description: minmax GRANULARITY 1
Parts: 1/1
Granules: 1/1
Expression ((Projection + Before ORDER BY))
Filter (WHERE)
ReadFromMergeTree (02911_support_alias_column_in_indices.test2)
Indexes:
PrimaryKey
Keys:
c
Condition: (plus(plus(c, 1), 1) in [16, +Inf))
Parts: 1/2
Granules: 1/2
Skip
Name: i
Description: minmax GRANULARITY 1
Parts: 1/1
Granules: 1/1
Expression ((Project names + Projection))
Filter ((WHERE + Change column names to column identifiers))
ReadFromMergeTree (02911_support_alias_column_in_indices.test2)
Indexes:
PrimaryKey
Keys:
c
Condition: (_CAST(plus(_CAST(plus(c, 1), \'UInt64\'), 1), \'UInt64\') in [16, +Inf))
Parts: 1/2
Granules: 1/2

View File

@ -0,0 +1,40 @@
-- Tags: no-parallel
drop database if exists 02911_support_alias_column_in_indices;
create database 02911_support_alias_column_in_indices;
use 02911_support_alias_column_in_indices;
create table test1
(
c UInt32,
a alias c + 1,
index i (a) type minmax
)
engine = MergeTree
order by c
settings index_granularity = 8192, min_index_granularity_bytes = 1024, index_granularity_bytes = 10485760; -- default settings, prevent randomization in tests
insert into test1 select * from numbers(10);
insert into test1 select * from numbers(11, 20);
explain indexes = 1 select * from test1 where a > 10 settings allow_experimental_analyzer = 0;
explain indexes = 1 select * from test1 where a > 10 settings allow_experimental_analyzer = 1;
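-- test2 chains two alias columns; the skip index is defined on the second-level alias a2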
create table test2
(
c UInt32,
a1 alias c + 1,
a2 alias a1 + 1,
index i (a2) type minmax
)
engine = MergeTree
order by c
settings index_granularity = 8192, min_index_granularity_bytes = 1024, index_granularity_bytes = 10485760; -- default settings, prevent randomization in tests
insert into test2 select * from numbers(10);
insert into test2 select * from numbers(11, 20);
explain indexes = 1 select * from test2 where a2 > 15 settings allow_experimental_analyzer = 0;
explain indexes = 1 select * from test2 where a2 > 15 settings allow_experimental_analyzer = 1; -- buggy, analyzer does not pick up index i
drop database 02911_support_alias_column_in_indices;

View File

@ -0,0 +1,3 @@
0
ACCESS_DENIED
ACCESS_DENIED

View File

@ -0,0 +1,37 @@
#!/usr/bin/env bash
CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh
username="user_${CLICKHOUSE_TEST_UNIQUE_NAME}"
dictname="dict_${CLICKHOUSE_TEST_UNIQUE_NAME}"
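# the user below is only granted CREATE TEMPORARY TABLE, so both dictionary() and dictGet must fail for it with ACCESS_DENIED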
${CLICKHOUSE_CLIENT} -nm --query "
CREATE DICTIONARY IF NOT EXISTS ${dictname}
(
id UInt64,
value UInt64
)
PRIMARY KEY id
SOURCE(NULL())
LAYOUT(FLAT())
LIFETIME(MIN 0 MAX 1000);
CREATE USER IF NOT EXISTS ${username} NOT IDENTIFIED;
GRANT CREATE TEMPORARY TABLE ON *.* to ${username};
SELECT * FROM dictionary(${dictname});
SELECT dictGet(${dictname}, 'value', 1);
"
$CLICKHOUSE_CLIENT -nm --user="${username}" --query "
SELECT * FROM dictionary(${dictname});
" 2>&1 | grep -o ACCESS_DENIED | uniq
$CLICKHOUSE_CLIENT -nm --user="${username}" --query "
SELECT dictGet(${dictname}, 'value', 1);
" 2>&1 | grep -o ACCESS_DENIED | uniq
${CLICKHOUSE_CLIENT} -nm --query "
DROP DICTIONARY IF EXISTS ${dictname};
DROP USER IF EXISTS ${username};
"

View File

@ -0,0 +1,2 @@
3 3
42 42

View File

@ -0,0 +1,30 @@
CREATE TABLE raw
(
name String,
num String
) ENGINE = MergeTree
ORDER BY (name);
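-- num is MATERIALIZED from the EPHEMERAL column, so the materialized view only needs to produce num_ephemeral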
CREATE TABLE parsed_eph
(
name String,
num_ephemeral UInt32 EPHEMERAL,
num UInt32 MATERIALIZED num_ephemeral,
) ENGINE = MergeTree
ORDER BY (name);
CREATE MATERIALIZED VIEW parse_mv_eph
TO parsed_eph
AS
SELECT
name,
toUInt32(num) as num_ephemeral
FROM raw;
INSERT INTO raw VALUES ('3', '3'), ('42', '42');
SELECT name, num FROM parsed_eph;
DROP VIEW parse_mv_eph;
DROP TABLE parsed_eph;
DROP TABLE raw;

View File

@ -0,0 +1,13 @@
-- negative tests
-- const UInt*
Uk
XMbT
86Rf07
Td1EnWQo
XMbT
-- non-const UInt*
Uk
XMbT
86Rf07
Td1EnWQo
XMbT

View File

@ -0,0 +1,21 @@
-- Tags: no-fasttest
SET allow_suspicious_low_cardinality_types = 1;
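-- sqid() encodes the given unsigned integers into a short sqids id string (backed by the contrib/sqids-cpp library)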
SELECT '-- negative tests';
SELECT sqid(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
SELECT sqid('1'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
SELECT '-- const UInt*';
SELECT sqid(1);
SELECT sqid(1, 2);
SELECT sqid(1, 2, 3);
SELECT sqid(1::UInt8, 2::UInt16, 3::UInt32, 4::UInt64);
SELECT sqid(toNullable(1), toLowCardinality(2));
SELECT '-- non-const UInt*';
SELECT sqid(materialize(1));
SELECT sqid(materialize(1), materialize(2));
SELECT sqid(materialize(1), materialize(2), materialize(3));
SELECT sqid(materialize(1::UInt8), materialize(2::UInt16), materialize(3::UInt32), materialize(4::UInt64));
SELECT sqid(toNullable(materialize(1)), toLowCardinality(materialize(2)));

View File

@ -1256,7 +1256,7 @@ class S3URI:
host = parsed_url.netloc
if host.find(".s3") == -1:
return False
self.bucket, new_host = path.split(".s3", maxsplit=1)
self.bucket, new_host = host.split(".s3", maxsplit=1)
if len(self.bucket) < 3:
return False
new_host = "s3" + new_host

View File

@ -872,7 +872,6 @@ TCPConnection
TCPThreads
TDigest
TINYINT
TKSV
TLSv
TPCH
TSDB
@ -1789,6 +1788,7 @@ logTrace
logagent
loghouse
london
lookups
lowcardinality
lowerUTF
lowercased
@ -2275,6 +2275,7 @@ splitByRegexp
splitByString
splitByWhitespace
splitby
sqid
sql
sqlalchemy
sqlinsert

View File

@ -1,3 +1,4 @@
v23.11.1.2711-stable 2023-12-06
v23.10.5.20-stable 2023-11-25
v23.10.4.25-stable 2023-11-17
v23.10.3.5-stable 2023-11-10
