Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-12-03 13:02:00 +00:00)

Commit 9a71bce1f5: Merge branch 'ClickHouse:master' into gcddelta-codec
@@ -13,18 +13,14 @@ The following versions of ClickHouse server are currently being supported with security updates:
 | Version | Supported |
 |:-|:-|
+| 23.8 | ✔️ |
 | 23.7 | ✔️ |
 | 23.6 | ✔️ |
-| 23.5 | ✔️ |
+| 23.5 | ❌ |
 | 23.4 | ❌ |
 | 23.3 | ✔️ |
 | 23.2 | ❌ |
 | 23.1 | ❌ |
-| 22.12 | ❌ |
-| 22.11 | ❌ |
-| 22.10 | ❌ |
-| 22.9 | ❌ |
-| 22.8 | ✔️ |
 | 22.* | ❌ |
 | 21.* | ❌ |
 | 20.* | ❌ |
@@ -2,11 +2,11 @@
 # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54477)
+SET(VERSION_REVISION 54478)
 SET(VERSION_MAJOR 23)
-SET(VERSION_MINOR 8)
+SET(VERSION_MINOR 9)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH a70127baecc451f1f7073bad7b6198f6703441d8)
-SET(VERSION_DESCRIBE v23.8.1.1-testing)
-SET(VERSION_STRING 23.8.1.1)
+SET(VERSION_GITHASH ebc7d9a9f3b40be89e0b3e738b35d394aabeea3e)
+SET(VERSION_DESCRIBE v23.9.1.1-testing)
+SET(VERSION_STRING 23.9.1.1)
 # end of autochange
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 esac
 
 ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
-ARG VERSION="23.7.5.30"
+ARG VERSION="23.8.1.2992"
 ARG PACKAGES="clickhouse-keeper"
 
 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.7.5.30"
+ARG VERSION="23.8.1.2992"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.7.5.30"
+ARG VERSION="23.8.1.2992"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 # set non-empty deb_location_url url to create a docker image
@@ -24,7 +24,7 @@ function __set_connection_args
     # It's impossible to use generous $CONNECTION_ARGS string, it's unsafe from word splitting perspective.
     # That's why we must stick to the generated option
     CONNECTION_ARGS=(
-        --receive_timeout=10 --send_timeout=10 --secure
+        --receive_timeout=45 --send_timeout=45 --secure
         --user "${CLICKHOUSE_CI_LOGS_USER}" --host "${CLICKHOUSE_CI_LOGS_HOST}"
         --password "${CLICKHOUSE_CI_LOGS_PASSWORD}"
     )
@@ -119,7 +119,7 @@ function setup_logs_replication
     clickhouse-client --query "SYSTEM FLUSH LOGS"
 
     # It's doesn't make sense to try creating tables if SYNC fails
-    echo "SYSTEM SYNC DATABASE REPLICA default" | clickhouse-client --receive_timeout 180 "${CONNECTION_ARGS[@]}" || return 0
+    echo "SYSTEM SYNC DATABASE REPLICA default" | clickhouse-client "${CONNECTION_ARGS[@]}" || return 0
 
     # For each system log table:
     echo 'Create %_log tables'
@@ -144,7 +144,7 @@ function setup_logs_replication
         echo -e "Creating remote destination table ${table}_${hash} with statement:\n${statement}" >&2
 
         echo "$statement" | clickhouse-client --database_replicated_initial_query_timeout_sec=10 \
-            --distributed_ddl_task_timeout=10 --receive_timeout=10 --send_timeout=10 \
+            --distributed_ddl_task_timeout=30 \
             "${CONNECTION_ARGS[@]}" || continue
 
         echo "Creating table system.${table}_sender" >&2
docs/changelogs/v23.8.1.2992-lts.md (new file, 591 lines)
@@ -0,0 +1,591 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.8.1.2992-lts (ebc7d9a9f3b) FIXME as compared to v23.7.1.2470-stable (a70127baecc)

#### Backward Incompatible Change
* Deprecate the metadata cache feature. It is experimental and we have never used it. The feature is dangerous: [#51182](https://github.com/ClickHouse/ClickHouse/issues/51182). Remove the `system.merge_tree_metadata_cache` system table. The metadata cache is still available in this version but will be removed soon. This closes [#39197](https://github.com/ClickHouse/ClickHouse/issues/39197). [#51303](https://github.com/ClickHouse/ClickHouse/pull/51303) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* If a dynamic disk contains a name, it should be specified as `disk = disk(name = 'disk_name', ...)` in disk function arguments. In previous versions it could be specified as `disk = disk_<disk_name>(...)`, which is no longer supported; see the example after this list. [#52820](https://github.com/ClickHouse/ClickHouse/pull/52820) ([Kseniia Sumarokova](https://github.com/kssenii)).
* `clickhouse-benchmark` will establish connections in parallel when invoked with `--concurrency` more than one. Previously it was unusable if you ran it with 1000 concurrent connections from Europe to the US. Correct calculation of QPS for connections with high latency. Backward incompatible change: the option for JSON output of `clickhouse-benchmark` is removed. If you've used this option, you can also extract data from the `system.query_log` in JSON format as a workaround. [#53293](https://github.com/ClickHouse/ClickHouse/pull/53293) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The `microseconds` column is removed from the `system.text_log`, and the `milliseconds` column is removed from the `system.metric_log`, because they are redundant in the presence of the `event_time_microseconds` column. [#53601](https://github.com/ClickHouse/ClickHouse/pull/53601) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Changed ZooKeeper paths for storage `S3Queue` metadata. [#54137](https://github.com/ClickHouse/ClickHouse/pull/54137) ([Kseniia Sumarokova](https://github.com/kssenii)).
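Below is a minimal sketch of the new named-disk syntax from the entry above. The disk type and all parameter values here are illustrative, not taken from the PR:

```sql
-- Illustrative sketch of the 23.8 syntax: the disk's name is passed as the
-- named argument `name = ...` inside disk(); the old disk = disk_<disk_name>(...)
-- spelling is no longer supported. All parameter values are hypothetical.
CREATE TABLE example_table
(
    id UInt64
)
ENGINE = MergeTree
ORDER BY id
SETTINGS disk = disk(
    name = 'cached_disk',
    type = 'cache',
    disk = 'default',
    path = '/var/lib/clickhouse/disks/cached_disk/',
    max_size = '10Gi');
```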

#### New Feature
* Add column `ptr` to `system.trace_log` for `trace_type = 'MemorySample'`. This column contains the address of the allocation. Added function `flameGraph` which can build a flamegraph containing allocated and not-released memory. Reworking of [#38391](https://github.com/ClickHouse/ClickHouse/issues/38391). [#45322](https://github.com/ClickHouse/ClickHouse/pull/45322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add setting `rewrite_count_distinct_if_with_count_distinct_implementation` to rewrite `countDistinctIf` with `count_distinct_implementation`. Closes [#30642](https://github.com/ClickHouse/ClickHouse/issues/30642). [#46051](https://github.com/ClickHouse/ClickHouse/pull/46051) ([flynn](https://github.com/ucasfl)).
* Add new table engine `S3Queue` for streaming data import from S3. Closes [#37012](https://github.com/ClickHouse/ClickHouse/issues/37012). [#49086](https://github.com/ClickHouse/ClickHouse/pull/49086) ([s-kat](https://github.com/s-kat)).
* Added `SevenZipArchiveReader` and `TarArchiveReader`, the table function `file('path_to_archive :: filename')`, functional tests for it, and unit tests for `TarArchiveReader`/`SevenZipArchiveReader`. [#50321](https://github.com/ClickHouse/ClickHouse/pull/50321) ([nikitakeba](https://github.com/nikitakeba)).
* Added the `azureBlobStorageCluster` table function. The supported set of features is very similar to the table function `S3Cluster`. [#50795](https://github.com/ClickHouse/ClickHouse/pull/50795) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Allow using `cluster`, `clusterAllReplicas`, `remote`, `remoteRaw` and `remoteSecure` without a table name; see issue [#50808](https://github.com/ClickHouse/ClickHouse/issues/50808). [#50848](https://github.com/ClickHouse/ClickHouse/pull/50848) ([Yangkuan Liu](https://github.com/LiuYangkuan)).
* Added a system table to monitor Kafka consumers. [#50999](https://github.com/ClickHouse/ClickHouse/pull/50999) ([Ilya Golshtein](https://github.com/ilejn)).
* Added the `max_sessions_for_user` setting. [#51724](https://github.com/ClickHouse/ClickHouse/pull/51724) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* ClickHouse had no function to convert a UTC timestamp to a timestamp in another timezone in the way Spark does, so the functions `toUTCTimestamp`/`fromUTCTimestamp` were added to act the same as Spark's `to_utc_timestamp`/`from_utc_timestamp`. [#52117](https://github.com/ClickHouse/ClickHouse/pull/52117) ([KevinyhZou](https://github.com/KevinyhZou)).
* Add new functions `structureToCapnProtoSchema`/`structureToProtobufSchema` that convert ClickHouse table structure to CapnProto/Protobuf format schema. Allow to input/output data in CapnProto/Protobuf format without an external format schema, using the autogenerated schema from the table structure (controlled by settings `format_capn_proto_use_autogenerated_schema`/`format_protobuf_use_autogenerated_schema`). Allow to export the autogenerated schema during input/output using the setting `output_format_schema`. [#52278](https://github.com/ClickHouse/ClickHouse/pull/52278) ([Kruglov Pavel](https://github.com/Avogar)).
* A new field `query_cache_usage` in `system.query_log` now shows if and how the query cache was used. [#52384](https://github.com/ClickHouse/ClickHouse/pull/52384) ([Robert Schulze](https://github.com/rschu1ze)).
* Add new functions `startsWithUTF8` and `endsWithUTF8`. [#52555](https://github.com/ClickHouse/ClickHouse/pull/52555) ([李扬](https://github.com/taiyang-li)).
* Allow a variable number of columns in TSV/CustomSeparated/JSONCompactEachRow, and make schema inference work with a variable number of columns. Add settings `input_format_tsv_allow_variable_number_of_columns`, `input_format_custom_allow_variable_number_of_columns`, `input_format_json_compact_allow_variable_number_of_columns`. [#52692](https://github.com/ClickHouse/ClickHouse/pull/52692) ([Kruglov Pavel](https://github.com/Avogar)).
* Added `SYSTEM STOP/START PULLING REPLICATION LOG` queries (for testing `ReplicatedMergeTree`). [#52881](https://github.com/ClickHouse/ClickHouse/pull/52881) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Allow to execute constant non-deterministic functions in mutations on the initiator. [#53129](https://github.com/ClickHouse/ClickHouse/pull/53129) ([Anton Popov](https://github.com/CurtizJ)).
* Add input format `One` that doesn't read any data and always returns a single row with column `dummy` of type `UInt8` and value `0`, like `system.one`. It can be used together with the `_file`/`_path` virtual columns to list files in file/s3/url/hdfs/etc table functions without reading any data. [#53209](https://github.com/ClickHouse/ClickHouse/pull/53209) ([Kruglov Pavel](https://github.com/Avogar)).
* Add the `tupleConcat` function. Closes [#52759](https://github.com/ClickHouse/ClickHouse/issues/52759). [#53239](https://github.com/ClickHouse/ClickHouse/pull/53239) ([Nikolay Degterinsky](https://github.com/evillique)).
* Support the `TRUNCATE DATABASE` operation. [#53261](https://github.com/ClickHouse/ClickHouse/pull/53261) ([Bharat Nallan](https://github.com/bharatnc)).
* Add the `max_threads_for_indexes` setting to limit the number of threads used for primary key processing. [#53313](https://github.com/ClickHouse/ClickHouse/pull/53313) ([jorisgio](https://github.com/jorisgio)).
* Add experimental support for HNSW as an approximate nearest neighbor search method. [#53447](https://github.com/ClickHouse/ClickHouse/pull/53447) ([Davit Vardanyan](https://github.com/davvard)).
* Re-add SipHash keyed functions. [#53525](https://github.com/ClickHouse/ClickHouse/pull/53525) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* ([#52755](https://github.com/ClickHouse/ClickHouse/issues/52755), [#52895](https://github.com/ClickHouse/ClickHouse/issues/52895)) Added functions `arrayRotateLeft`, `arrayRotateRight`, `arrayShiftLeft`, `arrayShiftRight`; see the example after this list. [#53557](https://github.com/ClickHouse/ClickHouse/pull/53557) ([Mikhail Koviazin](https://github.com/mkmkme)).
* Add column `name` to `system.clusters` as an alias to `cluster`. [#53605](https://github.com/ClickHouse/ClickHouse/pull/53605) ([irenjj](https://github.com/irenjj)).
* The advanced dashboard now allows mass editing (save/load). [#53608](https://github.com/ClickHouse/ClickHouse/pull/53608) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add support for plural units. [#53641](https://github.com/ClickHouse/ClickHouse/pull/53641) ([irenjj](https://github.com/irenjj)).
* Support function `isNotDistinctFrom` in the JOIN ON section for null-safe comparison; ref [#53061](https://github.com/ClickHouse/ClickHouse/issues/53061). [#53755](https://github.com/ClickHouse/ClickHouse/pull/53755) ([vdimir](https://github.com/vdimir)).
* Added the "hide_in_preprocessed" attribute to ClickHouse's server configuration XML dialect. This is a mechanism to hide certain settings from appearing in preprocessed server configuration files. Useful e.g. for passwords or private keys that should not appear verbatim in files. [#53818](https://github.com/ClickHouse/ClickHouse/pull/53818) ([Roman Vasin](https://github.com/rvasin)).
* Added the server setting `validate_tcp_client_information`, which determines whether validation of client information is enabled when a query packet is received. [#53907](https://github.com/ClickHouse/ClickHouse/pull/53907) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
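As referenced above, a few illustrative queries for the new array rotation and shifting functions. The expected results are our reading of the function names and PR description, and may differ in edge cases:

```sql
-- Illustrative queries for the functions added in #53557.
SELECT arrayRotateLeft([1, 2, 3, 4, 5], 2);   -- expected [3, 4, 5, 1, 2]
SELECT arrayRotateRight([1, 2, 3, 4, 5], 2);  -- expected [4, 5, 1, 2, 3]
SELECT arrayShiftLeft([1, 2, 3, 4, 5], 2);    -- expected [3, 4, 5, 0, 0]
SELECT arrayShiftRight([1, 2, 3, 4, 5], 2);   -- expected [0, 0, 1, 2, 3]
```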

#### Performance Improvement
* Enable JIT compilation for AArch64, PowerPC, SystemZ, RISC-V. [#38217](https://github.com/ClickHouse/ClickHouse/pull/38217) ([Maksim Kita](https://github.com/kitaisreal)).
* This patch provides a method to deal with all the hash sets in parallel before the merge. [#50748](https://github.com/ClickHouse/ClickHouse/pull/50748) ([Jiebin Sun](https://github.com/jiebinn)).
* Optimize aggregation performance of a nullable string key when using `AggregationMethodSerialized`. [#51399](https://github.com/ClickHouse/ClickHouse/pull/51399) ([LiuNeng](https://github.com/liuneng1994)).
* The performance experiments of **SSB** on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) show that this change could bring an improvement of **8.5%** to the **geomean QPS** when the experimental analyzer is enabled. The details are shown below: ![image](https://github.com/ClickHouse/ClickHouse/assets/26588299/4e58bf8b-d276-408d-ad45-38c82d3cb918). [#52091](https://github.com/ClickHouse/ClickHouse/pull/52091) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Parquet filter pushdown. I.e. when reading Parquet files, row groups (chunks of the file) are skipped based on the WHERE condition and the min/max values in each column. In particular, if the file is roughly sorted by some column, queries that filter by a short range of that column will be much faster. [#52951](https://github.com/ClickHouse/ClickHouse/pull/52951) ([Michael Kolupaev](https://github.com/al13n321)).
* Optimize the merge in `UniqExactSet` if all hash sets are single-level. [#52973](https://github.com/ClickHouse/ClickHouse/pull/52973) ([Jiebin Sun](https://github.com/jiebinn)).
* StorageJoin: do not create a clone hash join with all columns. [#53046](https://github.com/ClickHouse/ClickHouse/pull/53046) ([Duc Canh Le](https://github.com/canhld94)).
* Optimize reading small row groups by batching them together in Parquet. Closes [#53069](https://github.com/ClickHouse/ClickHouse/issues/53069). [#53281](https://github.com/ClickHouse/ClickHouse/pull/53281) ([Kruglov Pavel](https://github.com/Avogar)).
* Implement a native ORC input format without Arrow to improve performance. [#53324](https://github.com/ClickHouse/ClickHouse/pull/53324) ([李扬](https://github.com/taiyang-li)).
* The dashboard will tell the server to compress the data, which is useful for large time frames over slow internet connections. For example, one chart with 86400 points can be 1.5 MB uncompressed and 60 KB compressed with `br`. [#53569](https://github.com/ClickHouse/ClickHouse/pull/53569) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Optimize `count` from files in most input formats; see the example after this list. Closes [#44334](https://github.com/ClickHouse/ClickHouse/issues/44334). [#53637](https://github.com/ClickHouse/ClickHouse/pull/53637) ([Kruglov Pavel](https://github.com/Avogar)).
* Better utilization of the thread pool for BACKUPs and RESTOREs. [#53649](https://github.com/ClickHouse/ClickHouse/pull/53649) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Remove a useless and slow performance check on the client. [#53695](https://github.com/ClickHouse/ClickHouse/pull/53695) ([Raúl Marín](https://github.com/Algunenano)).
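A hedged sketch of the `count` optimization referenced above: for formats that carry row-count metadata (such as Parquet), `SELECT count()` over a file can be answered without decoding the columns. The file path is hypothetical:

```sql
-- Hypothetical example: count() over a Parquet file can be served from the
-- file's metadata instead of reading column data.
SELECT count() FROM file('events.parquet');

-- The row-count cache from the follow-up #53692 can be toggled per query:
SELECT count() FROM file('events.parquet')
SETTINGS use_cache_for_count_from_files = 1;
```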

#### Improvement
* Bloom filter indices are pruned so that they correlate with the cardinality of the data set they are tracking. [#35102](https://github.com/ClickHouse/ClickHouse/pull/35102) ([Anton Kozlov](https://github.com/tonickkozlov)).
* Add the `stderr_reaction` configuration/setting to control the reaction (none, log or throw) when an external command's stderr has data. This helps make debugging external commands easier. [#43210](https://github.com/ClickHouse/ClickHouse/pull/43210) ([Amos Bird](https://github.com/amosbird)).
* Addresses https://github.com/ClickHouse/ClickHouse/issues/48720. @kgoralski helped with some thoughts about the `system.merges` part. [#48990](https://github.com/ClickHouse/ClickHouse/pull/48990) ([Jianfei Hu](https://github.com/incfly)).
* If a dictionary is created with a complex key, automatically choose the "complex key" layout variant. [#49587](https://github.com/ClickHouse/ClickHouse/pull/49587) ([xiebin](https://github.com/xbthink)).
* Add setting `use_concurrency_control` for better testing of the new concurrency control feature. [#49618](https://github.com/ClickHouse/ClickHouse/pull/49618) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added suggestions for mistyped names of databases and tables. [#49801](https://github.com/ClickHouse/ClickHouse/pull/49801) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* While reading small files from HDFS via Gluten, we found that it costs more time compared to querying directly via Spark. [#50063](https://github.com/ClickHouse/ClickHouse/pull/50063) ([KevinyhZou](https://github.com/KevinyhZou)).
* Reduce the number of worthless error logs after session expiration. [#50171](https://github.com/ClickHouse/ClickHouse/pull/50171) ([helifu](https://github.com/helifu)).
* Introduce fallback ZooKeeper sessions which are time-bound. Fixed the `index` column in `system.zookeeper_connection` for DNS addresses. [#50424](https://github.com/ClickHouse/ClickHouse/pull/50424) ([Anton Kozlov](https://github.com/tonickkozlov)).
* Add the ability to log when `max_partitions_per_insert_block` is reached. [#50948](https://github.com/ClickHouse/ClickHouse/pull/50948) ([Sean Haynes](https://github.com/seandhaynes)).
* Added a bunch of custom commands (mostly to make ClickHouse debugging easier). [#51117](https://github.com/ClickHouse/ClickHouse/pull/51117) ([pufit](https://github.com/pufit)).
* Updated the check for `connection_string`, as a connection string with SAS does not always begin with `DefaultEndPoint`, and updated the connection URL to include the SAS token after adding the container to the URL. [#51141](https://github.com/ClickHouse/ClickHouse/pull/51141) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix the description for filtering sets in the `full_sorting_merge` join. [#51329](https://github.com/ClickHouse/ClickHouse/pull/51329) ([Tanay Tummalapalli](https://github.com/ttanay)).
* The sizes of the (index) uncompressed/mark, mmap and query caches can now be configured dynamically at runtime. [#51446](https://github.com/ClickHouse/ClickHouse/pull/51446) ([Robert Schulze](https://github.com/rschu1ze)).
* Fixed memory consumption in `Aggregator` when `max_block_size` is huge. [#51566](https://github.com/ClickHouse/ClickHouse/pull/51566) ([Nikita Taranov](https://github.com/nickitat)).
* Add the `SYSTEM SYNC FILESYSTEM CACHE` command. It will compare the in-memory state of the filesystem cache with what it has on disk and fix the in-memory state if needed. [#51622](https://github.com/ClickHouse/ClickHouse/pull/51622) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Attempt to create a generic proxy resolver for ClickHouse while keeping backwards compatibility with the existing S3 storage configuration proxy resolver. [#51749](https://github.com/ClickHouse/ClickHouse/pull/51749) ([Arthur Passos](https://github.com/arthurpassos)).
* Support reading tuple subcolumns from file/s3/hdfs/url/azureBlobStorage table functions. [#51806](https://github.com/ClickHouse/ClickHouse/pull/51806) ([Kruglov Pavel](https://github.com/Avogar)).
* Function `arrayIntersect` now returns the values sorted like the first argument. Closes [#27622](https://github.com/ClickHouse/ClickHouse/issues/27622). [#51850](https://github.com/ClickHouse/ClickHouse/pull/51850) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add new queries which allow creating/dropping access entities in a specified access storage, or moving access entities from one access storage to another. [#51912](https://github.com/ClickHouse/ClickHouse/pull/51912) ([pufit](https://github.com/pufit)).
* `ALTER TABLE FREEZE` queries are not replicated in the `Replicated` database engine. [#52064](https://github.com/ClickHouse/ClickHouse/pull/52064) ([Mike Kot](https://github.com/myrrc)).
* Added the possibility to flush logs to disk on crash; added logs buffer configuration. [#52174](https://github.com/ClickHouse/ClickHouse/pull/52174) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Fix the S3 table function not working for pre-signed URLs. Closes [#50846](https://github.com/ClickHouse/ClickHouse/issues/50846). [#52310](https://github.com/ClickHouse/ClickHouse/pull/52310) ([chen](https://github.com/xiedeyantu)).
* The `system.events` and `system.metrics` tables add the column `name` as an alias to `event` and `metric`. Closes [#51257](https://github.com/ClickHouse/ClickHouse/issues/51257). [#52315](https://github.com/ClickHouse/ClickHouse/pull/52315) ([chen](https://github.com/xiedeyantu)).
* Added support of the syntax `CREATE UNIQUE INDEX` in the parser for better SQL compatibility. `UNIQUE` index is not supported. Set `create_index_ignore_unique=1` to ignore the UNIQUE keyword in queries. [#52320](https://github.com/ClickHouse/ClickHouse/pull/52320) ([Ilya Yatsishin](https://github.com/qoega)).
* Add support of predefined macros (`{database}` and `{table}`) in some Kafka engine settings: topic, consumer, client_id, etc. [#52386](https://github.com/ClickHouse/ClickHouse/pull/52386) ([Yury Bogomolov](https://github.com/ybogo)).
* Disable updating the filesystem cache during backup/restore. The filesystem cache must not be updated during backup/restore; it seems it just slows down the process without any profit (because the BACKUP command can read a lot of data and it's no use to put all the data into the filesystem cache and immediately evict it). [#52402](https://github.com/ClickHouse/ClickHouse/pull/52402) ([Vitaly Baranov](https://github.com/vitlibar)).
* Updated the parameterized view implementation to create a new StorageView with substituted parameters for every SELECT query of a parameterized view. [#52569](https://github.com/ClickHouse/ClickHouse/pull/52569) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* The S3 endpoint configuration now allows using it from the root, and appends '/' automatically if needed. [#47809](https://github.com/ClickHouse/ClickHouse/issues/47809). [#52600](https://github.com/ClickHouse/ClickHouse/pull/52600) ([xiaolei565](https://github.com/xiaolei565)).
* Added support for adding and subtracting arrays: `[5,2] + [1,7]`. Division and multiplication were not implemented due to confusion between pointwise multiplication and the scalar product of arguments. Closes [#49939](https://github.com/ClickHouse/ClickHouse/issues/49939). [#52625](https://github.com/ClickHouse/ClickHouse/pull/52625) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add support for string literals as table names. Closes [#52178](https://github.com/ClickHouse/ClickHouse/issues/52178). [#52635](https://github.com/ClickHouse/ClickHouse/pull/52635) ([hendrik-m](https://github.com/hendrik-m)).
* For clickhouse-local, allow positional options and populate global UDF settings (`user_scripts_path` and `user_defined_executable_functions_config`). [#52643](https://github.com/ClickHouse/ClickHouse/pull/52643) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* `system.asynchronous_metrics` now includes the metrics `querycacheentries` and `querycachebytes` to inspect the query cache. [#52650](https://github.com/ClickHouse/ClickHouse/pull/52650) ([Robert Schulze](https://github.com/rschu1ze)).
* Added the possibility to use the `s3_storage_class` parameter in the `SETTINGS` clause of the `BACKUP` statement for backups to S3. [#52658](https://github.com/ClickHouse/ClickHouse/pull/52658) ([Roman Vasin](https://github.com/rvasin)).
* Improve insert retries on Keeper session expiration. [#52688](https://github.com/ClickHouse/ClickHouse/pull/52688) ([Raúl Marín](https://github.com/Algunenano)).
* Add utility `print-backup-info.py` which parses a backup metadata file and prints information about the backup. [#52690](https://github.com/ClickHouse/ClickHouse/pull/52690) ([Vitaly Baranov](https://github.com/vitlibar)).
* Closes [#49510](https://github.com/ClickHouse/ClickHouse/issues/49510). Currently we have database and table names case-sensitive, but the tools query `information_schema` sometimes in lowercase, sometimes in uppercase. For this reason we have the `information_schema` database, containing lowercase tables, such as `information_schema.tables`, and the `INFORMATION_SCHEMA` database, containing uppercase tables, such as `INFORMATION_SCHEMA.TABLES`. But some tools are querying `INFORMATION_SCHEMA.tables` and `information_schema.TABLES`. The proposed solution is to duplicate both lowercase and uppercase tables in the lowercase and uppercase `information_schema` databases. [#52695](https://github.com/ClickHouse/ClickHouse/pull/52695) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* `GET_PART` and `ATTACH_PART` are almost identical, so they should use the same executor pool. [#52716](https://github.com/ClickHouse/ClickHouse/pull/52716) ([Duc Canh Le](https://github.com/canhld94)).
* Query `CHECK TABLE` has better performance and usability (sends progress updates, is cancellable). [#52745](https://github.com/ClickHouse/ClickHouse/pull/52745) ([vdimir](https://github.com/vdimir)).
* Add `modulo`, `intDiv`, `intDivOrZero` for tuples. [#52758](https://github.com/ClickHouse/ClickHouse/pull/52758) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Search for default `yaml` and `yml` configs in clickhouse-client after `xml`. [#52767](https://github.com/ClickHouse/ClickHouse/pull/52767) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* When merging into a non-'clickhouse'-rooted configuration, configs with a different root node name are just bypassed without an exception. [#52770](https://github.com/ClickHouse/ClickHouse/pull/52770) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Now it's possible to specify the min (`memory_profiler_sample_min_allocation_size`) and max (`memory_profiler_sample_max_allocation_size`) size for allocations to be tracked with the sampling memory profiler. [#52779](https://github.com/ClickHouse/ClickHouse/pull/52779) ([alesapin](https://github.com/alesapin)).
* Add the `precise_float_parsing` setting to switch float parsing methods (fast/precise). [#52791](https://github.com/ClickHouse/ClickHouse/pull/52791) ([Andrey Zvonov](https://github.com/zvonand)).
* Use the same default paths for `clickhouse_keeper` (symlink) as for `clickhouse_keeper` (executable). [#52861](https://github.com/ClickHouse/ClickHouse/pull/52861) ([Vitaly Baranov](https://github.com/vitlibar)).
* CVE-2016-2183: disable 3DES. [#52893](https://github.com/ClickHouse/ClickHouse/pull/52893) ([Kenji Noguchi](https://github.com/knoguchi)).
* Load filesystem cache metadata on startup in parallel. Configured by the `load_metadata_threads` (default: 1) cache config setting. Related to [#52037](https://github.com/ClickHouse/ClickHouse/issues/52037). [#52943](https://github.com/ClickHouse/ClickHouse/pull/52943) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Improve the error message for the table function `remote`. Closes [#40220](https://github.com/ClickHouse/ClickHouse/issues/40220). [#52959](https://github.com/ClickHouse/ClickHouse/pull/52959) ([jiyoungyoooo](https://github.com/jiyoungyoooo)).
* Added the possibility to specify a custom storage policy in the `SETTINGS` clause of `RESTORE` queries. [#52970](https://github.com/ClickHouse/ClickHouse/pull/52970) ([Victor Krasnov](https://github.com/sirvickr)).
* Add the ability to throttle the S3 requests on backup operations (`BACKUP` and `RESTORE` commands now honor `s3_max_[get/put]_[rps/burst]`). [#52974](https://github.com/ClickHouse/ClickHouse/pull/52974) ([Daniel Pozo Escalona](https://github.com/danipozo)).
* Add settings to ignore the ON CLUSTER clause in queries for management of replicated user-defined functions or access control entities with replicated storage. [#52975](https://github.com/ClickHouse/ClickHouse/pull/52975) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Enable parallel reading from replicas over a distributed table. Related to [#49708](https://github.com/ClickHouse/ClickHouse/issues/49708). [#53005](https://github.com/ClickHouse/ClickHouse/pull/53005) ([Igor Nikonov](https://github.com/devcrafter)).
* `EXPLAIN` actions for the JOIN step. [#53006](https://github.com/ClickHouse/ClickHouse/pull/53006) ([Maksim Kita](https://github.com/kitaisreal)).
* Make `hasTokenOrNull` and `hasTokenCaseInsensitiveOrNull` return null for empty needles. [#53059](https://github.com/ClickHouse/ClickHouse/pull/53059) ([ltrk2](https://github.com/ltrk2)).
* Allow to restrict allowed paths for filesystem caches. Mainly useful for dynamic disks. If `filesystem_caches_path` is specified in the server config, all filesystem caches' paths will be restricted to this directory. E.g. if the `path` in the cache config is relative, it will be put in `filesystem_caches_path`; if the `path` in the cache config is absolute, it will be required to lie inside `filesystem_caches_path`. If `filesystem_caches_path` is not specified in the config, the behaviour will be the same as in earlier versions. [#53124](https://github.com/ClickHouse/ClickHouse/pull/53124) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added a bunch of custom commands (mostly to make ClickHouse debugging easier). [#53127](https://github.com/ClickHouse/ClickHouse/pull/53127) ([pufit](https://github.com/pufit)).
* Add diagnostic info about the file name during schema inference; it helps when you process multiple files with globs. [#53135](https://github.com/ClickHouse/ClickHouse/pull/53135) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The client will load suggestions using the main connection if the second connection is not allowed to create a session. [#53177](https://github.com/ClickHouse/ClickHouse/pull/53177) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Add an `EXCEPT` clause to the `SYSTEM STOP/START LISTEN QUERIES [ALL/DEFAULT/CUSTOM]` query, for example `SYSTEM STOP LISTEN QUERIES ALL EXCEPT TCP, HTTP`; see the example after this list. [#53280](https://github.com/ClickHouse/ClickHouse/pull/53280) ([Nikolay Degterinsky](https://github.com/evillique)).
* Change the default of `max_concurrent_queries` from 100 to 1000. It's ok to have many concurrent queries if they are not heavy, and mostly waiting for the network. Note: don't confuse concurrent queries and QPS: for example, ClickHouse server can do tens of thousands of QPS with less than 100 concurrent queries. [#53285](https://github.com/ClickHouse/ClickHouse/pull/53285) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add the ability to override credentials for accessing the base backup in S3 (since tokens may be expired). [#53326](https://github.com/ClickHouse/ClickHouse/pull/53326) ([Azat Khuzhin](https://github.com/azat)).
* Improve `move_primary_key_columns_to_end_of_prewhere`. [#53337](https://github.com/ClickHouse/ClickHouse/pull/53337) ([Han Fei](https://github.com/hanfei1991)).
* Limit the number of concurrent background partition optimize merges. [#53405](https://github.com/ClickHouse/ClickHouse/pull/53405) ([Duc Canh Le](https://github.com/canhld94)).
* Added a setting `allow_moving_table_directory_to_trash` that allows ignoring the `Directory for table data already exists` error when replicating/recovering a `Replicated` database. [#53425](https://github.com/ClickHouse/ClickHouse/pull/53425) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Server settings `asynchronous_metrics_update_period_s` and `asynchronous_heavy_metrics_update_period_s` configured to 0 now fail gracefully instead of crashing the server. [#53428](https://github.com/ClickHouse/ClickHouse/pull/53428) ([Robert Schulze](https://github.com/rschu1ze)).
* Previously the caller could register the same watch callback multiple times. In that case each entry was consuming memory and the same callback was called multiple times, which didn't make much sense. In order to avoid this, the caller could have some logic to not add the same watch multiple times. With this change, this deduplication is done internally if the watch callback is passed via `shared_ptr`. [#53452](https://github.com/ClickHouse/ClickHouse/pull/53452) ([Alexander Gololobov](https://github.com/davenger)).
* The ClickHouse server now respects memory limits changed via cgroups when reloading its configuration. [#53455](https://github.com/ClickHouse/ClickHouse/pull/53455) ([Robert Schulze](https://github.com/rschu1ze)).
* Add the ability to turn off the flush of Distributed tables on `DETACH`/`DROP`/server shutdown. [#53501](https://github.com/ClickHouse/ClickHouse/pull/53501) ([Azat Khuzhin](https://github.com/azat)).
* `domainRFC` now supports IPv6 (IP literal within square brackets). [#53506](https://github.com/ClickHouse/ClickHouse/pull/53506) ([Chen768959](https://github.com/Chen768959)).
* Use the filter by file/path before reading in url/file/hdfs table functions. [#53529](https://github.com/ClickHouse/ClickHouse/pull/53529) ([Kruglov Pavel](https://github.com/Avogar)).
* Use a longer timeout for S3 CopyObject requests. [#53533](https://github.com/ClickHouse/ClickHouse/pull/53533) ([Michael Kolupaev](https://github.com/al13n321)).
* Added server setting `aggregate_function_group_array_max_element_size`. This setting is used to limit the array size for the `groupArray` function at serialization. The default value is `16777215`. [#53550](https://github.com/ClickHouse/ClickHouse/pull/53550) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* `SCHEMA()` was added as an alias for `DATABASE()` to improve MySQL compatibility. [#53587](https://github.com/ClickHouse/ClickHouse/pull/53587) ([Daniël van Eeden](https://github.com/dveeden)).
* Add asynchronous metrics about tables in the system database. For example, `TotalBytesOfMergeTreeTablesSystem`. This closes [#53603](https://github.com/ClickHouse/ClickHouse/issues/53603). [#53604](https://github.com/ClickHouse/ClickHouse/pull/53604) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The SQL editor in the Play UI and Dashboard will not use Grammarly. [#53614](https://github.com/ClickHouse/ClickHouse/pull/53614) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The advanced dashboard now has an option to maximize charts and move them around. [#53622](https://github.com/ClickHouse/ClickHouse/pull/53622) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* As expert-level settings, it is now possible to: 1. configure the `size_ratio` (i.e. the relative size of the protected queue) of the [index] mark/uncompressed caches; 2. configure the cache policy of the index mark and index uncompressed caches. [#53657](https://github.com/ClickHouse/ClickHouse/pull/53657) ([Robert Schulze](https://github.com/rschu1ze)).
* More careful thread management will improve the speed of the S3 table function over a large number of files by more than ~25%. [#53668](https://github.com/ClickHouse/ClickHouse/pull/53668) ([pufit](https://github.com/pufit)).
* Upgrade snappy to 1.1.10; ClickHouse may benefit from it. [#53672](https://github.com/ClickHouse/ClickHouse/pull/53672) ([李扬](https://github.com/taiyang-li)).
* Added client info validation for the query packet in TCPHandler. [#53673](https://github.com/ClickHouse/ClickHouse/pull/53673) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Cache the number of rows in files for count in file/s3/url/hdfs/azure functions. The cache can be enabled/disabled by the setting `use_cache_for_count_from_files` (enabled by default). Continuation of https://github.com/ClickHouse/ClickHouse/pull/53637. [#53692](https://github.com/ClickHouse/ClickHouse/pull/53692) ([Kruglov Pavel](https://github.com/Avogar)).
* Updated to retry loading a part in case of `Azure::Core::Http::TransportException` (https://github.com/ClickHouse/ClickHouse/issues/39700#issuecomment-1686442785). [#53750](https://github.com/ClickHouse/ClickHouse/pull/53750) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Stack traces for exceptions; materialized view exceptions are propagated. [#53766](https://github.com/ClickHouse/ClickHouse/pull/53766) ([Ilya Golshtein](https://github.com/ilejn)).
* If no hostname or port were specified, keeper client will try to search for a connection string in ClickHouse's config.xml. [#53769](https://github.com/ClickHouse/ClickHouse/pull/53769) ([pufit](https://github.com/pufit)).
* Add profile event `PartsLockMicroseconds` which shows the number of microseconds we hold the data parts lock in the MergeTree table engine family. [#53797](https://github.com/ClickHouse/ClickHouse/pull/53797) ([alesapin](https://github.com/alesapin)).
* Make the reconnect limit in Raft limits configurable for Keeper. This configuration can help Keeper rebuild connections with peers more quickly if the current connection is broken. [#53817](https://github.com/ClickHouse/ClickHouse/pull/53817) ([Pengyuan Bian](https://github.com/bianpengyuan)).
* Supported globs in SELECT from file in clickhouse-local. [#53863](https://github.com/ClickHouse/ClickHouse/pull/53863) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Ignore foreign keys in table definitions to improve compatibility with MySQL, so a user wouldn't need to rewrite the foreign-key part of their SQL; ref [#53380](https://github.com/ClickHouse/ClickHouse/issues/53380). [#53864](https://github.com/ClickHouse/ClickHouse/pull/53864) ([jsc0218](https://github.com/jsc0218)).
* `from` is supported as an Expression. [#53914](https://github.com/ClickHouse/ClickHouse/pull/53914) ([Chen768959](https://github.com/Chen768959)).
* Changes of the server configuration are now detected with high precision (milliseconds and less). [#54065](https://github.com/ClickHouse/ClickHouse/pull/54065) ([Mikhail Koviazin](https://github.com/mkmkme)).
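An illustrative use of the `EXCEPT` clause mentioned above, using the exact form quoted in the entry for [#53280](https://github.com/ClickHouse/ClickHouse/pull/53280):

```sql
-- Stop accepting new connections on all protocols except the native TCP and
-- HTTP interfaces (the form quoted in the changelog entry above).
SYSTEM STOP LISTEN QUERIES ALL EXCEPT TCP, HTTP;

-- Re-enable all listeners afterwards.
SYSTEM START LISTEN QUERIES ALL;
```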

#### Build/Testing/Packaging Improvement
* Don't expose symbols from the ClickHouse binary to the dynamic linker. It might fix [#43933](https://github.com/ClickHouse/ClickHouse/issues/43933). [#47475](https://github.com/ClickHouse/ClickHouse/pull/47475) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed endian issues in the native protocol. [#50267](https://github.com/ClickHouse/ClickHouse/pull/50267) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Build `clickhouse/nginx-dav` and use it in integration tests instead of `kssenii/nginx-test`. Addresses [#43182](https://github.com/ClickHouse/ClickHouse/issues/43182). [#51843](https://github.com/ClickHouse/ClickHouse/pull/51843) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add a `clickhouse-keeper-client` symlink to the clickhouse-server package. [#51882](https://github.com/ClickHouse/ClickHouse/pull/51882) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fixed the `ForEach` aggregate function state for s390x. [#52040](https://github.com/ClickHouse/ClickHouse/pull/52040) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Add https://github.com/elliotchance/sqltest to CI to report SQL 2016 conformance. [#52293](https://github.com/ClickHouse/ClickHouse/pull/52293) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed a Delta codec endian issue for s390x. [#52592](https://github.com/ClickHouse/ClickHouse/pull/52592) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Packing the inline cache into docker images sometimes causes strange special effects. Since we don't use it at all, it's good to go. [#53008](https://github.com/ClickHouse/ClickHouse/pull/53008) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Upgrade PRQL to 0.9.3. [#53060](https://github.com/ClickHouse/ClickHouse/pull/53060) ([Maximilian Roos](https://github.com/max-sixty)).
* System tables from CI checks are exported to ClickHouse Cloud. [#53086](https://github.com/ClickHouse/ClickHouse/pull/53086) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud. [#53100](https://github.com/ClickHouse/ClickHouse/pull/53100) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up Debug and Tidy builds. [#53178](https://github.com/ClickHouse/ClickHouse/pull/53178) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up the build by removing tons and tonnes of garbage. One of the frequently included headers was poisoned by boost. [#53180](https://github.com/ClickHouse/ClickHouse/pull/53180) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add ClickHouse builds for Linux s390x to CI. [#53181](https://github.com/ClickHouse/ClickHouse/pull/53181) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Remove even more garbage. [#53182](https://github.com/ClickHouse/ClickHouse/pull/53182) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The function `arrayAUC` was using heavy C++ templates. [#53183](https://github.com/ClickHouse/ClickHouse/pull/53183) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Some translation units were always rebuilt regardless of ccache. The culprit was found and fixed. [#53184](https://github.com/ClickHouse/ClickHouse/pull/53184) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud; the second attempt after [#53100](https://github.com/ClickHouse/ClickHouse/issues/53100). [#53213](https://github.com/ClickHouse/ClickHouse/pull/53213) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Three tests were failing / flaky: 1. test_host_regexp_multiple_ptr_records 2. test_host_regexp_multiple_ptr_records_concurrent 3. test_reverse_dns_query. [#53286](https://github.com/ClickHouse/ClickHouse/pull/53286) ([Arthur Passos](https://github.com/arthurpassos)).
* Export logs from CI in stateful tests to ClickHouse Cloud. [#53351](https://github.com/ClickHouse/ClickHouse/pull/53351) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Export logs from CI in stress tests. [#53353](https://github.com/ClickHouse/ClickHouse/pull/53353) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Export logs from CI in the fuzzer. [#53354](https://github.com/ClickHouse/ClickHouse/pull/53354) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Export logs from CI in performance tests to ClickHouse Cloud. [#53355](https://github.com/ClickHouse/ClickHouse/pull/53355) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Preserve environment parameters in the `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Follow up for [#53418](https://github.com/ClickHouse/ClickHouse/issues/53418). Small improvements for install_check.py, adding tests for proper ENV parameters passing to the main process on `init.d start`. [#53457](https://github.com/ClickHouse/ClickHouse/pull/53457) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fixed a base64 endian issue for s390x. [#53570](https://github.com/ClickHouse/ClickHouse/pull/53570) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Reorganize file management in CMake to prevent potential duplications. For instance, `indexHint.cpp` is duplicated in both `dbms_sources` and `clickhouse_functions_sources`. [#53621](https://github.com/ClickHouse/ClickHouse/pull/53621) ([Amos Bird](https://github.com/amosbird)).
* Fixed the functional test 02354_distributed_with_external_aggregation_memory_usage on s390x. [#53648](https://github.com/ClickHouse/ClickHouse/pull/53648) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Skipped the QPL functional test for s390x. [#53758](https://github.com/ClickHouse/ClickHouse/pull/53758) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Slightly improve the cmake build by sanitizing some dependencies and removing some duplicates. Each commit includes a short description of the changes made. [#53759](https://github.com/ClickHouse/ClickHouse/pull/53759) ([Amos Bird](https://github.com/amosbird)).
* Fixed a StripeLog storage endian issue on the s390x platform. [#53902](https://github.com/ClickHouse/ClickHouse/pull/53902) ([Harry Lee](https://github.com/HarryLeeIBM)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Do not reset the Annoy index during build-up with > 1 mark [#51325](https://github.com/ClickHouse/ClickHouse/pull/51325) ([Tian Xinhui](https://github.com/xinhuitian)).
* Fix usage of temporary directories during RESTORE [#51493](https://github.com/ClickHouse/ClickHouse/pull/51493) ([Azat Khuzhin](https://github.com/azat)).
* Fix binary arithmetic for Nullable(IPv4) [#51642](https://github.com/ClickHouse/ClickHouse/pull/51642) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Support IPv4 and IPv6 as dictionary attributes [#51756](https://github.com/ClickHouse/ClickHouse/pull/51756) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Bug fix for the checksum of compressed marks [#51777](https://github.com/ClickHouse/ClickHouse/pull/51777) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix a comma mistakenly parsed as part of a datetime in CSV best-effort parsing [#51950](https://github.com/ClickHouse/ClickHouse/pull/51950) ([Kruglov Pavel](https://github.com/Avogar)).
* Don't throw an exception when an executable UDF has parameters [#51961](https://github.com/ClickHouse/ClickHouse/pull/51961) ([Nikita Taranov](https://github.com/nickitat)).
* Fix recalculation of skip indexes and projections in `ALTER DELETE` queries [#52530](https://github.com/ClickHouse/ClickHouse/pull/52530) ([Anton Popov](https://github.com/CurtizJ)).
* MaterializedMySQL: Fix the infinite loop in ReadBuffer::read [#52621](https://github.com/ClickHouse/ClickHouse/pull/52621) ([Val Doroshchuk](https://github.com/valbok)).
* Load suggestions only with the `clickhouse` dialect [#52628](https://github.com/ClickHouse/ClickHouse/pull/52628) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Init and destroy the ares channel on demand [#52634](https://github.com/ClickHouse/ClickHouse/pull/52634) ([Arthur Passos](https://github.com/arthurpassos)).
* RFC: Fix filtering by virtual columns with OR expression [#52653](https://github.com/ClickHouse/ClickHouse/pull/52653) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash in function `tuple` with one sparse column argument [#52659](https://github.com/ClickHouse/ClickHouse/pull/52659) ([Anton Popov](https://github.com/CurtizJ)).
* Fix named collections on cluster 23.7 [#52687](https://github.com/ClickHouse/ClickHouse/pull/52687) ([Al Korgun](https://github.com/alkorgun)).
* Fix reading of an unnecessary column in case of multistage `PREWHERE` [#52689](https://github.com/ClickHouse/ClickHouse/pull/52689) ([Anton Popov](https://github.com/CurtizJ)).
* Fix unexpected sort result on multiple columns with nulls-first direction [#52761](https://github.com/ClickHouse/ClickHouse/pull/52761) ([copperybean](https://github.com/copperybean)).
* Fix data race in Keeper reconfiguration [#52804](https://github.com/ClickHouse/ClickHouse/pull/52804) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix sorting of sparse columns with large limit [#52827](https://github.com/ClickHouse/ClickHouse/pull/52827) ([Anton Popov](https://github.com/CurtizJ)).
* clickhouse-keeper: fix implementation of server with poll() [#52833](https://github.com/ClickHouse/ClickHouse/pull/52833) ([Andy Fiddaman](https://github.com/citrus-it)).
* Make the regexp analyzer recognize named capturing groups [#52840](https://github.com/ClickHouse/ClickHouse/pull/52840) ([Han Fei](https://github.com/hanfei1991)).
* Fix possible assert in ~PushingAsyncPipelineExecutor in clickhouse-local [#52862](https://github.com/ClickHouse/ClickHouse/pull/52862) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix reading of empty `Nested(Array(LowCardinality(...)))` [#52949](https://github.com/ClickHouse/ClickHouse/pull/52949) ([Anton Popov](https://github.com/CurtizJ)).
* Added new tests for session_log and fixed the inconsistency between login and logout. [#52958](https://github.com/ClickHouse/ClickHouse/pull/52958) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Fix password leak in SHOW CREATE for MySQL tables [#52962](https://github.com/ClickHouse/ClickHouse/pull/52962) ([Duc Canh Le](https://github.com/canhld94)).
* Convert sparse to full in CreateSetAndFilterOnTheFlyStep [#53000](https://github.com/ClickHouse/ClickHouse/pull/53000) ([vdimir](https://github.com/vdimir)).
* Fix rare race condition with empty key prefix directory deletion in fs cache [#53055](https://github.com/ClickHouse/ClickHouse/pull/53055) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix ZstdDeflatingWriteBuffer truncating the output sometimes [#53064](https://github.com/ClickHouse/ClickHouse/pull/53064) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix query_id in part_log with async flush queries [#53103](https://github.com/ClickHouse/ClickHouse/pull/53103) ([Raúl Marín](https://github.com/Algunenano)).
* Fix possible error from cache "Read unexpected size" [#53121](https://github.com/ClickHouse/ClickHouse/pull/53121) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Disable the new Parquet encoder [#53130](https://github.com/ClickHouse/ClickHouse/pull/53130) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Not-ready Set [#53162](https://github.com/ClickHouse/ClickHouse/pull/53162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix character escaping in the PostgreSQL engine [#53250](https://github.com/ClickHouse/ClickHouse/pull/53250) ([Nikolay Degterinsky](https://github.com/evillique)).
* #2 Added new tests for session_log and fixed the inconsistency between login and logout. [#53255](https://github.com/ClickHouse/ClickHouse/pull/53255) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* #3 Fixed inconsistency between login success and logout [#53302](https://github.com/ClickHouse/ClickHouse/pull/53302) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Fix adding sub-second intervals to DateTime [#53309](https://github.com/ClickHouse/ClickHouse/pull/53309) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix "Context has expired" error in dictionaries [#53342](https://github.com/ClickHouse/ClickHouse/pull/53342) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
* Forbid `use_structure_from_insertion_table_in_table_functions` when executing Scalar [#53348](https://github.com/ClickHouse/ClickHouse/pull/53348) ([flynn](https://github.com/ucasfl)).
* Fix loading lazy database during system.table select query [#53372](https://github.com/ClickHouse/ClickHouse/pull/53372) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fixed system.data_skipping_indices for MaterializedMySQL [#53381](https://github.com/ClickHouse/ClickHouse/pull/53381) ([Filipp Ozinov](https://github.com/bakwc)).
* Fix processing of a single carriage return in the TSV file segmentation engine [#53407](https://github.com/ClickHouse/ClickHouse/pull/53407) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix 'Context has expired' error properly [#53433](https://github.com/ClickHouse/ClickHouse/pull/53433) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix `timeout_overflow_mode` when having a subquery in the rhs of IN [#53439](https://github.com/ClickHouse/ClickHouse/pull/53439) ([Duc Canh Le](https://github.com/canhld94)).
* Fix an unexpected behavior in [#53152](https://github.com/ClickHouse/ClickHouse/issues/53152) [#53440](https://github.com/ClickHouse/ClickHouse/pull/53440) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Fix JSON_QUERY function parse error when the path is all numbers [#53470](https://github.com/ClickHouse/ClickHouse/pull/53470) ([KevinyhZou](https://github.com/KevinyhZou)).
* Fix wrong column order for queries with parallel FINAL. [#53489](https://github.com/ClickHouse/ClickHouse/pull/53489) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed SELECTing from ReplacingMergeTree with `do_not_merge_across_partitions_select_final` [#53511](https://github.com/ClickHouse/ClickHouse/pull/53511) ([Vasily Nemkov](https://github.com/Enmk)).
* bugfix: Flush async insert queue first on shutdown [#53547](https://github.com/ClickHouse/ClickHouse/pull/53547) ([joelynch](https://github.com/joelynch)).
* Fix crash in join on sparse column [#53548](https://github.com/ClickHouse/ClickHouse/pull/53548) ([vdimir](https://github.com/vdimir)).
* Fix possible UB in the Set skipping index for functions with incorrect args [#53559](https://github.com/ClickHouse/ClickHouse/pull/53559) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible UB in inverted indexes (experimental feature) [#53560](https://github.com/ClickHouse/ClickHouse/pull/53560) ([Azat Khuzhin](https://github.com/azat)).
* Fix: the interpolate expression took the source column instead of the same-name alias from the select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix the number of dropped granules in EXPLAIN PLAN index=1 [#53616](https://github.com/ClickHouse/ClickHouse/pull/53616) ([wangxiaobo](https://github.com/wzb5212)).
* Correctly handle totals and extremes with `DelayedSource` [#53644](https://github.com/ClickHouse/ClickHouse/pull/53644) ([Antonio Andelic](https://github.com/antonio2368)).
* Prepared set cache in mutation pipeline stuck [#53645](https://github.com/ClickHouse/ClickHouse/pull/53645) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix bug on mutations with subcolumns of type JSON in predicates of UPDATE and DELETE queries. [#53677](https://github.com/ClickHouse/ClickHouse/pull/53677) ([VanDarkholme7](https://github.com/VanDarkholme7)).
* Fix filter pushdown for the full_sorting_merge join [#53699](https://github.com/ClickHouse/ClickHouse/pull/53699) ([vdimir](https://github.com/vdimir)).
* Try to fix bug with `NULL::LowCardinality(Nullable(...)) NOT IN`; see the example below [#53706](https://github.com/ClickHouse/ClickHouse/pull/53706) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||||
|
* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* transform: correctly handle default column with multiple rows [#53742](https://github.com/ClickHouse/ClickHouse/pull/53742) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
|
||||||
|
* Fix fuzzer crash in parseDateTime() [#53764](https://github.com/ClickHouse/ClickHouse/pull/53764) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Materialized postgres: fix uncaught exception in getCreateTableQueryImpl [#53832](https://github.com/ClickHouse/ClickHouse/pull/53832) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix possible segfault while using PostgreSQL engine [#53847](https://github.com/ClickHouse/ClickHouse/pull/53847) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix named_collection_admin alias [#54066](https://github.com/ClickHouse/ClickHouse/pull/54066) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix rows_before_limit_at_least for DelayedSource. [#54122](https://github.com/ClickHouse/ClickHouse/pull/54122) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
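
For the sub-second DateTime interval fix above, a minimal sketch of the affected pattern. The literal and the expected `DateTime64` result type are assumptions for illustration, not taken from the PR:

```sql
-- Adding a sub-second interval to a DateTime value; the assumed expectation
-- is a DateTime64 result rather than an error or silent truncation.
SELECT now() + INTERVAL 500 MILLISECOND AS t, toTypeName(t);
```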
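
For the `timeout_overflow_mode` fix above, a minimal sketch assuming a deliberately slow subquery on the right-hand side of IN; with `timeout_overflow_mode = 'break'` the query should stop early instead of throwing:

```sql
-- Hypothetical slow IN-subquery; the values are chosen only to exceed the timeout.
SELECT 1 IN (SELECT number FROM numbers(10000000000))
SETTINGS max_execution_time = 1, timeout_overflow_mode = 'break';
```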
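
For the JSON_QUERY fix above, a minimal sketch of an all-digit path; the exact JSON literal used in the PR's test may differ:

```sql
-- A JSONPath member made entirely of digits previously failed to parse.
SELECT JSON_QUERY('{"123": "abc"}', '$.123');
```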
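
For the ReplacingMergeTree/FINAL fix above, a minimal sketch; the table `events` is hypothetical and assumed to be a partitioned ReplacingMergeTree:

```sql
-- FINAL with per-partition processing enabled.
SELECT * FROM events FINAL
SETTINGS do_not_merge_across_partitions_select_final = 1;
```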
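
For the INTERPOLATE fix above, a minimal sketch; per the fix, `m` inside INTERPOLATE should refer to the SELECT alias, not a same-named source column:

```sql
-- `m` is an alias in the SELECT list; the rows produced by WITH FILL
-- interpolate it from the previous value.
SELECT number AS n, n * 10 AS m
FROM numbers(3)
ORDER BY n WITH FILL TO 6
INTERPOLATE (m AS m + 1);
```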
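
For the dropped-granules fix above, a sketch using the documented `indexes = 1` spelling (the PR title writes `index=1`); the table and predicate are hypothetical:

```sql
-- The Indexes section of the output reports selected and dropped granules.
EXPLAIN PLAN indexes = 1
SELECT * FROM events WHERE key = 42;
```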
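
For the `NOT IN` fix above, a minimal sketch of the affected type; the assumed result is `NULL` rather than an exception:

```sql
-- NULL carried in a LowCardinality(Nullable(...)) value on the left of NOT IN.
SELECT CAST(NULL, 'LowCardinality(Nullable(String))') NOT IN ('a', 'b');
```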
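
For the parseDateTime() fix above, a sketch of ordinary (non-crashing) usage with MySQL-style format specifiers; the fuzzer input that triggered the crash is not reproduced here:

```sql
SELECT parseDateTime('2023-08-31 06:00:00', '%Y-%m-%d %H:%i:%s');
```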

#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Implementing new commands for keeper-client"'. [#52985](https://github.com/ClickHouse/ClickHouse/pull/52985) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Remove try/catch from DatabaseFilesystem"'. [#53044](https://github.com/ClickHouse/ClickHouse/pull/53044) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Upload build time-trace data to CI database"'. [#53210](https://github.com/ClickHouse/ClickHouse/pull/53210) ([Alexander Gololobov](https://github.com/davenger)).
* NO CL ENTRY: 'Revert "Added new tests for session_log and fixed the inconsistency between login and logout."'. [#53247](https://github.com/ClickHouse/ClickHouse/pull/53247) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Improve CHECK TABLE system query"'. [#53272](https://github.com/ClickHouse/ClickHouse/pull/53272) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "#2 Added new tests for session_log and fixed the inconsistency between login and logout."'. [#53294](https://github.com/ClickHouse/ClickHouse/pull/53294) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Documentation: add Ibis project to the integrations section"'. [#53374](https://github.com/ClickHouse/ClickHouse/pull/53374) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Planner prepare filters for analysis"'. [#53782](https://github.com/ClickHouse/ClickHouse/pull/53782) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "dateDiff: add support for plural units."'. [#53795](https://github.com/ClickHouse/ClickHouse/pull/53795) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Fixed wrong python test name pattern"'. [#53929](https://github.com/ClickHouse/ClickHouse/pull/53929) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Fix bug on mutations with subcolumns of type JSON in predicates of UPDATE and DELETE queries."'. [#54063](https://github.com/ClickHouse/ClickHouse/pull/54063) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* clickhouse-copier: add a check for drop partition [#35263](https://github.com/ClickHouse/ClickHouse/pull/35263) ([sunny](https://github.com/sunny19930321)).
* Add more checks into ThreadStatus ctor. [#42019](https://github.com/ClickHouse/ClickHouse/pull/42019) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Refactor Query Tree visitor [#46740](https://github.com/ClickHouse/ClickHouse/pull/46740) ([Dmitry Novik](https://github.com/novikd)).
* Revert "Revert "Randomize JIT settings in tests"" [#48282](https://github.com/ClickHouse/ClickHouse/pull/48282) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix outdated cache configuration in s3 tests: s3_storage_policy_by_defau… [#48424](https://github.com/ClickHouse/ClickHouse/pull/48424) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix IN with decimal in analyzer [#48754](https://github.com/ClickHouse/ClickHouse/pull/48754) ([vdimir](https://github.com/vdimir)).
* Some unclear change in StorageBuffer::reschedule() for something [#49723](https://github.com/ClickHouse/ClickHouse/pull/49723) ([DimasKovas](https://github.com/DimasKovas)).
* MergeTree & SipHash checksum big-endian support [#50276](https://github.com/ClickHouse/ClickHouse/pull/50276) ([ltrk2](https://github.com/ltrk2)).
* Maintain same aggregate function merge behavior for small and big endian machines [#50609](https://github.com/ClickHouse/ClickHouse/pull/50609) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* Add a test to limit client max opening fd [#51213](https://github.com/ClickHouse/ClickHouse/pull/51213) ([Duc Canh Le](https://github.com/canhld94)).
* Add info about acquired space in cache to "not enough space" error [#51537](https://github.com/ClickHouse/ClickHouse/pull/51537) ([vdimir](https://github.com/vdimir)).
* KeeperDispatcher: remove redundant lock as the ConcurrentBoundedQueue is thread-safe [#51766](https://github.com/ClickHouse/ClickHouse/pull/51766) ([frinkr](https://github.com/frinkr)).
* Fix build type in packager [#51771](https://github.com/ClickHouse/ClickHouse/pull/51771) ([Antonio Andelic](https://github.com/antonio2368)).
* Turn off metrics_perf_events_enabled in perf tests [#52072](https://github.com/ClickHouse/ClickHouse/pull/52072) ([Sema Checherinda](https://github.com/CheSema)).
* Remove try/catch from DatabaseFilesystem [#52155](https://github.com/ClickHouse/ClickHouse/pull/52155) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test that clickhouse-client or local do not throw/catch on startup [#52159](https://github.com/ClickHouse/ClickHouse/pull/52159) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Retry blob listing in test_alter_moving_garbage [#52193](https://github.com/ClickHouse/ClickHouse/pull/52193) ([vdimir](https://github.com/vdimir)).
* Try to make `test_kafka_formats_with_broken_message` and `test_kafka_formats` integration tests stable [#52273](https://github.com/ClickHouse/ClickHouse/pull/52273) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Kill the runner process with all subprocesses [#52277](https://github.com/ClickHouse/ClickHouse/pull/52277) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Implement endianness-independent support for MergeTree checksums [#52329](https://github.com/ClickHouse/ClickHouse/pull/52329) ([ltrk2](https://github.com/ltrk2)).
* Add tests with "connection reset by peer" error, and retry it inside the client [#52441](https://github.com/ClickHouse/ClickHouse/pull/52441) ([Sema Checherinda](https://github.com/CheSema)).
* Fix logging for asynchronous non-batched distributed sends [#52583](https://github.com/ClickHouse/ClickHouse/pull/52583) ([Azat Khuzhin](https://github.com/azat)).
* Follow-up to "Implement support of encrypted elements in configuration file" [#52609](https://github.com/ClickHouse/ClickHouse/pull/52609) ([Robert Schulze](https://github.com/rschu1ze)).
* Return zxid from TestKeeper and in multi responses [#52618](https://github.com/ClickHouse/ClickHouse/pull/52618) ([Alexander Gololobov](https://github.com/davenger)).
* Analyzer: Support ARRAY JOIN COLUMNS(...) syntax [#52622](https://github.com/ClickHouse/ClickHouse/pull/52622) ([Dmitry Novik](https://github.com/novikd)).
* Fix stress test: check if storage is shut down before we operate MergeTreeDeduplicationLog [#52623](https://github.com/ClickHouse/ClickHouse/pull/52623) ([Han Fei](https://github.com/hanfei1991)).
* Suspicious DISTINCT crashes from sqlancer [#52636](https://github.com/ClickHouse/ClickHouse/pull/52636) ([Igor Nikonov](https://github.com/devcrafter)).
* Partially fixed test 01747_system_session_log_long [#52640](https://github.com/ClickHouse/ClickHouse/pull/52640) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Check for unexpected Cyrillic [#52641](https://github.com/ClickHouse/ClickHouse/pull/52641) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `test_keeper_reconfig_replace_leader` [#52651](https://github.com/ClickHouse/ClickHouse/pull/52651) ([Antonio Andelic](https://github.com/antonio2368)).
* Rename setting disable_url_encoding to enable_url_encoding and add a test [#52656](https://github.com/ClickHouse/ClickHouse/pull/52656) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove creation of an unnecessary temporary ContextAccess on login [#52660](https://github.com/ClickHouse/ClickHouse/pull/52660) ([Vitaly Baranov](https://github.com/vitlibar)).
* Update version after release [#52661](https://github.com/ClickHouse/ClickHouse/pull/52661) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version_date.tsv and changelogs after v23.7.1.2470-stable [#52664](https://github.com/ClickHouse/ClickHouse/pull/52664) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix bugs and add a better test for SYSTEM STOP LISTEN [#52680](https://github.com/ClickHouse/ClickHouse/pull/52680) ([Nikolay Degterinsky](https://github.com/evillique)).
* Remove unneeded readBinary() specializations + update docs [#52683](https://github.com/ClickHouse/ClickHouse/pull/52683) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove remainders of legacy setting 'allow_experimental_query_cache' [#52685](https://github.com/ClickHouse/ClickHouse/pull/52685) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix 02417_opentelemetry_insert_on_distributed_table flakiness [#52691](https://github.com/ClickHouse/ClickHouse/pull/52691) ([Azat Khuzhin](https://github.com/azat)).
* Improvements to backup restore disallow_concurrency test [#52709](https://github.com/ClickHouse/ClickHouse/pull/52709) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Move UnlinkMetadataFileOperationOutcome to common header [#52710](https://github.com/ClickHouse/ClickHouse/pull/52710) ([Alexander Gololobov](https://github.com/davenger)).
* Improve endianness-independent support for hash functions [#52712](https://github.com/ClickHouse/ClickHouse/pull/52712) ([ltrk2](https://github.com/ltrk2)).
* Allow reading zero objects in CachedObjectStorage::readObjects() [#52733](https://github.com/ClickHouse/ClickHouse/pull/52733) ([Michael Kolupaev](https://github.com/al13n321)).
* Merging reading from archives [#50321](https://github.com/ClickHouse/ClickHouse/issues/50321) [#52734](https://github.com/ClickHouse/ClickHouse/pull/52734) ([Antonio Andelic](https://github.com/antonio2368)).
* Merging [#52640](https://github.com/ClickHouse/ClickHouse/issues/52640) [#52744](https://github.com/ClickHouse/ClickHouse/pull/52744) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Analyzer: fix 00979_set_index_not.sql [#52754](https://github.com/ClickHouse/ClickHouse/pull/52754) ([Igor Nikonov](https://github.com/devcrafter)).
* Planner prepare filters for analysis [#52762](https://github.com/ClickHouse/ClickHouse/pull/52762) ([Maksim Kita](https://github.com/kitaisreal)).
* Allow reading empty file with no blobs [#52763](https://github.com/ClickHouse/ClickHouse/pull/52763) ([Alexander Gololobov](https://github.com/davenger)).
* Fix: check window frame bounds correctly for RANGE [#52768](https://github.com/ClickHouse/ClickHouse/pull/52768) ([Igor Nikonov](https://github.com/devcrafter)).
* Numerical stability of the test for Polygons [#52769](https://github.com/ClickHouse/ClickHouse/pull/52769) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Change the default timezones in Docker test images [#52772](https://github.com/ClickHouse/ClickHouse/pull/52772) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Upload build statistics to the CI database [#52773](https://github.com/ClickHouse/ClickHouse/pull/52773) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add `instance_type` information to the CI database [#52774](https://github.com/ClickHouse/ClickHouse/pull/52774) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove Coverity (part 2) [#52775](https://github.com/ClickHouse/ClickHouse/pull/52775) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a tool to upload `-ftime-trace` to ClickHouse [#52776](https://github.com/ClickHouse/ClickHouse/pull/52776) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Revert revert of system drop filesystem cache by key [#52778](https://github.com/ClickHouse/ClickHouse/pull/52778) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove obsolete part of a check name [#52793](https://github.com/ClickHouse/ClickHouse/pull/52793) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Maybe fix TLS tests [#52796](https://github.com/ClickHouse/ClickHouse/pull/52796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Allow OOM in Stress and Upgrade checks [#52807](https://github.com/ClickHouse/ClickHouse/pull/52807) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not test upper bounds for throttlers [#52821](https://github.com/ClickHouse/ClickHouse/pull/52821) ([Sergei Trifonov](https://github.com/serxa)).
* Add more logging and touch test for materialize mysql [#52822](https://github.com/ClickHouse/ClickHouse/pull/52822) ([alesapin](https://github.com/alesapin)).
* Try to remove more leftovers. [#52823](https://github.com/ClickHouse/ClickHouse/pull/52823) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Update test_crash_log/test.py [#52825](https://github.com/ClickHouse/ClickHouse/pull/52825) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Don't report LOGICAL_ERROR if a file got truncated during read [#52828](https://github.com/ClickHouse/ClickHouse/pull/52828) ([Michael Kolupaev](https://github.com/al13n321)).
* Throw S3Exception whenever possible. [#52829](https://github.com/ClickHouse/ClickHouse/pull/52829) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Increase min protocol version for sparse serialization [#52835](https://github.com/ClickHouse/ClickHouse/pull/52835) ([Anton Popov](https://github.com/CurtizJ)).
* Cleanup localBackup [#52837](https://github.com/ClickHouse/ClickHouse/pull/52837) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Try to fix 02352_rwlock [#52852](https://github.com/ClickHouse/ClickHouse/pull/52852) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Disable a couple of long tests for debug build. [#52854](https://github.com/ClickHouse/ClickHouse/pull/52854) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix flaky tests in test_merge_tree_azure_blob_storage & test_storage_azure_blob_storage [#52855](https://github.com/ClickHouse/ClickHouse/pull/52855) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Cancel merges before renaming a system log table [#52858](https://github.com/ClickHouse/ClickHouse/pull/52858) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Try to fix a rare failure in 00612_http_max_query_size [#52859](https://github.com/ClickHouse/ClickHouse/pull/52859) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove duplicated dialect setting value [#52864](https://github.com/ClickHouse/ClickHouse/pull/52864) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Significant improvement of rust caching [#52865](https://github.com/ClickHouse/ClickHouse/pull/52865) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Docker improvements [#52869](https://github.com/ClickHouse/ClickHouse/pull/52869) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Try to continue clickhouse process in stress test after terminating gdb. [#52871](https://github.com/ClickHouse/ClickHouse/pull/52871) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* fix master ci for [#52091](https://github.com/ClickHouse/ClickHouse/issues/52091) [#52873](https://github.com/ClickHouse/ClickHouse/pull/52873) ([Han Fei](https://github.com/hanfei1991)).
* Fix the PR body check for `Reverts #number` [#52874](https://github.com/ClickHouse/ClickHouse/pull/52874) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Analyzer WITH statement references test [#52875](https://github.com/ClickHouse/ClickHouse/pull/52875) ([Maksim Kita](https://github.com/kitaisreal)).
* Disable more tests for debug. [#52878](https://github.com/ClickHouse/ClickHouse/pull/52878) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix assertion in mutations with transactions [#52894](https://github.com/ClickHouse/ClickHouse/pull/52894) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fixed test_profile_max_sessions_for_user test flakiness [#52897](https://github.com/ClickHouse/ClickHouse/pull/52897) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Use concepts to replace more std::enable_if_t [#52898](https://github.com/ClickHouse/ClickHouse/pull/52898) ([flynn](https://github.com/ucasfl)).
* Disable `test_reconfig_replace_leader_in_one_command` [#52901](https://github.com/ClickHouse/ClickHouse/pull/52901) ([Antonio Andelic](https://github.com/antonio2368)).
* tests: fix possible EADDRINUSE v2 [#52906](https://github.com/ClickHouse/ClickHouse/pull/52906) ([Azat Khuzhin](https://github.com/azat)).
* Merging [#52897](https://github.com/ClickHouse/ClickHouse/issues/52897) [#52907](https://github.com/ClickHouse/ClickHouse/pull/52907) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Remove obsolete `no-upgrade-check` tag [#52915](https://github.com/ClickHouse/ClickHouse/pull/52915) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flaky test_storage_s3_queue::test_multiple_tables_streaming_sync_distributed [#52944](https://github.com/ClickHouse/ClickHouse/pull/52944) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Don't create empty parts on drop partition if we have a transaction [#52945](https://github.com/ClickHouse/ClickHouse/pull/52945) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Analyzer: fix WITH clause resolving [#52947](https://github.com/ClickHouse/ClickHouse/pull/52947) ([Dmitry Novik](https://github.com/novikd)).
* Refactor CI_CONFIG [#52948](https://github.com/ClickHouse/ClickHouse/pull/52948) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Try to fix assert in remove redundant sorting [#52950](https://github.com/ClickHouse/ClickHouse/pull/52950) ([Igor Nikonov](https://github.com/devcrafter)).
* Remove unused code in StorageSystemStackTrace [#52952](https://github.com/ClickHouse/ClickHouse/pull/52952) ([Azat Khuzhin](https://github.com/azat)).
* Fix wrong error code "BAD_GET" [#52954](https://github.com/ClickHouse/ClickHouse/pull/52954) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix some issues with databases [#52956](https://github.com/ClickHouse/ClickHouse/pull/52956) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix config update in HTTP Header Filtering [#52957](https://github.com/ClickHouse/ClickHouse/pull/52957) ([Nikolay Degterinsky](https://github.com/evillique)).
* Added peak_memory_usage to clickhouse-client final progress message [#52961](https://github.com/ClickHouse/ClickHouse/pull/52961) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* tests: fix 01293_client_interactive_vertical_multiline flakiness (increase timeout) [#52965](https://github.com/ClickHouse/ClickHouse/pull/52965) ([Azat Khuzhin](https://github.com/azat)).
* Added TSAN option report_atomic_races=0 for test_max_sessions_for_user [#52969](https://github.com/ClickHouse/ClickHouse/pull/52969) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* MaterializedMySQL: Add tests for unquoted utf8 column names in DML [#52971](https://github.com/ClickHouse/ClickHouse/pull/52971) ([Val Doroshchuk](https://github.com/valbok)).
* Update version_date.tsv and changelogs after v23.7.2.25-stable [#52976](https://github.com/ClickHouse/ClickHouse/pull/52976) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Decrease the number of tries for a couple of too slow tests for debug. [#52981](https://github.com/ClickHouse/ClickHouse/pull/52981) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix test `00061_storage_buffer` [#52983](https://github.com/ClickHouse/ClickHouse/pull/52983) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove `test_host_regexp_multiple_ptr_records_concurrent`, CC @arthurpassos [#52984](https://github.com/ClickHouse/ClickHouse/pull/52984) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `test_zookeeper_config` [#52988](https://github.com/ClickHouse/ClickHouse/pull/52988) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove assertion from test_no_ttl_merges_in_busy_pool [#52989](https://github.com/ClickHouse/ClickHouse/pull/52989) ([alesapin](https://github.com/alesapin)).
* Fix `test_dictionary_custom_settings` [#52990](https://github.com/ClickHouse/ClickHouse/pull/52990) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test [#53007](https://github.com/ClickHouse/ClickHouse/pull/53007) ([alesapin](https://github.com/alesapin)).
* Fix default port for Keeper Client [#53010](https://github.com/ClickHouse/ClickHouse/pull/53010) ([pufit](https://github.com/pufit)).
* Add a test to broken tests (Analyzer) [#53013](https://github.com/ClickHouse/ClickHouse/pull/53013) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Implement big-endian support for transform [#53015](https://github.com/ClickHouse/ClickHouse/pull/53015) ([ltrk2](https://github.com/ltrk2)).
* Fix completion for clickhouse-keeper-client [#53029](https://github.com/ClickHouse/ClickHouse/pull/53029) ([Azat Khuzhin](https://github.com/azat)).
* clickhouse-keeper-client: fix version parsing for set command [#53031](https://github.com/ClickHouse/ClickHouse/pull/53031) ([Azat Khuzhin](https://github.com/azat)).
* MaterializedMySQL: Add tests to alter named collections [#53032](https://github.com/ClickHouse/ClickHouse/pull/53032) ([Val Doroshchuk](https://github.com/valbok)).
* Fix description for 's3_upload_part_size_multiply_parts_count_threshold' setting [#53042](https://github.com/ClickHouse/ClickHouse/pull/53042) ([Elena Torró](https://github.com/elenatorro)).
* Update 01114_database_atomic.sh [#53043](https://github.com/ClickHouse/ClickHouse/pull/53043) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Revert revert of "Remove try/catch from DatabaseFilesystem" [#53045](https://github.com/ClickHouse/ClickHouse/pull/53045) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix cache-related logical error in stress tests [#53047](https://github.com/ClickHouse/ClickHouse/pull/53047) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove upgrade checks with sanitizers [#53051](https://github.com/ClickHouse/ClickHouse/pull/53051) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Increase election timeout in integration tests [#53052](https://github.com/ClickHouse/ClickHouse/pull/53052) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Analyzer: do not enable it for old servers in tests [#53053](https://github.com/ClickHouse/ClickHouse/pull/53053) ([Dmitry Novik](https://github.com/novikd)).
* Try to make `01414_mutations_and_errors_zookeeper` less flaky [#53056](https://github.com/ClickHouse/ClickHouse/pull/53056) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix test `02434_cancel_insert_when_client_dies` [#53062](https://github.com/ClickHouse/ClickHouse/pull/53062) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add `abort_on_error=1` to `TSAN_OPTIONS` [#53065](https://github.com/ClickHouse/ClickHouse/pull/53065) ([Nikita Taranov](https://github.com/nickitat)).
* Fix Parquet stats for Float32 and Float64 [#53067](https://github.com/ClickHouse/ClickHouse/pull/53067) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix a comment [#53072](https://github.com/ClickHouse/ClickHouse/pull/53072) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix 02263_format_insert_settings flakiness [#53080](https://github.com/ClickHouse/ClickHouse/pull/53080) ([Azat Khuzhin](https://github.com/azat)).
* Something with tests [#53081](https://github.com/ClickHouse/ClickHouse/pull/53081) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version_date.tsv and changelogs after v23.7.3.14-stable [#53084](https://github.com/ClickHouse/ClickHouse/pull/53084) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Simplify system logs creation [#53085](https://github.com/ClickHouse/ClickHouse/pull/53085) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix hung check in stress test [#53090](https://github.com/ClickHouse/ClickHouse/pull/53090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add clusters for running tests locally easily [#53091](https://github.com/ClickHouse/ClickHouse/pull/53091) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wording [#53092](https://github.com/ClickHouse/ClickHouse/pull/53092) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update README.md [#53097](https://github.com/ClickHouse/ClickHouse/pull/53097) ([Tyler Hannan](https://github.com/tylerhannan)).
* Remove old util [#53099](https://github.com/ClickHouse/ClickHouse/pull/53099) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add optional parameters to Buffer Engine definition [#53102](https://github.com/ClickHouse/ClickHouse/pull/53102) ([Elena Torró](https://github.com/elenatorro)).
* Compatibility with clang-17 [#53104](https://github.com/ClickHouse/ClickHouse/pull/53104) ([Raúl Marín](https://github.com/Algunenano)).
* Remove duplicate test: `test_concurrent_alter_with_ttl_move` [#53107](https://github.com/ClickHouse/ClickHouse/pull/53107) ([alesapin](https://github.com/alesapin)).
* Relax flaky test `test_s3_engine_heavy_write_check_mem` [#53108](https://github.com/ClickHouse/ClickHouse/pull/53108) ([alesapin](https://github.com/alesapin)).
* Update PocoHTTPClient.cpp [#53109](https://github.com/ClickHouse/ClickHouse/pull/53109) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add hints for HTTP handlers [#53110](https://github.com/ClickHouse/ClickHouse/pull/53110) ([Ruslan Mardugalliamov](https://github.com/rmarduga)).
* Revert changes in `ZstdDeflatingAppendableWriteBuffer` [#53111](https://github.com/ClickHouse/ClickHouse/pull/53111) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix flaky test by using the azure_query function [#53113](https://github.com/ClickHouse/ClickHouse/pull/53113) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Update `test_restore_replica` [#53119](https://github.com/ClickHouse/ClickHouse/pull/53119) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not fail if prctl is not allowed ([#43589](https://github.com/ClickHouse/ClickHouse/issues/43589)) [#53122](https://github.com/ClickHouse/ClickHouse/pull/53122) ([ekrasikov](https://github.com/ekrasikov)).
* Use a more unique name for TemporaryFileOnDisk [#53123](https://github.com/ClickHouse/ClickHouse/pull/53123) ([Vitaly Baranov](https://github.com/vitlibar)).
* Update `Mergeable Check` at the finishing CI [#53126](https://github.com/ClickHouse/ClickHouse/pull/53126) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Added retry for TransportException in Azure blob storage [#53128](https://github.com/ClickHouse/ClickHouse/pull/53128) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Small fix for HTTPHeaderFilter [#53146](https://github.com/ClickHouse/ClickHouse/pull/53146) ([San](https://github.com/santrancisco)).
* Added functions to disallow concurrency of backup restore test [#53150](https://github.com/ClickHouse/ClickHouse/pull/53150) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Attempt to fix test_insert_quorum by adding a sync of the second replica [#53155](https://github.com/ClickHouse/ClickHouse/pull/53155) ([vdimir](https://github.com/vdimir)).
* Fix memory leak in RegExpTreeDictionary [#53160](https://github.com/ClickHouse/ClickHouse/pull/53160) ([Han Fei](https://github.com/hanfei1991)).
* Fixes for detach/attach partition and broken detached parts cleanup [#53164](https://github.com/ClickHouse/ClickHouse/pull/53164) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update conftest.py [#53166](https://github.com/ClickHouse/ClickHouse/pull/53166) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Allow experimental features when recovering a Replicated db replica [#53167](https://github.com/ClickHouse/ClickHouse/pull/53167) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update version_date.tsv and changelogs after v23.7.4.5-stable [#53169](https://github.com/ClickHouse/ClickHouse/pull/53169) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Analyzer: fix test_system_flush_logs [#53171](https://github.com/ClickHouse/ClickHouse/pull/53171) ([Dmitry Novik](https://github.com/novikd)).
* Fix warning in test_replicated_database [#53173](https://github.com/ClickHouse/ClickHouse/pull/53173) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix: 00838_unique_index test with analyzer [#53175](https://github.com/ClickHouse/ClickHouse/pull/53175) ([Igor Nikonov](https://github.com/devcrafter)).
* Improved efficiency for array operations [#53193](https://github.com/ClickHouse/ClickHouse/pull/53193) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Improve reading from archives [#53198](https://github.com/ClickHouse/ClickHouse/pull/53198) ([Antonio Andelic](https://github.com/antonio2368)).
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Enable hedged requests under tsan [#53219](https://github.com/ClickHouse/ClickHouse/pull/53219) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove garbage [#53241](https://github.com/ClickHouse/ClickHouse/pull/53241) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix LOGICAL_ERROR exception in ALTER query [#53242](https://github.com/ClickHouse/ClickHouse/pull/53242) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix bad test `00417_kill_query` [#53244](https://github.com/ClickHouse/ClickHouse/pull/53244) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test `02428_delete_with_settings` [#53246](https://github.com/ClickHouse/ClickHouse/pull/53246) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove unrecognizable garbage from the performance test [#53249](https://github.com/ClickHouse/ClickHouse/pull/53249) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable randomization in `02273_full_sort_join` [#53251](https://github.com/ClickHouse/ClickHouse/pull/53251) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove outdated Dockerfile [#53252](https://github.com/ClickHouse/ClickHouse/pull/53252) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve fs cache cleanup [#53273](https://github.com/ClickHouse/ClickHouse/pull/53273) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add garbage [#53279](https://github.com/ClickHouse/ClickHouse/pull/53279) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Inhibit randomization in `00906_low_cardinality_cache` [#53283](https://github.com/ClickHouse/ClickHouse/pull/53283) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test 01169_old_alter_partition_isolation_stress [#53292](https://github.com/ClickHouse/ClickHouse/pull/53292) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Remove no-parallel tag from some tests [#53295](https://github.com/ClickHouse/ClickHouse/pull/53295) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix test `00002_log_and_exception_messages_formatting` [#53296](https://github.com/ClickHouse/ClickHouse/pull/53296) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `01485_256_bit_multiply` [#53297](https://github.com/ClickHouse/ClickHouse/pull/53297) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove flaky tests for the experimental `UNDROP` feature [#53298](https://github.com/ClickHouse/ClickHouse/pull/53298) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added test for session_log using remote and mysql sessions [#53304](https://github.com/ClickHouse/ClickHouse/pull/53304) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Added integration test for session_log using concurrent GRPC/PostgreSQL/MySQL sessions [#53305](https://github.com/ClickHouse/ClickHouse/pull/53305) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Added test for session_log using concurrent TCP/HTTP/MySQL sessions [#53306](https://github.com/ClickHouse/ClickHouse/pull/53306) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Added test for session_log dropping user/role/profile currently used in active session [#53307](https://github.com/ClickHouse/ClickHouse/pull/53307) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Added an integration test for client peak_memory_usage value [#53308](https://github.com/ClickHouse/ClickHouse/pull/53308) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Fix log message [#53339](https://github.com/ClickHouse/ClickHouse/pull/53339) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Analyzer: fix quotas for system tables [#53343](https://github.com/ClickHouse/ClickHouse/pull/53343) ([Dmitry Novik](https://github.com/novikd)).
* Relax mergeable check [#53344](https://github.com/ClickHouse/ClickHouse/pull/53344) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add clickhouse-keeper-client and clickhouse-keeper-converter symlinks to clickhouse-keeper package [#53357](https://github.com/ClickHouse/ClickHouse/pull/53357) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Add linux s390x to universal installer [#53358](https://github.com/ClickHouse/ClickHouse/pull/53358) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Make one exception message longer [#53375](https://github.com/ClickHouse/ClickHouse/pull/53375) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wrong query in log messages check [#53376](https://github.com/ClickHouse/ClickHouse/pull/53376) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Non-significant changes [#53377](https://github.com/ClickHouse/ClickHouse/pull/53377) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Inhibit randomization in more tests [#53378](https://github.com/ClickHouse/ClickHouse/pull/53378) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make some Keeper exceptions more structured [#53379](https://github.com/ClickHouse/ClickHouse/pull/53379) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Follow-up to [#52695](https://github.com/ClickHouse/ClickHouse/issues/52695): Move tests to a more appropriate place [#53400](https://github.com/ClickHouse/ClickHouse/pull/53400) ([Robert Schulze](https://github.com/rschu1ze)).
* Minor fixes (hints for wrong DB or table name) [#53402](https://github.com/ClickHouse/ClickHouse/pull/53402) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Quick fail undocumented features [#53413](https://github.com/ClickHouse/ClickHouse/pull/53413) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* boost getNestedColumnWithDefaultOnNull by insertManyDefaults [#53414](https://github.com/ClickHouse/ClickHouse/pull/53414) ([frinkr](https://github.com/frinkr)).
* Update test_distributed_inter_server_secret to pass with analyzer [#53416](https://github.com/ClickHouse/ClickHouse/pull/53416) ([vdimir](https://github.com/vdimir)).
* Parallel replicas: remove unnecessary code [#53419](https://github.com/ClickHouse/ClickHouse/pull/53419) ([Igor Nikonov](https://github.com/devcrafter)).
* Refactorings for configuration of in-memory caches [#53422](https://github.com/ClickHouse/ClickHouse/pull/53422) ([Robert Schulze](https://github.com/rschu1ze)).
* Fewer exceptions with runtime format string [#53424](https://github.com/ClickHouse/ClickHouse/pull/53424) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Analyzer: fix virtual columns in StorageDistributed [#53426](https://github.com/ClickHouse/ClickHouse/pull/53426) ([Dmitry Novik](https://github.com/novikd)).
* Fix creation of empty parts [#53429](https://github.com/ClickHouse/ClickHouse/pull/53429) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Merging [#53177](https://github.com/ClickHouse/ClickHouse/issues/53177) [#53430](https://github.com/ClickHouse/ClickHouse/pull/53430) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Merging [#53142](https://github.com/ClickHouse/ClickHouse/issues/53142) [#53431](https://github.com/ClickHouse/ClickHouse/pull/53431) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not send logs to CI if the credentials are not set [#53441](https://github.com/ClickHouse/ClickHouse/pull/53441) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Minor: Factorize constants in Annoy index [#53444](https://github.com/ClickHouse/ClickHouse/pull/53444) ([Robert Schulze](https://github.com/rschu1ze)).
* Restart killed PublishedReleaseCI workflows [#53445](https://github.com/ClickHouse/ClickHouse/pull/53445) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Follow-up: Do not send logs to CI if the credentials are not set [#53456](https://github.com/ClickHouse/ClickHouse/pull/53456) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Merging [#53307](https://github.com/ClickHouse/ClickHouse/issues/53307) [#53472](https://github.com/ClickHouse/ClickHouse/pull/53472) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Merging [#53306](https://github.com/ClickHouse/ClickHouse/issues/53306) [#53473](https://github.com/ClickHouse/ClickHouse/pull/53473) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Merging [#53304](https://github.com/ClickHouse/ClickHouse/issues/53304) [#53474](https://github.com/ClickHouse/ClickHouse/pull/53474) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Merging [#53373](https://github.com/ClickHouse/ClickHouse/issues/53373) [#53475](https://github.com/ClickHouse/ClickHouse/pull/53475) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flaky test `02443_detach_attach_partition` [#53478](https://github.com/ClickHouse/ClickHouse/pull/53478) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Remove outdated code in ReplicatedMergeTreeQueue::initialize() [#53484](https://github.com/ClickHouse/ClickHouse/pull/53484) ([Azat Khuzhin](https://github.com/azat)).
* krb5: Fix CVE-2023-36054 [#53485](https://github.com/ClickHouse/ClickHouse/pull/53485) ([Robert Schulze](https://github.com/rschu1ze)).
* curl: update to latest master (fixes CVE-2023-32001) [#53487](https://github.com/ClickHouse/ClickHouse/pull/53487) ([Robert Schulze](https://github.com/rschu1ze)).
* Update boost to 1.79 [#53490](https://github.com/ClickHouse/ClickHouse/pull/53490) ([Robert Schulze](https://github.com/rschu1ze)).
* Get rid of secrets CLICKHOUSE_CI_LOGS [#53491](https://github.com/ClickHouse/ClickHouse/pull/53491) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update style checker [#53493](https://github.com/ClickHouse/ClickHouse/pull/53493) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update materialized_with_ddl.py [#53494](https://github.com/ClickHouse/ClickHouse/pull/53494) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix a race condition between RESTART REPLICAS and DROP DATABASE [#53495](https://github.com/ClickHouse/ClickHouse/pull/53495) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix tiny thing in Replicated database [#53496](https://github.com/ClickHouse/ClickHouse/pull/53496) ([Nikolay Degterinsky](https://github.com/evillique)).
* Simplify performance test [#53499](https://github.com/ClickHouse/ClickHouse/pull/53499) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added waiting for the PostgreSQL compatibility port to open in integration tests. [#53505](https://github.com/ClickHouse/ClickHouse/pull/53505) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Allow non-standalone keeper run in integration tests [#53512](https://github.com/ClickHouse/ClickHouse/pull/53512) ([Duc Canh Le](https://github.com/canhld94)).
* Make sending logs to the cloud less fragile (and fix an unrelated flaky test) [#53528](https://github.com/ClickHouse/ClickHouse/pull/53528) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update test.py [#53534](https://github.com/ClickHouse/ClickHouse/pull/53534) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `AddressSanitizer failed to allocate 0x0 (0) bytes of SetAlternateSignalStack` in integration tests [#53535](https://github.com/ClickHouse/ClickHouse/pull/53535) ([Nikita Taranov](https://github.com/nickitat)).
* Fix keeper default path check [#53539](https://github.com/ClickHouse/ClickHouse/pull/53539) ([pufit](https://github.com/pufit)).
* Follow-up to [#53528](https://github.com/ClickHouse/ClickHouse/issues/53528) [#53544](https://github.com/ClickHouse/ClickHouse/pull/53544) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update 00002_log_and_exception_messages_formatting.sql [#53545](https://github.com/ClickHouse/ClickHouse/pull/53545) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update krb5 to 1.21.2 [#53552](https://github.com/ClickHouse/ClickHouse/pull/53552) ([Robert Schulze](https://github.com/rschu1ze)).
* Enable ISA-L on x86-64 only by default [#53553](https://github.com/ClickHouse/ClickHouse/pull/53553) ([ltrk2](https://github.com/ltrk2)).
* Change Big Endian-UUID to work the same as Little Endian-UUID [#53556](https://github.com/ClickHouse/ClickHouse/pull/53556) ([Austin Kothig](https://github.com/kothiga)).
* Bump openldap to LTS version (v2.5.16) [#53558](https://github.com/ClickHouse/ClickHouse/pull/53558) ([Robert Schulze](https://github.com/rschu1ze)).
* Update 02443_detach_attach_partition.sh [#53564](https://github.com/ClickHouse/ClickHouse/pull/53564) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Proper destruction of task in ShellCommandSource [#53573](https://github.com/ClickHouse/ClickHouse/pull/53573) ([Amos Bird](https://github.com/amosbird)).
* Fix for flaky test_ssl_cert_authentication [#53586](https://github.com/ClickHouse/ClickHouse/pull/53586) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* AARCH64 Neon memequal wide [#53588](https://github.com/ClickHouse/ClickHouse/pull/53588) ([Maksim Kita](https://github.com/kitaisreal)).
* Experiment Aggregator merge and destroy states in batch [#53589](https://github.com/ClickHouse/ClickHouse/pull/53589) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix test `02102_row_binary_with_names_and_types` [#53592](https://github.com/ClickHouse/ClickHouse/pull/53592) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove useless test [#53599](https://github.com/ClickHouse/ClickHouse/pull/53599) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Simplify test `01600_parts_types_metrics_long` [#53606](https://github.com/ClickHouse/ClickHouse/pull/53606) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* [S3::URI] Fix comment typos around versionId [#53607](https://github.com/ClickHouse/ClickHouse/pull/53607) ([Tomáš Hromada](https://github.com/gyfis)).
* Fix upgrade check [#53611](https://github.com/ClickHouse/ClickHouse/pull/53611) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Cleanup cluster test: remove unnecessary zookeeper [#53617](https://github.com/ClickHouse/ClickHouse/pull/53617) ([Igor Nikonov](https://github.com/devcrafter)).
* Bump boost to 1.80 [#53625](https://github.com/ClickHouse/ClickHouse/pull/53625) ([Robert Schulze](https://github.com/rschu1ze)).
* Update version_date.tsv and changelogs after v23.3.9.55-lts [#53626](https://github.com/ClickHouse/ClickHouse/pull/53626) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* CMake small refactoring [#53628](https://github.com/ClickHouse/ClickHouse/pull/53628) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix data race of shell command [#53631](https://github.com/ClickHouse/ClickHouse/pull/53631) ([Amos Bird](https://github.com/amosbird)).
* Fix 02443_detach_attach_partition [#53633](https://github.com/ClickHouse/ClickHouse/pull/53633) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add default timeout value for ClickHouseHelper [#53639](https://github.com/ClickHouse/ClickHouse/pull/53639) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Implement support for more aggregate functions on big-endian [#53650](https://github.com/ClickHouse/ClickHouse/pull/53650) ([ltrk2](https://github.com/ltrk2)).
* fix Logical Error in AsynchronousBoundedReadBuffer [#53651](https://github.com/ClickHouse/ClickHouse/pull/53651) ([Sema Checherinda](https://github.com/CheSema)).
* State of State and avg aggregation function fix for big endian [#53655](https://github.com/ClickHouse/ClickHouse/pull/53655) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* Resubmit [#50171](https://github.com/ClickHouse/ClickHouse/issues/50171) [#53678](https://github.com/ClickHouse/ClickHouse/pull/53678) ([alesapin](https://github.com/alesapin)).
* Bump boost to 1.81 [#53679](https://github.com/ClickHouse/ClickHouse/pull/53679) ([Robert Schulze](https://github.com/rschu1ze)).
* Whitespaces [#53690](https://github.com/ClickHouse/ClickHouse/pull/53690) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove bad test [#53691](https://github.com/ClickHouse/ClickHouse/pull/53691) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad path format in logs [#53693](https://github.com/ClickHouse/ClickHouse/pull/53693) ([alesapin](https://github.com/alesapin)).
* Correct a functional test to not use endianness-specific input [#53697](https://github.com/ClickHouse/ClickHouse/pull/53697) ([ltrk2](https://github.com/ltrk2)).
* Fix running clickhouse-test with python 3.8 [#53700](https://github.com/ClickHouse/ClickHouse/pull/53700) ([Dmitry Novik](https://github.com/novikd)).
* refactor some old code [#53704](https://github.com/ClickHouse/ClickHouse/pull/53704) ([flynn](https://github.com/ucasfl)).
* Fixed wrong python test name pattern [#53713](https://github.com/ClickHouse/ClickHouse/pull/53713) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Fix flaky `shutdown_wait_unfinished_queries` integration test [#53714](https://github.com/ClickHouse/ClickHouse/pull/53714) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Update version_date.tsv and changelogs after v23.3.10.5-lts [#53733](https://github.com/ClickHouse/ClickHouse/pull/53733) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix flaky test_storage_s3_queue/test.py::test_delete_after_processing [#53736](https://github.com/ClickHouse/ClickHouse/pull/53736) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix broken `02862_sorted_distinct_sparse_fix` [#53738](https://github.com/ClickHouse/ClickHouse/pull/53738) ([Antonio Andelic](https://github.com/antonio2368)).
* Do not warn about arch_sys_counter clock [#53739](https://github.com/ClickHouse/ClickHouse/pull/53739) ([Artur Malchanau](https://github.com/Hexta)).
* Add some profile events [#53741](https://github.com/ClickHouse/ClickHouse/pull/53741) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support clang-18 (Wmissing-field-initializers) [#53751](https://github.com/ClickHouse/ClickHouse/pull/53751) ([Raúl Marín](https://github.com/Algunenano)).
* Upgrade openSSL to v3.0.10 [#53756](https://github.com/ClickHouse/ClickHouse/pull/53756) ([bhavnajindal](https://github.com/bhavnajindal)).
* Improve JSON-handling on s390x [#53760](https://github.com/ClickHouse/ClickHouse/pull/53760) ([ltrk2](https://github.com/ltrk2)).
* Reduce API calls to SSM client [#53762](https://github.com/ClickHouse/ClickHouse/pull/53762) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove branch references from .gitmodules [#53763](https://github.com/ClickHouse/ClickHouse/pull/53763) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix reading from `url` with all filtered paths [#53796](https://github.com/ClickHouse/ClickHouse/pull/53796) ([Antonio Andelic](https://github.com/antonio2368)).
* Follow-up to [#53611](https://github.com/ClickHouse/ClickHouse/issues/53611) [#53799](https://github.com/ClickHouse/ClickHouse/pull/53799) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix a bug in attach partition [#53811](https://github.com/ClickHouse/ClickHouse/pull/53811) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Bump boost to 1.82 [#53812](https://github.com/ClickHouse/ClickHouse/pull/53812) ([Robert Schulze](https://github.com/rschu1ze)).
* Enable producing endianness-independent output in lz4 [#53816](https://github.com/ClickHouse/ClickHouse/pull/53816) ([ltrk2](https://github.com/ltrk2)).
* Fix typo in cluster name. [#53829](https://github.com/ClickHouse/ClickHouse/pull/53829) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Get rid of describe_parameters for the best robot token [#53833](https://github.com/ClickHouse/ClickHouse/pull/53833) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update 00002_log_and_exception_messages_formatting.sql [#53839](https://github.com/ClickHouse/ClickHouse/pull/53839) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix after [#51622](https://github.com/ClickHouse/ClickHouse/issues/51622) [#53840](https://github.com/ClickHouse/ClickHouse/pull/53840) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix segfault in `TableNameHints` (with `Lazy` database) [#53849](https://github.com/ClickHouse/ClickHouse/pull/53849) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Follow-up to [#53501](https://github.com/ClickHouse/ClickHouse/issues/53501) [#53851](https://github.com/ClickHouse/ClickHouse/pull/53851) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Follow-up to [#53528](https://github.com/ClickHouse/ClickHouse/issues/53528) [#53852](https://github.com/ClickHouse/ClickHouse/pull/53852) ([Alexander Tokmakov](https://github.com/tavplubix)).
* refactor some code [#53856](https://github.com/ClickHouse/ClickHouse/pull/53856) ([flynn](https://github.com/ucasfl)).
* Bump boost to 1.83 [#53859](https://github.com/ClickHouse/ClickHouse/pull/53859) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove unused parallel replicas coordinator in query info [#53862](https://github.com/ClickHouse/ClickHouse/pull/53862) ([Igor Nikonov](https://github.com/devcrafter)).
* Update version_date.tsv and changelogs after v23.7.5.30-stable [#53870](https://github.com/ClickHouse/ClickHouse/pull/53870) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.6.3.87-stable [#53872](https://github.com/ClickHouse/ClickHouse/pull/53872) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.3.11.5-lts [#53873](https://github.com/ClickHouse/ClickHouse/pull/53873) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.5.5.92-stable [#53874](https://github.com/ClickHouse/ClickHouse/pull/53874) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.8.21.38-lts [#53875](https://github.com/ClickHouse/ClickHouse/pull/53875) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Fix: USearch deserialize [#53876](https://github.com/ClickHouse/ClickHouse/pull/53876) ([Davit Vardanyan](https://github.com/davvard)).
|
||||||
|
* Improve schema inference for archives [#53880](https://github.com/ClickHouse/ClickHouse/pull/53880) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Make UInt128TrivialHash endianness-independent [#53891](https://github.com/ClickHouse/ClickHouse/pull/53891) ([ltrk2](https://github.com/ltrk2)).
|
||||||
|
* Use iterators instead of std::ranges [#53893](https://github.com/ClickHouse/ClickHouse/pull/53893) ([ltrk2](https://github.com/ltrk2)).
|
||||||
|
* Finalize file descriptor in ~WriteBufferToFileSegment [#53895](https://github.com/ClickHouse/ClickHouse/pull/53895) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Fix: respect skip_unavailable_shards with parallel replicas [#53904](https://github.com/ClickHouse/ClickHouse/pull/53904) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Fix flakiness of 00514_interval_operators [#53906](https://github.com/ClickHouse/ClickHouse/pull/53906) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Change IStorage interface by random walk, no goal in particular [#54009](https://github.com/ClickHouse/ClickHouse/pull/54009) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Refactor logic around async insert with deduplication [#54012](https://github.com/ClickHouse/ClickHouse/pull/54012) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* More assertive [#54044](https://github.com/ClickHouse/ClickHouse/pull/54044) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Correct doc for filesystem_prefetch_max_memory_usage [#54058](https://github.com/ClickHouse/ClickHouse/pull/54058) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fix after [#52943](https://github.com/ClickHouse/ClickHouse/issues/52943) [#54064](https://github.com/ClickHouse/ClickHouse/pull/54064) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Parse IS NOT DISTINCT and <=> operators [#54067](https://github.com/ClickHouse/ClickHouse/pull/54067) ([vdimir](https://github.com/vdimir)).
|
||||||
|
* Replace dlcdn.apache.org by archive domain [#54081](https://github.com/ClickHouse/ClickHouse/pull/54081) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Increased log waiting timeout in test_profile_max_sessions_for_user [#54092](https://github.com/ClickHouse/ClickHouse/pull/54092) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||||
|
* Update Dockerfile [#54118](https://github.com/ClickHouse/ClickHouse/pull/54118) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Small improvements in `getAlterMutationCommandsForPart` [#54126](https://github.com/ClickHouse/ClickHouse/pull/54126) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fix some more analyzer tests [#54128](https://github.com/ClickHouse/ClickHouse/pull/54128) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Disable `01600_parts_types_metrics_long` for asan [#54132](https://github.com/ClickHouse/ClickHouse/pull/54132) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fixing 01086_odbc_roundtrip with analyzer. [#54133](https://github.com/ClickHouse/ClickHouse/pull/54133) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Add warnings about ingestion script speed and memory usage in Laion dataset instructions [#54153](https://github.com/ClickHouse/ClickHouse/pull/54153) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* tests: mark 02152_http_external_tables_memory_tracking as no-parallel [#54155](https://github.com/ClickHouse/ClickHouse/pull/54155) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* The external logs have had colliding arguments [#54165](https://github.com/ClickHouse/ClickHouse/pull/54165) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Rename macro [#54169](https://github.com/ClickHouse/ClickHouse/pull/54169) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
@ -163,6 +163,61 @@ Result:
│ 4 │ -4 │ 4 │
└───┴────┴─────┘
```

## NULL values in JOIN keys

`NULL` is not equal to any value, including itself. This means that if a JOIN key has a `NULL` value in one table, it won't match a `NULL` value in the other table.

**Example**

Table `A`:

```
┌───id─┬─name────┐
│    1 │ Alice   │
│    2 │ Bob     │
│ ᴺᵁᴸᴸ │ Charlie │
└──────┴─────────┘
```

Table `B`:

```
┌───id─┬─score─┐
│    1 │    90 │
│    3 │    85 │
│ ᴺᵁᴸᴸ │    88 │
└──────┴───────┘
```

```sql
SELECT A.name, B.score FROM A LEFT JOIN B ON A.id = B.id
```

```
┌─name────┬─score─┐
│ Alice   │    90 │
│ Bob     │     0 │
│ Charlie │     0 │
└─────────┴───────┘
```

Notice that the row with `Charlie` from table `A` and the row with score 88 from table `B` are not in the result, because of the `NULL` values in their JOIN keys.

If you want to match `NULL` values, use the `isNotDistinctFrom` function to compare the JOIN keys:

```sql
SELECT A.name, B.score FROM A LEFT JOIN B ON isNotDistinctFrom(A.id, B.id)
```

```
┌─name────┬─score─┐
│ Alice   │    90 │
│ Bob     │     0 │
│ Charlie │    88 │
└─────────┴───────┘
```

## ASOF JOIN Usage

`ASOF JOIN` is useful when you need to join records that have no exact match.
@ -59,7 +59,7 @@ public:
    String relative_path_from = validatePathAndGetAsRelative(path_from);
    String relative_path_to = validatePathAndGetAsRelative(path_to);

    disk_from->copyDirectoryContent(relative_path_from, disk_to, relative_path_to);
    disk_from->copyDirectoryContent(relative_path_from, disk_to, relative_path_to, /* settings= */ {});
}
};
}
@ -174,7 +174,7 @@ if (BUILD_STANDALONE_KEEPER)
clickhouse_add_executable(clickhouse-keeper ${CLICKHOUSE_KEEPER_STANDALONE_SOURCES})

# Remove some redundant dependencies
target_compile_definitions (clickhouse-keeper PRIVATE -DCLICKHOUSE_PROGRAM_STANDALONE_BUILD)
target_compile_definitions (clickhouse-keeper PRIVATE -DCLICKHOUSE_KEEPER_STANDALONE_BUILD)
target_compile_definitions (clickhouse-keeper PUBLIC -DWITHOUT_TEXT_LOG)

if (ENABLE_CLICKHOUSE_KEEPER_CLIENT AND TARGET ch_rust::skim)
@ -67,7 +67,7 @@ int mainEntryClickHouseKeeper(int argc, char ** argv)
    }
}

#ifdef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
#ifdef CLICKHOUSE_KEEPER_STANDALONE_BUILD

// Weak symbols don't work correctly on Darwin
// so we have a stub implementation to avoid linker errors
@ -109,24 +109,12 @@ private:
    inline size_t max_fill() const { return 1ULL << (size_degree - 1); } /// NOLINT
    inline size_t mask() const { return buf_size() - 1; }

    inline size_t place(HashValue x) const
    {
        if constexpr (std::endian::native == std::endian::little)
            return (x >> UNIQUES_HASH_BITS_FOR_SKIP) & mask();
        else
            return (std::byteswap(x) >> UNIQUES_HASH_BITS_FOR_SKIP) & mask();
    }
    inline size_t place(HashValue x) const { return (x >> UNIQUES_HASH_BITS_FOR_SKIP) & mask(); }

    /// The value is divided by 2 ^ skip_degree
    inline bool good(HashValue hash) const
    {
        return hash == ((hash >> skip_degree) << skip_degree);
    }
    inline bool good(HashValue hash) const { return hash == ((hash >> skip_degree) << skip_degree); }

    HashValue hash(Value key) const
    {
        return static_cast<HashValue>(Hash()(key));
    }
    HashValue hash(Value key) const { return static_cast<HashValue>(Hash()(key)); }

    /// Delete all values whose hashes do not divide by 2 ^ skip_degree
    void rehash()
@ -338,11 +326,7 @@ public:

    void ALWAYS_INLINE insert(Value x)
    {
        HashValue hash_value;
        if constexpr (std::endian::native == std::endian::little)
            hash_value = hash(x);
        else
            hash_value = std::byteswap(hash(x));
        const HashValue hash_value = hash(x);
        if (!good(hash_value))
            return;

@ -403,25 +387,25 @@ public:
        if (m_size > UNIQUES_HASH_MAX_SIZE)
            throw Poco::Exception("Cannot write UniquesHashSet: too large size_degree.");

        DB::writeIntBinary(skip_degree, wb);
        DB::writeBinaryLittleEndian(skip_degree, wb);
        DB::writeVarUInt(m_size, wb);

        if (has_zero)
        {
            HashValue x = 0;
            DB::writeIntBinary(x, wb);
            DB::writeBinaryLittleEndian(x, wb);
        }

        for (size_t i = 0; i < buf_size(); ++i)
            if (buf[i])
                DB::writeIntBinary(buf[i], wb);
                DB::writeBinaryLittleEndian(buf[i], wb);
    }

    void read(DB::ReadBuffer & rb)
    {
        has_zero = false;

        DB::readIntBinary(skip_degree, rb);
        DB::readBinaryLittleEndian(skip_degree, rb);
        DB::readVarUInt(m_size, rb);

        if (m_size > UNIQUES_HASH_MAX_SIZE)
@ -440,7 +424,7 @@ public:
        for (size_t i = 0; i < m_size; ++i)
        {
            HashValue x = 0;
            DB::readIntBinary(x, rb);
            DB::readBinaryLittleEndian(x, rb);
            if (x == 0)
                has_zero = true;
            else
@ -454,6 +438,7 @@ public:

        for (size_t i = 0; i < m_size; ++i)
        {
            DB::transformEndianness<std::endian::native, std::endian::little>(hs[i]);
            if (hs[i] == 0)
                has_zero = true;
            else
@ -465,7 +450,7 @@ public:
    void readAndMerge(DB::ReadBuffer & rb)
    {
        UInt8 rhs_skip_degree = 0;
        DB::readIntBinary(rhs_skip_degree, rb);
        DB::readBinaryLittleEndian(rhs_skip_degree, rb);

        if (rhs_skip_degree > skip_degree)
        {
@ -490,7 +475,7 @@ public:
        for (size_t i = 0; i < rhs_size; ++i)
        {
            HashValue x = 0;
            DB::readIntBinary(x, rb);
            DB::readBinaryLittleEndian(x, rb);
            insertHash(x);
        }
    }
@ -501,6 +486,7 @@ public:

        for (size_t i = 0; i < rhs_size; ++i)
        {
            DB::transformEndianness<std::endian::native, std::endian::little>(hs[i]);
            insertHash(hs[i]);
        }
    }
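The UniquesHashSet hunks above move endianness handling out of hash placement and into (de)serialization: the on-disk format is always little-endian, and big-endian hosts convert only at the read/write boundary. A minimal standalone sketch of that pattern (names are hypothetical, not the actual `writeBinaryLittleEndian`/`readBinaryLittleEndian` implementations; assumes a C++23 compiler for `std::byteswap`):

```cpp
#include <bit>
#include <cstdint>
#include <cstring>
#include <vector>

/// Convert between host byte order and little-endian.
/// A no-op on little-endian hosts; big-endian hosts swap bytes.
inline uint64_t toLittleEndian(uint64_t value)
{
    if constexpr (std::endian::native == std::endian::big)
        return std::byteswap(value);
    return value;
}

/// Write a value so the serialized bytes are identical on every host.
inline void writeBinaryLE(uint64_t value, std::vector<char> & out)
{
    value = toLittleEndian(value);
    const char * bytes = reinterpret_cast<const char *>(&value);
    out.insert(out.end(), bytes, bytes + sizeof(value));
}

/// Read a little-endian value back into host byte order.
inline uint64_t readBinaryLE(const char * in)
{
    uint64_t value;
    std::memcpy(&value, in, sizeof(value));
    return toLittleEndian(value);
}
```

With normalization confined to the serialization boundary, in-memory code such as `place()` and `good()` can work on plain host integers.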
@ -164,10 +164,10 @@ void ConfigReloader::reloadIfNewer(bool force, bool throw_on_error, bool fallbac
struct ConfigReloader::FileWithTimestamp
{
    std::string path;
    time_t modification_time;
    fs::file_time_type modification_time;

    FileWithTimestamp(const std::string & path_, time_t modification_time_)
    explicit FileWithTimestamp(const std::string & path_)
        : path(path_), modification_time(modification_time_) {}
        : path(path_), modification_time(fs::last_write_time(path_)) {}

    bool operator < (const FileWithTimestamp & rhs) const
    {
@ -184,7 +184,7 @@ struct ConfigReloader::FileWithTimestamp
void ConfigReloader::FilesChangesTracker::addIfExists(const std::string & path_to_add)
{
    if (!path_to_add.empty() && fs::exists(path_to_add))
        files.emplace(path_to_add, FS::getModificationTime(path_to_add));
        files.emplace(path_to_add);
}

bool ConfigReloader::FilesChangesTracker::isDifferOrNewerThan(const FilesChangesTracker & rhs)
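The ConfigReloader change above replaces a hand-rolled `time_t` lookup with `std::filesystem` timestamps fetched in the constructor. A minimal sketch of the change-tracking idea (type names hypothetical; assumes C++20 for the defaulted three-way comparison):

```cpp
#include <filesystem>
#include <set>
#include <string>

namespace fs = std::filesystem;

/// A config file together with its last-modification time.
/// The defaulted comparison provides both the ordering std::set needs
/// and the equality used to detect changes.
struct TrackedFile
{
    std::string path;
    fs::file_time_type modification_time;

    explicit TrackedFile(const std::string & path_)
        : path(path_), modification_time(fs::last_write_time(path_)) {}

    auto operator<=>(const TrackedFile &) const = default;
};

/// A reload is needed whenever a file was added, removed, or touched.
bool needsReload(const std::set<TrackedFile> & before, const std::set<TrackedFile> & after)
{
    return before != after;
}
```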
@ -200,11 +200,7 @@ public:
    ALWAYS_INLINE UInt128 get128()
    {
        UInt128 res;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        get128(res.items[1], res.items[0]);
#else
        get128(res.items[0], res.items[1]);
#endif
        get128(res.items[UInt128::_impl::little(0)], res.items[UInt128::_impl::little(1)]);
        return res;
    }

@ -214,20 +210,13 @@ public:
            throw DB::Exception(
                DB::ErrorCodes::LOGICAL_ERROR, "Logical error: can't call get128Reference when is_reference_128 is not set");
        finalize();
        auto lo = v0 ^ v1 ^ v2 ^ v3;
        const auto lo = v0 ^ v1 ^ v2 ^ v3;
        v1 ^= 0xdd;
        SIPROUND;
        SIPROUND;
        SIPROUND;
        SIPROUND;
        auto hi = v0 ^ v1 ^ v2 ^ v3;
        const auto hi = v0 ^ v1 ^ v2 ^ v3;

        if constexpr (std::endian::native == std::endian::big)
        {
            lo = std::byteswap(lo);
            hi = std::byteswap(hi);
            std::swap(lo, hi);
        }

        UInt128 res = hi;
        res <<= 64;
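`UInt128::_impl::little(i)` is an index-mapping helper: it converts the logical position of a 64-bit half (0 = low, 1 = high, in little-endian terms) into the physical array index for the current host, so call sites don't need preprocessor branches. A sketch of the idea (type and member names are hypothetical, not ClickHouse's actual wide-integer internals):

```cpp
#include <bit>
#include <cstddef>
#include <cstdint>

/// A 128-bit value stored as two 64-bit items in host memory order.
struct UInt128Sketch
{
    uint64_t items[2];

    /// Map a logical little-endian item index to the physical index.
    static constexpr std::size_t little(std::size_t idx)
    {
        if constexpr (std::endian::native == std::endian::little)
            return idx;
        else
            return 1 - idx; /// big-endian hosts store the halves reversed
    }

    uint64_t & low() { return items[little(0)]; }
    uint64_t & high() { return items[little(1)]; }
};
```

The one-line `get128()` above applies exactly this mapping, replacing the previous `__BYTE_ORDER__` branch.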
@ -172,7 +172,7 @@ void registerCodecDeflateQpl(CompressionCodecFactory & factory);

/// Keeper use only general-purpose codecs, so we don't need these special codecs
/// in standalone build
#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
#ifndef CLICKHOUSE_KEEPER_STANDALONE_BUILD
void registerCodecDelta(CompressionCodecFactory & factory);
void registerCodecT64(CompressionCodecFactory & factory);
void registerCodecDoubleDelta(CompressionCodecFactory & factory);
@ -189,7 +189,7 @@ CompressionCodecFactory::CompressionCodecFactory()
    registerCodecZSTD(*this);
    registerCodecLZ4HC(*this);
    registerCodecMultiple(*this);
#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
#ifndef CLICKHOUSE_KEEPER_STANDALONE_BUILD
    registerCodecDelta(*this);
    registerCodecT64(*this);
    registerCodecDoubleDelta(*this);
@ -106,7 +106,7 @@ class IColumn;
    M(UInt64, s3_retry_attempts, 10, "Setting for Aws::Client::RetryStrategy, Aws::Client does retries itself, 0 means no retries", 0) \
    M(UInt64, s3_request_timeout_ms, 3000, "Idleness timeout for sending and receiving data to/from S3. Fail if a single TCP read or write call blocks for this long.", 0) \
    M(Bool, enable_s3_requests_logging, false, "Enable very explicit logging of S3 requests. Makes sense for debug only.", 0) \
    M(String, s3queue_default_zookeeper_path, "/s3queue/", "Default zookeeper path prefix for S3Queue engine", 0) \
    M(String, s3queue_default_zookeeper_path, "/clickhouse/s3queue/", "Default zookeeper path prefix for S3Queue engine", 0) \
    M(UInt64, hdfs_replication, 0, "The actual number of replications can be specified when the hdfs file is created.", 0) \
    M(Bool, hdfs_truncate_on_insert, false, "Enables or disables truncate before insert in s3 engine tables", 0) \
    M(Bool, hdfs_create_new_file_on_insert, false, "Enables or disables creating a new file on each insert in hdfs engine tables", 0) \
@ -337,7 +337,7 @@ void SettingFieldString::readBinary(ReadBuffer & in)
/// that. The linker does not complain only because clickhouse-keeper does not call any of below
/// functions. A cleaner alternative would be more modular libraries, e.g. one for data types, which
/// could then be linked by the server and the linker.
#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
#ifndef CLICKHOUSE_KEEPER_STANDALONE_BUILD

SettingFieldMap::SettingFieldMap(const Field & f) : value(fieldToMap(f)) {}

@ -247,7 +247,7 @@ struct SettingFieldString
    void readBinary(ReadBuffer & in);
};

#ifdef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
#ifdef CLICKHOUSE_KEEPER_STANDALONE_BUILD
#define NORETURN [[noreturn]]
#else
#define NORETURN
@ -466,7 +466,7 @@ private:
        if (collectCrashLog)
            collectCrashLog(sig, thread_num, query_id, stack_trace);

#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
#ifndef CLICKHOUSE_KEEPER_STANDALONE_BUILD
        Context::getGlobalContextInstance()->handleCrash();
#endif

@ -501,7 +501,7 @@ private:
        }

        /// ClickHouse Keeper does not link to some part of Settings.
#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
#ifndef CLICKHOUSE_KEEPER_STANDALONE_BUILD
        /// List changed settings.
        if (!query_id.empty())
        {
@ -16,7 +16,7 @@
#include "config.h"
#include "config_version.h"

#if USE_SENTRY && !defined(CLICKHOUSE_PROGRAM_STANDALONE_BUILD)
#if USE_SENTRY && !defined(CLICKHOUSE_KEEPER_STANDALONE_BUILD)

# include <sentry.h>
# include <cstdio>
@ -324,7 +324,7 @@ ReservationPtr DiskEncrypted::reserve(UInt64 bytes)
}


void DiskEncrypted::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir)
void DiskEncrypted::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir, const WriteSettings & settings)
{
    /// Check if we can copy the file without deciphering.
    if (isSameDiskType(*this, *to_disk))
@ -340,14 +340,14 @@ void DiskEncrypted::copyDirectoryContent(const String & from_dir, const std::sha
            auto wrapped_from_path = wrappedPath(from_dir);
            auto to_delegate = to_disk_enc->delegate;
            auto wrapped_to_path = to_disk_enc->wrappedPath(to_dir);
            delegate->copyDirectoryContent(wrapped_from_path, to_delegate, wrapped_to_path);
            delegate->copyDirectoryContent(wrapped_from_path, to_delegate, wrapped_to_path, settings);
            return;
        }
    }

    /// Copy the file through buffers with deciphering.
    IDisk::copyDirectoryContent(from_dir, to_disk, to_dir);
    IDisk::copyDirectoryContent(from_dir, to_disk, to_dir, settings);
}

std::unique_ptr<ReadBufferFromFileBase> DiskEncrypted::readFile(
@ -112,7 +112,7 @@ public:
        delegate->listFiles(wrapped_path, file_names);
    }

    void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir) override;
    void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir, const WriteSettings & settings) override;

    std::unique_ptr<ReadBufferFromFileBase> readFile(
        const String & path,
@ -53,11 +53,11 @@ String DiskEncryptedSettings::findKeyByFingerprint(UInt128 key_fingerprint, cons
    return it->second;
}

void DiskEncryptedTransaction::copyFile(const std::string & from_file_path, const std::string & to_file_path)
void DiskEncryptedTransaction::copyFile(const std::string & from_file_path, const std::string & to_file_path, const WriteSettings & settings)
{
    auto wrapped_from_path = wrappedPath(from_file_path);
    auto wrapped_to_path = wrappedPath(to_file_path);
    delegate_transaction->copyFile(wrapped_from_path, wrapped_to_path);
    delegate_transaction->copyFile(wrapped_from_path, wrapped_to_path, settings);
}

std::unique_ptr<WriteBufferFromFileBase> DiskEncryptedTransaction::writeFile( // NOLINT
@ -116,7 +116,7 @@ public:
    /// but it's impossible to implement correctly in transactions because other disk can
    /// use different metadata storage.
    /// TODO: maybe remove it at all, we don't want copies
    void copyFile(const std::string & from_file_path, const std::string & to_file_path) override;
    void copyFile(const std::string & from_file_path, const std::string & to_file_path, const WriteSettings & settings) override;

    /// Open the file for write and return WriteBufferFromFileBase object.
    std::unique_ptr<WriteBufferFromFileBase> writeFile( /// NOLINT
@ -432,12 +432,13 @@ bool inline isSameDiskType(const IDisk & one, const IDisk & another)
    return typeid(one) == typeid(another);
}

void DiskLocal::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir)
void DiskLocal::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir, const WriteSettings & settings)
{
    if (isSameDiskType(*this, *to_disk))
    /// If throttling was configured we cannot use copying directly.
    if (isSameDiskType(*this, *to_disk) && !settings.local_throttler)
        fs::copy(fs::path(disk_path) / from_dir, fs::path(to_disk->getPath()) / to_dir, fs::copy_options::recursive | fs::copy_options::overwrite_existing); /// Use more optimal way.
    else
        IDisk::copyDirectoryContent(from_dir, to_disk, to_dir);
        IDisk::copyDirectoryContent(from_dir, to_disk, to_dir, settings);
}

SyncGuardPtr DiskLocal::getDirectorySyncGuard(const String & path) const
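The guard added above matters because `fs::copy` bypasses the buffered I/O layer, so a configured bandwidth throttler would never see the copied bytes. A sketch of the same decision under those assumptions (the `CopySettings` type and its throttler callback are hypothetical, not the actual `WriteSettings`):

```cpp
#include <filesystem>
#include <fstream>
#include <functional>

namespace fs = std::filesystem;

struct CopySettings
{
    /// Called with the number of bytes about to be written; empty = unthrottled.
    std::function<void(std::size_t)> local_throttler;
};

void copyDirectory(const fs::path & from, const fs::path & to, const CopySettings & settings)
{
    if (!settings.local_throttler)
    {
        /// Fast path: let the OS copy; no byte accounting needed.
        fs::copy(from, to, fs::copy_options::recursive | fs::copy_options::overwrite_existing);
        return;
    }

    /// Throttled path: stream through buffers so every byte is accounted for.
    fs::create_directories(to);
    for (const auto & entry : fs::recursive_directory_iterator(from))
    {
        const fs::path target = to / fs::relative(entry.path(), from);
        if (entry.is_directory())
        {
            fs::create_directories(target);
            continue;
        }
        std::ifstream in(entry.path(), std::ios::binary);
        std::ofstream out(target, std::ios::binary);
        char buf[1 << 16];
        while (in.read(buf, sizeof(buf)) || in.gcount() > 0)
        {
            settings.local_throttler(static_cast<std::size_t>(in.gcount()));
            out.write(buf, in.gcount());
        }
    }
}
```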
@ -65,7 +65,7 @@ public:

    void replaceFile(const String & from_path, const String & to_path) override;

    void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir) override;
    void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir, const WriteSettings & settings) override;

    void listFiles(const String & path, std::vector<String> & file_names) const override;
@ -54,9 +54,9 @@ public:
        disk.replaceFile(from_path, to_path);
    }

    void copyFile(const std::string & from_file_path, const std::string & to_file_path) override
    void copyFile(const std::string & from_file_path, const std::string & to_file_path, const WriteSettings & settings) override
    {
        disk.copyFile(from_file_path, disk, to_file_path);
        disk.copyFile(from_file_path, disk, to_file_path, settings);
    }

    std::unique_ptr<WriteBufferFromFileBase> writeFile( /// NOLINT
@ -3,6 +3,7 @@
#include <IO/WriteBufferFromFileBase.h>
#include <IO/copyData.h>
#include <Poco/Logger.h>
#include <Interpreters/Context.h>
#include <Common/logger_useful.h>
#include <Common/setThreadName.h>
#include <Core/ServerUUID.h>
@ -122,11 +123,10 @@ void asyncCopy(IDisk & from_disk, String from_path, IDisk & to_disk, String to_p
    }
}

void IDisk::copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path, bool copy_root_dir)
void IDisk::copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path, bool copy_root_dir, WriteSettings settings)
{
    ResultsCollector results;

    WriteSettings settings;
    /// Disable parallel write. We already copy in parallel.
    /// Avoid high memory usage. See test_s3_zero_copy_ttl/test.py::test_move_and_s3_memory_usage
    settings.s3_allow_parallel_part_upload = false;
@ -140,12 +140,12 @@ void IDisk::copyThroughBuffers(const String & from_path, const std::shared_ptr<I
}


void IDisk::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir)
void IDisk::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir, const WriteSettings & settings)
{
    if (!to_disk->exists(to_dir))
        to_disk->createDirectories(to_dir);

    copyThroughBuffers(from_dir, to_disk, to_dir, /* copy_root_dir */ false);
    copyThroughBuffers(from_dir, to_disk, to_dir, /* copy_root_dir= */ false, settings);
}

void IDisk::truncateFile(const String &, size_t)
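`copyThroughBuffers` now receives the caller's `WriteSettings` but still forces `s3_allow_parallel_part_upload = false`: the copy is already parallel across files, so adding per-file parallelism would multiply peak memory usage. A sketch of that trade-off (names hypothetical, not the actual IDisk API):

```cpp
#include <filesystem>
#include <future>
#include <vector>

namespace fs = std::filesystem;

struct WriteOptions
{
    bool allow_parallel_part_upload = true; /// parallelism inside one file write
};

void copyOneFile(const fs::path & from, const fs::path & to, WriteOptions /*opts*/)
{
    fs::copy_file(from, to, fs::copy_options::overwrite_existing);
}

/// Copy files in parallel across files; since we already parallelize here,
/// disable intra-file parallelism to bound peak memory usage.
void copyAll(const std::vector<fs::path> & files, const fs::path & to_dir, WriteOptions opts)
{
    opts.allow_parallel_part_upload = false;
    std::vector<std::future<void>> results;
    for (const auto & f : files)
        results.push_back(std::async(std::launch::async, copyOneFile, f, to_dir / f.filename(), opts));
    for (auto & r : results)
        r.get(); /// propagate exceptions from the workers
}
```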
@ -193,7 +193,7 @@ public:
    virtual void replaceFile(const String & from_path, const String & to_path) = 0;

    /// Recursively copy files from from_dir to to_dir. Create to_dir if not exists.
    virtual void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir);
    virtual void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir, const WriteSettings & settings);

    /// Copy file `from_file_path` to `to_file_path` located at `to_disk`.
    virtual void copyFile( /// NOLINT
@ -470,7 +470,7 @@ protected:
    /// Base implementation of the function copy().
    /// It just opens two files, reads data by portions from the first file, and writes it to the second one.
    /// A derived class may override copy() to provide a faster implementation.
    void copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path, bool copy_root_dir = true);
    void copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path, bool copy_root_dir, WriteSettings settings);

    virtual void checkAccessImpl(const String & path);
@ -59,7 +59,7 @@ public:
    /// but it's impossible to implement correctly in transactions because other disk can
    /// use different metadata storage.
    /// TODO: maybe remove it at all, we don't want copies
    virtual void copyFile(const std::string & from_file_path, const std::string & to_file_path) = 0;
    virtual void copyFile(const std::string & from_file_path, const std::string & to_file_path, const WriteSettings & settings = {}) = 0;

    /// Open the file for write and return WriteBufferFromFileBase object.
    virtual std::unique_ptr<WriteBufferFromFileBase> writeFile( /// NOLINT
@ -74,7 +74,7 @@ SeekableReadBufferPtr ReadBufferFromRemoteFSGather::createImplementationBuffer(c
    size_t current_read_until_position = read_until_position ? read_until_position : object.bytes_size;
    auto current_read_buffer_creator = [=, this]() { return read_buffer_creator(object_path, current_read_until_position); };

#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
#ifndef CLICKHOUSE_KEEPER_STANDALONE_BUILD
    if (with_cache)
    {
        auto cache_key = settings.remote_fs_cache->createKeyForPath(object_path);
@ -189,7 +189,7 @@ public:
    /// DiskObjectStorage(CachedObjectStorage(CachedObjectStorage(S3ObjectStorage)))
    String getStructure() const { return fmt::format("DiskObjectStorage-{}({})", getName(), object_storage->getName()); }

#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
#ifndef CLICKHOUSE_KEEPER_STANDALONE_BUILD
    /// Add a cache layer.
    /// Example: DiskObjectStorage(S3ObjectStorage) -> DiskObjectStorage(CachedObjectStorage(S3ObjectStorage))
    /// There can be any number of cache layers:
@ -5,6 +5,7 @@
#include <ranges>
#include <Common/logger_useful.h>
#include <Common/Exception.h>
#include <base/defines.h>

#include <Disks/ObjectStorages/MetadataStorageFromDisk.h>

@ -769,8 +770,11 @@ void DiskObjectStorageTransaction::createFile(const std::string & path)
    }));
}

void DiskObjectStorageTransaction::copyFile(const std::string & from_file_path, const std::string & to_file_path)
void DiskObjectStorageTransaction::copyFile(const std::string & from_file_path, const std::string & to_file_path, const WriteSettings & settings)
{
    /// NOTE: For native copy we can ignore throttling, so no need to use WriteSettings
    UNUSED(settings);

    operations_to_execute.emplace_back(
        std::make_unique<CopyFileObjectStorageOperation>(object_storage, metadata_storage, from_file_path, to_file_path));
}
@ -86,7 +86,7 @@ public:

    void createFile(const String & path) override;

    void copyFile(const std::string & from_file_path, const std::string & to_file_path) override;
    void copyFile(const std::string & from_file_path, const std::string & to_file_path, const WriteSettings & settings) override;

    /// writeFile is a difficult function for transactions.
    /// Now it's almost noop because metadata added to transaction in finalize method
@ -32,7 +32,7 @@ void registerDiskCache(DiskFactory & factory, bool global_skip_access_check);
void registerDiskLocalObjectStorage(DiskFactory & factory, bool global_skip_access_check);


#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
#ifndef CLICKHOUSE_KEEPER_STANDALONE_BUILD

void registerDisks(bool global_skip_access_check)
{

27 src/Functions/isNotDistinctFrom.cpp Normal file
@ -0,0 +1,27 @@
#include <Functions/isNotDistinctFrom.h>


namespace DB
{

REGISTER_FUNCTION(IsNotDistinctFrom)
{
    factory.registerFunction<FunctionIsNotDistinctFrom>(
        FunctionDocumentation{
            .description = R"(
Performs a null-safe comparison between two values. This function will consider
two `NULL` values as identical and will return `true`, which is distinct from the usual
equals behavior where comparing two `NULL` values would return `NULL`.

Currently, this function can only be used in the `JOIN ON` section of a query.

[example:join_on_is_not_distinct_from]
)",
            .examples{
                {"join_on_is_not_distinct_from", "SELECT * FROM (SELECT NULL AS a) AS t1 JOIN (SELECT NULL AS b) AS t2 ON isNotDistinctFrom(t1.a, t2.b)", "NULL\tNULL"},
            },
            .categories = {"Comparison", "Join Operators"},
        });
}

}

52 src/Functions/isNotDistinctFrom.h Normal file
@ -0,0 +1,52 @@
#pragma once

#include <Functions/IFunction.h>
#include <Functions/FunctionFactory.h>
#include <DataTypes/DataTypesNumber.h>
#include <Interpreters/Context.h>


namespace DB
{

namespace ErrorCodes
{
    extern const int NOT_IMPLEMENTED;
}

/** Performs null-safe comparison.
  * equals(NULL, NULL) is NULL, while isNotDistinctFrom(NULL, NULL) is true.
  * Currently, it can be used only in the JOIN ON section.
  * This wrapper is needed to register the function, which makes query analysis, syntax completion and so on possible.
  */
class FunctionIsNotDistinctFrom : public IFunction
{
public:
    static constexpr auto name = "isNotDistinctFrom";

    static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionIsNotDistinctFrom>(); }

    String getName() const override { return name; }

    bool isVariadic() const override { return false; }

    size_t getNumberOfArguments() const override { return 2; }

    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }

    bool useDefaultImplementationForNulls() const override { return false; }

    bool useDefaultImplementationForNothing() const override { return false; }
    bool useDefaultImplementationForConstants() const override { return true; }
    bool useDefaultImplementationForLowCardinalityColumns() const override { return true; }

    DataTypePtr getReturnTypeImpl(const DataTypes &) const override { return std::make_shared<DataTypeUInt8>(); }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & /* arguments */, const DataTypePtr &, size_t /* rows_count */) const override
    {
        throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} can be used only in the JOIN ON section", getName());
    }
};

}

@ -1,87 +1,7 @@
#include <Functions/IFunction.h>
#include <Functions/FunctionFactory.h>
#include <DataTypes/DataTypeTuple.h>
#include <Columns/ColumnTuple.h>
#include <memory>
#include <Functions/tuple.h>


namespace DB
{
namespace ErrorCodes
{
    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}

namespace
{

/** tuple(x, y, ...) is a function that allows you to group several columns
  * tupleElement(tuple, n) is a function that allows you to retrieve a column from tuple.
  */

class FunctionTuple : public IFunction
{
public:
    static constexpr auto name = "tuple";

    static FunctionPtr create(ContextPtr)
    {
        return std::make_shared<FunctionTuple>();
    }

    String getName() const override
    {
        return name;
    }

    bool isVariadic() const override
    {
        return true;
    }

    size_t getNumberOfArguments() const override
    {
        return 0;
    }

    bool isInjective(const ColumnsWithTypeAndName &) const override
    {
        return true;
    }

    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }

    bool useDefaultImplementationForNulls() const override { return false; }
    /// tuple(..., Nothing, ...) -> Tuple(..., Nothing, ...)
    bool useDefaultImplementationForNothing() const override { return false; }
    bool useDefaultImplementationForConstants() const override { return true; }
    bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
        if (arguments.empty())
            throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires at least one argument.", getName());

        return std::make_shared<DataTypeTuple>(arguments);
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
    {
        size_t tuple_size = arguments.size();
        Columns tuple_columns(tuple_size);
        for (size_t i = 0; i < tuple_size; ++i)
        {
            /** If tuple is mixed of constant and not constant columns,
              * convert all to non-constant columns,
              * because many places in code expect all non-constant columns in non-constant tuple.
              */
            tuple_columns[i] = arguments[i].column->convertToFullColumnIfConst();
        }
        return ColumnTuple::create(tuple_columns);
    }
};

}

REGISTER_FUNCTION(Tuple)
{

70 src/Functions/tuple.h Normal file
@ -0,0 +1,70 @@
#pragma once

#include <Functions/IFunction.h>

#include <Columns/ColumnTuple.h>
#include <DataTypes/DataTypeTuple.h>
#include <Functions/FunctionFactory.h>
#include <Interpreters/Context.h>

namespace DB
{

namespace ErrorCodes
{
    extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}

/** tuple(x, y, ...) is a function that allows you to group several columns
  * tupleElement(tuple, n) is a function that allows you to retrieve a column from tuple.
  */
class FunctionTuple : public IFunction
{
public:
    static constexpr auto name = "tuple";

    /// maybe_unused: false-positive
    [[ maybe_unused ]] static FunctionPtr create(ContextPtr) { return std::make_shared<FunctionTuple>(); }

    String getName() const override { return name; }

    bool isVariadic() const override { return true; }

    size_t getNumberOfArguments() const override { return 0; }

    bool isInjective(const ColumnsWithTypeAndName &) const override { return true; }

    bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }

    bool useDefaultImplementationForNulls() const override { return false; }

    /// tuple(..., Nothing, ...) -> Tuple(..., Nothing, ...)
    bool useDefaultImplementationForNothing() const override { return false; }
    bool useDefaultImplementationForConstants() const override { return true; }
    bool useDefaultImplementationForLowCardinalityColumns() const override { return false; }

    DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
    {
        if (arguments.empty())
            throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires at least one argument.", getName());

        return std::make_shared<DataTypeTuple>(arguments);
    }

    ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
    {
        size_t tuple_size = arguments.size();
        Columns tuple_columns(tuple_size);
        for (size_t i = 0; i < tuple_size; ++i)
        {
            /** If tuple is mixed of constant and not constant columns,
              * convert all to non-constant columns,
              * because many places in code expect all non-constant columns in non-constant tuple.
              */
            tuple_columns[i] = arguments[i].column->convertToFullColumnIfConst();
        }
        return ColumnTuple::create(tuple_columns);
    }
};

}
@ -38,15 +38,15 @@ bool isRightIdentifier(JoinIdentifierPos pos)

}

void CollectJoinOnKeysMatcher::Data::addJoinKeys(const ASTPtr & left_ast, const ASTPtr & right_ast, JoinIdentifierPosPair table_pos)
void CollectJoinOnKeysMatcher::Data::addJoinKeys(const ASTPtr & left_ast, const ASTPtr & right_ast, JoinIdentifierPosPair table_pos, bool null_safe_comparison)
{
    ASTPtr left = left_ast->clone();
    ASTPtr right = right_ast->clone();

    if (isLeftIdentifier(table_pos.first) && isRightIdentifier(table_pos.second))
        analyzed_join.addOnKeys(left, right);
        analyzed_join.addOnKeys(left, right, null_safe_comparison);
    else if (isRightIdentifier(table_pos.first) && isLeftIdentifier(table_pos.second))
        analyzed_join.addOnKeys(right, left);
        analyzed_join.addOnKeys(right, left, null_safe_comparison);
    else
        throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, "Cannot detect left and right JOIN keys. JOIN ON section is ambiguous.");
}
@ -78,7 +78,7 @@ void CollectJoinOnKeysMatcher::Data::asofToJoinKeys()
{
    if (!asof_left_key || !asof_right_key)
        throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, "No inequality in ASOF JOIN ON section.");
    addJoinKeys(asof_left_key, asof_right_key, {JoinIdentifierPos::Left, JoinIdentifierPos::Right});
    addJoinKeys(asof_left_key, asof_right_key, {JoinIdentifierPos::Left, JoinIdentifierPos::Right}, false);
}

void CollectJoinOnKeysMatcher::visit(const ASTIdentifier & ident, const ASTPtr & ast, CollectJoinOnKeysMatcher::Data & data)
@ -96,14 +96,14 @@ void CollectJoinOnKeysMatcher::visit(const ASTFunction & func, const ASTPtr & as

    ASOFJoinInequality inequality = getASOFJoinInequality(func.name);

    if (func.name == "equals" || inequality != ASOFJoinInequality::None)
    if (func.name == "equals" || func.name == "isNotDistinctFrom" || inequality != ASOFJoinInequality::None)
    {
        if (func.arguments->children.size() != 2)
            throw Exception(ErrorCodes::SYNTAX_ERROR, "Function {} takes two arguments, got '{}' instead",
                func.name, func.formatForErrorMessage());
    }

    if (func.name == "equals")
    if (func.name == "equals" || func.name == "isNotDistinctFrom")
    {
        ASTPtr left = func.arguments->children.at(0);
        ASTPtr right = func.arguments->children.at(1);
@ -121,7 +121,8 @@ void CollectJoinOnKeysMatcher::visit(const ASTFunction & func, const ASTPtr & as
    if ((isLeftIdentifier(table_numbers.first) && isRightIdentifier(table_numbers.second)) ||
        (isRightIdentifier(table_numbers.first) && isLeftIdentifier(table_numbers.second)))
    {
        data.addJoinKeys(left, right, table_numbers);
        bool null_safe_comparison = func.name == "isNotDistinctFrom";
        data.addJoinKeys(left, right, table_numbers, null_safe_comparison);
        return;
    }
}
@ -54,7 +54,7 @@ public:
    ASTPtr asof_left_key{};
    ASTPtr asof_right_key{};

    void addJoinKeys(const ASTPtr & left_ast, const ASTPtr & right_ast, JoinIdentifierPosPair table_pos);
    void addJoinKeys(const ASTPtr & left_ast, const ASTPtr & right_ast, JoinIdentifierPosPair table_pos, bool null_safe_comparison);
    void addAsofJoinKeys(const ASTPtr & left_ast, const ASTPtr & right_ast, JoinIdentifierPosPair table_pos,
        const ASOFJoinInequality & asof_inequality);
    void asofToJoinKeys();
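For reference, the null-safe semantics that the `null_safe_comparison` flag selects are the same as those of `std::optional` equality: two empty values compare equal, empty vs. non-empty compare unequal. A minimal illustration (not ClickHouse code):

```cpp
#include <cassert>
#include <optional>

/// Null-safe equality ("IS NOT DISTINCT FROM"): NULL == NULL is true,
/// NULL == value is false, and two values compare by value.
template <typename T>
bool isNotDistinctFrom(const std::optional<T> & a, const std::optional<T> & b)
{
    return a == b; /// std::optional already implements exactly these semantics
}

int main()
{
    assert(isNotDistinctFrom<int>(std::nullopt, std::nullopt)); /// unlike SQL equals, which yields NULL
    assert(!isNotDistinctFrom<int>(1, std::nullopt));
    assert(isNotDistinctFrom<int>(2, 2));
}
```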
|
@ -1,6 +1,6 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#ifndef CLICKHOUSE_PROGRAM_STANDALONE_BUILD
|
#ifndef CLICKHOUSE_KEEPER_STANDALONE_BUILD
|
||||||
|
|
||||||
#include <base/types.h>
|
#include <base/types.h>
|
||||||
#include <Common/isLocalAddress.h>
|
#include <Common/isLocalAddress.h>
|
||||||
|
@@ -11,9 +11,12 @@
 #include <Columns/ColumnVector.h>
 #include <Columns/ColumnFixedString.h>
 #include <Columns/ColumnNullable.h>
+#include <Columns/ColumnTuple.h>

 #include <DataTypes/DataTypeNullable.h>
 #include <DataTypes/DataTypeLowCardinality.h>
+#include <DataTypes/DataTypeTuple.h>

 #include <Interpreters/HashJoin.h>
 #include <Interpreters/JoinUtils.h>
@@ -28,6 +31,9 @@
 #include <Common/typeid_cast.h>
 #include <Common/assert_cast.h>

+#include <Functions/FunctionHelpers.h>

 namespace DB
 {

@@ -10,6 +10,9 @@
 #include <Core/Settings.h>

 #include <DataTypes/DataTypeNullable.h>
+#include <DataTypes/DataTypeTuple.h>
+#include <Functions/IFunctionAdaptors.h>
+#include <Functions/tuple.h>

 #include <Dictionaries/DictionaryStructure.h>

@@ -40,6 +43,7 @@ namespace ErrorCodes
     extern const int TYPE_MISMATCH;
     extern const int LOGICAL_ERROR;
     extern const int NOT_IMPLEMENTED;
+    extern const int NOT_FOUND_COLUMN_IN_BLOCK;
 }

 namespace
@@ -135,7 +139,12 @@ void TableJoin::resetCollected()

 void TableJoin::addUsingKey(const ASTPtr & ast)
 {
-    addKey(ast->getColumnName(), renamedRightColumnName(ast->getAliasOrColumnName()), ast);
+    /** For a USING key, the left and right key ASTs are the same.
+      * Example:
+      * SELECT ... FROM t1 JOIN t2 USING (key)
+      * Both key_asts_left and key_asts_right will reference the same ASTIdentifier `key`
+      */
+    addKey(ast->getColumnName(), renamedRightColumnName(ast->getAliasOrColumnName()), ast, ast);
 }

 void TableJoin::addDisjunct()
@@ -146,9 +155,9 @@ void TableJoin::addDisjunct()
         throw Exception(ErrorCodes::NOT_IMPLEMENTED, "StorageJoin with ORs is not supported");
 }

-void TableJoin::addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast)
+void TableJoin::addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast, bool null_safe_comparison)
 {
-    addKey(left_table_ast->getColumnName(), right_table_ast->getAliasOrColumnName(), left_table_ast, right_table_ast);
+    addKey(left_table_ast->getColumnName(), right_table_ast->getAliasOrColumnName(), left_table_ast, right_table_ast, null_safe_comparison);
     right_key_aliases[right_table_ast->getColumnName()] = right_table_ast->getAliasOrColumnName();
 }

@@ -425,55 +434,180 @@ static void renameIfNeeded(String & name, const NameToNameMap & renames)
         name = it->second;
 }

+static void makeColumnNameUnique(const ColumnsWithTypeAndName & source_columns, String & name)
+{
+    for (const auto & source_col : source_columns)
+    {
+        if (source_col.name != name)
+            continue;
+
+        /// Duplicate found, slow path
+        NameSet names;
+        for (const auto & col : source_columns)
+            names.insert(col.name);
+
+        String base_name = name;
+        for (size_t i = 0; ; ++i)
+        {
+            name = base_name + "_" + toString(i);
+            if (!names.contains(name))
+                return;
+        }
+    }
+}
+
+static ActionsDAGPtr createWrapWithTupleActions(
+    const ColumnsWithTypeAndName & source_columns,
+    std::unordered_set<std::string_view> && column_names_to_wrap,
+    NameToNameMap & new_names)
+{
+    if (column_names_to_wrap.empty())
+        return nullptr;
+
+    auto actions_dag = std::make_shared<ActionsDAG>(source_columns);
+
+    FunctionOverloadResolverPtr func_builder = std::make_unique<FunctionToOverloadResolverAdaptor>(std::make_shared<FunctionTuple>());
+
+    for (const auto * input_node : actions_dag->getInputs())
+    {
+        const auto & column_name = input_node->result_name;
+        auto it = column_names_to_wrap.find(column_name);
+        if (it == column_names_to_wrap.end())
+            continue;
+        column_names_to_wrap.erase(it);
+
+        String node_name = "__wrapNullsafe(" + column_name + ")";
+        makeColumnNameUnique(source_columns, node_name);
+
+        const auto & dst_node = actions_dag->addFunction(func_builder, {input_node}, node_name);
+        new_names[column_name] = dst_node.result_name;
+        actions_dag->addOrReplaceInOutputs(dst_node);
+    }
+
+    if (!column_names_to_wrap.empty())
+        throw Exception(ErrorCodes::NOT_FOUND_COLUMN_IN_BLOCK, "Can't find columns {} in input columns [{}]",
+                        fmt::join(column_names_to_wrap, ", "), Block(source_columns).dumpNames());
+
+    return actions_dag;
+}
+
+/// Wrap only those keys that are nullable on both sides
+std::pair<NameSet, NameSet> TableJoin::getKeysForNullSafeComparion(const ColumnsWithTypeAndName & left_sample_columns, const ColumnsWithTypeAndName & right_sample_columns)
+{
+    std::unordered_map<String, size_t> left_idx;
+    for (size_t i = 0; i < left_sample_columns.size(); ++i)
+        left_idx[left_sample_columns[i].name] = i;
+
+    std::unordered_map<String, size_t> right_idx;
+    for (size_t i = 0; i < right_sample_columns.size(); ++i)
+        right_idx[right_sample_columns[i].name] = i;
+
+    NameSet left_keys_to_wrap;
+    NameSet right_keys_to_wrap;
+
+    for (const auto & clause : clauses)
+    {
+        for (size_t i : clause.nullsafe_compare_key_indexes)
+        {
+            const auto & left_key = clause.key_names_left[i];
+            const auto & right_key = clause.key_names_right[i];
+            auto lit = left_idx.find(left_key);
+            if (lit == left_idx.end())
+                throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't find key {} in left columns [{}]",
+                                left_key, Block(left_sample_columns).dumpNames());
+            auto rit = right_idx.find(right_key);
+            if (rit == right_idx.end())
+                throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't find key {} in right columns [{}]",
+                                right_key, Block(right_sample_columns).dumpNames());
+
+            if (!left_sample_columns[lit->second].type->isNullable() || !right_sample_columns[rit->second].type->isNullable())
+                continue;
+
+            left_keys_to_wrap.insert(left_key);
+            right_keys_to_wrap.insert(right_key);
+        }
+    }
+
+    return {left_keys_to_wrap, right_keys_to_wrap};
+}
+
+static void mergeDags(ActionsDAGPtr & result_dag, ActionsDAGPtr && new_dag)
+{
+    if (result_dag)
+        result_dag->mergeInplace(std::move(*new_dag));
+    else
+        result_dag = std::move(new_dag);
+}
+
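The helper above disambiguates the generated `__wrapNullsafe(...)` node names against the existing columns by appending a numeric suffix. The same scheme, as a standalone sketch detached from ActionsDAG (names here are hypothetical):

    #include <cassert>
    #include <set>
    #include <string>

    /// If `name` collides with an existing column name, append "_0", "_1", ... until unique.
    static std::string makeUnique(const std::set<std::string> & existing, std::string name)
    {
        if (!existing.contains(name))
            return name;
        const std::string base = name;
        for (size_t i = 0;; ++i)
        {
            name = base + "_" + std::to_string(i);
            if (!existing.contains(name))
                return name;
        }
    }

    int main()
    {
        std::set<std::string> cols{"a", "__wrapNullsafe(a)"};
        assert(makeUnique(cols, "b") == "b");
        assert(makeUnique(cols, "__wrapNullsafe(a)") == "__wrapNullsafe(a)_0");
    }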
 std::pair<ActionsDAGPtr, ActionsDAGPtr>
 TableJoin::createConvertingActions(
     const ColumnsWithTypeAndName & left_sample_columns,
     const ColumnsWithTypeAndName & right_sample_columns)
 {
+    ActionsDAGPtr left_dag = nullptr;
+    ActionsDAGPtr right_dag = nullptr;
+    /** If the types are not equal, we need to convert them to a common type.
+      * Example:
+      * SELECT * FROM t1 JOIN t2 ON t1.a = t2.b
+      * Assume that t1.a is UInt16 and t2.b is Int8. The supertype for them is Int32.
+      * The query will be semantically transformed to:
+      * SELECT * FROM t1 JOIN t2 ON CAST(t1.a AS 'Int32') = CAST(t2.b AS 'Int32')
+      * As a result, the user will get the original columns `a` and `b` without `CAST`.
+      */
+    NameToNameMap left_column_rename;
+    NameToNameMap right_column_rename;
     inferJoinKeyCommonType(left_sample_columns, right_sample_columns, !isSpecialStorage(), isEnabledAlgorithm(JoinAlgorithm::FULL_SORTING_MERGE));
-    NameToNameMap left_key_column_rename;
-    NameToNameMap right_key_column_rename;
-    auto left_converting_actions = applyKeyConvertToTable(
-        left_sample_columns, left_type_map, left_key_column_rename, forceNullableLeft());
-    auto right_converting_actions = applyKeyConvertToTable(
-        right_sample_columns, right_type_map, right_key_column_rename, forceNullableRight());
+    if (!left_type_map.empty() || !right_type_map.empty())
     {
-        auto log_actions = [](const String & side, const ActionsDAGPtr & dag)
-        {
-            if (dag)
-            {
-                /// Just debug message
-                std::vector<std::string> input_cols;
-                for (const auto & col : dag->getRequiredColumns())
-                    input_cols.push_back(col.name + ": " + col.type->getName());
-
-                std::vector<std::string> output_cols;
-                for (const auto & col : dag->getResultColumns())
-                    output_cols.push_back(col.name + ": " + col.type->getName());
-
-                LOG_DEBUG(&Poco::Logger::get("TableJoin"), "{} JOIN converting actions: [{}] -> [{}]",
-                          side, fmt::join(input_cols, ", "), fmt::join(output_cols, ", "));
-            }
-            else
-            {
-                LOG_DEBUG(&Poco::Logger::get("TableJoin"), "{} JOIN converting actions: empty", side);
-                return;
-            }
-        };
-        log_actions("Left", left_converting_actions);
-        log_actions("Right", right_converting_actions);
+        left_dag = applyKeyConvertToTable(left_sample_columns, left_type_map, JoinTableSide::Left, left_column_rename);
+        right_dag = applyKeyConvertToTable(right_sample_columns, right_type_map, JoinTableSide::Right, right_column_rename);
     }

-    forAllKeys(clauses, [&](auto & left_key, auto & right_key)
-    {
-        renameIfNeeded(left_key, left_key_column_rename);
-        renameIfNeeded(right_key, right_key_column_rename);
-        return true;
-    });
-
-    return {left_converting_actions, right_converting_actions};
+    /**
+      * Similarly, when we have a null-safe comparison (a IS NOT DISTINCT FROM b),
+      * we need to wrap keys with a non-nullable type.
+      * The type `tuple` can be used for this purpose,
+      * because the value tuple(NULL) is not NULL itself (moreover it has type Tuple(Nullable(T)), which is not Nullable).
+      * Thus, the join algorithm will match keys with values tuple(NULL).
+      * Example:
+      * SELECT * FROM t1 JOIN t2 ON t1.a <=> t2.b
+      * This will be semantically transformed to:
+      * SELECT * FROM t1 JOIN t2 ON tuple(t1.a) == tuple(t2.b)
+      */
+    auto [left_keys_nullsafe_comparison, right_keys_nullsafe_comparison] = getKeysForNullSafeComparion(
+        left_dag ? left_dag->getResultColumns() : left_sample_columns,
+        right_dag ? right_dag->getResultColumns() : right_sample_columns);
+    if (!left_keys_nullsafe_comparison.empty() || !right_keys_nullsafe_comparison.empty())
+    {
+        auto new_left_dag = applyNullsafeWrapper(
+            left_dag ? left_dag->getResultColumns() : left_sample_columns,
+            left_keys_nullsafe_comparison, JoinTableSide::Left, left_column_rename);
+        mergeDags(left_dag, std::move(new_left_dag));
+
+        auto new_right_dag = applyNullsafeWrapper(
+            right_dag ? right_dag->getResultColumns() : right_sample_columns,
+            right_keys_nullsafe_comparison, JoinTableSide::Right, right_column_rename);
+        mergeDags(right_dag, std::move(new_right_dag));
+    }
+
+    if (forceNullableLeft())
+    {
+        auto new_left_dag = applyJoinUseNullsConversion(
+            left_dag ? left_dag->getResultColumns() : left_sample_columns,
+            left_column_rename);
+        mergeDags(left_dag, std::move(new_left_dag));
+    }
+
+    if (forceNullableRight())
+    {
+        auto new_right_dag = applyJoinUseNullsConversion(
+            right_dag ? right_dag->getResultColumns() : right_sample_columns,
+            right_column_rename);
+        mergeDags(right_dag, std::move(new_right_dag));
+    }
+
+    return {left_dag, right_dag};
 }

 template <typename LeftNamesAndTypes, typename RightNamesAndTypes>
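The tuple-wrapping trick described in the comment above can be modeled outside ClickHouse: a hash join matches keys by ordinary equality, so NULL keys never find a partner; wrapping the key in a one-element composite turns NULL into a concrete, matchable value. A sketch under that assumption, with std::optional standing in for a Nullable key and a std::pair standing in for tuple(x):

    #include <cassert>
    #include <map>
    #include <optional>
    #include <utility>
    #include <vector>

    using NullableKey = std::optional<int>;
    /// tuple(x): a composite that is never "NULL" itself, even when x is.
    using WrappedKey = std::pair<bool /*is_null*/, int>;

    static WrappedKey wrap(const NullableKey & k)
    {
        return k.has_value() ? WrappedKey{false, *k} : WrappedKey{true, 0};
    }

    int main()
    {
        std::vector<NullableKey> left{1, std::nullopt};
        std::vector<NullableKey> right{std::nullopt, 1};

        /// Build side of a toy hash join, keyed by the wrapped value.
        std::map<WrappedKey, size_t> build;
        for (size_t i = 0; i < right.size(); ++i)
            build[wrap(right[i])] = i;

        /// Probe side: after wrapping, the NULL key on the left matches the NULL row on the right.
        assert(build.count(wrap(left[0])) == 1); /// 1 matches 1
        assert(build.count(wrap(left[1])) == 1); /// NULL matches NULL (null-safe)
    }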
@@ -608,30 +742,66 @@ static ActionsDAGPtr changeTypesToNullable(
 ActionsDAGPtr TableJoin::applyKeyConvertToTable(
     const ColumnsWithTypeAndName & cols_src,
     const NameToTypeMap & type_mapping,
-    NameToNameMap & key_column_rename,
-    bool make_nullable) const
+    JoinTableSide table_side,
+    NameToNameMap & key_column_rename)
 {
+    if (type_mapping.empty())
+        return nullptr;
+
     /// Create DAG to convert key columns
-    ActionsDAGPtr dag_stage1 = changeKeyTypes(cols_src, type_mapping, !hasUsing(), key_column_rename);
+    ActionsDAGPtr convert_dag = changeKeyTypes(cols_src, type_mapping, !hasUsing(), key_column_rename);
+    applyRename(table_side, key_column_rename);
+    return convert_dag;
+}
+
+ActionsDAGPtr TableJoin::applyNullsafeWrapper(
+    const ColumnsWithTypeAndName & cols_src,
+    const NameSet & columns_for_nullsafe_comparison,
+    JoinTableSide table_side,
+    NameToNameMap & key_column_rename)
+{
+    if (columns_for_nullsafe_comparison.empty())
+        return nullptr;
+
+    std::unordered_set<std::string_view> column_names_to_wrap;
+    for (const auto & name : columns_for_nullsafe_comparison)
+    {
+        /// Take into account column renaming for type conversion:
+        /// if we changed key `a == b` to `_CAST(a, 'UInt64') = b` we need to wrap `tuple(_CAST(a, 'UInt64')) = tuple(b)`
+        if (auto it = key_column_rename.find(name); it != key_column_rename.end())
+            column_names_to_wrap.insert(it->second);
+        else
+            column_names_to_wrap.insert(name);
+    }
+
+    /// Create DAG to wrap keys with tuple for null-safe comparison
+    ActionsDAGPtr null_safe_wrap_dag = createWrapWithTupleActions(cols_src, std::move(column_names_to_wrap), key_column_rename);
+    for (auto & clause : clauses)
+    {
+        for (size_t i : clause.nullsafe_compare_key_indexes)
+        {
+            if (table_side == JoinTableSide::Left)
+                renameIfNeeded(clause.key_names_left[i], key_column_rename);
+            else
+                renameIfNeeded(clause.key_names_right[i], key_column_rename);
+        }
+    }
+
+    return null_safe_wrap_dag;
+}
+
+ActionsDAGPtr TableJoin::applyJoinUseNullsConversion(
+    const ColumnsWithTypeAndName & cols_src,
+    const NameToNameMap & key_column_rename)
+{
+    /// No need to make nullable the temporary columns that are used only as join keys and are not visible to the user
+    NameSet exclude_columns;
+    for (const auto & it : key_column_rename)
+        exclude_columns.insert(it.second);

     /// Create DAG to make columns nullable if needed
-    if (make_nullable)
-    {
-        /// Do not need to make nullable temporary columns that would be used only as join keys, but is not visible to user
-        NameSet cols_not_nullable;
-        for (const auto & t : key_column_rename)
-            cols_not_nullable.insert(t.second);
-
-        ColumnsWithTypeAndName input_cols = dag_stage1 ? dag_stage1->getResultColumns() : cols_src;
-        ActionsDAGPtr dag_stage2 = changeTypesToNullable(input_cols, cols_not_nullable);
-
-        /// Merge dags if we got two ones
-        if (dag_stage1)
-            return ActionsDAG::merge(std::move(*dag_stage1), std::move(*dag_stage2));
-        else
-            return dag_stage2;
-    }
-    return dag_stage1;
+    ActionsDAGPtr add_nullable_dag = changeTypesToNullable(cols_src, exclude_columns);
+    return add_nullable_dag;
 }

 void TableJoin::setStorageJoin(std::shared_ptr<const IKeyValueEntity> storage)
@@ -674,12 +844,13 @@ void TableJoin::setRename(const String & from, const String & to)
     renames[from] = to;
 }

-void TableJoin::addKey(const String & left_name, const String & right_name, const ASTPtr & left_ast, const ASTPtr & right_ast)
+void TableJoin::addKey(const String & left_name, const String & right_name,
+                       const ASTPtr & left_ast, const ASTPtr & right_ast,
+                       bool null_safe_comparison)
 {
-    clauses.back().key_names_left.emplace_back(left_name);
-    key_asts_left.emplace_back(left_ast);
+    clauses.back().addKey(left_name, right_name, null_safe_comparison);

-    clauses.back().key_names_right.emplace_back(right_name);
+    key_asts_left.emplace_back(left_ast);
     key_asts_right.emplace_back(right_ast ? right_ast : left_ast);
 }

@@ -731,6 +902,19 @@ Names TableJoin::getAllNames(JoinTableSide side) const
     return res;
 }

+void TableJoin::applyRename(JoinTableSide side, const NameToNameMap & name_map)
+{
+    auto rename_callback = [&name_map](auto & key_name)
+    {
+        renameIfNeeded(key_name, name_map);
+        return true;
+    };
+    if (side == JoinTableSide::Left)
+        forAllKeys<LeftSideTag>(clauses, rename_callback);
+    else
+        forAllKeys<RightSideTag>(clauses, rename_callback);
+}
+
 void TableJoin::assertHasOneOnExpr() const
 {
     if (!oneDisjunct())
@@ -51,6 +51,13 @@ public:
         Names key_names_left;
         Names key_names_right; /// Duplicating right key names are qualified

+        /** JOIN ON a1 == a2 AND b1 <=> b2 AND c1 == c2 AND d1 <=> d2
+          * key_names_left:  [a1, b1, c1, d1]
+          * key_names_right: [a2, b2, c2, d2]
+          * nullsafe_compare_key_indexes: {1, 3}
+          */
+        std::unordered_set<size_t> nullsafe_compare_key_indexes;
+
         ASTPtr on_filter_condition_left;
         ASTPtr on_filter_condition_right;

@@ -59,6 +66,14 @@ public:

         JoinOnClause() = default;

+        void addKey(const String & left_name, const String & right_name, bool null_safe_comparison)
+        {
+            key_names_left.push_back(left_name);
+            key_names_right.push_back(right_name);
+            if (null_safe_comparison)
+                nullsafe_compare_key_indexes.insert(key_names_left.size() - 1);
+        }
+
         std::pair<String, String> condColumnNames() const
         {
             std::pair<String, String> res;
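The bookkeeping above can be read off a concrete clause: for `JOIN ON a1 == a2 AND b1 <=> b2`, the two `addKey` calls leave the null-safe key at index 1. A toy mirror of the structure (not the real class) that makes the invariant checkable:

    #include <cassert>
    #include <string>
    #include <unordered_set>
    #include <vector>

    struct ToyClause
    {
        std::vector<std::string> key_names_left;
        std::vector<std::string> key_names_right;
        std::unordered_set<size_t> nullsafe_compare_key_indexes;

        void addKey(const std::string & l, const std::string & r, bool null_safe)
        {
            key_names_left.push_back(l);
            key_names_right.push_back(r);
            if (null_safe)
                nullsafe_compare_key_indexes.insert(key_names_left.size() - 1);
        }
    };

    int main()
    {
        ToyClause clause;
        clause.addKey("a1", "a2", /* null_safe= */ false); /// a1 == a2
        clause.addKey("b1", "b2", /* null_safe= */ true);  /// b1 <=> b2
        assert((clause.nullsafe_compare_key_indexes == std::unordered_set<size_t>{1}));
    }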
@@ -177,11 +192,24 @@ private:

     /// Create converting actions and change key column names if required
     ActionsDAGPtr applyKeyConvertToTable(
-        const ColumnsWithTypeAndName & cols_src, const NameToTypeMap & type_mapping,
-        NameToNameMap & key_column_rename,
-        bool make_nullable) const;
+        const ColumnsWithTypeAndName & cols_src,
+        const NameToTypeMap & type_mapping,
+        JoinTableSide table_side,
+        NameToNameMap & key_column_rename);

-    void addKey(const String & left_name, const String & right_name, const ASTPtr & left_ast, const ASTPtr & right_ast = nullptr);
+    ActionsDAGPtr applyNullsafeWrapper(
+        const ColumnsWithTypeAndName & cols_src,
+        const NameSet & columns_for_nullsafe_comparison,
+        JoinTableSide table_side,
+        NameToNameMap & key_column_rename);
+
+    ActionsDAGPtr applyJoinUseNullsConversion(
+        const ColumnsWithTypeAndName & cols_src,
+        const NameToNameMap & key_column_rename);
+
+    void applyRename(JoinTableSide side, const NameToNameMap & name_map);
+
+    void addKey(const String & left_name, const String & right_name, const ASTPtr & left_ast, const ASTPtr & right_ast, bool null_safe_comparison = false);

     void assertHasOneOnExpr() const;

@@ -189,9 +217,11 @@ private:
     template <typename LeftNamesAndTypes, typename RightNamesAndTypes>
     void inferJoinKeyCommonType(const LeftNamesAndTypes & left, const RightNamesAndTypes & right, bool allow_right, bool strict);

     void deduplicateAndQualifyColumnNames(const NameSet & left_table_columns, const String & right_table_prefix);

+    std::pair<NameSet, NameSet>
+    getKeysForNullSafeComparion(const ColumnsWithTypeAndName & left_sample_columns, const ColumnsWithTypeAndName & right_sample_columns);
+
 public:
     TableJoin() = default;

@@ -270,7 +300,7 @@ public:

     void addDisjunct();

-    void addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast);
+    void addOnKeys(ASTPtr & left_table_ast, ASTPtr & right_table_ast, bool null_safe_comparison);

     /* Conditions for left/right table from JOIN ON section.
      *
@@ -46,7 +46,7 @@ static std::string renderFileNameTemplate(time_t now, const std::string & file_p
     std::tm buf;
     localtime_r(&now, &buf);
     std::ostringstream ss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
-    ss << std::put_time(&buf, file_path.c_str());
+    ss << std::put_time(&buf, path.filename().c_str());
    return path.replace_filename(ss.str());
 }

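The fix above matters when a directory component of the log path contains a `%` character: previously the whole path went through std::put_time, so date specifiers in directory names were expanded as well. A POSIX sketch of the difference (the paths are made up for illustration):

    #include <ctime>
    #include <filesystem>
    #include <iomanip>
    #include <iostream>
    #include <sstream>

    int main()
    {
        std::time_t now = std::time(nullptr);
        std::tm buf;
        localtime_r(&now, &buf);

        std::filesystem::path path = "/var/log/%m-server/clickhouse-%Y.log";

        std::ostringstream whole_path;
        /// Old behavior: "%m" inside the directory name is expanded too.
        whole_path << std::put_time(&buf, path.c_str());

        std::ostringstream filename_only;
        /// New behavior: only the filename is treated as a template.
        filename_only << std::put_time(&buf, path.filename().c_str());

        std::cout << whole_path.str() << '\n'
                  << path.replace_filename(filename_only.str()) << '\n';
    }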
@@ -208,6 +208,9 @@ bool applyTrivialCountIfPossible(
     if (row_policy_filter)
         return {};

+    if (select_query_info.additional_filter_ast)
+        return false;
+
     /** Transaction check here is necessary because
       * MergeTree maintains total count for all parts in Active state and it simply returns that number for trivial select count() from table query.
       * But if we have current transaction, then we should return number of rows in current snapshot (that may include parts in Outdated state),
@@ -663,6 +666,7 @@ JoinTreeQueryPlan buildQueryPlanForTableExpression(QueryTreeNodePtr table_expres
         is_single_table_expression &&
         (table_node || table_function_node) &&
         select_query_info.has_aggregates &&
+        settings.additional_table_filters.value.empty() &&
         applyTrivialCountIfPossible(query_plan, table_expression_query_info, table_node, table_function_node, select_query_info.query_tree, planner_context->getMutableQueryContext(), table_expression_data.getColumnNames());

     if (is_trivial_count_applied)
@@ -1,7 +1,7 @@
 #include <Server/ProtocolServerAdapter.h>
 #include <Server/TCPServer.h>

-#if USE_GRPC && !defined(CLICKHOUSE_PROGRAM_STANDALONE_BUILD)
+#if USE_GRPC && !defined(CLICKHOUSE_KEEPER_STANDALONE_BUILD)
 #include <Server/GRPCServer.h>
 #endif

@@ -37,7 +37,7 @@ ProtocolServerAdapter::ProtocolServerAdapter(
 {
 }

-#if USE_GRPC && !defined(CLICKHOUSE_PROGRAM_STANDALONE_BUILD)
+#if USE_GRPC && !defined(CLICKHOUSE_KEEPER_STANDALONE_BUILD)
 class ProtocolServerAdapter::GRPCServerAdapterImpl : public Impl
 {
 public:
@@ -23,7 +23,7 @@ public:
     ProtocolServerAdapter & operator =(ProtocolServerAdapter && src) = default;
     ProtocolServerAdapter(const std::string & listen_host_, const char * port_name_, const std::string & description_, std::unique_ptr<TCPServer> tcp_server_);

-#if USE_GRPC && !defined(CLICKHOUSE_PROGRAM_STANDALONE_BUILD)
+#if USE_GRPC && !defined(CLICKHOUSE_KEEPER_STANDALONE_BUILD)
     ProtocolServerAdapter(const std::string & listen_host_, const char * port_name_, const std::string & description_, std::unique_ptr<GRPCServer> grpc_server_);
 #endif

@@ -416,6 +416,7 @@ void DataPartStorageOnDiskBase::backup(
 MutableDataPartStoragePtr DataPartStorageOnDiskBase::freeze(
     const std::string & to,
     const std::string & dir_path,
+    const WriteSettings & settings,
     std::function<void(const DiskPtr &)> save_metadata_callback,
     const ClonePartParams & params) const
 {
@@ -425,8 +426,16 @@ MutableDataPartStoragePtr DataPartStorageOnDiskBase::freeze(
     else
         disk->createDirectories(to);

-    localBackup(disk, getRelativePath(), fs::path(to) / dir_path, params.make_source_readonly, {}, params.copy_instead_of_hardlink,
-                params.files_to_copy_instead_of_hardlinks, params.external_transaction);
+    localBackup(
+        disk,
+        getRelativePath(),
+        fs::path(to) / dir_path,
+        settings,
+        params.make_source_readonly,
+        /* max_level= */ {},
+        params.copy_instead_of_hardlink,
+        params.files_to_copy_instead_of_hardlinks,
+        params.external_transaction);

     if (save_metadata_callback)
         save_metadata_callback(disk);
@@ -457,6 +466,7 @@ MutableDataPartStoragePtr DataPartStorageOnDiskBase::clonePart(
     const std::string & to,
     const std::string & dir_path,
     const DiskPtr & dst_disk,
+    const WriteSettings & write_settings,
     Poco::Logger * log) const
 {
     String path_to_clone = fs::path(to) / dir_path / "";
@@ -472,7 +482,7 @@ MutableDataPartStoragePtr DataPartStorageOnDiskBase::clonePart(
     try
     {
         dst_disk->createDirectories(to);
-        src_disk->copyDirectoryContent(getRelativePath(), dst_disk, path_to_clone);
+        src_disk->copyDirectoryContent(getRelativePath(), dst_disk, path_to_clone, write_settings);
     }
     catch (...)
     {
@@ -63,6 +63,7 @@ public:
     MutableDataPartStoragePtr freeze(
         const std::string & to,
         const std::string & dir_path,
+        const WriteSettings & settings,
         std::function<void(const DiskPtr &)> save_metadata_callback,
         const ClonePartParams & params) const override;

@@ -70,6 +71,7 @@ public:
         const std::string & to,
         const std::string & dir_path,
         const DiskPtr & dst_disk,
+        const WriteSettings & write_settings,
         Poco::Logger * log) const override;

     void rename(
@@ -252,6 +252,7 @@ public:
     virtual std::shared_ptr<IDataPartStorage> freeze(
         const std::string & to,
         const std::string & dir_path,
+        const WriteSettings & settings,
         std::function<void(const DiskPtr &)> save_metadata_callback,
         const ClonePartParams & params) const = 0;

@@ -260,6 +261,7 @@ public:
         const std::string & to,
         const std::string & dir_path,
         const DiskPtr & disk,
+        const WriteSettings & write_settings,
         Poco::Logger * log) const = 0;

     /// Change part's root. from_root should be a prefix path of current root path.
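The recurring change in the hunks above and below is mechanical but deliberate: every freeze/clone/copy path now takes an explicit write-settings value instead of defaulting it, so the caller's throttling and caching choices reach the disk layer. A generic sketch of the pattern (types deliberately simplified; only the threading of the parameter is the point):

    #include <iostream>
    #include <string>

    /// Simplified stand-in for DB::WriteSettings.
    struct WriteSettings
    {
        bool enable_filesystem_cache_on_write_operations = false;
        size_t max_write_bandwidth = 0; /// 0 means unlimited
    };

    static void copyFile(const std::string & from, const std::string & to, const WriteSettings & settings)
    {
        /// A real implementation would apply throttling/caching here; the sketch only
        /// shows that the settings arrive at the lowest layer instead of being defaulted.
        std::cout << "copy " << from << " -> " << to
                  << " (bandwidth limit: " << settings.max_write_bandwidth << ")\n";
    }

    static void clonePart(const std::string & part, const std::string & dst, const WriteSettings & settings)
    {
        copyFile(part + "/data.bin", dst + "/data.bin", settings); /// pass through, do not recreate
    }

    int main()
    {
        WriteSettings settings;
        settings.max_write_bandwidth = 100 * 1024 * 1024; /// e.g. taken from the query context
        clonePart("store/all_1_1_0", "moving/all_1_1_0", settings);
    }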
@@ -1802,11 +1802,12 @@ DataPartStoragePtr IMergeTreeDataPart::makeCloneInDetached(const String & prefix
     return getDataPartStorage().freeze(
         storage.relative_data_path,
         *maybe_path_in_detached,
-        /*save_metadata_callback=*/ {},
+        Context::getGlobalContextInstance()->getWriteSettings(),
+        /* save_metadata_callback= */ {},
         params);
 }

-MutableDataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const
+MutableDataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & directory_name, const WriteSettings & write_settings) const
 {
     assertOnDisk();

@@ -1816,7 +1817,7 @@ MutableDataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & di
         throw Exception(ErrorCodes::LOGICAL_ERROR, "Can not clone data part {} to empty directory.", name);

     String path_to_clone = fs::path(storage.relative_data_path) / directory_name / "";
-    return getDataPartStorage().clonePart(path_to_clone, getDataPartStorage().getPartDirectory(), disk, storage.log);
+    return getDataPartStorage().clonePart(path_to_clone, getDataPartStorage().getPartDirectory(), disk, write_settings, storage.log);
 }

 UInt64 IMergeTreeDataPart::getIndexSizeFromFile() const
@@ -377,7 +377,7 @@ public:
         const DiskTransactionPtr & disk_transaction) const;

     /// Makes full clone of part in specified subdirectory (relative to storage data directory, e.g. "detached") on another disk
-    MutableDataPartStoragePtr makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const;
+    MutableDataPartStoragePtr makeCloneOnDisk(const DiskPtr & disk, const String & directory_name, const WriteSettings & write_settings) const;

     /// Checks that .bin and .mrk files exist.
     ///
@@ -4985,7 +4985,7 @@ void MergeTreeData::movePartitionToDisk(const ASTPtr & partition, const String &
         throw Exception(ErrorCodes::UNKNOWN_DISK, "All parts of partition '{}' are already on disk '{}'", partition_id, disk->getName());
     }

-    MovePartsOutcome moves_outcome = movePartsToSpace(parts, std::static_pointer_cast<Space>(disk));
+    MovePartsOutcome moves_outcome = movePartsToSpace(parts, std::static_pointer_cast<Space>(disk), local_context->getWriteSettings());
     switch (moves_outcome)
     {
         case MovePartsOutcome::MovesAreCancelled:
@@ -5048,7 +5048,7 @@ void MergeTreeData::movePartitionToVolume(const ASTPtr & partition, const String
         throw Exception(ErrorCodes::UNKNOWN_DISK, "All parts of partition '{}' are already on volume '{}'", partition_id, volume->getName());
     }

-    MovePartsOutcome moves_outcome = movePartsToSpace(parts, std::static_pointer_cast<Space>(volume));
+    MovePartsOutcome moves_outcome = movePartsToSpace(parts, std::static_pointer_cast<Space>(volume), local_context->getWriteSettings());
     switch (moves_outcome)
     {
         case MovePartsOutcome::MovesAreCancelled:
@@ -7401,7 +7401,8 @@ std::pair<MergeTreeData::MutableDataPartPtr, scope_guard> MergeTreeData::cloneAn
     const String & tmp_part_prefix,
     const MergeTreePartInfo & dst_part_info,
     const StorageMetadataPtr & metadata_snapshot,
-    const IDataPartStorage::ClonePartParams & params)
+    const IDataPartStorage::ClonePartParams & params,
+    const WriteSettings & write_settings)
 {
     /// Check that the storage policy contains the disk where the src_part is located.
     bool does_storage_policy_allow_same_disk = false;
@@ -7458,7 +7459,8 @@ std::pair<MergeTreeData::MutableDataPartPtr, scope_guard> MergeTreeData::cloneAn
     auto dst_part_storage = src_part_storage->freeze(
         relative_data_path,
         tmp_dst_part_name,
-        /*save_metadata_callback=*/ {},
+        write_settings,
+        /* save_metadata_callback= */ {},
         params);

     if (params.metadata_version_to_write.has_value())
@@ -7715,6 +7717,7 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher(
         auto new_storage = data_part_storage->freeze(
             backup_part_path,
             part->getDataPartStorage().getPartDirectory(),
+            local_context->getWriteSettings(),
             callback,
             params);

@@ -7913,7 +7916,8 @@ bool MergeTreeData::scheduleDataMovingJob(BackgroundJobsAssignee & assignee)
     assignee.scheduleMoveTask(std::make_shared<ExecutableLambdaAdapter>(
         [this, moving_tagger] () mutable
         {
-            return moveParts(moving_tagger) == MovePartsOutcome::PartsMoved;
+            WriteSettings write_settings = Context::getGlobalContextInstance()->getWriteSettings();
+            return moveParts(moving_tagger, write_settings, /* wait_for_move_if_zero_copy= */ false) == MovePartsOutcome::PartsMoved;
         }, moves_assignee_trigger, getStorageID()));
     return true;
 }
@@ -7928,7 +7932,7 @@ bool MergeTreeData::areBackgroundMovesNeeded() const
     return policy->getVolumes().size() == 1 && policy->getVolumes()[0]->getDisks().size() > 1;
 }

-MovePartsOutcome MergeTreeData::movePartsToSpace(const DataPartsVector & parts, SpacePtr space)
+MovePartsOutcome MergeTreeData::movePartsToSpace(const DataPartsVector & parts, SpacePtr space, const WriteSettings & write_settings)
 {
     if (parts_mover.moves_blocker.isCancelled())
         return MovePartsOutcome::MovesAreCancelled;
@@ -7937,7 +7941,7 @@ MovePartsOutcome MergeTreeData::movePartsToSpace(const DataPartsVector & parts,
     if (moving_tagger->parts_to_move.empty())
         return MovePartsOutcome::NothingToMove;

-    return moveParts(moving_tagger, true);
+    return moveParts(moving_tagger, write_settings, /* wait_for_move_if_zero_copy= */ true);
 }

 MergeTreeData::CurrentlyMovingPartsTaggerPtr MergeTreeData::selectPartsForMove()
@@ -7992,7 +7996,7 @@ MergeTreeData::CurrentlyMovingPartsTaggerPtr MergeTreeData::checkPartsForMove(co
     return std::make_shared<CurrentlyMovingPartsTagger>(std::move(parts_to_move), *this);
 }

-MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagger, bool wait_for_move_if_zero_copy)
+MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagger, const WriteSettings & write_settings, bool wait_for_move_if_zero_copy)
 {
     LOG_INFO(log, "Got {} parts to move.", moving_tagger->parts_to_move.size());

@@ -8053,7 +8057,7 @@ MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr &
                 {
                     if (lock->isLocked())
                     {
-                        cloned_part = parts_mover.clonePart(moving_part);
+                        cloned_part = parts_mover.clonePart(moving_part, write_settings);
                         parts_mover.swapClonedPart(cloned_part);
                         break;
                     }
@@ -8080,7 +8084,7 @@ MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr &
             }
             else /// Ordinary move as it should be
             {
-                cloned_part = parts_mover.clonePart(moving_part);
+                cloned_part = parts_mover.clonePart(moving_part, write_settings);
                 parts_mover.swapClonedPart(cloned_part);
             }
             write_part_log({});
@@ -63,6 +63,8 @@ using BackupEntries = std::vector<std::pair<String, std::shared_ptr<const IBacku
 class MergeTreeTransaction;
 using MergeTreeTransactionPtr = std::shared_ptr<MergeTreeTransaction>;

+struct WriteSettings;
+
 /// Auxiliary struct holding information about the future merged or mutated part.
 struct EmergingPartInfo
 {
@@ -841,9 +843,12 @@ public:
     MergeTreeData & checkStructureAndGetMergeTreeData(IStorage & source_table, const StorageMetadataPtr & src_snapshot, const StorageMetadataPtr & my_snapshot) const;

     std::pair<MergeTreeData::MutableDataPartPtr, scope_guard> cloneAndLoadDataPartOnSameDisk(
-        const MergeTreeData::DataPartPtr & src_part, const String & tmp_part_prefix,
-        const MergeTreePartInfo & dst_part_info, const StorageMetadataPtr & metadata_snapshot,
-        const IDataPartStorage::ClonePartParams & params);
+        const MergeTreeData::DataPartPtr & src_part,
+        const String & tmp_part_prefix,
+        const MergeTreePartInfo & dst_part_info,
+        const StorageMetadataPtr & metadata_snapshot,
+        const IDataPartStorage::ClonePartParams & params,
+        const WriteSettings & write_settings);

     virtual std::vector<MergeTreeMutationStatus> getMutationsStatus() const = 0;

@@ -1336,7 +1341,7 @@ protected:
     /// MergeTree because they store mutations in different way.
     virtual std::map<int64_t, MutationCommands> getAlterMutationCommandsForPart(const DataPartPtr & part) const = 0;
     /// Moves part to specified space, used in ALTER ... MOVE ... queries
-    MovePartsOutcome movePartsToSpace(const DataPartsVector & parts, SpacePtr space);
+    MovePartsOutcome movePartsToSpace(const DataPartsVector & parts, SpacePtr space, const WriteSettings & write_settings);

     struct PartBackupEntries
     {
@@ -1489,7 +1494,7 @@ private:
     using CurrentlyMovingPartsTaggerPtr = std::shared_ptr<CurrentlyMovingPartsTagger>;

     /// Move selected parts to corresponding disks
-    MovePartsOutcome moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagger, bool wait_for_move_if_zero_copy=false);
+    MovePartsOutcome moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagger, const WriteSettings & write_settings, bool wait_for_move_if_zero_copy);

     /// Select parts for move and disks for them. Used in background moving processes.
     CurrentlyMovingPartsTaggerPtr selectPartsForMove();
@@ -208,7 +208,7 @@ bool MergeTreePartsMover::selectPartsForMove(
     return false;
 }

-MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const MergeTreeMoveEntry & moving_part) const
+MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const MergeTreeMoveEntry & moving_part, const WriteSettings & write_settings) const
 {
     if (moves_blocker.isCancelled())
         throw Exception(ErrorCodes::ABORTED, "Cancelled moving parts.");
@@ -248,12 +248,13 @@ MergeTreePartsMover::TemporaryClonedPart MergeTreePartsMover::clonePart(const Me
         else
         {
             LOG_INFO(log, "Part {} was not fetched, we are the first who move it to another disk, so we will copy it", part->name);
-            cloned_part_storage = part->getDataPartStorage().clonePart(path_to_clone, part->getDataPartStorage().getPartDirectory(), disk, log);
+            cloned_part_storage = part->getDataPartStorage().clonePart(
+                path_to_clone, part->getDataPartStorage().getPartDirectory(), disk, write_settings, log);
         }
     }
     else
     {
-        cloned_part_storage = part->makeCloneOnDisk(disk, MergeTreeData::MOVING_DIR_NAME);
+        cloned_part_storage = part->makeCloneOnDisk(disk, MergeTreeData::MOVING_DIR_NAME, write_settings);
     }

     MergeTreeDataPartBuilder builder(*data, part->name, cloned_part_storage);
@@ -65,7 +65,7 @@ public:
         const std::lock_guard<std::mutex> & moving_parts_lock);

     /// Copies part to selected reservation in detached folder. Throws exception if part already exists.
-    TemporaryClonedPart clonePart(const MergeTreeMoveEntry & moving_part) const;
+    TemporaryClonedPart clonePart(const MergeTreeMoveEntry & moving_part, const WriteSettings & write_settings) const;

     /// Replaces cloned part from detached directory into active data parts set.
     /// Replacing part changes state to DeleteOnDestroy and will be removed from disk after destructor of
@@ -1845,7 +1845,7 @@ bool MutateTask::prepare()
             .txn = ctx->txn, .hardlinked_files = &ctx->hardlinked_files,
             .files_to_copy_instead_of_hardlinks = std::move(files_to_copy_instead_of_hardlinks), .keep_metadata_version = true
         };
-        auto [part, lock] = ctx->data->cloneAndLoadDataPartOnSameDisk(ctx->source_part, prefix, ctx->future_part->part_info, ctx->metadata_snapshot, clone_params);
+        auto [part, lock] = ctx->data->cloneAndLoadDataPartOnSameDisk(ctx->source_part, prefix, ctx->future_part->part_info, ctx->metadata_snapshot, clone_params, ctx->context->getWriteSettings());
         part->getDataPartStorage().beginTransaction();

         ctx->temporary_directory_lock = std::move(lock);
@@ -1797,11 +1797,18 @@ std::map<int64_t, MutationCommands> ReplicatedMergeTreeQueue::getAlterMutationCo
     LOG_TEST(log, "Looking for mutations for part {} (part data version {}, part metadata version {})", part->name, part_data_version, part_metadata_version);

     std::map<int64_t, MutationCommands> result;

+    bool seen_all_data_mutations = false;
+    bool seen_all_metadata_mutations = false;
+
     /// Here we return mutation commands for part which has bigger alter version than part metadata version.
     /// Please note, we don't use getDataVersion(). It's because these alter commands are used for in-fly conversions
     /// of part's metadata.
     for (const auto & [mutation_version, mutation_status] : in_partition->second | std::views::reverse)
     {
+        if (seen_all_data_mutations && seen_all_metadata_mutations)
+            break;
+
         auto alter_version = mutation_status->entry->alter_version;
         if (alter_version != -1)
         {
@@ -1811,14 +1818,19 @@ std::map<int64_t, MutationCommands> ReplicatedMergeTreeQueue::getAlterMutationCo
             /// We take commands with bigger metadata version
             if (alter_version > part_metadata_version)
                 result[mutation_version] = mutation_status->entry->commands;
+            else
+                seen_all_metadata_mutations = true;
         }
-        else if (mutation_version > part_data_version)
+        else
         {
+            if (mutation_version > part_data_version)
                 result[mutation_version] = mutation_status->entry->commands;
+            else
+                seen_all_data_mutations = true;
         }
     }

-    LOG_TRACE(log, "Got {} commands for part {} (part data version {}, part metadata version {})",
+    LOG_TEST(log, "Got {} commands for part {} (part data version {}, part metadata version {})",
         result.size(), part->name, part_data_version, part_metadata_version);

     return result;
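The early exit above relies on the entries being scanned from newest to oldest: once a data mutation and a metadata mutation at or below the part's versions have been seen, no older entry can still qualify, so the rest of the scan is skipped. A minimal model of that scan (simplified to data mutations only):

    #include <cassert>
    #include <map>
    #include <ranges>
    #include <vector>

    int main()
    {
        /// mutation version -> (payload irrelevant for the sketch)
        std::map<int64_t, int> mutations{{1, 0}, {2, 0}, {3, 0}, {4, 0}, {5, 0}};
        const int64_t part_data_version = 3;

        std::vector<int64_t> to_apply;
        bool seen_all_data_mutations = false;
        for (const auto & [version, payload] : mutations | std::views::reverse)
        {
            if (seen_all_data_mutations)
                break; /// everything older cannot exceed the part's version
            if (version > part_data_version)
                to_apply.push_back(version);
            else
                seen_all_data_mutations = true; /// versions 1 and 2 are never visited
        }
        assert((to_apply == std::vector<int64_t>{5, 4}));
    }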
@@ -17,9 +17,15 @@ namespace
 {
 
 void localBackupImpl(
-    const DiskPtr & disk, IDiskTransaction * transaction, const String & source_path,
-    const String & destination_path, bool make_source_readonly, size_t level,
-    std::optional<size_t> max_level, bool copy_instead_of_hardlinks,
+    const DiskPtr & disk,
+    IDiskTransaction * transaction,
+    const String & source_path,
+    const String & destination_path,
+    const WriteSettings & settings,
+    bool make_source_readonly,
+    size_t level,
+    std::optional<size_t> max_level,
+    bool copy_instead_of_hardlinks,
     const NameSet & files_to_copy_instead_of_hardlinks)
 {
     if (max_level && level > *max_level)
@@ -51,11 +57,11 @@ void localBackupImpl(
         {
             if (transaction)
             {
-                transaction->copyFile(source, destination);
+                transaction->copyFile(source, destination, settings);
             }
             else
             {
-                disk->copyFile(source, *disk, destination);
+                disk->copyFile(source, *disk, destination, settings);
             }
         }
         else
@@ -69,8 +75,16 @@ void localBackupImpl(
         else
         {
             localBackupImpl(
-                disk, transaction, source, destination, make_source_readonly, level + 1, max_level,
-                copy_instead_of_hardlinks, files_to_copy_instead_of_hardlinks);
+                disk,
+                transaction,
+                source,
+                destination,
+                settings,
+                make_source_readonly,
+                level + 1,
+                max_level,
+                copy_instead_of_hardlinks,
+                files_to_copy_instead_of_hardlinks);
         }
     }
 }
@@ -112,9 +126,15 @@ private:
 }
 
 void localBackup(
-    const DiskPtr & disk, const String & source_path,
-    const String & destination_path, bool make_source_readonly,
-    std::optional<size_t> max_level, bool copy_instead_of_hardlinks, const NameSet & files_to_copy_intead_of_hardlinks, DiskTransactionPtr disk_transaction)
+    const DiskPtr & disk,
+    const String & source_path,
+    const String & destination_path,
+    const WriteSettings & settings,
+    bool make_source_readonly,
+    std::optional<size_t> max_level,
+    bool copy_instead_of_hardlinks,
+    const NameSet & files_to_copy_intead_of_hardlinks,
+    DiskTransactionPtr disk_transaction)
 {
     if (disk->exists(destination_path) && !disk->isDirectoryEmpty(destination_path))
     {
@@ -135,12 +155,22 @@ void localBackup(
     {
         if (disk_transaction)
         {
-            localBackupImpl(disk, disk_transaction.get(), source_path, destination_path, make_source_readonly, 0, max_level, copy_instead_of_hardlinks, files_to_copy_intead_of_hardlinks);
+            localBackupImpl(
+                disk,
+                disk_transaction.get(),
+                source_path,
+                destination_path,
+                settings,
+                make_source_readonly,
+                /* level= */ 0,
+                max_level,
+                copy_instead_of_hardlinks,
+                files_to_copy_intead_of_hardlinks);
         }
         else if (copy_instead_of_hardlinks)
         {
             CleanupOnFail cleanup([disk, destination_path]() { disk->removeRecursive(destination_path); });
-            disk->copyDirectoryContent(source_path, disk, destination_path);
+            disk->copyDirectoryContent(source_path, disk, destination_path, settings);
             cleanup.success();
         }
         else
@@ -154,7 +184,17 @@ void localBackup(
             cleaner = [disk, destination_path]() { disk->removeRecursive(destination_path); };
 
         CleanupOnFail cleanup(std::move(cleaner));
-        localBackupImpl(disk, disk_transaction.get(), source_path, destination_path, make_source_readonly, 0, max_level, false, files_to_copy_intead_of_hardlinks);
+        localBackupImpl(
+            disk,
+            disk_transaction.get(),
+            source_path,
+            destination_path,
+            settings,
+            make_source_readonly,
+            /* level= */ 0,
+            max_level,
+            /* copy_instead_of_hardlinks= */ false,
+            files_to_copy_intead_of_hardlinks);
         cleanup.success();
     }
 }
@@ -7,6 +7,8 @@
 namespace DB
 {
 
+struct WriteSettings;
+
 /** Creates a local (at the same mount point) backup (snapshot) directory.
  *
  * In the specified destination directory, it creates hard links on all source-directory files
@@ -22,6 +24,15 @@ namespace DB
  *
  * If `transaction` is provided, the changes will be added to it instead of performed on disk.
  */
-void localBackup(const DiskPtr & disk, const String & source_path, const String & destination_path, bool make_source_readonly = true, std::optional<size_t> max_level = {}, bool copy_instead_of_hardlinks = false, const NameSet & files_to_copy_intead_of_hardlinks = {}, DiskTransactionPtr disk_transaction = nullptr);
+void localBackup(
+    const DiskPtr & disk,
+    const String & source_path,
+    const String & destination_path,
+    const WriteSettings & settings,
+    bool make_source_readonly = true,
+    std::optional<size_t> max_level = {},
+    bool copy_instead_of_hardlinks = false,
+    const NameSet & files_to_copy_intead_of_hardlinks = {},
+    DiskTransactionPtr disk_transaction = nullptr);
 
 }
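For orientation, a minimal call against the declaration above. This is an illustrative sketch, not code from the patch: the disk handle and both paths are invented, and a default-constructed WriteSettings stands in for real per-query settings.

    // Hypothetical caller of the new signature; only `settings` is new.
    WriteSettings settings;                  // would normally come from a query context
    localBackup(disk, "data/source/", "data/backup/", settings);
    // Remaining parameters keep their defaults: make_source_readonly = true,
    // no max_level, hard links rather than copies, no disk transaction.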
@@ -74,7 +74,6 @@ namespace ErrorCodes
     extern const int S3_ERROR;
     extern const int NOT_IMPLEMENTED;
     extern const int QUERY_NOT_ALLOWED;
-    extern const int NO_ZOOKEEPER;
     extern const int REPLICA_ALREADY_EXISTS;
     extern const int INCOMPATIBLE_COLUMNS;
 }
@@ -106,42 +105,23 @@ StorageS3Queue::StorageS3Queue(
     if (!withGlobs())
         throw Exception(ErrorCodes::QUERY_NOT_ALLOWED, "S3Queue url must either end with '/' or contain globs");
 
-    String setting_zk_path = s3queue_settings->keeper_path;
-    if (setting_zk_path.empty())
-    {
-        auto database = DatabaseCatalog::instance().getDatabase(table_id_.database_name);
-        bool is_in_replicated_database = database->getEngineName() == "Replicated";
-
-        auto default_path = getContext()->getSettingsRef().s3queue_default_zookeeper_path.value;
-        String zk_path_prefix;
-
-        if (!default_path.empty())
-        {
-            zk_path_prefix = default_path;
-        }
-        else if (is_in_replicated_database)
-        {
-            LOG_INFO(log, "S3Queue engine zookeeper path is not specified. "
-                          "Using replicated database zookeeper path");
-
-            zk_path_prefix = fs::path(assert_cast<const DatabaseReplicated *>(database.get())->getZooKeeperPath()) / "s3queue";
-        }
-        else
-        {
-            throw Exception(ErrorCodes::NO_ZOOKEEPER,
-                            "S3Queue keeper_path engine setting not specified, "
-                            "s3queue_default_zookeeper_path_prefix not specified");
-        }
-
-        zk_path = zkutil::extractZooKeeperPath(
-            fs::path(zk_path_prefix) / toString(table_id_.uuid), /* check_starts_with_slash */ true, log);
-    }
-    else
+    std::string zk_path_prefix = getContext()->getSettingsRef().s3queue_default_zookeeper_path.value;
+    if (zk_path_prefix.empty())
+        zk_path_prefix = "/";
+
+    std::string result_zk_path;
+    if (s3queue_settings->keeper_path.changed)
     {
         /// We do not add table uuid here on purpose.
-        zk_path = zkutil::extractZooKeeperPath(s3queue_settings->keeper_path.value, /* check_starts_with_slash */ true, log);
+        result_zk_path = fs::path(zk_path_prefix) / s3queue_settings->keeper_path.value;
+    }
+    else
+    {
+        auto database_uuid = DatabaseCatalog::instance().getDatabase(table_id_.database_name)->getUUID();
+        result_zk_path = fs::path(zk_path_prefix) / toString(database_uuid) / toString(table_id_.uuid);
     }
 
+    zk_path = zkutil::extractZooKeeperPath(result_zk_path, true/* check_starts_with_slash */, log);
     LOG_INFO(log, "Using zookeeper path: {}", zk_path);
 
     FormatFactory::instance().checkFormatName(configuration.format);
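The net effect on the ZooKeeper path is easier to see side by side. A sketch under the same names as the diff (`fs` is `std::filesystem` as in the source; the prefix and UUID values are invented for illustration):

    // keeper_path set in the engine settings: prefix + user-supplied path, no table uuid.
    result_zk_path = fs::path(zk_path_prefix) / s3queue_settings->keeper_path.value;
    //   e.g. "/" / "clickhouse/s3queue/my_table"
    // keeper_path not set: prefix + database uuid + table uuid, unique per table by default.
    result_zk_path = fs::path(zk_path_prefix) / toString(database_uuid) / toString(table_id_.uuid);
    //   e.g. "/" / "ab12..." / "cd34..."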
@@ -2035,7 +2035,7 @@ void StorageMergeTree::replacePartitionFrom(const StoragePtr & source_table, con
         MergeTreePartInfo dst_part_info(partition_id, temp_index, temp_index, src_part->info.level);
 
         IDataPartStorage::ClonePartParams clone_params{.txn = local_context->getCurrentTransaction()};
-        auto [dst_part, part_lock] = cloneAndLoadDataPartOnSameDisk(src_part, TMP_PREFIX, dst_part_info, my_metadata_snapshot, clone_params);
+        auto [dst_part, part_lock] = cloneAndLoadDataPartOnSameDisk(src_part, TMP_PREFIX, dst_part_info, my_metadata_snapshot, clone_params, local_context->getWriteSettings());
         dst_parts.emplace_back(std::move(dst_part));
         dst_parts_locks.emplace_back(std::move(part_lock));
     }
@@ -2134,7 +2134,7 @@ void StorageMergeTree::movePartitionToTable(const StoragePtr & dest_table, const
         MergeTreePartInfo dst_part_info(partition_id, temp_index, temp_index, src_part->info.level);
 
         IDataPartStorage::ClonePartParams clone_params{.txn = local_context->getCurrentTransaction()};
-        auto [dst_part, part_lock] = dest_table_storage->cloneAndLoadDataPartOnSameDisk(src_part, TMP_PREFIX, dst_part_info, dest_metadata_snapshot, clone_params);
+        auto [dst_part, part_lock] = dest_table_storage->cloneAndLoadDataPartOnSameDisk(src_part, TMP_PREFIX, dst_part_info, dest_metadata_snapshot, clone_params, local_context->getWriteSettings());
         dst_parts.emplace_back(std::move(dst_part));
         dst_parts_locks.emplace_back(std::move(part_lock));
     }
@@ -2465,7 +2465,7 @@ bool StorageReplicatedMergeTree::executeReplaceRange(const LogEntry & entry)
             .metadata_version_to_write = metadata_snapshot->getMetadataVersion()
         };
         auto [res_part, temporary_part_lock] = cloneAndLoadDataPartOnSameDisk(
-            part_desc->src_table_part, TMP_PREFIX + "clone_", part_desc->new_part_info, metadata_snapshot, clone_params);
+            part_desc->src_table_part, TMP_PREFIX + "clone_", part_desc->new_part_info, metadata_snapshot, clone_params, getContext()->getWriteSettings());
         part_desc->res_part = std::move(res_part);
         part_desc->temporary_part_lock = std::move(temporary_part_lock);
     }
@@ -4560,7 +4560,7 @@ bool StorageReplicatedMergeTree::fetchPart(
     {
         chassert(!is_zero_copy_part(part_to_clone));
         IDataPartStorage::ClonePartParams clone_params{ .keep_metadata_version = true };
-        auto [cloned_part, lock] = cloneAndLoadDataPartOnSameDisk(part_to_clone, "tmp_clone_", part_info, metadata_snapshot, clone_params);
+        auto [cloned_part, lock] = cloneAndLoadDataPartOnSameDisk(part_to_clone, "tmp_clone_", part_info, metadata_snapshot, clone_params, getContext()->getWriteSettings());
         part_directory_lock = std::move(lock);
         return cloned_part;
     };
@@ -7606,7 +7606,7 @@ void StorageReplicatedMergeTree::replacePartitionFrom(
                 .copy_instead_of_hardlink = zero_copy_enabled && src_part->isStoredOnRemoteDiskWithZeroCopySupport(),
                 .metadata_version_to_write = metadata_snapshot->getMetadataVersion()
             };
-            auto [dst_part, part_lock] = cloneAndLoadDataPartOnSameDisk(src_part, TMP_PREFIX, dst_part_info, metadata_snapshot, clone_params);
+            auto [dst_part, part_lock] = cloneAndLoadDataPartOnSameDisk(src_part, TMP_PREFIX, dst_part_info, metadata_snapshot, clone_params, query_context->getWriteSettings());
             src_parts.emplace_back(src_part);
             dst_parts.emplace_back(dst_part);
             dst_parts_locks.emplace_back(std::move(part_lock));
@@ -7846,7 +7846,7 @@ void StorageReplicatedMergeTree::movePartitionToTable(const StoragePtr & dest_ta
                 .copy_instead_of_hardlink = zero_copy_enabled && src_part->isStoredOnRemoteDiskWithZeroCopySupport(),
                 .metadata_version_to_write = dest_metadata_snapshot->getMetadataVersion()
             };
-            auto [dst_part, dst_part_lock] = dest_table_storage->cloneAndLoadDataPartOnSameDisk(src_part, TMP_PREFIX, dst_part_info, dest_metadata_snapshot, clone_params);
+            auto [dst_part, dst_part_lock] = dest_table_storage->cloneAndLoadDataPartOnSameDisk(src_part, TMP_PREFIX, dst_part_info, dest_metadata_snapshot, clone_params, query_context->getWriteSettings());
 
             src_parts.emplace_back(src_part);
             dst_parts.emplace_back(dst_part);
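The six call sites above share one pattern: a WriteSettings snapshot is taken from whichever context is at hand (local_context, query_context, or getContext()) and handed to the clone, so per-query write settings such as bandwidth throttling also govern part copies; the throttling test at the end of this diff relies on exactly that. A condensed, illustrative composite rather than a verbatim excerpt:

    IDataPartStorage::ClonePartParams clone_params{.txn = local_context->getCurrentTransaction()};
    auto [dst_part, part_lock] = cloneAndLoadDataPartOnSameDisk(
        src_part, TMP_PREFIX, dst_part_info, metadata_snapshot,
        clone_params, local_context->getWriteSettings());   // WriteSettings now rides along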
@@ -20,6 +20,7 @@ const char * auto_contributors[] {
     "Ahmed Dardery",
     "Aimiyoo",
     "Akazz",
+    "Al Korgun",
     "AlPerevyshin",
     "Alain BERRIER",
     "Albert Kidrachev",
@@ -73,6 +74,7 @@ const char * auto_contributors[] {
     "Alexander Tokmakov",
     "Alexander Tretiakov",
     "Alexander Yakovlev",
+    "Alexander Zaitsev",
     "Alexandr Kondratev",
     "Alexandr Krasheninnikov",
     "Alexandr Orlov",
@@ -140,6 +142,7 @@ const char * auto_contributors[] {
     "Andrey Zvonov",
     "Andrii Buriachevskyi",
     "Andrii R",
+    "Andy Fiddaman",
     "Andy Liang",
     "Andy Yang",
     "AndyB",
@@ -186,8 +189,11 @@ const char * auto_contributors[] {
     "Artur",
     "Artur Beglaryan",
     "Artur Filatenkov",
+    "Artur Malchanau",
+    "Ash Vardanian",
     "AsiaKorushkina",
     "Atri Sharma",
+    "Austin Kothig",
     "Avogar",
     "Azat Khuzhin",
     "BSD_Conqueror",
@@ -203,9 +209,11 @@ const char * auto_contributors[] {
     "Bertrand Junqua",
     "Bharat Nallan",
     "Bharat Nallan Chakravarthy",
+    "Bhavna Jindal",
     "Big Elephant",
     "BigRedEye",
     "Bill",
+    "Bin Xie",
     "BiteTheDDDDt",
     "BlahGeek",
     "Bo Lu",
@@ -259,12 +267,15 @@ const char * auto_contributors[] {
     "Dalitso Banda",
     "Dan Roscigno",
     "DanRoscigno",
+    "Dani Pozo",
     "Daniel Bershatsky",
     "Daniel Dao",
     "Daniel Kutenin",
+    "Daniel Pozo Escalona",
     "Daniel Qin",
     "Daniil Rubin",
     "Danila Kutenin",
+    "Daniël van Eeden",
     "Dao",
     "Dao Minh Thuc",
     "Daria Mozhaeva",
@@ -272,6 +283,7 @@ const char * auto_contributors[] {
     "DarkWanderer",
     "Darío",
     "Dave Lahn",
+    "Davit Vardanyan",
     "Denis Burlaka",
     "Denis Glazachev",
     "Denis Krivak",
@@ -361,6 +373,7 @@ const char * auto_contributors[] {
     "FgoDt",
     "Filatenkov Artur",
     "Filipe Caixeta",
+    "Filipp Ozinov",
     "Filippov Denis",
     "Flowyi",
     "Francisco Barón",
@@ -411,6 +424,7 @@ const char * auto_contributors[] {
     "Hasnat",
     "Heena Bansal",
     "HeenaBansal2009",
+    "Hendrik M",
     "Hiroaki Nakamura",
     "Hongbin",
     "Hongbin Ma",
@@ -463,6 +477,7 @@ const char * auto_contributors[] {
     "JackyWoo",
     "Jacob Hayes",
     "Jacob Herrington",
+    "Jai Jhala",
     "Jake Bamrah",
     "Jake Liu",
     "Jakub Kuklis",
@@ -478,11 +493,15 @@ const char * auto_contributors[] {
     "Jean Baptiste Favre",
     "Jeffrey Dang",
     "Jiading Guo",
+    "Jianfei Hu",
     "Jiang Tao",
+    "Jiang Yuqing",
     "Jianmei Zhang",
     "Jiebin Sun",
+    "Jiyoung Yoo",
     "Joanna Hulboj",
     "Jochen Schalanda",
+    "Joe Lynch",
     "Joey",
     "Johannes Visintini",
     "John",
@@ -511,6 +530,7 @@ const char * auto_contributors[] {
     "Keiji Yoshida",
     "Ken Chen",
     "Ken MacInnis",
+    "Kenji Noguchi",
     "Kerry Clendinning",
     "Kevin Chiang",
     "Kevin Michel",
@@ -537,6 +557,7 @@ const char * auto_contributors[] {
     "Korviakov Andrey",
     "Kostiantyn Storozhuk",
     "Kozlov Ivan",
+    "Krisztián Szűcs",
     "Kruglov Pavel",
     "Krzysztof Góralski",
     "Kseniia Sumarokova",
@@ -569,6 +590,7 @@ const char * auto_contributors[] {
     "Lorenzo Mangani",
     "Loud_Scream",
     "Lucas Chang",
+    "Lucas Fernando Cardoso Nunes",
     "Lucid Dreams",
     "Luck-Chang",
     "Luis Bosque",
@@ -627,6 +649,7 @@ const char * auto_contributors[] {
     "Maxim Smirnov",
     "Maxim Ulanovskiy",
     "MaximAL",
+    "Maximilian Roos",
     "Mc.Spring",
     "Meena Renganathan",
     "Meena-Renganathan",
@@ -707,6 +730,7 @@ const char * auto_contributors[] {
     "Nikhil Raman",
     "Nikifor Seriakov",
     "Nikita",
+    "Nikita Keba",
     "Nikita Lapkov",
     "Nikita Mikhailov",
     "Nikita Mikhalev",
@@ -770,6 +794,7 @@ const char * auto_contributors[] {
     "Peignon Melvyn",
     "Peng Jian",
     "Peng Liu",
+    "Pengyuan Bian",
     "Persiyanov Dmitriy Andreevich",
     "Pervakov Grigorii",
     "Pervakov Grigory",
@@ -823,6 +848,7 @@ const char * auto_contributors[] {
     "Rory Crispin",
     "Roy Bellingan",
     "Ruslan",
+    "Ruslan Mardugalliamov",
     "Ruslan Savchenko",
     "Russ Frank",
     "Ruzal Ibragimov",
@@ -844,8 +870,10 @@ const char * auto_contributors[] {
     "Saulius Valatka",
     "Sean Haynes",
     "Sean Lafferty",
+    "Selfuppen",
     "Sema Checherinda",
     "Serg Kulakov",
+    "Serge Klochkov",
     "Serge Rider",
     "Sergei Bocharov",
     "Sergei Semin",
@@ -856,6 +884,7 @@ const char * auto_contributors[] {
     "Sergey Demurin",
     "Sergey Elantsev",
     "Sergey Fedorov",
+    "Sergey Katkovskiy",
     "Sergey Kazmin",
     "Sergey Kislov",
     "Sergey Kononenko",
@@ -919,6 +948,7 @@ const char * auto_contributors[] {
     "Tagir Kuskarov",
     "Tai White",
     "Taleh Zaliyev",
+    "Tanay Tummalapalli",
     "Tangaev",
     "Tanya Bragin",
     "Tatiana",
@@ -964,6 +994,7 @@ const char * auto_contributors[] {
     "Val Doroshchuk",
     "Valentin Alexeev",
     "Valera Ryaboshapko",
+    "VanDarkholme7",
     "Varinara",
     "Vasily Kozhukhovskiy",
     "Vasily Morozov",
@@ -1062,6 +1093,7 @@ const char * auto_contributors[] {
     "Yuriy Baranov",
     "Yuriy Chernyshov",
     "Yuriy Korzhenevskiy",
+    "Yury Bogomolov",
     "Yury Karpovich",
     "Yury Stankevich",
     "Yusuke Tanaka",
@@ -1149,6 +1181,7 @@ const char * auto_contributors[] {
     "caspian",
     "cekc",
     "centos7",
+    "cfanbo",
     "cfcz48",
     "cgp",
     "champtar",
@@ -1177,6 +1210,7 @@ const char * auto_contributors[] {
     "cnmade",
     "comunodi",
     "congbaoyangrou",
+    "copperybean",
     "coraxster",
     "cwkyaoyao",
     "d.v.semenov",
@@ -1186,6 +1220,7 @@ const char * auto_contributors[] {
     "daoready",
     "darkkeks",
     "dasmfm",
+    "daviddhc20120601",
     "davydovska",
     "decaseal",
     "dependabot-preview[bot]",
@@ -1208,6 +1243,7 @@ const char * auto_contributors[] {
     "eaxdev",
     "eejoin",
     "egatov",
+    "ekrasikov",
     "elBroom",
     "elenaspb2019",
     "elevankoff",
@@ -1262,6 +1298,7 @@ const char * auto_contributors[] {
     "guov100",
     "guyco87",
     "guykohen",
+    "gyfis",
     "gyuton",
     "hanqf-git",
     "hao.he",
@@ -1270,6 +1307,7 @@ const char * auto_contributors[] {
     "hcz",
     "heleihelei",
     "helifu",
+    "hendrik-m",
     "heng zhao",
     "hermano",
     "hexiaoting",
@@ -1292,6 +1330,7 @@ const char * auto_contributors[] {
     "ikopylov",
     "imgbot[bot]",
     "ip",
+    "irenjj",
     "ismailakpolat",
     "it1804",
     "ivan-klass",
@@ -1311,11 +1350,14 @@ const char * auto_contributors[] {
     "jiahui-97",
     "jianmei zhang",
     "jinjunzh",
+    "jiyoungyoooo",
     "jkuklis",
     "johanngan",
+    "jsc0218",
     "jthmath",
     "jun won",
     "jus1096",
+    "justindeguzman",
     "jyz0309",
     "karnevil13",
     "kashwy",
@@ -1332,6 +1374,7 @@ const char * auto_contributors[] {
     "kolsys",
     "konnectr",
     "koshachy",
+    "kothiga",
     "kreuzerkrieg",
     "ks1322",
     "kshvakov",
@@ -1451,6 +1494,7 @@ const char * auto_contributors[] {
     "ni1l",
     "nicelulu",
     "nickzhwang",
+    "nikitakeba",
     "nikitamikhaylov",
     "nonexistence",
     "ns-vasilev",
@@ -1521,6 +1565,7 @@ const char * auto_contributors[] {
     "satanson",
     "save-my-heart",
     "sdk2",
+    "selfuppen",
     "serebrserg",
     "serxa",
     "sev7e0",
@@ -1607,8 +1652,10 @@ const char * auto_contributors[] {
     "wuxiaobai24",
     "wzl",
     "xPoSx",
+    "xbthink",
     "xiao",
     "xiaolei565",
+    "xiebin",
     "xiedeyantu",
     "xieyichen",
     "xinhuitian",
@@ -1678,6 +1725,7 @@ const char * auto_contributors[] {
     "Дмитрий Канатников",
     "Иванов Евгений",
     "Илья Исаев",
+    "Илья Коргун",
     "Коренберг Марк",
     "Коренберг ☢️ Марк",
     "Павел Литвиненко",
@@ -1702,6 +1750,7 @@ const char * auto_contributors[] {
     "李扬",
     "极客青年",
     "枢木",
+    "王智博",
     "董海镔",
     "谢磊",
     "贾顺名(Jarvis)",
@@ -1,3 +1,5 @@
+#include <Parsers/ASTSubquery.h>
+#include <Parsers/queryToString.h>
 #include <Storages/transformQueryForExternalDatabaseAnalyzer.h>
 
 #include <Parsers/ASTSelectWithUnionQuery.h>
@@ -61,11 +63,15 @@ ASTPtr getASTForExternalDatabaseFromQueryTree(const QueryTreeNodePtr & query_tre
     visitor.visit(new_tree);
     const auto * query_node = new_tree->as<QueryNode>();
 
-    const auto & query_node_ast = query_node->toAST({ .add_cast_for_constants = false, .fully_qualified_identifiers = false });
+    auto query_node_ast = query_node->toAST({ .add_cast_for_constants = false, .fully_qualified_identifiers = false });
+    const IAST * ast = query_node_ast.get();
+
+    if (const auto * ast_subquery = ast->as<ASTSubquery>())
+        ast = ast_subquery->children.at(0).get();
 
-    const auto * union_ast = query_node_ast->as<ASTSelectWithUnionQuery>();
+    const auto * union_ast = ast->as<ASTSelectWithUnionQuery>();
     if (!union_ast)
-        throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "QueryNode AST is not a ASTSelectWithUnionQuery");
+        throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "QueryNode AST ({}) is not a ASTSelectWithUnionQuery", query_node_ast->getID());
 
     if (union_ast->list_of_selects->children.size() != 1)
         throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "QueryNode AST is not a single ASTSelectQuery, got {}", union_ast->list_of_selects->children.size());
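The added guard covers the case where toAST() returns the converted query wrapped in a subquery node. Reduced to its core, the unwrap-then-cast step looks like this (names as in the diff; fragment shown out of context for illustration only):

    const IAST * ast = query_node_ast.get();
    if (const auto * ast_subquery = ast->as<ASTSubquery>())
        ast = ast_subquery->children.at(0).get();   // step into the wrapped query
    const auto * union_ast = ast->as<ASTSelectWithUnionQuery>();
    // a null union_ast here means the shape is still unexpected, and the function throws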
@@ -127,3 +127,4 @@
 02790_optimize_skip_unused_shards_join
 01940_custom_tld_sharding_key
 02815_range_dict_no_direct_join
+02861_join_on_nullsafe_compare
@@ -72,6 +72,12 @@
         </s3_cache_multi_2>
     </disks>
     <policies>
+        <local_remote>
+            <volumes>
+                <local><disk>default</disk></local>
+                <remote><disk>s3_disk</disk></remote>
+            </volumes>
+        </local_remote>
         <s3_cache>
             <volumes>
                 <main>
@@ -21,34 +21,34 @@ array-join
 lambda
 1
 optimize_read_in_order
-Expression (Projection)
-  Limit (preliminary LIMIT (without OFFSET))
-    Sorting (Sorting for ORDER BY)
-      Expression (Before ORDER BY)
-        ReadFromMergeTree (default.test_table)
-Expression (Projection)
-  Limit (preliminary LIMIT (without OFFSET))
-    Sorting (Sorting for ORDER BY)
-      Expression (Before ORDER BY)
-        ReadFromMergeTree (default.test_table)
-Expression ((Projection + Before ORDER BY [lifted up part]))
-  Limit (preliminary LIMIT (without OFFSET))
-    Sorting (Sorting for ORDER BY)
-      Expression (Before ORDER BY)
-        ReadFromMergeTree (default.test_table)
+Expression
+  Limit
+    Sorting
+      Expression
+        ReadFromMergeTree
+Expression
+  Limit
+    Sorting
+      Expression
+        ReadFromMergeTree
+Expression
+  Limit
+    Sorting
+      Expression
+        ReadFromMergeTree
 optimize_aggregation_in_order
-Expression ((Projection + Before ORDER BY))
-  Aggregating
-    Expression (Before GROUP BY)
-      ReadFromMergeTree (default.test_table)
-Expression ((Projection + Before ORDER BY))
-  Aggregating
-    Expression (Before GROUP BY)
-      ReadFromMergeTree (default.test_table)
-Expression ((Projection + Before ORDER BY))
-  Aggregating
-    Expression (Before GROUP BY)
-      ReadFromMergeTree (default.test_table)
+Expression
+  Aggregating
+    Expression
+      ReadFromMergeTree
+Expression
+  Aggregating
+    Expression
+      ReadFromMergeTree
+Expression
+  Aggregating
+    Expression
+      ReadFromMergeTree
 second-index
 1
 1
@@ -81,15 +81,15 @@ SELECT count() == 10 FROM test_table WHERE arrayMap((day) -> day + 1, [1,2,3])
 set max_rows_to_read = 0;
 
 SELECT 'optimize_read_in_order';
-EXPLAIN SELECT day AS s FROM test_table ORDER BY s LIMIT 1 SETTINGS optimize_read_in_order = 0;
-EXPLAIN SELECT day AS s FROM test_table ORDER BY s LIMIT 1 SETTINGS optimize_read_in_order = 1;
-EXPLAIN SELECT toDate(timestamp) AS s FROM test_table ORDER BY toDate(timestamp) LIMIT 1 SETTINGS optimize_read_in_order = 1;
+EXPLAIN description = 0 SELECT day AS s FROM test_table ORDER BY s LIMIT 1 SETTINGS optimize_read_in_order = 0;
+EXPLAIN description = 0 SELECT day AS s FROM test_table ORDER BY s LIMIT 1 SETTINGS optimize_read_in_order = 1;
+EXPLAIN description = 0 SELECT toDate(timestamp) AS s FROM test_table ORDER BY toDate(timestamp) LIMIT 1 SETTINGS optimize_read_in_order = 1;
 
 
 SELECT 'optimize_aggregation_in_order';
-EXPLAIN SELECT day, count() AS s FROM test_table GROUP BY day SETTINGS optimize_aggregation_in_order = 0;
-EXPLAIN SELECT day, count() AS s FROM test_table GROUP BY day SETTINGS optimize_aggregation_in_order = 1;
-EXPLAIN SELECT toDate(timestamp), count() AS s FROM test_table GROUP BY toDate(timestamp) SETTINGS optimize_aggregation_in_order = 1;
+EXPLAIN description = 0 SELECT day, count() AS s FROM test_table GROUP BY day SETTINGS optimize_aggregation_in_order = 0;
+EXPLAIN description = 0 SELECT day, count() AS s FROM test_table GROUP BY day SETTINGS optimize_aggregation_in_order = 1;
+EXPLAIN description = 0 SELECT toDate(timestamp), count() AS s FROM test_table GROUP BY toDate(timestamp) SETTINGS optimize_aggregation_in_order = 1;
 
 DROP TABLE test_table;
 
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-# Tags: no-s3-storage
+# Tags: no-s3-storage, no-asan
 
 CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
 # shellcheck source=../shell_config.sh
@@ -1,5 +1,5 @@
 #!/usr/bin/env bash
-# Tags: no-tsan, no-cpu-aarch64
+# Tags: no-tsan, no-cpu-aarch64, no-parallel
 # TSan does not support tracing.
 # trace_log doesn't work on aarch64
 
@@ -0,0 +1,671 @@
{% for join_algorithm in ['default', 'grace_hash', 'full_sorting_merge'] -%}
join_algorithm = {{ join_algorithm }}, join_use_nulls = 0, t1 JOIN t2
--
\N 0 2 2
\N 0 6 6
\N 0 10 10
\N 0 14 14
\N 0 18 18
1 1 1 1
\N 2 \N 4
\N 2 \N 8
\N 2 \N 12
\N 2 \N 16
\N 2 \N 20
3 3 3 3
\N 4 \N 4
\N 4 \N 8
\N 4 \N 12
\N 4 \N 16
\N 4 \N 20
5 5 5 5
\N 6 \N 4
\N 6 \N 8
\N 6 \N 12
\N 6 \N 16
\N 6 \N 20
7 7 7 7
\N 8 \N 4
\N 8 \N 8
\N 8 \N 12
\N 8 \N 16
\N 8 \N 20
9 9 9 9
\N 10 \N 4
\N 10 \N 8
\N 10 \N 12
\N 10 \N 16
\N 10 \N 20
11 11 11 11
\N 12 \N 4
\N 12 \N 8
\N 12 \N 12
\N 12 \N 16
\N 12 \N 20
13 13 13 13
\N 14 \N 4
\N 14 \N 8
\N 14 \N 12
\N 14 \N 16
\N 14 \N 20
15 15 15 15
\N 16 \N 4
\N 16 \N 8
\N 16 \N 12
\N 16 \N 16
\N 16 \N 20
17 17 17 17
\N 18 \N 4
\N 18 \N 8
\N 18 \N 12
\N 18 \N 16
\N 18 \N 20
19 19 19 19
\N 20 \N 4
\N 20 \N 8
\N 20 \N 12
\N 20 \N 16
\N 20 \N 20
--
\N \N 0 2 2 2
\N \N 0 3 \N 3
\N \N 0 6 \N 6
\N \N 0 9 \N 9
\N \N 0 10 10 10
\N \N 0 14 14 14
\N \N 0 15 \N 15
\N \N 0 18 \N 18
1 1 1 1 1 1
\N 2 2 \N \N 0
3 3 3 \N \N 0
\N 4 4 \N 4 4
5 5 5 5 5 5
\N \N 6 \N \N 12
7 7 7 7 7 7
\N 8 8 \N 8 8
9 9 9 \N \N 0
\N 10 10 \N \N 0
11 11 11 11 11 11
\N \N 12 \N \N 12
13 13 13 13 13 13
\N 14 14 \N \N 0
15 15 15 \N \N 0
\N 16 16 \N 16 16
17 17 17 17 17 17
\N \N 18 \N \N 12
19 19 19 19 19 19
\N 20 20 \N 20 20
--
\N \N 0 2 2 2
\N \N 0 3 \N 3
\N \N 0 \N 4 4
\N \N 0 6 \N 6
\N \N 0 \N 8 8
\N \N 0 9 \N 9
\N \N 0 10 10 10
\N \N 0 \N \N 12
\N \N 0 14 14 14
\N \N 0 15 \N 15
\N \N 0 \N 16 16
\N \N 0 18 \N 18
\N \N 0 \N 20 20
1 1 1 1 1 1
\N 2 2 \N \N 0
3 3 3 \N \N 0
\N 4 4 \N \N 0
5 5 5 5 5 5
\N \N 6 \N \N 0
7 7 7 7 7 7
\N 8 8 \N \N 0
9 9 9 \N \N 0
\N 10 10 \N \N 0
11 11 11 11 11 11
\N \N 12 \N \N 0
13 13 13 13 13 13
\N 14 14 \N \N 0
15 15 15 \N \N 0
\N 16 16 \N \N 0
17 17 17 17 17 17
\N \N 18 \N \N 0
19 19 19 19 19 19
\N 20 20 \N \N 0
--
\N \N 0 2 2 2
\N \N 0 3 \N 3
\N \N 0 6 \N 6
\N \N 0 9 \N 9
\N \N 0 10 10 10
\N \N 0 \N \N 12
\N \N 0 14 14 14
\N \N 0 15 \N 15
\N \N 0 18 \N 18
1 1 1 1 1 1
\N 2 2 \N \N 0
3 3 3 \N \N 0
\N 4 4 \N 4 4
5 5 5 5 5 5
\N \N 6 \N \N 0
7 7 7 7 7 7
\N 8 8 \N 8 8
9 9 9 \N \N 0
\N 10 10 \N \N 0
11 11 11 11 11 11
\N \N 12 \N \N 0
13 13 13 13 13 13
\N 14 14 \N \N 0
15 15 15 \N \N 0
\N 16 16 \N 16 16
17 17 17 17 17 17
\N \N 18 \N \N 0
19 19 19 19 19 19
\N 20 20 \N 20 20
{% endfor -%}
join_algorithm = default, join_use_nulls = 1, t1 JOIN t2
--
\N \N 2 2
\N \N 6 6
\N \N 10 10
\N \N 14 14
\N \N 18 18
1 1 1 1
\N 2 \N 4
\N 2 \N 8
\N 2 \N 12
\N 2 \N 16
\N 2 \N 20
3 3 3 3
\N 4 \N 4
\N 4 \N 8
\N 4 \N 12
\N 4 \N 16
\N 4 \N 20
5 5 5 5
\N 6 \N 4
\N 6 \N 8
\N 6 \N 12
\N 6 \N 16
\N 6 \N 20
7 7 7 7
\N 8 \N 4
\N 8 \N 8
\N 8 \N 12
\N 8 \N 16
\N 8 \N 20
9 9 9 9
\N 10 \N 4
\N 10 \N 8
\N 10 \N 12
\N 10 \N 16
\N 10 \N 20
11 11 11 11
\N 12 \N 4
\N 12 \N 8
\N 12 \N 12
\N 12 \N 16
\N 12 \N 20
13 13 13 13
\N 14 \N 4
\N 14 \N 8
\N 14 \N 12
\N 14 \N 16
\N 14 \N 20
15 15 15 15
\N 16 \N 4
\N 16 \N 8
\N 16 \N 12
\N 16 \N 16
\N 16 \N 20
17 17 17 17
\N 18 \N 4
\N 18 \N 8
\N 18 \N 12
\N 18 \N 16
\N 18 \N 20
19 19 19 19
\N 20 \N 4
\N 20 \N 8
\N 20 \N 12
\N 20 \N 16
\N 20 \N 20
--
\N \N \N 2 2 2
\N \N \N 3 \N 3
\N \N \N 6 \N 6
\N \N \N 9 \N 9
\N \N \N 10 10 10
\N \N \N 14 14 14
\N \N \N 15 \N 15
\N \N \N 18 \N 18
1 1 1 1 1 1
\N 2 2 \N \N \N
3 3 3 \N \N \N
\N 4 4 \N 4 4
5 5 5 5 5 5
\N \N 6 \N \N 12
7 7 7 7 7 7
\N 8 8 \N 8 8
9 9 9 \N \N \N
\N 10 10 \N \N \N
11 11 11 11 11 11
\N \N 12 \N \N 12
13 13 13 13 13 13
\N 14 14 \N \N \N
15 15 15 \N \N \N
\N 16 16 \N 16 16
17 17 17 17 17 17
\N \N 18 \N \N 12
19 19 19 19 19 19
\N 20 20 \N 20 20
--
\N \N \N 2 2 2
\N \N \N 3 \N 3
\N \N \N \N 4 4
\N \N \N 6 \N 6
\N \N \N \N 8 8
\N \N \N 9 \N 9
\N \N \N 10 10 10
\N \N \N \N \N 12
\N \N \N 14 14 14
\N \N \N 15 \N 15
\N \N \N \N 16 16
\N \N \N 18 \N 18
\N \N \N \N 20 20
1 1 1 1 1 1
\N 2 2 \N \N \N
3 3 3 \N \N \N
\N 4 4 \N \N \N
5 5 5 5 5 5
\N \N 6 \N \N \N
7 7 7 7 7 7
\N 8 8 \N \N \N
9 9 9 \N \N \N
\N 10 10 \N \N \N
11 11 11 11 11 11
\N \N 12 \N \N \N
13 13 13 13 13 13
\N 14 14 \N \N \N
15 15 15 \N \N \N
\N 16 16 \N \N \N
17 17 17 17 17 17
\N \N 18 \N \N \N
19 19 19 19 19 19
\N 20 20 \N \N \N
--
\N \N \N 2 2 2
\N \N \N 3 \N 3
\N \N \N 6 \N 6
\N \N \N 9 \N 9
\N \N \N 10 10 10
\N \N \N \N \N 12
\N \N \N 14 14 14
\N \N \N 15 \N 15
\N \N \N 18 \N 18
1 1 1 1 1 1
\N 2 2 \N \N \N
3 3 3 \N \N \N
\N 4 4 \N 4 4
5 5 5 5 5 5
\N \N 6 \N \N \N
7 7 7 7 7 7
\N 8 8 \N 8 8
9 9 9 \N \N \N
\N 10 10 \N \N \N
11 11 11 11 11 11
\N \N 12 \N \N \N
13 13 13 13 13 13
\N 14 14 \N \N \N
15 15 15 \N \N \N
\N 16 16 \N 16 16
17 17 17 17 17 17
\N \N 18 \N \N \N
19 19 19 19 19 19
\N 20 20 \N 20 20
join_algorithm = default, join_use_nulls = 0, t1 JOIN t3
--
\N 0 2 2
\N 0 6 6
\N 0 10 10
\N 0 14 14
\N 0 18 18
1 1 1 1
\N 2 \N 4
\N 2 \N 8
\N 2 \N 12
\N 2 \N 16
\N 2 \N 20
3 3 3 3
\N 4 \N 4
\N 4 \N 8
\N 4 \N 12
\N 4 \N 16
\N 4 \N 20
5 5 5 5
\N 6 \N 4
\N 6 \N 8
\N 6 \N 12
\N 6 \N 16
\N 6 \N 20
7 7 7 7
\N 8 \N 4
\N 8 \N 8
\N 8 \N 12
\N 8 \N 16
\N 8 \N 20
9 9 9 9
\N 10 \N 4
\N 10 \N 8
\N 10 \N 12
\N 10 \N 16
\N 10 \N 20
11 11 11 11
\N 12 \N 4
\N 12 \N 8
\N 12 \N 12
\N 12 \N 16
\N 12 \N 20
13 13 13 13
\N 14 \N 4
\N 14 \N 8
\N 14 \N 12
\N 14 \N 16
\N 14 \N 20
15 15 15 15
\N 16 \N 4
\N 16 \N 8
\N 16 \N 12
\N 16 \N 16
\N 16 \N 20
17 17 17 17
\N 18 \N 4
\N 18 \N 8
\N 18 \N 12
\N 18 \N 16
\N 18 \N 20
19 19 19 19
\N 20 \N 4
\N 20 \N 8
\N 20 \N 12
\N 20 \N 16
\N 20 \N 20
--
\N \N 0 2 2 2
\N \N 0 3 0 3
\N \N 0 6 0 6
\N \N 0 9 0 9
\N \N 0 10 10 10
\N \N 0 \N 0 12
\N \N 0 14 14 14
\N \N 0 15 0 15
\N \N 0 18 0 18
1 1 1 1 1 1
\N 2 2 \N 0 0
3 3 3 \N 0 0
\N 4 4 \N 4 4
5 5 5 5 5 5
\N \N 6 \N 0 0
7 7 7 7 7 7
\N 8 8 \N 8 8
9 9 9 \N 0 0
\N 10 10 \N 0 0
11 11 11 11 11 11
\N \N 12 \N 0 0
13 13 13 13 13 13
\N 14 14 \N 0 0
15 15 15 \N 0 0
\N 16 16 \N 16 16
17 17 17 17 17 17
\N \N 18 \N 0 0
19 19 19 19 19 19
\N 20 20 \N 20 20
--
\N \N 0 2 2 2
\N \N 0 3 0 3
\N \N 0 \N 4 4
\N \N 0 6 0 6
\N \N 0 \N 8 8
\N \N 0 9 0 9
\N \N 0 10 10 10
\N \N 0 \N 0 12
\N \N 0 14 14 14
\N \N 0 15 0 15
\N \N 0 \N 16 16
\N \N 0 18 0 18
\N \N 0 \N 20 20
1 1 1 1 1 1
\N 2 2 \N 0 0
3 3 3 \N 0 0
\N 4 4 \N 0 0
5 5 5 5 5 5
\N \N 6 \N 0 0
7 7 7 7 7 7
\N 8 8 \N 0 0
9 9 9 \N 0 0
\N 10 10 \N 0 0
11 11 11 11 11 11
\N \N 12 \N 0 0
13 13 13 13 13 13
\N 14 14 \N 0 0
15 15 15 \N 0 0
\N 16 16 \N 0 0
17 17 17 17 17 17
\N \N 18 \N 0 0
19 19 19 19 19 19
\N 20 20 \N 0 0
--
\N \N 0 2 2 2
\N \N 0 3 0 3
\N \N 0 6 0 6
\N \N 0 9 0 9
\N \N 0 10 10 10
\N \N 0 \N 0 12
\N \N 0 14 14 14
\N \N 0 15 0 15
\N \N 0 18 0 18
1 1 1 1 1 1
\N 2 2 \N 0 0
3 3 3 \N 0 0
\N 4 4 \N 4 4
5 5 5 5 5 5
\N \N 6 \N 0 0
7 7 7 7 7 7
\N 8 8 \N 8 8
9 9 9 \N 0 0
\N 10 10 \N 0 0
11 11 11 11 11 11
\N \N 12 \N 0 0
13 13 13 13 13 13
\N 14 14 \N 0 0
15 15 15 \N 0 0
\N 16 16 \N 16 16
17 17 17 17 17 17
\N \N 18 \N 0 0
19 19 19 19 19 19
\N 20 20 \N 20 20
join_algorithm = default, join_use_nulls = 0, t1 JOIN t4
--
\N 0 2 2
\N 0 0 4
\N 0 6 6
\N 0 0 8
\N 0 10 10
\N 0 0 12
\N 0 14 14
\N 0 0 16
\N 0 18 18
\N 0 0 20
1 1 1 1
\N 2 0 0
3 3 3 3
\N 4 0 0
5 5 5 5
\N 6 0 0
7 7 7 7
\N 8 0 0
9 9 9 9
\N 10 0 0
11 11 11 11
\N 12 0 0
13 13 13 13
\N 14 0 0
15 15 15 15
\N 16 0 0
17 17 17 17
\N 18 0 0
19 19 19 19
\N 20 0 0
--
\N \N 0 2 2 2
\N \N 0 3 0 3
\N \N 0 0 4 4
\N \N 0 6 0 6
\N \N 0 0 8 8
\N \N 0 9 0 9
\N \N 0 10 10 10
\N \N 0 0 0 12
\N \N 0 14 14 14
\N \N 0 15 0 15
\N \N 0 0 16 16
\N \N 0 18 0 18
\N \N 0 0 20 20
1 1 1 1 1 1
\N 2 2 0 0 0
3 3 3 0 0 0
\N 4 4 0 0 0
5 5 5 5 5 5
\N \N 6 0 0 0
7 7 7 7 7 7
\N 8 8 0 0 0
9 9 9 0 0 0
\N 10 10 0 0 0
11 11 11 11 11 11
\N \N 12 0 0 0
13 13 13 13 13 13
\N 14 14 0 0 0
15 15 15 0 0 0
\N 16 16 0 0 0
17 17 17 17 17 17
\N \N 18 0 0 0
19 19 19 19 19 19
\N 20 20 0 0 0
--
\N \N 0 2 2 2
\N \N 0 3 0 3
\N \N 0 0 4 4
\N \N 0 6 0 6
\N \N 0 0 8 8
\N \N 0 9 0 9
\N \N 0 10 10 10
\N \N 0 0 0 12
\N \N 0 14 14 14
\N \N 0 15 0 15
\N \N 0 0 16 16
\N \N 0 18 0 18
\N \N 0 0 20 20
1 1 1 1 1 1
\N 2 2 0 0 0
3 3 3 0 0 0
\N 4 4 0 0 0
5 5 5 5 5 5
\N \N 6 0 0 0
7 7 7 7 7 7
\N 8 8 0 0 0
9 9 9 0 0 0
\N 10 10 0 0 0
11 11 11 11 11 11
\N \N 12 0 0 0
13 13 13 13 13 13
\N 14 14 0 0 0
15 15 15 0 0 0
\N 16 16 0 0 0
17 17 17 17 17 17
\N \N 18 0 0 0
19 19 19 19 19 19
\N 20 20 0 0 0
--
\N \N 0 2 2 2
\N \N 0 3 0 3
\N \N 0 0 4 4
\N \N 0 6 0 6
\N \N 0 0 8 8
\N \N 0 9 0 9
\N \N 0 10 10 10
\N \N 0 0 0 12
\N \N 0 14 14 14
\N \N 0 15 0 15
\N \N 0 0 16 16
\N \N 0 18 0 18
\N \N 0 0 20 20
1 1 1 1 1 1
\N 2 2 0 0 0
3 3 3 0 0 0
\N 4 4 0 0 0
5 5 5 5 5 5
\N \N 6 0 0 0
7 7 7 7 7 7
\N 8 8 0 0 0
9 9 9 0 0 0
\N 10 10 0 0 0
11 11 11 11 11 11
\N \N 12 0 0 0
13 13 13 13 13 13
\N 14 14 0 0 0
15 15 15 0 0 0
\N 16 16 0 0 0
17 17 17 17 17 17
\N \N 18 0 0 0
19 19 19 19 19 19
\N 20 20 0 0 0
--
\N 0 2 2
\N 0 \N 4
\N 0 6 6
\N 0 \N 8
\N 0 10 10
\N 0 \N 12
\N 0 14 14
\N 0 \N 16
\N 0 18 18
\N 0 \N 20
1 1 1 1
\N 2 \N 0
3 3 3 3
\N 4 \N 0
5 5 5 5
\N 6 \N 0
7 7 7 7
\N 8 \N 0
9 9 9 9
\N 10 \N 0
11 11 11 11
\N 12 \N 0
13 13 13 13
\N 14 \N 0
15 15 15 15
\N 16 \N 0
17 17 17 17
\N 18 \N 0
19 19 19 19
\N 20 \N 0
--
1 42 420 1 1 43 430 1
\N 42 420 2 \N 43 430 4
\N 42 420 2 \N 43 430 8
\N 42 420 2 \N 43 430 12
\N 42 420 2 \N 43 430 16
\N 42 420 2 \N 43 430 20
3 42 420 3 3 43 430 3
\N 42 420 4 \N 43 430 4
\N 42 420 4 \N 43 430 8
\N 42 420 4 \N 43 430 12
--
1 42 420 1 1 43 430 1
\N 42 420 2 \N 43 430 4
\N 42 420 2 \N 43 430 8
\N 42 420 2 \N 43 430 12
\N 42 420 2 \N 43 430 16
\N 42 420 2 \N 43 430 20
3 42 420 3 3 43 430 3
\N 42 420 4 \N 43 430 4
\N 42 420 4 \N 43 430 8
\N 42 420 4 \N 43 430 12
--
tests/queries/0_stateless/02861_join_on_nullsafe_compare.sql.j2 (new file, 101 lines)
@@ -0,0 +1,101 @@
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;

CREATE TABLE t1 (a Nullable(UInt32), b Nullable(Int16), val UInt32) ENGINE = MergeTree ORDER BY tuple() SETTINGS ratio_of_defaults_for_sparse_serialization = 1;
INSERT INTO t1 SELECT if(number % 2 == 0, NULL, number), if(number % 6 == 0, NULL, number), number, FROM numbers(1, 20);

CREATE TABLE t2 (a Nullable(UInt32), b Nullable(UInt16), val UInt32) ENGINE = MergeTree ORDER BY tuple() SETTINGS ratio_of_defaults_for_sparse_serialization = 1;
INSERT INTO t2 SELECT if(number % 4 == 0, NULL, number), if(number % 3 == 0, NULL, number), number, FROM numbers(1, 20);

CREATE TABLE t3 (a Nullable(UInt32), b UInt16, val UInt32) ENGINE = MergeTree ORDER BY tuple() SETTINGS ratio_of_defaults_for_sparse_serialization = 1;
INSERT INTO t3 SELECT if(number % 4 == 0, NULL, number), if(number % 3 == 0, NULL, number), number, FROM numbers(1, 20);

CREATE TABLE t4 (a UInt32, b UInt16, val UInt32) ENGINE = MergeTree ORDER BY tuple() SETTINGS ratio_of_defaults_for_sparse_serialization = 1;
INSERT INTO t4 SELECT if(number % 4 == 0, NULL, number), if(number % 3 == 0, NULL, number), number, FROM numbers(1, 20);

{% for join_algorithm, join_use_nulls, t1, t2 in [
    ('default', 0, 't1', 't2'),
    ('grace_hash', 0, 't1', 't2'),
    ('full_sorting_merge', 0, 't1', 't2'),
    ('default', 1, 't1', 't2'),
    ('default', 0, 't1', 't3'),
    ('default', 0, 't1', 't4'),
] -%}

SET join_algorithm = '{{ join_algorithm }}';
SET join_use_nulls = {{ join_use_nulls }};

SELECT 'join_algorithm = {{ join_algorithm }}, join_use_nulls = {{ join_use_nulls }}, {{ t1 }} JOIN {{ t2 }}';

SELECT '--';

SELECT {{ t1 }}.a, {{ t1 }}.val, {{ t2 }}.a, {{ t2 }}.val FROM {{ t1 }} FULL JOIN {{ t2 }}
ON isNotDistinctFrom({{ t1 }}.a, {{ t2 }}.a)
ORDER BY {{ t1 }}.val NULLS FIRST, {{ t2 }}.val NULLS FIRST
;

SELECT '--';

SELECT * FROM {{ t1 }} FULL JOIN {{ t2 }}
ON isNotDistinctFrom({{ t1 }}.a, {{ t2 }}.a) AND isNotDistinctFrom({{ t1 }}.b, {{ t2 }}.b)
ORDER BY {{ t1 }}.val NULLS FIRST, {{ t2 }}.val NULLS FIRST
;

SELECT '--';

SELECT * FROM {{ t1 }} FULL JOIN {{ t2 }}
ON {{ t1 }}.a == {{ t2 }}.a AND isNotDistinctFrom({{ t1 }}.b, {{ t2 }}.b)
ORDER BY {{ t1 }}.val NULLS FIRST, {{ t2 }}.val NULLS FIRST
;

SELECT '--';

SELECT * FROM {{ t1 }} FULL JOIN {{ t2 }}
ON isNotDistinctFrom({{ t1 }}.a, {{ t2 }}.a) AND {{ t1 }}.b == {{ t2 }}.b
ORDER BY {{ t1 }}.val NULLS FIRST, {{ t2 }}.val NULLS FIRST
;

{% endfor -%}

SELECT '--';

SET join_use_nulls = 0;
SET join_algorithm = 'hash';
SELECT t1.a, t1.val, t2.a, t2.val FROM t1 FULL JOIN t2
ON isNotDistinctFrom(t1.a, t2.a) AND t1.b < 2 OR t1.a == t2.a
ORDER BY t1.val NULLS FIRST, t2.val NULLS FIRST
;

SELECT '--';

SET join_algorithm = 'default';
SET join_use_nulls = 1;

-- try to cause column name clash intentionally using internal name

SELECT *
FROM (SELECT a, 42 as `__wrapNullsafe(a)`, 420 as `tuple(a)`, val FROM t1) t1
JOIN (SELECT a, 43 as `__wrapNullsafe(t2.a)`, 430 as `tuple(t2.a)`, val FROM t2) t2
ON isNotDistinctFrom(t1.a, t2.a)
ORDER BY t1.val NULLS FIRST, t2.val NULLS FIRST
LIMIT 10;

SELECT '--';

SELECT a, 42 as `__wrapNullsafe(a)`, 420 as `tuple(a)`, val, t2.a, 43 as `__wrapNullsafe(t2.a)`, 430 as `tuple(t2.a)`, t2.val
FROM (SELECT a, val, 111 as `__wrapNullsafe(a)_0` FROM t1) t1
JOIN (SELECT a, val, 111 as `__wrapNullsafe(t2.a)_0` FROM t2) t2
ON isNotDistinctFrom(t1.a, t2.a)
ORDER BY t1.val NULLS FIRST, t2.val NULLS FIRST
LIMIT 10;

SELECT '--';

-- check illegal queries

SELECT * FROM t1 JOIN t2 ON isNotDistinctFrom(); -- { serverError SYNTAX_ERROR,NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
SELECT * FROM t1 JOIN t2 ON isNotDistinctFrom(t1.a); -- { serverError SYNTAX_ERROR,NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
SELECT * FROM t1 JOIN t2 ON isNotDistinctFrom(t1.a, t2.a, t2.b); -- { serverError SYNTAX_ERROR,NUMBER_OF_ARGUMENTS_DOESNT_MATCH }

SELECT isNotDistinctFrom(a) from t1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
SELECT isNotDistinctFrom(a, b) from t1; -- { serverError NOT_IMPLEMENTED }
|
|||||||
|
default tuple() 1000000
|
||||||
|
Alter 1
|
||||||
|
s3_disk tuple() 1000000
|
@ -0,0 +1,12 @@
|
|||||||
|
-- Tags: no-random-merge-tree-settings, no-fasttest, no-replicated-database
|
||||||
|
-- Tag: no-fasttest -- requires S3
|
||||||
|
-- Tag: no-replicated-database -- ALTER MOVE PARTITION TO should not be replicated (will be fixed separatelly)
|
||||||
|
|
||||||
|
CREATE TABLE test_move_partition_throttling (key UInt64 CODEC(NONE)) ENGINE = MergeTree ORDER BY tuple() SETTINGS storage_policy='local_remote';
|
||||||
|
INSERT INTO test_move_partition_throttling SELECT number FROM numbers(1e6);
|
||||||
|
SELECT disk_name, partition, rows FROM system.parts WHERE database = currentDatabase() AND table = 'test_move_partition_throttling' and active;
|
||||||
|
ALTER TABLE test_move_partition_throttling MOVE PARTITION tuple() TO VOLUME 'remote' SETTINGS max_remote_write_network_bandwidth=1600000;
|
||||||
|
SYSTEM FLUSH LOGS;
|
||||||
|
-- (8e6-1600000)/1600000=4.0
|
||||||
|
SELECT query_kind, query_duration_ms>4e3 FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND query_kind = 'Alter';
|
||||||
|
SELECT disk_name, partition, rows FROM system.parts WHERE database = currentDatabase() AND table = 'test_move_partition_throttling' and active;
|
@@ -1,3 +1,4 @@
+v23.8.1.2992-lts 2023-09-01
 v23.7.5.30-stable 2023-08-28
 v23.7.4.5-stable 2023-08-08
 v23.7.3.14-stable 2023-08-05