Mirror of https://github.com/ClickHouse/ClickHouse.git

Commit 78760639d2: Merge branch 'master' into ADQM-987
@@ -13,18 +13,14 @@ The following versions of ClickHouse server are currently being supported with security updates:

 | Version | Supported |
 |:-|:-|
+| 23.8 | ✔️ |
 | 23.7 | ✔️ |
 | 23.6 | ✔️ |
-| 23.5 | ✔️ |
+| 23.5 | ❌ |
 | 23.4 | ❌ |
 | 23.3 | ✔️ |
 | 23.2 | ❌ |
 | 23.1 | ❌ |
-| 22.12 | ❌ |
-| 22.11 | ❌ |
-| 22.10 | ❌ |
-| 22.9 | ❌ |
-| 22.8 | ✔️ |
 | 22.* | ❌ |
 | 21.* | ❌ |
 | 20.* | ❌ |
@@ -2,11 +2,11 @@

 # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54477)
+SET(VERSION_REVISION 54478)
 SET(VERSION_MAJOR 23)
-SET(VERSION_MINOR 8)
+SET(VERSION_MINOR 9)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH a70127baecc451f1f7073bad7b6198f6703441d8)
-SET(VERSION_DESCRIBE v23.8.1.1-testing)
-SET(VERSION_STRING 23.8.1.1)
+SET(VERSION_GITHASH ebc7d9a9f3b40be89e0b3e738b35d394aabeea3e)
+SET(VERSION_DESCRIBE v23.9.1.1-testing)
+SET(VERSION_STRING 23.9.1.1)
 # end of autochange
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
 esac

 ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
-ARG VERSION="23.7.5.30"
+ARG VERSION="23.8.1.2992"
 ARG PACKAGES="clickhouse-keeper"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="23.7.5.30"
+ARG VERSION="23.8.1.2992"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # user/group precreated explicitly with fixed uid/gid on purpose.
@@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="23.7.5.30"
+ARG VERSION="23.8.1.2992"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

 # set non-empty deb_location_url url to create a docker image
@@ -1,6 +1,7 @@
 #!/bin/bash

 # shellcheck disable=SC1091
+# shellcheck disable=SC2034
 source /setup_export_logs.sh

 # fail on errors, verbose and export all env variables
@@ -231,10 +232,16 @@ do
     fi
 done

+data_path_config="--path=/var/lib/clickhouse/"
+if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
+    # We need s3 storage configuration (but it's more likely that clickhouse-local will fail for some reason)
+    data_path_config="--config-file=/etc/clickhouse-server/config.xml"
+fi
+
 # Also export trace log in flamegraph-friendly format.
 for trace_type in CPU Memory Real
 do
-    clickhouse-local --path /var/lib/clickhouse/ --only-system-tables -q "
+    clickhouse-local "$data_path_config" --only-system-tables -q "
             select
                 arrayStringConcat((arrayMap(x -> concat(splitByChar('/', addressToLine(x))[-1], '#', demangle(addressToSymbol(x)) ), trace)), ';') AS stack,
                 count(*) AS samples
docs/changelogs/v23.8.1.2992-lts.md (new file, 591 lines)

@@ -0,0 +1,591 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.8.1.2992-lts (ebc7d9a9f3b) FIXME as compared to v23.7.1.2470-stable (a70127baecc)

#### Backward Incompatible Change
* Deprecate the metadata cache feature. It is experimental and we have never used it. The feature is dangerous: [#51182](https://github.com/ClickHouse/ClickHouse/issues/51182). Remove the `system.merge_tree_metadata_cache` system table. The metadata cache is still available in this version but will be removed soon. This closes [#39197](https://github.com/ClickHouse/ClickHouse/issues/39197). [#51303](https://github.com/ClickHouse/ClickHouse/pull/51303) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* If a dynamic disk contains a name, it should be specified as `disk = disk(name = 'disk_name', ...)` in disk function arguments. In previous versions it could be specified as `disk = disk_<disk_name>(...)`, which is no longer supported (a sketch of the new syntax follows this list). [#52820](https://github.com/ClickHouse/ClickHouse/pull/52820) ([Kseniia Sumarokova](https://github.com/kssenii)).
* `clickhouse-benchmark` will establish connections in parallel when invoked with `--concurrency` greater than one. Previously it was unusable if you ran it with 1000 concurrent connections from Europe to the US. Correct calculation of QPS for connections with high latency. Backward incompatible change: the option for JSON output of `clickhouse-benchmark` is removed. If you've used this option, you can also extract data from the `system.query_log` in JSON format as a workaround. [#53293](https://github.com/ClickHouse/ClickHouse/pull/53293) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The `microseconds` column is removed from the `system.text_log`, and the `milliseconds` column is removed from the `system.metric_log`, because they are redundant in the presence of the `event_time_microseconds` column. [#53601](https://github.com/ClickHouse/ClickHouse/pull/53601) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Changed ZooKeeper paths for storage `S3Queue` metadata. [#54137](https://github.com/ClickHouse/ClickHouse/pull/54137) ([Kseniia Sumarokova](https://github.com/kssenii)).
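A minimal sketch of the new named-disk syntax; the disk name and the S3 parameters below are hypothetical placeholders, not taken from the changelog:

```sql
-- New style: the name is an argument of disk(), not part of the function name.
CREATE TABLE data_on_dynamic_disk (id UInt64)
ENGINE = MergeTree
ORDER BY id
SETTINGS disk = disk(
    name = 'disk_s3',  -- previously encoded in the function name as disk_<name>(...)
    type = 's3',
    endpoint = 'https://my-bucket.s3.amazonaws.com/data/',
    access_key_id = 'REPLACE_ME',
    secret_access_key = 'REPLACE_ME');
```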
#### New Feature
* Add column `ptr` to `system.trace_log` for `trace_type = 'MemorySample'`. This column contains the address of the allocation. Added function `flameGraph` which can build a flamegraph of allocated and not-released memory. Reworking of [#38391](https://github.com/ClickHouse/ClickHouse/issues/38391). [#45322](https://github.com/ClickHouse/ClickHouse/pull/45322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add setting `rewrite_count_distinct_if_with_count_distinct_implementation` to rewrite `countDistinctIf` with `count_distinct_implementation` (a short example follows this list). Closes [#30642](https://github.com/ClickHouse/ClickHouse/issues/30642). [#46051](https://github.com/ClickHouse/ClickHouse/pull/46051) ([flynn](https://github.com/ucasfl)).
* Add new table engine `S3Queue` for streaming data import from S3. Closes [#37012](https://github.com/ClickHouse/ClickHouse/issues/37012). [#49086](https://github.com/ClickHouse/ClickHouse/pull/49086) ([s-kat](https://github.com/s-kat)).
* Added `TarArchiveReader` and `SevenZipArchiveReader`, with support for the table function `file('path_to_archive :: filename')`, plus functional tests for that table function and unit tests for both readers. [#50321](https://github.com/ClickHouse/ClickHouse/pull/50321) ([nikitakeba](https://github.com/nikitakeba)).
* Added table function `azureBlobStorageCluster`. The supported set of features is very similar to table function `s3Cluster`. [#50795](https://github.com/ClickHouse/ClickHouse/pull/50795) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Allow using `cluster`, `clusterAllReplicas`, `remote`, `remoteRaw` and `remoteSecure` without a table name, as requested in issue [#50808](https://github.com/ClickHouse/ClickHouse/issues/50808). [#50848](https://github.com/ClickHouse/ClickHouse/pull/50848) ([Yangkuan Liu](https://github.com/LiuYangkuan)).
* Added a system table to monitor Kafka consumers. [#50999](https://github.com/ClickHouse/ClickHouse/pull/50999) ([Ilya Golshtein](https://github.com/ilejn)).
* Added the `max_sessions_for_user` setting. [#51724](https://github.com/ClickHouse/ClickHouse/pull/51724) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* ClickHouse had no functions to convert a UTC timestamp to a timestamp in another time zone the way Spark does, so the functions `toUTCTimestamp`/`fromUTCTimestamp` were added to act the same as Spark's `to_utc_timestamp`/`from_utc_timestamp` (an example follows this list). [#52117](https://github.com/ClickHouse/ClickHouse/pull/52117) ([KevinyhZou](https://github.com/KevinyhZou)).
* Add new functions `structureToCapnProtoSchema`/`structureToProtobufSchema` that convert a ClickHouse table structure to a CapnProto/Protobuf format schema. Allow input/output of data in CapnProto/Protobuf format without an external format schema, using an autogenerated schema from the table structure (controlled by the settings `format_capn_proto_use_autogenerated_schema`/`format_protobuf_use_autogenerated_schema`). Allow exporting the autogenerated schema during input/output via the setting `output_format_schema`. [#52278](https://github.com/ClickHouse/ClickHouse/pull/52278) ([Kruglov Pavel](https://github.com/Avogar)).
* A new field `query_cache_usage` in `system.query_log` now shows if and how the query cache was used. [#52384](https://github.com/ClickHouse/ClickHouse/pull/52384) ([Robert Schulze](https://github.com/rschu1ze)).
* Add new functions `startsWithUTF8` and `endsWithUTF8` (an example follows this list). [#52555](https://github.com/ClickHouse/ClickHouse/pull/52555) ([李扬](https://github.com/taiyang-li)).
* Allow a variable number of columns in TSV/CustomSeparated/JSONCompactEachRow, and make schema inference work with a variable number of columns. Add settings `input_format_tsv_allow_variable_number_of_columns`, `input_format_custom_allow_variable_number_of_columns`, `input_format_json_compact_allow_variable_number_of_columns`. [#52692](https://github.com/ClickHouse/ClickHouse/pull/52692) ([Kruglov Pavel](https://github.com/Avogar)).
* Added `SYSTEM STOP/START PULLING REPLICATION LOG` queries (for testing `ReplicatedMergeTree`). [#52881](https://github.com/ClickHouse/ClickHouse/pull/52881) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Allow executing constant non-deterministic functions in mutations on the initiator. [#53129](https://github.com/ClickHouse/ClickHouse/pull/53129) ([Anton Popov](https://github.com/CurtizJ)).
* Add input format `One` that doesn't read any data and always returns a single row with column `dummy` of type `UInt8` and value `0`, like `system.one`. It can be used together with the `_file`/`_path` virtual columns to list files in file/s3/url/hdfs/etc. table functions without reading any data (an example follows this list). [#53209](https://github.com/ClickHouse/ClickHouse/pull/53209) ([Kruglov Pavel](https://github.com/Avogar)).
* Add `tupleConcat` function (an example follows this list). Closes [#52759](https://github.com/ClickHouse/ClickHouse/issues/52759). [#53239](https://github.com/ClickHouse/ClickHouse/pull/53239) ([Nikolay Degterinsky](https://github.com/evillique)).
* Support the `TRUNCATE DATABASE` operation. [#53261](https://github.com/ClickHouse/ClickHouse/pull/53261) ([Bharat Nallan](https://github.com/bharatnc)).
* Add the `max_threads_for_indexes` setting to limit the number of threads used for primary key processing. [#53313](https://github.com/ClickHouse/ClickHouse/pull/53313) ([jorisgio](https://github.com/jorisgio)).
* Add experimental support for HNSW as an approximate nearest neighbor search method. [#53447](https://github.com/ClickHouse/ClickHouse/pull/53447) ([Davit Vardanyan](https://github.com/davvard)).
* Re-add SipHash keyed functions. [#53525](https://github.com/ClickHouse/ClickHouse/pull/53525) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Added functions `arrayRotateLeft`, `arrayRotateRight`, `arrayShiftLeft`, `arrayShiftRight` (an example follows this list; [#52755](https://github.com/ClickHouse/ClickHouse/issues/52755), [#52895](https://github.com/ClickHouse/ClickHouse/issues/52895)). [#53557](https://github.com/ClickHouse/ClickHouse/pull/53557) ([Mikhail Koviazin](https://github.com/mkmkme)).
* Add column `name` to `system.clusters` as an alias to `cluster`. [#53605](https://github.com/ClickHouse/ClickHouse/pull/53605) ([irenjj](https://github.com/irenjj)).
* The advanced dashboard now allows mass editing (save/load). [#53608](https://github.com/ClickHouse/ClickHouse/pull/53608) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add support for plural time units. [#53641](https://github.com/ClickHouse/ClickHouse/pull/53641) ([irenjj](https://github.com/irenjj)).
* Support the function `isNotDistinctFrom` in the JOIN ON section for null-safe comparison (an example follows this list), ref [#53061](https://github.com/ClickHouse/ClickHouse/issues/53061). [#53755](https://github.com/ClickHouse/ClickHouse/pull/53755) ([vdimir](https://github.com/vdimir)).
* Added the `hide_in_preprocessed` attribute to ClickHouse's server configuration XML dialect. This is a mechanism to hide certain settings from appearing in preprocessed server configuration files. Useful e.g. for passwords or private keys that should not appear verbatim in files. [#53818](https://github.com/ClickHouse/ClickHouse/pull/53818) ([Roman Vasin](https://github.com/rvasin)).
* Added server setting `validate_tcp_client_information` that determines whether validation of client information is enabled when a query packet is received. [#53907](https://github.com/ClickHouse/ClickHouse/pull/53907) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
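A minimal sketch of the `countDistinctIf` rewrite, assuming the default `count_distinct_implementation = 'uniqExact'`:

```sql
SET rewrite_count_distinct_if_with_count_distinct_implementation = 1;

-- Internally rewritten to the configured implementation with the If
-- combinator, i.e. uniqExactIf(number % 10, number > 50) by default:
SELECT countDistinctIf(number % 10, number > 50) FROM numbers(100);
```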
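An illustration of the Spark-style time zone conversions (the expected results assume Asia/Shanghai is UTC+8):

```sql
-- Interpret the timestamp in the given zone and convert it to UTC:
SELECT toUTCTimestamp(toDateTime('2023-08-31 10:00:00'), 'Asia/Shanghai');
-- 2023-08-31 02:00:00

-- And back from UTC into the given zone:
SELECT fromUTCTimestamp(toDateTime('2023-08-31 02:00:00'), 'Asia/Shanghai');
-- 2023-08-31 10:00:00
```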
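The UTF-8-aware variants match whole code points rather than raw bytes; a small illustration:

```sql
-- '\xe4' is the first byte of the three-byte code point '中', so the
-- byte-wise check matches while the UTF-8-aware one does not:
SELECT startsWith('中国', '\xe4'), startsWithUTF8('中国', '\xe4');
-- 1, 0
```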
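A hedged sketch of listing files via the `One` input format without reading any data (the glob path is hypothetical):

```sql
-- The One format reads nothing, so this only enumerates matching files
-- through the _file virtual column:
SELECT _file FROM file('data/*.csv', 'One');
```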
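`tupleConcat` in one line:

```sql
SELECT tupleConcat((1, 'a'), (true, 2.5));
-- (1, 'a', true, 2.5)
```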
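Rotation wraps elements around, while shifting fills the vacated slots:

```sql
SELECT
    arrayRotateLeft([1, 2, 3, 4], 1),   -- [2, 3, 4, 1]
    arrayShiftLeft([1, 2, 3, 4], 1, 0); -- [2, 3, 4, 0]
```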
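A self-contained sketch of the null-safe join comparison; the inline tables `lhs`/`rhs` are constructed purely for illustration:

```sql
WITH lhs AS (SELECT arrayJoin([1, NULL]) AS k),
     rhs AS (SELECT arrayJoin([1, NULL]) AS k)
SELECT lhs.k, rhs.k
FROM lhs
JOIN rhs ON isNotDistinctFrom(lhs.k, rhs.k);
-- Matches (1, 1) and (NULL, NULL); plain equality would drop the NULL pair.
```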
#### Performance Improvement
* Enable JIT compilation for AArch64, PowerPC, SystemZ, RISC-V. [#38217](https://github.com/ClickHouse/ClickHouse/pull/38217) ([Maksim Kita](https://github.com/kitaisreal)).
* Provide a method to process all the hash sets in parallel before the merge. [#50748](https://github.com/ClickHouse/ClickHouse/pull/50748) ([Jiebin Sun](https://github.com/jiebinn)).
* Optimize aggregation performance of a nullable String key when using `AggregationMethodSerialized`. [#51399](https://github.com/ClickHouse/ClickHouse/pull/51399) ([LiuNeng](https://github.com/liuneng1994)).
* The performance experiments of **SSB** on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) show that this change could bring an improvement of **8.5%** to the **geomean QPS** when the experimental analyzer is enabled. The details are shown below: ![image](https://github.com/ClickHouse/ClickHouse/assets/26588299/4e58bf8b-d276-408d-ad45-38c82d3cb918). [#52091](https://github.com/ClickHouse/ClickHouse/pull/52091) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Parquet filter pushdown. I.e. when reading Parquet files, row groups (chunks of the file) are skipped based on the WHERE condition and the min/max values in each column. In particular, if the file is roughly sorted by some column, queries that filter by a short range of that column will be much faster. [#52951](https://github.com/ClickHouse/ClickHouse/pull/52951) ([Michael Kolupaev](https://github.com/al13n321)).
* Optimize the merge if all hash sets are single-level in `UniqExactSet`. [#52973](https://github.com/ClickHouse/ClickHouse/pull/52973) ([Jiebin Sun](https://github.com/jiebinn)).
* StorageJoin: do not create a clone hash join with all columns. [#53046](https://github.com/ClickHouse/ClickHouse/pull/53046) ([Duc Canh Le](https://github.com/canhld94)).
* Optimize reading small row groups by batching them together in Parquet. Closes [#53069](https://github.com/ClickHouse/ClickHouse/issues/53069). [#53281](https://github.com/ClickHouse/ClickHouse/pull/53281) ([Kruglov Pavel](https://github.com/Avogar)).
* Implement a native ORC input format without Arrow to improve performance. [#53324](https://github.com/ClickHouse/ClickHouse/pull/53324) ([李扬](https://github.com/taiyang-li)).
* The dashboard will tell the server to compress the data, which is useful for large time frames over slow internet connections. For example, one chart with 86400 points can be 1.5 MB uncompressed and 60 KB compressed with `br`. [#53569](https://github.com/ClickHouse/ClickHouse/pull/53569) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Optimize count from files in most input formats. Closes [#44334](https://github.com/ClickHouse/ClickHouse/issues/44334). [#53637](https://github.com/ClickHouse/ClickHouse/pull/53637) ([Kruglov Pavel](https://github.com/Avogar)).
* Better utilization of thread pool for `BACKUP`s and `RESTORE`s. [#53649](https://github.com/ClickHouse/ClickHouse/pull/53649) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Remove a useless performance check that was slow on the client. [#53695](https://github.com/ClickHouse/ClickHouse/pull/53695) ([Raúl Marín](https://github.com/Algunenano)).
#### Improvement
* Bloom filter indices are pruned so that they correlate with the cardinality of the data set they are tracking. [#35102](https://github.com/ClickHouse/ClickHouse/pull/35102) ([Anton Kozlov](https://github.com/tonickkozlov)).
* Add `stderr_reaction` configuration/setting to control the reaction (none, log or throw) when an external command's stderr has data. This helps make debugging external commands easier. [#43210](https://github.com/ClickHouse/ClickHouse/pull/43210) ([Amos Bird](https://github.com/amosbird)).
* Addresses https://github.com/ClickHouse/ClickHouse/issues/48720. @kgoralski helped with some thought about the `system.merges` part. [#48990](https://github.com/ClickHouse/ClickHouse/pull/48990) ([Jianfei Hu](https://github.com/incfly)).
* If a dictionary is created with a complex key, automatically choose the "complex key" layout variant. [#49587](https://github.com/ClickHouse/ClickHouse/pull/49587) ([xiebin](https://github.com/xbthink)).
* Add setting `use_concurrency_control` for better testing of the new concurrency control feature. [#49618](https://github.com/ClickHouse/ClickHouse/pull/49618) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added suggestions for mistyped names for databases and tables, with different scenarios commented. [#49801](https://github.com/ClickHouse/ClickHouse/pull/49801) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Reading small files from HDFS via Gluten was found to cost more time than querying directly with Spark; this case has been improved. [#50063](https://github.com/ClickHouse/ClickHouse/pull/50063) ([KevinyhZou](https://github.com/KevinyhZou)).
* Fixed emission of too many worthless error logs after session expiration. [#50171](https://github.com/ClickHouse/ClickHouse/pull/50171) ([helifu](https://github.com/helifu)).
* Introduce fallback ZooKeeper sessions which are time-bound. Fixed the `index` column in `system.zookeeper_connection` for DNS addresses. [#50424](https://github.com/ClickHouse/ClickHouse/pull/50424) ([Anton Kozlov](https://github.com/tonickkozlov)).
* Add the ability to log when `max_partitions_per_insert_block` is reached. [#50948](https://github.com/ClickHouse/ClickHouse/pull/50948) ([Sean Haynes](https://github.com/seandhaynes)).
* Added a bunch of custom commands (mostly to make ClickHouse debugging easier). [#51117](https://github.com/ClickHouse/ClickHouse/pull/51117) ([pufit](https://github.com/pufit)).
* Updated the check for `connection_string`, since connection strings with SAS do not always begin with `DefaultEndPoint`, and updated the connection URL to include the SAS token after adding the container to the URL. [#51141](https://github.com/ClickHouse/ClickHouse/pull/51141) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix description for filtering sets in the `full_sorting_merge` join. [#51329](https://github.com/ClickHouse/ClickHouse/pull/51329) ([Tanay Tummalapalli](https://github.com/ttanay)).
* The sizes of the (index) uncompressed/mark, mmap and query caches can now be configured dynamically at runtime. [#51446](https://github.com/ClickHouse/ClickHouse/pull/51446) ([Robert Schulze](https://github.com/rschu1ze)).
* Fixed memory consumption in `Aggregator` when `max_block_size` is huge. [#51566](https://github.com/ClickHouse/ClickHouse/pull/51566) ([Nikita Taranov](https://github.com/nickitat)).
* Add `SYSTEM SYNC FILESYSTEM CACHE` command. It will compare the in-memory state of the filesystem cache with what it has on disk and fix the in-memory state if needed. [#51622](https://github.com/ClickHouse/ClickHouse/pull/51622) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Attempt to create a generic proxy resolver for ClickHouse while keeping backwards compatibility with the existing S3 storage conf proxy resolver. [#51749](https://github.com/ClickHouse/ClickHouse/pull/51749) ([Arthur Passos](https://github.com/arthurpassos)).
* Support reading tuple subcolumns from file/s3/hdfs/url/azureBlobStorage table functions. [#51806](https://github.com/ClickHouse/ClickHouse/pull/51806) ([Kruglov Pavel](https://github.com/Avogar)).
* Function `arrayIntersect` now returns the values sorted like the first argument. Closes [#27622](https://github.com/ClickHouse/ClickHouse/issues/27622). [#51850](https://github.com/ClickHouse/ClickHouse/pull/51850) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add new queries which allow creating/dropping access entities in a specified access storage, or moving access entities from one access storage to another. [#51912](https://github.com/ClickHouse/ClickHouse/pull/51912) ([pufit](https://github.com/pufit)).
* `ALTER TABLE FREEZE` is not replicated in the `Replicated` engine. [#52064](https://github.com/ClickHouse/ClickHouse/pull/52064) ([Mike Kot](https://github.com/myrrc)).
* Added the possibility to flush logs to disk on crash; added log buffer configuration. [#52174](https://github.com/ClickHouse/ClickHouse/pull/52174) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Fix the S3 table function not working with pre-signed URLs. Closes [#50846](https://github.com/ClickHouse/ClickHouse/issues/50846). [#52310](https://github.com/ClickHouse/ClickHouse/pull/52310) ([chen](https://github.com/xiedeyantu)).
* The `system.events` and `system.metrics` tables add a `name` column as an alias to `event` and `metric`. Closes [#51257](https://github.com/ClickHouse/ClickHouse/issues/51257). [#52315](https://github.com/ClickHouse/ClickHouse/pull/52315) ([chen](https://github.com/xiedeyantu)).
* Added support for the syntax `CREATE UNIQUE INDEX` in the parser for better SQL compatibility. `UNIQUE` indexes are not supported. Set `create_index_ignore_unique = 1` to ignore the UNIQUE keyword in queries. [#52320](https://github.com/ClickHouse/ClickHouse/pull/52320) ([Ilya Yatsishin](https://github.com/qoega)).
* Add support for predefined macros (`{database}` and `{table}`) in some Kafka engine settings: topic, consumer, client_id, etc. [#52386](https://github.com/ClickHouse/ClickHouse/pull/52386) ([Yury Bogomolov](https://github.com/ybogo)).
* Disable updating the filesystem cache during backup/restore. The filesystem cache must not be updated during backup/restore; it seems it just slows down the process without any profit (because the BACKUP command can read a lot of data and it's no use putting all the data into the filesystem cache and immediately evicting it). [#52402](https://github.com/ClickHouse/ClickHouse/pull/52402) ([Vitaly Baranov](https://github.com/vitlibar)).
* Updated the parameterized view implementation to create a new StorageView with substituted parameters for every SELECT query of a parameterized view. [#52569](https://github.com/ClickHouse/ClickHouse/pull/52569) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* The configuration of the S3 endpoint allows using it from the root, and appends '/' automatically if needed. [#47809](https://github.com/ClickHouse/ClickHouse/issues/47809). [#52600](https://github.com/ClickHouse/ClickHouse/pull/52600) ([xiaolei565](https://github.com/xiaolei565)).
* Added support for adding and subtracting arrays: `[5,2] + [1,7]` (a one-line example follows this list). Division and multiplication were not implemented due to confusion between pointwise multiplication and the scalar product of arguments. Closes [#49939](https://github.com/ClickHouse/ClickHouse/issues/49939). [#52625](https://github.com/ClickHouse/ClickHouse/pull/52625) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add support for string literals as table names. Closes [#52178](https://github.com/ClickHouse/ClickHouse/issues/52178). [#52635](https://github.com/ClickHouse/ClickHouse/pull/52635) ([hendrik-m](https://github.com/hendrik-m)).
* For clickhouse-local, allow positional options and populate global UDF settings (`user_scripts_path` and `user_defined_executable_functions_config`). [#52643](https://github.com/ClickHouse/ClickHouse/pull/52643) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* `system.asynchronous_metrics` now includes metrics `QueryCacheEntries` and `QueryCacheBytes` to inspect the query cache. [#52650](https://github.com/ClickHouse/ClickHouse/pull/52650) ([Robert Schulze](https://github.com/rschu1ze)).
* Added the possibility to use the `s3_storage_class` parameter in the `SETTINGS` clause of the `BACKUP` statement for backups to S3. [#52658](https://github.com/ClickHouse/ClickHouse/pull/52658) ([Roman Vasin](https://github.com/rvasin)).
* Improve insert retries on Keeper session expiration. [#52688](https://github.com/ClickHouse/ClickHouse/pull/52688) ([Raúl Marín](https://github.com/Algunenano)).
* Add utility `print-backup-info.py` which parses a backup metadata file and prints information about the backup. [#52690](https://github.com/ClickHouse/ClickHouse/pull/52690) ([Vitaly Baranov](https://github.com/vitlibar)).
* Closes [#49510](https://github.com/ClickHouse/ClickHouse/issues/49510). Currently we have database and table names case-sensitive, but tools query `information_schema` sometimes in lowercase, sometimes in uppercase. For this reason we have the `information_schema` database, containing lowercase tables, such as `information_schema.tables`, and the `INFORMATION_SCHEMA` database, containing uppercase tables, such as `INFORMATION_SCHEMA.TABLES`. But some tools are querying `INFORMATION_SCHEMA.tables` and `information_schema.TABLES`. The proposed solution is to duplicate both lowercase and uppercase tables in the lowercase and uppercase `information_schema` databases. [#52695](https://github.com/ClickHouse/ClickHouse/pull/52695) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* `GET_PART` and `ATTACH_PART` are almost identical, so they should use the same executor pool. [#52716](https://github.com/ClickHouse/ClickHouse/pull/52716) ([Duc Canh Le](https://github.com/canhld94)).
* Query `CHECK TABLE` has better performance and usability (sends progress updates, cancellable). [#52745](https://github.com/ClickHouse/ClickHouse/pull/52745) ([vdimir](https://github.com/vdimir)).
* Add `modulo`, `intDiv`, `intDivOrZero` for tuples. [#52758](https://github.com/ClickHouse/ClickHouse/pull/52758) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Search for default `yaml` and `yml` configs in clickhouse-client after `xml`. [#52767](https://github.com/ClickHouse/ClickHouse/pull/52767) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* When merging into a non-'clickhouse'-rooted configuration, configs with a different root node name are just bypassed without an exception. [#52770](https://github.com/ClickHouse/ClickHouse/pull/52770) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Now it's possible to specify a min (`memory_profiler_sample_min_allocation_size`) and max (`memory_profiler_sample_max_allocation_size`) size for allocations to be tracked with the sampling memory profiler. [#52779](https://github.com/ClickHouse/ClickHouse/pull/52779) ([alesapin](https://github.com/alesapin)).
* Add `precise_float_parsing` setting to switch float parsing methods (fast/precise; an example follows this list). [#52791](https://github.com/ClickHouse/ClickHouse/pull/52791) ([Andrey Zvonov](https://github.com/zvonand)).
* Use the same default paths for `clickhouse-keeper` (symlink) as for `clickhouse-keeper` (executable). [#52861](https://github.com/ClickHouse/ClickHouse/pull/52861) ([Vitaly Baranov](https://github.com/vitlibar)).
* CVE-2016-2183: disable 3DES. [#52893](https://github.com/ClickHouse/ClickHouse/pull/52893) ([Kenji Noguchi](https://github.com/knoguchi)).
* Load filesystem cache metadata on startup in parallel. Configured by the `load_metadata_threads` (default: 1) cache config setting. Related to [#52037](https://github.com/ClickHouse/ClickHouse/issues/52037). [#52943](https://github.com/ClickHouse/ClickHouse/pull/52943) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Improve error message for table function `remote`. Closes [#40220](https://github.com/ClickHouse/ClickHouse/issues/40220). [#52959](https://github.com/ClickHouse/ClickHouse/pull/52959) ([jiyoungyoooo](https://github.com/jiyoungyoooo)).
* Added the possibility to specify a custom storage policy in the `SETTINGS` clause of `RESTORE` queries. [#52970](https://github.com/ClickHouse/ClickHouse/pull/52970) ([Victor Krasnov](https://github.com/sirvickr)).
* Add the ability to throttle the S3 requests on backup operations (`BACKUP` and `RESTORE` commands now honor `s3_max_[get/put]_[rps/burst]`). [#52974](https://github.com/ClickHouse/ClickHouse/pull/52974) ([Daniel Pozo Escalona](https://github.com/danipozo)).
* Add settings to ignore the ON CLUSTER clause in queries for management of replicated user-defined functions or access control entities with replicated storage. [#52975](https://github.com/ClickHouse/ClickHouse/pull/52975) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Enable parallel reading from replicas over distributed tables. Related to [#49708](https://github.com/ClickHouse/ClickHouse/issues/49708). [#53005](https://github.com/ClickHouse/ClickHouse/pull/53005) ([Igor Nikonov](https://github.com/devcrafter)).
* EXPLAIN actions for the JOIN step. [#53006](https://github.com/ClickHouse/ClickHouse/pull/53006) ([Maksim Kita](https://github.com/kitaisreal)).
* Make `hasTokenOrNull` and `hasTokenCaseInsensitiveOrNull` return null for empty needles (an example follows this list). [#53059](https://github.com/ClickHouse/ClickHouse/pull/53059) ([ltrk2](https://github.com/ltrk2)).
* Allow to restrict allowed paths for filesystem caches. Mainly useful for dynamic disks. If in the server config `filesystem_caches_path` is specified, all filesystem caches' paths will be restricted to this directory. E.g. if the `path` in the cache config is relative, it will be put in `filesystem_caches_path`; if the `path` in the cache config is absolute, it will be required to lie inside `filesystem_caches_path`. If `filesystem_caches_path` is not specified in the config, then the behaviour will be the same as in earlier versions. [#53124](https://github.com/ClickHouse/ClickHouse/pull/53124) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added a bunch of custom commands (mostly to make ClickHouse debugging easier). [#53127](https://github.com/ClickHouse/ClickHouse/pull/53127) ([pufit](https://github.com/pufit)).
* Add diagnostic info about the file name during schema inference; it helps when you process multiple files with globs. [#53135](https://github.com/ClickHouse/ClickHouse/pull/53135) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The client will load suggestions using the main connection if the second connection is not allowed to create a session. [#53177](https://github.com/ClickHouse/ClickHouse/pull/53177) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Add an EXCEPT clause to the `SYSTEM STOP/START LISTEN QUERIES [ALL/DEFAULT/CUSTOM]` query, for example `SYSTEM STOP LISTEN QUERIES ALL EXCEPT TCP, HTTP`. [#53280](https://github.com/ClickHouse/ClickHouse/pull/53280) ([Nikolay Degterinsky](https://github.com/evillique)).
* Change the default of `max_concurrent_queries` from 100 to 1000. It's ok to have many concurrent queries if they are not heavy, and mostly waiting for the network. Note: don't confuse concurrent queries and QPS: for example, ClickHouse server can do tens of thousands of QPS with less than 100 concurrent queries. [#53285](https://github.com/ClickHouse/ClickHouse/pull/53285) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add the ability to override credentials for accessing the base backup in S3 (since tokens may be expired). [#53326](https://github.com/ClickHouse/ClickHouse/pull/53326) ([Azat Khuzhin](https://github.com/azat)).
* Improve `move_primary_key_columns_to_end_of_prewhere`. [#53337](https://github.com/ClickHouse/ClickHouse/pull/53337) ([Han Fei](https://github.com/hanfei1991)).
* Limit the number of concurrent background partition optimize merges. [#53405](https://github.com/ClickHouse/ClickHouse/pull/53405) ([Duc Canh Le](https://github.com/canhld94)).
* Added a setting `allow_moving_table_directory_to_trash` that allows ignoring the `Directory for table data already exists` error when replicating/recovering a `Replicated` database. [#53425](https://github.com/ClickHouse/ClickHouse/pull/53425) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Server settings `asynchronous_metrics_update_period_s` and `asynchronous_heavy_metrics_update_period_s` configured to 0 now fail gracefully instead of crashing the server. [#53428](https://github.com/ClickHouse/ClickHouse/pull/53428) ([Robert Schulze](https://github.com/rschu1ze)).
* Previously the caller could register the same watch callback multiple times. In that case each entry was consuming memory and the same callback was called multiple times, which didn't make much sense. In order to avoid this, the caller could have some logic to not add the same watch multiple times. With this change, this deduplication is done internally if the watch callback is passed via `shared_ptr`. [#53452](https://github.com/ClickHouse/ClickHouse/pull/53452) ([Alexander Gololobov](https://github.com/davenger)).
* The ClickHouse server now respects memory limits changed via cgroups when reloading its configuration. [#53455](https://github.com/ClickHouse/ClickHouse/pull/53455) ([Robert Schulze](https://github.com/rschu1ze)).
* Add the ability to turn off flushing of Distributed tables on `DETACH`/`DROP`/server shutdown. [#53501](https://github.com/ClickHouse/ClickHouse/pull/53501) ([Azat Khuzhin](https://github.com/azat)).
* `domainRFC` supports IPv6 (an IP literal within square brackets). [#53506](https://github.com/ClickHouse/ClickHouse/pull/53506) ([Chen768959](https://github.com/Chen768959)).
* Use the filter by file/path before reading in url/file/hdfs table functions. [#53529](https://github.com/ClickHouse/ClickHouse/pull/53529) ([Kruglov Pavel](https://github.com/Avogar)).
* Use a longer timeout for S3 CopyObject requests. [#53533](https://github.com/ClickHouse/ClickHouse/pull/53533) ([Michael Kolupaev](https://github.com/al13n321)).
* Added server setting `aggregate_function_group_array_max_element_size`. This setting is used to limit the array size for the `groupArray` function at serialization. The default value is `16777215`. [#53550](https://github.com/ClickHouse/ClickHouse/pull/53550) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* `SCHEMA()` was added as an alias for `DATABASE()` to improve MySQL compatibility. [#53587](https://github.com/ClickHouse/ClickHouse/pull/53587) ([Daniël van Eeden](https://github.com/dveeden)).
* Add asynchronous metrics about tables in the system database. For example, `TotalBytesOfMergeTreeTablesSystem`. This closes [#53603](https://github.com/ClickHouse/ClickHouse/issues/53603). [#53604](https://github.com/ClickHouse/ClickHouse/pull/53604) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The SQL editor in the Play UI and Dashboard will not use Grammarly. [#53614](https://github.com/ClickHouse/ClickHouse/pull/53614) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The advanced dashboard now has an option to maximize charts and move them around. [#53622](https://github.com/ClickHouse/ClickHouse/pull/53622) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* As expert-level settings, it is now possible to: 1. configure the `size_ratio` (i.e. the relative size of the protected queue) of the [index] mark/uncompressed caches; 2. configure the cache policy of the index mark and index uncompressed caches. [#53657](https://github.com/ClickHouse/ClickHouse/pull/53657) ([Robert Schulze](https://github.com/rschu1ze)).
* More careful thread management will improve the speed of the S3 table function over a large number of files by more than ~25%. [#53668](https://github.com/ClickHouse/ClickHouse/pull/53668) ([pufit](https://github.com/pufit)).
* Upgrade snappy to 1.1.10; ClickHouse may benefit from it. [#53672](https://github.com/ClickHouse/ClickHouse/pull/53672) ([李扬](https://github.com/taiyang-li)).
* Added client info validation to the query packet in TCPHandler. [#53673](https://github.com/ClickHouse/ClickHouse/pull/53673) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Cache the number of rows in files for count in file/s3/url/hdfs/azure functions. The cache can be enabled/disabled by the setting `use_cache_for_count_from_files` (enabled by default). Continuation of https://github.com/ClickHouse/ClickHouse/pull/53637. [#53692](https://github.com/ClickHouse/ClickHouse/pull/53692) ([Kruglov Pavel](https://github.com/Avogar)).
* Updated to retry loading a part in case of Azure::Core::Http::TransportException (https://github.com/ClickHouse/ClickHouse/issues/39700#issuecomment-1686442785). [#53750](https://github.com/ClickHouse/ClickHouse/pull/53750) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Stack traces for exceptions; materialized view exceptions are propagated. [#53766](https://github.com/ClickHouse/ClickHouse/pull/53766) ([Ilya Golshtein](https://github.com/ilejn)).
* If no hostname or port were specified, keeper client will try to search for a connection string in ClickHouse's config.xml. [#53769](https://github.com/ClickHouse/ClickHouse/pull/53769) ([pufit](https://github.com/pufit)).
* Add profile event `PartsLockMicroseconds` which shows the number of microseconds we hold the data parts lock in the MergeTree table engine family. [#53797](https://github.com/ClickHouse/ClickHouse/pull/53797) ([alesapin](https://github.com/alesapin)).
* Make the reconnect limit in Raft limits configurable for Keeper. This configuration can help Keeper rebuild connections with peers quicker if the current connection is broken. [#53817](https://github.com/ClickHouse/ClickHouse/pull/53817) ([Pengyuan Bian](https://github.com/bianpengyuan)).
* Supported globs in SELECT from file in clickhouse-local. [#53863](https://github.com/ClickHouse/ClickHouse/pull/53863) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Ignore foreign keys in table definitions to improve compatibility with MySQL, so a user wouldn't need to rewrite the foreign-key part of their SQL; ref [#53380](https://github.com/ClickHouse/ClickHouse/issues/53380). [#53864](https://github.com/ClickHouse/ClickHouse/pull/53864) ([jsc0218](https://github.com/jsc0218)).
* `from` is supported as an expression. [#53914](https://github.com/ClickHouse/ClickHouse/pull/53914) ([Chen768959](https://github.com/Chen768959)).
* Changes of the server configuration are now detected with high precision (milliseconds and less). [#54065](https://github.com/ClickHouse/ClickHouse/pull/54065) ([Mikhail Koviazin](https://github.com/mkmkme)).
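Pointwise array addition and subtraction in one line:

```sql
SELECT [5, 2] + [1, 7], [5, 2] - [1, 7];
-- [6, 9], [4, -5]
```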
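The two float-parsing methods can differ in the last bits of the result; an illustration along the lines of the `precise_float_parsing` documentation:

```sql
SELECT toFloat64('1.7091') SETTINGS precise_float_parsing = 0;
-- 1.7090999999999998 (fast method)

SELECT toFloat64('1.7091') SETTINGS precise_float_parsing = 1;
-- 1.7091 (precise method)
```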
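The `...OrNull` token-search variants now degrade to NULL on empty needles rather than failing:

```sql
SELECT hasTokenOrNull('a b c', '');
-- NULL (rather than an error)
```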
#### Build/Testing/Packaging Improvement
* Don't expose symbols from the ClickHouse binary to the dynamic linker. It might fix [#43933](https://github.com/ClickHouse/ClickHouse/issues/43933). [#47475](https://github.com/ClickHouse/ClickHouse/pull/47475) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed endian issues in the native protocol. [#50267](https://github.com/ClickHouse/ClickHouse/pull/50267) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Build `clickhouse/nginx-dav` and use it in integration tests instead of `kssenii/nginx-test`. Addresses [#43182](https://github.com/ClickHouse/ClickHouse/issues/43182). [#51843](https://github.com/ClickHouse/ClickHouse/pull/51843) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add a `clickhouse-keeper-client` symlink to the clickhouse-server package. [#51882](https://github.com/ClickHouse/ClickHouse/pull/51882) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fixed the ForEach aggregate function state for s390x. [#52040](https://github.com/ClickHouse/ClickHouse/pull/52040) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Add https://github.com/elliotchance/sqltest to CI to report SQL 2016 conformance. [#52293](https://github.com/ClickHouse/ClickHouse/pull/52293) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fixed a codec delta endian issue for s390x. [#52592](https://github.com/ClickHouse/ClickHouse/pull/52592) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Packing inline cache into docker images sometimes causes strange special effects. Since we don't use it at all, it's good to go. [#53008](https://github.com/ClickHouse/ClickHouse/pull/53008) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Upgrade PRQL to 0.9.3. [#53060](https://github.com/ClickHouse/ClickHouse/pull/53060) ([Maximilian Roos](https://github.com/max-sixty)).
* System tables from CI checks are exported to ClickHouse Cloud. [#53086](https://github.com/ClickHouse/ClickHouse/pull/53086) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud. [#53100](https://github.com/ClickHouse/ClickHouse/pull/53100) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up Debug and Tidy builds. [#53178](https://github.com/ClickHouse/ClickHouse/pull/53178) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up the build by removing tons and tonnes of garbage. One of the frequently included headers was poisoned by boost. [#53180](https://github.com/ClickHouse/ClickHouse/pull/53180) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add ClickHouse builds for Linux s390x to CI. [#53181](https://github.com/ClickHouse/ClickHouse/pull/53181) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Remove even more garbage. [#53182](https://github.com/ClickHouse/ClickHouse/pull/53182) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The function `arrayAUC` was using heavy C++ templates. [#53183](https://github.com/ClickHouse/ClickHouse/pull/53183) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Some translation units were always rebuilt regardless of ccache. The culprit was found and fixed. [#53184](https://github.com/ClickHouse/ClickHouse/pull/53184) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud; the second attempt after [#53100](https://github.com/ClickHouse/ClickHouse/issues/53100). [#53213](https://github.com/ClickHouse/ClickHouse/pull/53213) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Three tests were failing / flaky: 1. test_host_regexp_multiple_ptr_records 2. test_host_regexp_multiple_ptr_records_concurrent 3. test_reverse_dns_query. [#53286](https://github.com/ClickHouse/ClickHouse/pull/53286) ([Arthur Passos](https://github.com/arthurpassos)).
* Export logs from CI in stateful tests to ClickHouse Cloud. [#53351](https://github.com/ClickHouse/ClickHouse/pull/53351) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Export logs from CI in stress tests. [#53353](https://github.com/ClickHouse/ClickHouse/pull/53353) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Export logs from CI in the fuzzer. [#53354](https://github.com/ClickHouse/ClickHouse/pull/53354) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Export logs from CI in performance tests to ClickHouse Cloud. [#53355](https://github.com/ClickHouse/ClickHouse/pull/53355) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Preserve environment parameters in the `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Follow-up for [#53418](https://github.com/ClickHouse/ClickHouse/issues/53418). Small improvements for install_check.py, adding tests for proper ENV parameter passing to the main process on `init.d start`. [#53457](https://github.com/ClickHouse/ClickHouse/pull/53457) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fixed a base64 endian issue for s390x. [#53570](https://github.com/ClickHouse/ClickHouse/pull/53570) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Reorganize file management in CMake to prevent potential duplications. For instance, `indexHint.cpp` is duplicated in both `dbms_sources` and `clickhouse_functions_sources`. [#53621](https://github.com/ClickHouse/ClickHouse/pull/53621) ([Amos Bird](https://github.com/amosbird)).
* Fixed the functional test 02354_distributed_with_external_aggregation_memory_usage on s390x. [#53648](https://github.com/ClickHouse/ClickHouse/pull/53648) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Skipped the QPL functional test for s390x. [#53758](https://github.com/ClickHouse/ClickHouse/pull/53758) ([Harry Lee](https://github.com/HarryLeeIBM)).
* Slightly improve the cmake build by sanitizing some dependencies and removing some duplicates. Each commit includes a short description of the changes made. [#53759](https://github.com/ClickHouse/ClickHouse/pull/53759) ([Amos Bird](https://github.com/amosbird)).
* Fixed a StripeLog storage endian issue on the s390x platform. [#53902](https://github.com/ClickHouse/ClickHouse/pull/53902) ([Harry Lee](https://github.com/HarryLeeIBM)).
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||
|
||||
* Do not reset Annoy index during build-up with > 1 mark [#51325](https://github.com/ClickHouse/ClickHouse/pull/51325) ([Tian Xinhui](https://github.com/xinhuitian)).
|
||||
* Fix usage of temporary directories during RESTORE [#51493](https://github.com/ClickHouse/ClickHouse/pull/51493) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix binary arithmetic for Nullable(IPv4) [#51642](https://github.com/ClickHouse/ClickHouse/pull/51642) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Support IPv4 and IPv6 as dictionary attributes [#51756](https://github.com/ClickHouse/ClickHouse/pull/51756) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Bug fix for checksum of compress marks [#51777](https://github.com/ClickHouse/ClickHouse/pull/51777) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||
* Fix mistakenly comma parsing as part of datetime in CSV best effort parsing [#51950](https://github.com/ClickHouse/ClickHouse/pull/51950) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Don't throw exception when exec udf has parameters [#51961](https://github.com/ClickHouse/ClickHouse/pull/51961) ([Nikita Taranov](https://github.com/nickitat)).
|
||||
* Fix recalculation of skip indexes and projections in `ALTER DELETE` queries [#52530](https://github.com/ClickHouse/ClickHouse/pull/52530) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* MaterializedMySQL: Fix the infinite loop in ReadBuffer::read [#52621](https://github.com/ClickHouse/ClickHouse/pull/52621) ([Val Doroshchuk](https://github.com/valbok)).
|
||||
* Load suggestion only with `clickhouse` dialect [#52628](https://github.com/ClickHouse/ClickHouse/pull/52628) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
|
||||
* init and destroy ares channel on demand.. [#52634](https://github.com/ClickHouse/ClickHouse/pull/52634) ([Arthur Passos](https://github.com/arthurpassos)).
|
||||
* RFC: Fix filtering by virtual columns with OR expression [#52653](https://github.com/ClickHouse/ClickHouse/pull/52653) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix crash in function `tuple` with one sparse column argument [#52659](https://github.com/ClickHouse/ClickHouse/pull/52659) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix named collections on cluster 23.7 [#52687](https://github.com/ClickHouse/ClickHouse/pull/52687) ([Al Korgun](https://github.com/alkorgun)).
|
||||
* Fix reading of unnecessary column in case of multistage `PREWHERE` [#52689](https://github.com/ClickHouse/ClickHouse/pull/52689) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix unexpected sort result on multi columns with nulls first direction [#52761](https://github.com/ClickHouse/ClickHouse/pull/52761) ([copperybean](https://github.com/copperybean)).
|
||||
* Fix data race in Keeper reconfiguration [#52804](https://github.com/ClickHouse/ClickHouse/pull/52804) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fix sorting of sparse columns with large limit [#52827](https://github.com/ClickHouse/ClickHouse/pull/52827) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* clickhouse-keeper: fix implementation of server with poll() [#52833](https://github.com/ClickHouse/ClickHouse/pull/52833) ([Andy Fiddaman](https://github.com/citrus-it)).
|
||||
* make regexp analyzer recognize named capturing groups [#52840](https://github.com/ClickHouse/ClickHouse/pull/52840) ([Han Fei](https://github.com/hanfei1991)).
|
||||
* Fix possible assert in ~PushingAsyncPipelineExecutor in clickhouse-local [#52862](https://github.com/ClickHouse/ClickHouse/pull/52862) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix reading of empty `Nested(Array(LowCardinality(...)))` [#52949](https://github.com/ClickHouse/ClickHouse/pull/52949) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Added new tests for session_log and fixed the inconsistency between login and logout. [#52958](https://github.com/ClickHouse/ClickHouse/pull/52958) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||
* Fix password leak in show create mysql table [#52962](https://github.com/ClickHouse/ClickHouse/pull/52962) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* Convert sparse to full in CreateSetAndFilterOnTheFlyStep [#53000](https://github.com/ClickHouse/ClickHouse/pull/53000) ([vdimir](https://github.com/vdimir)).
|
||||
* Fix rare race condition with empty key prefix directory deletion in fs cache [#53055](https://github.com/ClickHouse/ClickHouse/pull/53055) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Fix ZstdDeflatingWriteBuffer truncating the output sometimes [#53064](https://github.com/ClickHouse/ClickHouse/pull/53064) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Fix query_id in part_log with async flush queries [#53103](https://github.com/ClickHouse/ClickHouse/pull/53103) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Fix possible error from cache "Read unexpected size" [#53121](https://github.com/ClickHouse/ClickHouse/pull/53121) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Disable the new parquet encoder [#53130](https://github.com/ClickHouse/ClickHouse/pull/53130) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Not-ready Set [#53162](https://github.com/ClickHouse/ClickHouse/pull/53162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix character escaping in the PostgreSQL engine [#53250](https://github.com/ClickHouse/ClickHouse/pull/53250) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* #2 Added new tests for session_log and fixed the inconsistency between login and logout. [#53255](https://github.com/ClickHouse/ClickHouse/pull/53255) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||
* #3 Fixed inconsistency between login success and logout [#53302](https://github.com/ClickHouse/ClickHouse/pull/53302) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||
* Fix adding sub-second intervals to DateTime [#53309](https://github.com/ClickHouse/ClickHouse/pull/53309) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Fix "Context has expired" error in dictionaries [#53342](https://github.com/ClickHouse/ClickHouse/pull/53342) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
|
||||
* Forbid use_structure_from_insertion_table_in_table_functions when execute Scalar [#53348](https://github.com/ClickHouse/ClickHouse/pull/53348) ([flynn](https://github.com/ucasfl)).
|
||||
* Fix loading lazy database during system.table select query [#53372](https://github.com/ClickHouse/ClickHouse/pull/53372) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
|
||||
* Fixed system.data_skipping_indices for MaterializedMySQL [#53381](https://github.com/ClickHouse/ClickHouse/pull/53381) ([Filipp Ozinov](https://github.com/bakwc)).
|
||||
* Fix processing single carriage return in TSV file segmentation engine [#53407](https://github.com/ClickHouse/ClickHouse/pull/53407) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||
* Fix 'Context has expired' error properly [#53433](https://github.com/ClickHouse/ClickHouse/pull/53433) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* Fix timeout_overflow_mode when having subquery in the rhs of IN [#53439](https://github.com/ClickHouse/ClickHouse/pull/53439) ([Duc Canh Le](https://github.com/canhld94)).
|
||||
* Fix an unexpected behavior in [#53152](https://github.com/ClickHouse/ClickHouse/issues/53152) [#53440](https://github.com/ClickHouse/ClickHouse/pull/53440) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
|
||||
* Fix JSON_QUERY Function parse error while path is all number [#53470](https://github.com/ClickHouse/ClickHouse/pull/53470) ([KevinyhZou](https://github.com/KevinyhZou)).
|
||||
* Fix wrong columns order for queries with parallel FINAL. [#53489](https://github.com/ClickHouse/ClickHouse/pull/53489) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fixed SELECTing from ReplacingMergeTree with do_not_merge_across_partitions_select_final [#53511](https://github.com/ClickHouse/ClickHouse/pull/53511) ([Vasily Nemkov](https://github.com/Enmk)).
|
||||
* bugfix: Flush async insert queue first on shutdown [#53547](https://github.com/ClickHouse/ClickHouse/pull/53547) ([joelynch](https://github.com/joelynch)).
|
||||
* Fix crash in join on sparse column [#53548](https://github.com/ClickHouse/ClickHouse/pull/53548) ([vdimir](https://github.com/vdimir)).
|
||||
* Fix possible UB in Set skipping index for functions with incorrect args [#53559](https://github.com/ClickHouse/ClickHouse/pull/53559) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix possible UB in inverted indexes (experimental feature) [#53560](https://github.com/ClickHouse/ClickHouse/pull/53560) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* Fix: interpolate expression takes source column instead of same name aliased from select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||
* Fix number of dropped granules in EXPLAIN PLAN index=1 [#53616](https://github.com/ClickHouse/ClickHouse/pull/53616) ([wangxiaobo](https://github.com/wzb5212)).
|
||||
* Correctly handle totals and extremes with `DelayedSource` [#53644](https://github.com/ClickHouse/ClickHouse/pull/53644) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Prepared set cache in mutation pipeline stuck [#53645](https://github.com/ClickHouse/ClickHouse/pull/53645) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Fix bug on mutations with subcolumns of type JSON in predicates of UPDATE and DELETE queries. [#53677](https://github.com/ClickHouse/ClickHouse/pull/53677) ([VanDarkholme7](https://github.com/VanDarkholme7)).
|
||||
* Fix filter pushdown for full_sorting_merge join [#53699](https://github.com/ClickHouse/ClickHouse/pull/53699) ([vdimir](https://github.com/vdimir)).
* Try to fix bug with NULL::LowCardinality(Nullable(...)) NOT IN [#53706](https://github.com/ClickHouse/ClickHouse/pull/53706) ([Andrey Zvonov](https://github.com/zvonand)).
* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).
* transform: correctly handle default column with multiple rows [#53742](https://github.com/ClickHouse/ClickHouse/pull/53742) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fix fuzzer crash in parseDateTime() [#53764](https://github.com/ClickHouse/ClickHouse/pull/53764) ([Robert Schulze](https://github.com/rschu1ze)).
* Materialized postgres: fix uncaught exception in getCreateTableQueryImpl [#53832](https://github.com/ClickHouse/ClickHouse/pull/53832) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix possible segfault while using PostgreSQL engine [#53847](https://github.com/ClickHouse/ClickHouse/pull/53847) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix named_collection_admin alias [#54066](https://github.com/ClickHouse/ClickHouse/pull/54066) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix rows_before_limit_at_least for DelayedSource. [#54122](https://github.com/ClickHouse/ClickHouse/pull/54122) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Implementing new commands for keeper-client"'. [#52985](https://github.com/ClickHouse/ClickHouse/pull/52985) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Remove try/catch from DatabaseFilesystem"'. [#53044](https://github.com/ClickHouse/ClickHouse/pull/53044) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Upload build time-trace data to CI database"'. [#53210](https://github.com/ClickHouse/ClickHouse/pull/53210) ([Alexander Gololobov](https://github.com/davenger)).
* NO CL ENTRY: 'Revert "Added new tests for session_log and fixed the inconsistency between login and logout."'. [#53247](https://github.com/ClickHouse/ClickHouse/pull/53247) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Improve CHECK TABLE system query"'. [#53272](https://github.com/ClickHouse/ClickHouse/pull/53272) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "#2 Added new tests for session_log and fixed the inconsistency between login and logout."'. [#53294](https://github.com/ClickHouse/ClickHouse/pull/53294) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Documentation: add Ibis project to the integrations section"'. [#53374](https://github.com/ClickHouse/ClickHouse/pull/53374) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Planner prepare filters for analysis"'. [#53782](https://github.com/ClickHouse/ClickHouse/pull/53782) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "dateDiff: add support for plural units."'. [#53795](https://github.com/ClickHouse/ClickHouse/pull/53795) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Fixed wrong python test name pattern"'. [#53929](https://github.com/ClickHouse/ClickHouse/pull/53929) ([Alexander Tokmakov](https://github.com/tavplubix)).
* NO CL ENTRY: 'Revert "Fix bug on mutations with subcolumns of type JSON in predicates of UPDATE and DELETE queries."'. [#54063](https://github.com/ClickHouse/ClickHouse/pull/54063) ([Alexander Tokmakov](https://github.com/tavplubix)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* clickhouse-copier add check drop partition [#35263](https://github.com/ClickHouse/ClickHouse/pull/35263) ([sunny](https://github.com/sunny19930321)).
* Add more checks into ThreadStatus ctor. [#42019](https://github.com/ClickHouse/ClickHouse/pull/42019) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Refactor Query Tree visitor [#46740](https://github.com/ClickHouse/ClickHouse/pull/46740) ([Dmitry Novik](https://github.com/novikd)).
* Revert "Revert "Randomize JIT settings in tests"" [#48282](https://github.com/ClickHouse/ClickHouse/pull/48282) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix outdated cache configuration in s3 tests: s3_storage_policy_by_defau… [#48424](https://github.com/ClickHouse/ClickHouse/pull/48424) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix IN with decimal in analyzer [#48754](https://github.com/ClickHouse/ClickHouse/pull/48754) ([vdimir](https://github.com/vdimir)).
* Some unclear change in StorageBuffer::reschedule() for something [#49723](https://github.com/ClickHouse/ClickHouse/pull/49723) ([DimasKovas](https://github.com/DimasKovas)).
* MergeTree & SipHash checksum big-endian support [#50276](https://github.com/ClickHouse/ClickHouse/pull/50276) ([ltrk2](https://github.com/ltrk2)).
* Maintain same aggregate function merge behavior for small and big endian machine [#50609](https://github.com/ClickHouse/ClickHouse/pull/50609) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* Add a test to limit client max opening fd [#51213](https://github.com/ClickHouse/ClickHouse/pull/51213) ([Duc Canh Le](https://github.com/canhld94)).
* Add info about acquired space in cache to not enough space error [#51537](https://github.com/ClickHouse/ClickHouse/pull/51537) ([vdimir](https://github.com/vdimir)).
* KeeperDispatcher: remove redundant lock as the ConcurrentBoundedQueue is thread-safe [#51766](https://github.com/ClickHouse/ClickHouse/pull/51766) ([frinkr](https://github.com/frinkr)).
* Fix build type in packager [#51771](https://github.com/ClickHouse/ClickHouse/pull/51771) ([Antonio Andelic](https://github.com/antonio2368)).
* metrics_perf_events_enabled turn off in perf tests [#52072](https://github.com/ClickHouse/ClickHouse/pull/52072) ([Sema Checherinda](https://github.com/CheSema)).
* Remove try/catch from DatabaseFilesystem [#52155](https://github.com/ClickHouse/ClickHouse/pull/52155) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test that clickhouse-client or local do not throw/catch on startup [#52159](https://github.com/ClickHouse/ClickHouse/pull/52159) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Retry blob listing in test_alter_moving_garbage [#52193](https://github.com/ClickHouse/ClickHouse/pull/52193) ([vdimir](https://github.com/vdimir)).
* Try to make `test_kafka_formats_with_broken_message` and `test_kafka_formats` integration tests stable [#52273](https://github.com/ClickHouse/ClickHouse/pull/52273) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Kill the runner process with all subprocesses [#52277](https://github.com/ClickHouse/ClickHouse/pull/52277) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Implement endianness-independent support for MergeTree checksums [#52329](https://github.com/ClickHouse/ClickHouse/pull/52329) ([ltrk2](https://github.com/ltrk2)).
* add tests with connection reset by peer error, and retry it inside client [#52441](https://github.com/ClickHouse/ClickHouse/pull/52441) ([Sema Checherinda](https://github.com/CheSema)).
* Fix logging for asynchronous non-batched distributed sends [#52583](https://github.com/ClickHouse/ClickHouse/pull/52583) ([Azat Khuzhin](https://github.com/azat)).
* Follow-up to "Implement support of encrypted elements in configuration file" [#52609](https://github.com/ClickHouse/ClickHouse/pull/52609) ([Robert Schulze](https://github.com/rschu1ze)).
* Return zxid from TestKeeper and in multi responses [#52618](https://github.com/ClickHouse/ClickHouse/pull/52618) ([Alexander Gololobov](https://github.com/davenger)).
* Analyzer: Support ARRAY JOIN COLUMNS(...) syntax [#52622](https://github.com/ClickHouse/ClickHouse/pull/52622) ([Dmitry Novik](https://github.com/novikd)).
* Fix stress test: check if storage shutdown before we operate MergeTreeDeduplicationLog [#52623](https://github.com/ClickHouse/ClickHouse/pull/52623) ([Han Fei](https://github.com/hanfei1991)).
* Suspicious DISTINCT crashes from sqlancer [#52636](https://github.com/ClickHouse/ClickHouse/pull/52636) ([Igor Nikonov](https://github.com/devcrafter)).
* Partially fixed test 01747_system_session_log_long [#52640](https://github.com/ClickHouse/ClickHouse/pull/52640) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Check for unexpected Cyrillic [#52641](https://github.com/ClickHouse/ClickHouse/pull/52641) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `test_keeper_reconfig_replace_leader` [#52651](https://github.com/ClickHouse/ClickHouse/pull/52651) ([Antonio Andelic](https://github.com/antonio2368)).
* Rename setting disable_url_encoding to enable_url_encoding and add a test [#52656](https://github.com/ClickHouse/ClickHouse/pull/52656) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove creation of a unnecessary temporary ContextAccess on login [#52660](https://github.com/ClickHouse/ClickHouse/pull/52660) ([Vitaly Baranov](https://github.com/vitlibar)).
* Update version after release [#52661](https://github.com/ClickHouse/ClickHouse/pull/52661) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version_date.tsv and changelogs after v23.7.1.2470-stable [#52664](https://github.com/ClickHouse/ClickHouse/pull/52664) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix bugs and better test for SYSTEM STOP LISTEN [#52680](https://github.com/ClickHouse/ClickHouse/pull/52680) ([Nikolay Degterinsky](https://github.com/evillique)).
* Remove unneeded readBinary() specializations + update docs [#52683](https://github.com/ClickHouse/ClickHouse/pull/52683) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove remainders of legacy setting 'allow_experimental_query_cache' [#52685](https://github.com/ClickHouse/ClickHouse/pull/52685) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix 02417_opentelemetry_insert_on_distributed_table flakiness [#52691](https://github.com/ClickHouse/ClickHouse/pull/52691) ([Azat Khuzhin](https://github.com/azat)).
* Improvements to backup restore disallow_concurrency test [#52709](https://github.com/ClickHouse/ClickHouse/pull/52709) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Move UnlinkMetadataFileOperationOutcome to common header [#52710](https://github.com/ClickHouse/ClickHouse/pull/52710) ([Alexander Gololobov](https://github.com/davenger)).
* Improve endianness-independent support for hash functions [#52712](https://github.com/ClickHouse/ClickHouse/pull/52712) ([ltrk2](https://github.com/ltrk2)).
* Allow reading zero objects in CachedObjectStorage::readObjects() [#52733](https://github.com/ClickHouse/ClickHouse/pull/52733) ([Michael Kolupaev](https://github.com/al13n321)).
* Merging reading from archives [#50321](https://github.com/ClickHouse/ClickHouse/issues/50321) [#52734](https://github.com/ClickHouse/ClickHouse/pull/52734) ([Antonio Andelic](https://github.com/antonio2368)).
* Merging [#52640](https://github.com/ClickHouse/ClickHouse/issues/52640) [#52744](https://github.com/ClickHouse/ClickHouse/pull/52744) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Analyzer: fix 00979_set_index_not.sql [#52754](https://github.com/ClickHouse/ClickHouse/pull/52754) ([Igor Nikonov](https://github.com/devcrafter)).
* Planner prepare filters for analysis [#52762](https://github.com/ClickHouse/ClickHouse/pull/52762) ([Maksim Kita](https://github.com/kitaisreal)).
* Allow reading empty file with no blobs [#52763](https://github.com/ClickHouse/ClickHouse/pull/52763) ([Alexander Gololobov](https://github.com/davenger)).
* Fix: check correctly window frame bounds for RANGE [#52768](https://github.com/ClickHouse/ClickHouse/pull/52768) ([Igor Nikonov](https://github.com/devcrafter)).
* Numerical stability of the test for Polygons [#52769](https://github.com/ClickHouse/ClickHouse/pull/52769) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Change the default timezones in Docker test images [#52772](https://github.com/ClickHouse/ClickHouse/pull/52772) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Upload build statistics to the CI database [#52773](https://github.com/ClickHouse/ClickHouse/pull/52773) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add `instance_type` information to the CI database [#52774](https://github.com/ClickHouse/ClickHouse/pull/52774) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove Coverity (part 2) [#52775](https://github.com/ClickHouse/ClickHouse/pull/52775) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a tool to upload `-ftime-trace` to ClickHouse [#52776](https://github.com/ClickHouse/ClickHouse/pull/52776) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Revert revert of system drop filesystem cache by key [#52778](https://github.com/ClickHouse/ClickHouse/pull/52778) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove obsolete part of a check name [#52793](https://github.com/ClickHouse/ClickHouse/pull/52793) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Maybe fix TLS tests [#52796](https://github.com/ClickHouse/ClickHouse/pull/52796) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Allow OOM in Stress and Upgrade checks [#52807](https://github.com/ClickHouse/ClickHouse/pull/52807) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Do not test upper bounds for throttlers [#52821](https://github.com/ClickHouse/ClickHouse/pull/52821) ([Sergei Trifonov](https://github.com/serxa)).
* Add more logging and touch test for materialize mysql [#52822](https://github.com/ClickHouse/ClickHouse/pull/52822) ([alesapin](https://github.com/alesapin)).
* Try to remove more leftovers. [#52823](https://github.com/ClickHouse/ClickHouse/pull/52823) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Update test_crash_log/test.py [#52825](https://github.com/ClickHouse/ClickHouse/pull/52825) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Don't report LOGICAL_ERROR if a file got truncated during read [#52828](https://github.com/ClickHouse/ClickHouse/pull/52828) ([Michael Kolupaev](https://github.com/al13n321)).
* Throw S3Exception whenever possible. [#52829](https://github.com/ClickHouse/ClickHouse/pull/52829) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Increase min protocol version for sparse serialization [#52835](https://github.com/ClickHouse/ClickHouse/pull/52835) ([Anton Popov](https://github.com/CurtizJ)).
* Cleanup localBackup [#52837](https://github.com/ClickHouse/ClickHouse/pull/52837) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Try to fix 02352_rwlock [#52852](https://github.com/ClickHouse/ClickHouse/pull/52852) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Disable a couple of long tests for debug build. [#52854](https://github.com/ClickHouse/ClickHouse/pull/52854) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix flaky tests in test_merge_tree_azure_blob_storage & test_storage_azure_blob_storage [#52855](https://github.com/ClickHouse/ClickHouse/pull/52855) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Cancel merges before renaming a system log table [#52858](https://github.com/ClickHouse/ClickHouse/pull/52858) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Try to fix a rare fail in 00612_http_max_query_size [#52859](https://github.com/ClickHouse/ClickHouse/pull/52859) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Remove duplicated dialect setting value [#52864](https://github.com/ClickHouse/ClickHouse/pull/52864) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Significant improvement of rust caching [#52865](https://github.com/ClickHouse/ClickHouse/pull/52865) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Docker improvements [#52869](https://github.com/ClickHouse/ClickHouse/pull/52869) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Try to continue clickhouse process in stress test after terminating gdb. [#52871](https://github.com/ClickHouse/ClickHouse/pull/52871) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* fix master ci for [#52091](https://github.com/ClickHouse/ClickHouse/issues/52091) [#52873](https://github.com/ClickHouse/ClickHouse/pull/52873) ([Han Fei](https://github.com/hanfei1991)).
* Fix the PR body check for `Reverts #number` [#52874](https://github.com/ClickHouse/ClickHouse/pull/52874) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Analyzer WITH statement references test [#52875](https://github.com/ClickHouse/ClickHouse/pull/52875) ([Maksim Kita](https://github.com/kitaisreal)).
* Disable more tests for debug. [#52878](https://github.com/ClickHouse/ClickHouse/pull/52878) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix assertion in mutations with transactions [#52894](https://github.com/ClickHouse/ClickHouse/pull/52894) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fixed test_profile_max_sessions_for_user test flakiness [#52897](https://github.com/ClickHouse/ClickHouse/pull/52897) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Use concepts to replace more std::enable_if_t [#52898](https://github.com/ClickHouse/ClickHouse/pull/52898) ([flynn](https://github.com/ucasfl)).
* Disable `test_reconfig_replace_leader_in_one_command` [#52901](https://github.com/ClickHouse/ClickHouse/pull/52901) ([Antonio Andelic](https://github.com/antonio2368)).
* tests: fix possible EADDRINUSE v2 [#52906](https://github.com/ClickHouse/ClickHouse/pull/52906) ([Azat Khuzhin](https://github.com/azat)).
* Merging [#52897](https://github.com/ClickHouse/ClickHouse/issues/52897) [#52907](https://github.com/ClickHouse/ClickHouse/pull/52907) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Remove obsolete `no-upgrade-check` tag [#52915](https://github.com/ClickHouse/ClickHouse/pull/52915) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flaky test_storage_s3_queue::test_multiple_tables_streaming_sync_distributed [#52944](https://github.com/ClickHouse/ClickHouse/pull/52944) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Don't create empty parts on drop partition if we have a transaction [#52945](https://github.com/ClickHouse/ClickHouse/pull/52945) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Analyzer: fix WITH clause resolving [#52947](https://github.com/ClickHouse/ClickHouse/pull/52947) ([Dmitry Novik](https://github.com/novikd)).
* Refactor CI_CONFIG [#52948](https://github.com/ClickHouse/ClickHouse/pull/52948) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Try to fix assert in remove redundant sorting [#52950](https://github.com/ClickHouse/ClickHouse/pull/52950) ([Igor Nikonov](https://github.com/devcrafter)).
* Remove unused code in StorageSystemStackTrace [#52952](https://github.com/ClickHouse/ClickHouse/pull/52952) ([Azat Khuzhin](https://github.com/azat)).
* Fix wrong error code "BAD_GET" [#52954](https://github.com/ClickHouse/ClickHouse/pull/52954) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix some issues with databases [#52956](https://github.com/ClickHouse/ClickHouse/pull/52956) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix config update in HTTP Header Filtering [#52957](https://github.com/ClickHouse/ClickHouse/pull/52957) ([Nikolay Degterinsky](https://github.com/evillique)).
* Added peak_memory_usage to clickhouse-client final progress message [#52961](https://github.com/ClickHouse/ClickHouse/pull/52961) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* tests: fix 01293_client_interactive_vertical_multiline flakiness (increase timeout) [#52965](https://github.com/ClickHouse/ClickHouse/pull/52965) ([Azat Khuzhin](https://github.com/azat)).
* Added TSAN option report_atomic_races=0 for test_max_sessions_for_user [#52969](https://github.com/ClickHouse/ClickHouse/pull/52969) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* MaterializedMySQL: Add tests for unquoted utf8 column names in DML [#52971](https://github.com/ClickHouse/ClickHouse/pull/52971) ([Val Doroshchuk](https://github.com/valbok)).
* Update version_date.tsv and changelogs after v23.7.2.25-stable [#52976](https://github.com/ClickHouse/ClickHouse/pull/52976) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Decrease a num of tries for a couple of too slow tests for debug. [#52981](https://github.com/ClickHouse/ClickHouse/pull/52981) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix test `00061_storage_buffer` [#52983](https://github.com/ClickHouse/ClickHouse/pull/52983) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove `test_host_regexp_multiple_ptr_records_concurrent`, CC @arthurpassos [#52984](https://github.com/ClickHouse/ClickHouse/pull/52984) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `test_zookeeper_config` [#52988](https://github.com/ClickHouse/ClickHouse/pull/52988) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove assertion from test_no_ttl_merges_in_busy_pool [#52989](https://github.com/ClickHouse/ClickHouse/pull/52989) ([alesapin](https://github.com/alesapin)).
* Fix `test_dictionary_custom_settings` [#52990](https://github.com/ClickHouse/ClickHouse/pull/52990) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix flaky test [#53007](https://github.com/ClickHouse/ClickHouse/pull/53007) ([alesapin](https://github.com/alesapin)).
* Fix default port for Keeper Client [#53010](https://github.com/ClickHouse/ClickHouse/pull/53010) ([pufit](https://github.com/pufit)).
* Add a test to broken tests (Analyzer) [#53013](https://github.com/ClickHouse/ClickHouse/pull/53013) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Implement big-endian support for transform [#53015](https://github.com/ClickHouse/ClickHouse/pull/53015) ([ltrk2](https://github.com/ltrk2)).
* Fix completion for clickhouse-keeper-client [#53029](https://github.com/ClickHouse/ClickHouse/pull/53029) ([Azat Khuzhin](https://github.com/azat)).
* clickhouse-keeper-client: fix version parsing for set command [#53031](https://github.com/ClickHouse/ClickHouse/pull/53031) ([Azat Khuzhin](https://github.com/azat)).
* MaterializedMySQL: Add tests to alter named collections [#53032](https://github.com/ClickHouse/ClickHouse/pull/53032) ([Val Doroshchuk](https://github.com/valbok)).
* Fix description for 's3_upload_part_size_multiply_parts_count_threshold' setting [#53042](https://github.com/ClickHouse/ClickHouse/pull/53042) ([Elena Torró](https://github.com/elenatorro)).
* Update 01114_database_atomic.sh [#53043](https://github.com/ClickHouse/ClickHouse/pull/53043) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Revert revert of "Remove try/catch from DatabaseFilesystem" [#53045](https://github.com/ClickHouse/ClickHouse/pull/53045) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix cache related logical error in stress tests [#53047](https://github.com/ClickHouse/ClickHouse/pull/53047) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove upgrade checks with sanitizers [#53051](https://github.com/ClickHouse/ClickHouse/pull/53051) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Increase election timeout in integration tests [#53052](https://github.com/ClickHouse/ClickHouse/pull/53052) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Analyzer: do not enable it for old servers in tests [#53053](https://github.com/ClickHouse/ClickHouse/pull/53053) ([Dmitry Novik](https://github.com/novikd)).
* Try to make `01414_mutations_and_errors_zookeeper` less flaky [#53056](https://github.com/ClickHouse/ClickHouse/pull/53056) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Fix test `02434_cancel_insert_when_client_dies` [#53062](https://github.com/ClickHouse/ClickHouse/pull/53062) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add `abort_on_error=1` to `TSAN_OPTIONS` [#53065](https://github.com/ClickHouse/ClickHouse/pull/53065) ([Nikita Taranov](https://github.com/nickitat)).
* Fix Parquet stats for Float32 and Float64 [#53067](https://github.com/ClickHouse/ClickHouse/pull/53067) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix a comment [#53072](https://github.com/ClickHouse/ClickHouse/pull/53072) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix 02263_format_insert_settings flakiness [#53080](https://github.com/ClickHouse/ClickHouse/pull/53080) ([Azat Khuzhin](https://github.com/azat)).
* Something with tests [#53081](https://github.com/ClickHouse/ClickHouse/pull/53081) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update version_date.tsv and changelogs after v23.7.3.14-stable [#53084](https://github.com/ClickHouse/ClickHouse/pull/53084) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Simplify system logs creation [#53085](https://github.com/ClickHouse/ClickHouse/pull/53085) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix hung check in stress test [#53090](https://github.com/ClickHouse/ClickHouse/pull/53090) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add clusters for running tests locally easily [#53091](https://github.com/ClickHouse/ClickHouse/pull/53091) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wording [#53092](https://github.com/ClickHouse/ClickHouse/pull/53092) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Update README.md [#53097](https://github.com/ClickHouse/ClickHouse/pull/53097) ([Tyler Hannan](https://github.com/tylerhannan)).
* Remove old util [#53099](https://github.com/ClickHouse/ClickHouse/pull/53099) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add optional parameters to Buffer Engine definition [#53102](https://github.com/ClickHouse/ClickHouse/pull/53102) ([Elena Torró](https://github.com/elenatorro)).
* Compatibility with clang-17 [#53104](https://github.com/ClickHouse/ClickHouse/pull/53104) ([Raúl Marín](https://github.com/Algunenano)).
* Remove duplicate test: `test_concurrent_alter_with_ttl_move` [#53107](https://github.com/ClickHouse/ClickHouse/pull/53107) ([alesapin](https://github.com/alesapin)).
* Relax flaky test `test_s3_engine_heavy_write_check_mem` [#53108](https://github.com/ClickHouse/ClickHouse/pull/53108) ([alesapin](https://github.com/alesapin)).
* Update PocoHTTPClient.cpp [#53109](https://github.com/ClickHouse/ClickHouse/pull/53109) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add hints for HTTP handlers [#53110](https://github.com/ClickHouse/ClickHouse/pull/53110) ([Ruslan Mardugalliamov](https://github.com/rmarduga)).
* Revert changes in `ZstdDeflatingAppendableWriteBuffer` [#53111](https://github.com/ClickHouse/ClickHouse/pull/53111) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix flaky test by using azure_query function [#53113](https://github.com/ClickHouse/ClickHouse/pull/53113) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Update `test_restore_replica` [#53119](https://github.com/ClickHouse/ClickHouse/pull/53119) ([Alexander Tokmakov](https://github.com/tavplubix)).
* do not fail if prctl is not allowed ([#43589](https://github.com/ClickHouse/ClickHouse/issues/43589)) [#53122](https://github.com/ClickHouse/ClickHouse/pull/53122) ([ekrasikov](https://github.com/ekrasikov)).
* Use more unique name for TemporaryFileOnDisk [#53123](https://github.com/ClickHouse/ClickHouse/pull/53123) ([Vitaly Baranov](https://github.com/vitlibar)).
* Update `Mergeable Check` at the finishing CI [#53126](https://github.com/ClickHouse/ClickHouse/pull/53126) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Added retry for TransportException in azure blob storage [#53128](https://github.com/ClickHouse/ClickHouse/pull/53128) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Small fix for HTTPHeaderFilter [#53146](https://github.com/ClickHouse/ClickHouse/pull/53146) ([San](https://github.com/santrancisco)).
* Added functions to disallow concurrency of backup restore test [#53150](https://github.com/ClickHouse/ClickHouse/pull/53150) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Attempt to fix test_insert_quorum by adding sync second replica [#53155](https://github.com/ClickHouse/ClickHouse/pull/53155) ([vdimir](https://github.com/vdimir)).
* fix mem leak in RegExpTreeDictionary [#53160](https://github.com/ClickHouse/ClickHouse/pull/53160) ([Han Fei](https://github.com/hanfei1991)).
* Fixes for detach/attach partition and broken detached parts cleanup [#53164](https://github.com/ClickHouse/ClickHouse/pull/53164) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update conftest.py [#53166](https://github.com/ClickHouse/ClickHouse/pull/53166) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Allow experimental features when recovering Replicated db replica [#53167](https://github.com/ClickHouse/ClickHouse/pull/53167) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update version_date.tsv and changelogs after v23.7.4.5-stable [#53169](https://github.com/ClickHouse/ClickHouse/pull/53169) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Analyzer: fix test_system_flush_logs [#53171](https://github.com/ClickHouse/ClickHouse/pull/53171) ([Dmitry Novik](https://github.com/novikd)).
* Fix warning in test_replicated_database [#53173](https://github.com/ClickHouse/ClickHouse/pull/53173) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix: 00838_unique_index test with analyzer [#53175](https://github.com/ClickHouse/ClickHouse/pull/53175) ([Igor Nikonov](https://github.com/devcrafter)).
* Improved efficiency for array operations [#53193](https://github.com/ClickHouse/ClickHouse/pull/53193) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Improve reading from archives [#53198](https://github.com/ClickHouse/ClickHouse/pull/53198) ([Antonio Andelic](https://github.com/antonio2368)).
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Enable hedged requests under tsan [#53219](https://github.com/ClickHouse/ClickHouse/pull/53219) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove garbage [#53241](https://github.com/ClickHouse/ClickHouse/pull/53241) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix LOGICAL_ERROR exception in ALTER query [#53242](https://github.com/ClickHouse/ClickHouse/pull/53242) ([Nikolay Degterinsky](https://github.com/evillique)).
* Fix bad test `00417_kill_query` [#53244](https://github.com/ClickHouse/ClickHouse/pull/53244) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test `02428_delete_with_settings` [#53246](https://github.com/ClickHouse/ClickHouse/pull/53246) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove unrecognizable garbage from the performance test [#53249](https://github.com/ClickHouse/ClickHouse/pull/53249) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable randomization in `02273_full_sort_join` [#53251](https://github.com/ClickHouse/ClickHouse/pull/53251) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove outdated Dockerfile [#53252](https://github.com/ClickHouse/ClickHouse/pull/53252) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve fs cache cleanup [#53273](https://github.com/ClickHouse/ClickHouse/pull/53273) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add garbage [#53279](https://github.com/ClickHouse/ClickHouse/pull/53279) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Inhibit randomization in `00906_low_cardinality_cache` [#53283](https://github.com/ClickHouse/ClickHouse/pull/53283) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix test 01169_old_alter_partition_isolation_stress [#53292](https://github.com/ClickHouse/ClickHouse/pull/53292) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Remove no-parallel tag from some tests [#53295](https://github.com/ClickHouse/ClickHouse/pull/53295) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix test `00002_log_and_exception_messages_formatting` [#53296](https://github.com/ClickHouse/ClickHouse/pull/53296) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `01485_256_bit_multiply` [#53297](https://github.com/ClickHouse/ClickHouse/pull/53297) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove flaky tests for the experimental `UNDROP` feature [#53298](https://github.com/ClickHouse/ClickHouse/pull/53298) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added test for session_log using remote and mysql sessions [#53304](https://github.com/ClickHouse/ClickHouse/pull/53304) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Added integration test for session_log using concurrent GRPC/PostgreSQL/MySQL sessions [#53305](https://github.com/ClickHouse/ClickHouse/pull/53305) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Added test for session_log using concurrent TCP/HTTP/MySQL sessions [#53306](https://github.com/ClickHouse/ClickHouse/pull/53306) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Added test for session_log dropping user/role/profile currently used in active session [#53307](https://github.com/ClickHouse/ClickHouse/pull/53307) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Added an integration test for client peak_memory_usage value [#53308](https://github.com/ClickHouse/ClickHouse/pull/53308) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Fix log message [#53339](https://github.com/ClickHouse/ClickHouse/pull/53339) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Analyzer: fix quotas for system tables [#53343](https://github.com/ClickHouse/ClickHouse/pull/53343) ([Dmitry Novik](https://github.com/novikd)).
* Relax mergeable check [#53344](https://github.com/ClickHouse/ClickHouse/pull/53344) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add clickhouse-keeper-client and clickhouse-keeper-converter symlinks to clickhouse-keeper package [#53357](https://github.com/ClickHouse/ClickHouse/pull/53357) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Add linux s390x to universal installer [#53358](https://github.com/ClickHouse/ClickHouse/pull/53358) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* Make one exception message longer [#53375](https://github.com/ClickHouse/ClickHouse/pull/53375) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix wrong query in log messages check [#53376](https://github.com/ClickHouse/ClickHouse/pull/53376) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Non-significant changes [#53377](https://github.com/ClickHouse/ClickHouse/pull/53377) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Inhibit randomization in more tests [#53378](https://github.com/ClickHouse/ClickHouse/pull/53378) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make some Keeper exceptions more structured [#53379](https://github.com/ClickHouse/ClickHouse/pull/53379) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Follow-up to [#52695](https://github.com/ClickHouse/ClickHouse/issues/52695): Move tests to a more appropriate place [#53400](https://github.com/ClickHouse/ClickHouse/pull/53400) ([Robert Schulze](https://github.com/rschu1ze)).
* Minor fixes (hints for wrong DB or table name) [#53402](https://github.com/ClickHouse/ClickHouse/pull/53402) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Quick fail undocumented features [#53413](https://github.com/ClickHouse/ClickHouse/pull/53413) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* boost getNestedColumnWithDefaultOnNull by insertManyDefaults [#53414](https://github.com/ClickHouse/ClickHouse/pull/53414) ([frinkr](https://github.com/frinkr)).
* Update test_distributed_inter_server_secret to pass with analyzer [#53416](https://github.com/ClickHouse/ClickHouse/pull/53416) ([vdimir](https://github.com/vdimir)).
* Parallel replicas: remove unnecessary code [#53419](https://github.com/ClickHouse/ClickHouse/pull/53419) ([Igor Nikonov](https://github.com/devcrafter)).
* Refactorings for configuration of in-memory caches [#53422](https://github.com/ClickHouse/ClickHouse/pull/53422) ([Robert Schulze](https://github.com/rschu1ze)).
* Less exceptions with runtime format string [#53424](https://github.com/ClickHouse/ClickHouse/pull/53424) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Analyzer: fix virtual columns in StorageDistributed [#53426](https://github.com/ClickHouse/ClickHouse/pull/53426) ([Dmitry Novik](https://github.com/novikd)).
* Fix creation of empty parts [#53429](https://github.com/ClickHouse/ClickHouse/pull/53429) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Merging [#53177](https://github.com/ClickHouse/ClickHouse/issues/53177) [#53430](https://github.com/ClickHouse/ClickHouse/pull/53430) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Merging [#53142](https://github.com/ClickHouse/ClickHouse/issues/53142) [#53431](https://github.com/ClickHouse/ClickHouse/pull/53431) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Do not send logs to CI if the credentials are not set [#53441](https://github.com/ClickHouse/ClickHouse/pull/53441) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Minor: Factorize constants in Annoy index [#53444](https://github.com/ClickHouse/ClickHouse/pull/53444) ([Robert Schulze](https://github.com/rschu1ze)).
* Restart killed PublishedReleaseCI workflows [#53445](https://github.com/ClickHouse/ClickHouse/pull/53445) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Follow-up: Do not send logs to CI if the credentials are not set [#53456](https://github.com/ClickHouse/ClickHouse/pull/53456) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Merging [#53307](https://github.com/ClickHouse/ClickHouse/issues/53307) [#53472](https://github.com/ClickHouse/ClickHouse/pull/53472) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Merging [#53306](https://github.com/ClickHouse/ClickHouse/issues/53306) [#53473](https://github.com/ClickHouse/ClickHouse/pull/53473) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Merging [#53304](https://github.com/ClickHouse/ClickHouse/issues/53304) [#53474](https://github.com/ClickHouse/ClickHouse/pull/53474) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Merging [#53373](https://github.com/ClickHouse/ClickHouse/issues/53373) [#53475](https://github.com/ClickHouse/ClickHouse/pull/53475) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix flaky test `02443_detach_attach_partition` [#53478](https://github.com/ClickHouse/ClickHouse/pull/53478) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Remove outdated code in ReplicatedMergeTreeQueue::initialize() [#53484](https://github.com/ClickHouse/ClickHouse/pull/53484) ([Azat Khuzhin](https://github.com/azat)).
* krb5: Fix CVE-2023-36054 [#53485](https://github.com/ClickHouse/ClickHouse/pull/53485) ([Robert Schulze](https://github.com/rschu1ze)).
* curl: update to latest master (fixes CVE-2023-32001) [#53487](https://github.com/ClickHouse/ClickHouse/pull/53487) ([Robert Schulze](https://github.com/rschu1ze)).
* Update boost to 1.79 [#53490](https://github.com/ClickHouse/ClickHouse/pull/53490) ([Robert Schulze](https://github.com/rschu1ze)).
* Get rid of secrets CLICKHOUSE_CI_LOGS [#53491](https://github.com/ClickHouse/ClickHouse/pull/53491) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update style checker [#53493](https://github.com/ClickHouse/ClickHouse/pull/53493) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update materialized_with_ddl.py [#53494](https://github.com/ClickHouse/ClickHouse/pull/53494) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix a race condition between RESTART REPLICAS and DROP DATABASE [#53495](https://github.com/ClickHouse/ClickHouse/pull/53495) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix tiny thing in Replicated database [#53496](https://github.com/ClickHouse/ClickHouse/pull/53496) ([Nikolay Degterinsky](https://github.com/evillique)).
* Simplify performance test [#53499](https://github.com/ClickHouse/ClickHouse/pull/53499) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added waiting for PostgreSQL compatibility port open in integration tests. [#53505](https://github.com/ClickHouse/ClickHouse/pull/53505) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Allow non standalone keeper run in integration tests [#53512](https://github.com/ClickHouse/ClickHouse/pull/53512) ([Duc Canh Le](https://github.com/canhld94)).
* Make sending logs to the cloud less fragile (and fix an unrelated flaky test) [#53528](https://github.com/ClickHouse/ClickHouse/pull/53528) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update test.py [#53534](https://github.com/ClickHouse/ClickHouse/pull/53534) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `AddressSanitizer failed to allocate 0x0 (0) bytes of SetAlternateSignalStack` in integration tests [#53535](https://github.com/ClickHouse/ClickHouse/pull/53535) ([Nikita Taranov](https://github.com/nickitat)).
* Fix keeper default path check [#53539](https://github.com/ClickHouse/ClickHouse/pull/53539) ([pufit](https://github.com/pufit)).
* Follow-up to [#53528](https://github.com/ClickHouse/ClickHouse/issues/53528) [#53544](https://github.com/ClickHouse/ClickHouse/pull/53544) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update 00002_log_and_exception_messages_formatting.sql [#53545](https://github.com/ClickHouse/ClickHouse/pull/53545) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Update krb5 to 1.21.2 [#53552](https://github.com/ClickHouse/ClickHouse/pull/53552) ([Robert Schulze](https://github.com/rschu1ze)).
* Enable ISA-L on x86-64 only by default [#53553](https://github.com/ClickHouse/ClickHouse/pull/53553) ([ltrk2](https://github.com/ltrk2)).
* Change Big Endian-UUID to work the same as Little Endian-UUID [#53556](https://github.com/ClickHouse/ClickHouse/pull/53556) ([Austin Kothig](https://github.com/kothiga)).
* Bump openldap to LTS version (v2.5.16) [#53558](https://github.com/ClickHouse/ClickHouse/pull/53558) ([Robert Schulze](https://github.com/rschu1ze)).
* Update 02443_detach_attach_partition.sh [#53564](https://github.com/ClickHouse/ClickHouse/pull/53564) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Proper destruction of task in ShellCommandSource [#53573](https://github.com/ClickHouse/ClickHouse/pull/53573) ([Amos Bird](https://github.com/amosbird)).
* Fix for flaky test_ssl_cert_authentication [#53586](https://github.com/ClickHouse/ClickHouse/pull/53586) ([Pradeep Chhetri](https://github.com/chhetripradeep)).
* AARCH64 Neon memequal wide [#53588](https://github.com/ClickHouse/ClickHouse/pull/53588) ([Maksim Kita](https://github.com/kitaisreal)).
* Experiment Aggregator merge and destroy states in batch [#53589](https://github.com/ClickHouse/ClickHouse/pull/53589) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix test `02102_row_binary_with_names_and_types` [#53592](https://github.com/ClickHouse/ClickHouse/pull/53592) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove useless test [#53599](https://github.com/ClickHouse/ClickHouse/pull/53599) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Simplify test `01600_parts_types_metrics_long` [#53606](https://github.com/ClickHouse/ClickHouse/pull/53606) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* :lipstick: [S3::URI] Fix comment typos around versionId [#53607](https://github.com/ClickHouse/ClickHouse/pull/53607) ([Tomáš Hromada](https://github.com/gyfis)).
* Fix upgrade check [#53611](https://github.com/ClickHouse/ClickHouse/pull/53611) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Cleanup cluster test: remove unnecessary zookeeper [#53617](https://github.com/ClickHouse/ClickHouse/pull/53617) ([Igor Nikonov](https://github.com/devcrafter)).
* Bump boost to 1.80 [#53625](https://github.com/ClickHouse/ClickHouse/pull/53625) ([Robert Schulze](https://github.com/rschu1ze)).
* Update version_date.tsv and changelogs after v23.3.9.55-lts [#53626](https://github.com/ClickHouse/ClickHouse/pull/53626) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* CMake small refactoring [#53628](https://github.com/ClickHouse/ClickHouse/pull/53628) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix data race of shell command [#53631](https://github.com/ClickHouse/ClickHouse/pull/53631) ([Amos Bird](https://github.com/amosbird)).
* Fix 02443_detach_attach_partition [#53633](https://github.com/ClickHouse/ClickHouse/pull/53633) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Add default timeout value for ClickHouseHelper [#53639](https://github.com/ClickHouse/ClickHouse/pull/53639) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Implement support for more aggregate functions on big-endian [#53650](https://github.com/ClickHouse/ClickHouse/pull/53650) ([ltrk2](https://github.com/ltrk2)).
* fix Logical Error in AsynchronousBoundedReadBuffer [#53651](https://github.com/ClickHouse/ClickHouse/pull/53651) ([Sema Checherinda](https://github.com/CheSema)).
* State of State and avg aggregation function fix for big endian [#53655](https://github.com/ClickHouse/ClickHouse/pull/53655) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
* Resubmit [#50171](https://github.com/ClickHouse/ClickHouse/issues/50171) [#53678](https://github.com/ClickHouse/ClickHouse/pull/53678) ([alesapin](https://github.com/alesapin)).
* Bump boost to 1.81 [#53679](https://github.com/ClickHouse/ClickHouse/pull/53679) ([Robert Schulze](https://github.com/rschu1ze)).
* Whitespaces [#53690](https://github.com/ClickHouse/ClickHouse/pull/53690) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove bad test [#53691](https://github.com/ClickHouse/ClickHouse/pull/53691) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix bad path format in logs [#53693](https://github.com/ClickHouse/ClickHouse/pull/53693) ([alesapin](https://github.com/alesapin)).
* Correct a functional test to not use endianness-specific input [#53697](https://github.com/ClickHouse/ClickHouse/pull/53697) ([ltrk2](https://github.com/ltrk2)).
* Fix running clickhouse-test with python 3.8 [#53700](https://github.com/ClickHouse/ClickHouse/pull/53700) ([Dmitry Novik](https://github.com/novikd)).
* refactor some old code [#53704](https://github.com/ClickHouse/ClickHouse/pull/53704) ([flynn](https://github.com/ucasfl)).
* Fixed wrong python test name pattern [#53713](https://github.com/ClickHouse/ClickHouse/pull/53713) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Fix flaky `shutdown_wait_unfinished_queries` integration test [#53714](https://github.com/ClickHouse/ClickHouse/pull/53714) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Update version_date.tsv and changelogs after v23.3.10.5-lts [#53733](https://github.com/ClickHouse/ClickHouse/pull/53733) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix flaky test_storage_s3_queue/test.py::test_delete_after_processing [#53736](https://github.com/ClickHouse/ClickHouse/pull/53736) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix broken `02862_sorted_distinct_sparse_fix` [#53738](https://github.com/ClickHouse/ClickHouse/pull/53738) ([Antonio Andelic](https://github.com/antonio2368)).
* Do not warn about arch_sys_counter clock [#53739](https://github.com/ClickHouse/ClickHouse/pull/53739) ([Artur Malchanau](https://github.com/Hexta)).
* Add some profile events [#53741](https://github.com/ClickHouse/ClickHouse/pull/53741) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support clang-18 (Wmissing-field-initializers) [#53751](https://github.com/ClickHouse/ClickHouse/pull/53751) ([Raúl Marín](https://github.com/Algunenano)).
* Upgrade openSSL to v3.0.10 [#53756](https://github.com/ClickHouse/ClickHouse/pull/53756) ([bhavnajindal](https://github.com/bhavnajindal)).
* Improve JSON-handling on s390x [#53760](https://github.com/ClickHouse/ClickHouse/pull/53760) ([ltrk2](https://github.com/ltrk2)).
* Reduce API calls to SSM client [#53762](https://github.com/ClickHouse/ClickHouse/pull/53762) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Remove branch references from .gitmodules [#53763](https://github.com/ClickHouse/ClickHouse/pull/53763) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix reading from `url` with all filtered paths [#53796](https://github.com/ClickHouse/ClickHouse/pull/53796) ([Antonio Andelic](https://github.com/antonio2368)).
* Follow-up to [#53611](https://github.com/ClickHouse/ClickHouse/issues/53611) [#53799](https://github.com/ClickHouse/ClickHouse/pull/53799) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix a bug in attach partition [#53811](https://github.com/ClickHouse/ClickHouse/pull/53811) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Bump boost to 1.82 [#53812](https://github.com/ClickHouse/ClickHouse/pull/53812) ([Robert Schulze](https://github.com/rschu1ze)).
* Enable producing endianness-independent output in lz4 [#53816](https://github.com/ClickHouse/ClickHouse/pull/53816) ([ltrk2](https://github.com/ltrk2)).
* Fix typo in cluster name. [#53829](https://github.com/ClickHouse/ClickHouse/pull/53829) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Get rid of describe_parameters for the best robot token [#53833](https://github.com/ClickHouse/ClickHouse/pull/53833) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Update 00002_log_and_exception_messages_formatting.sql [#53839](https://github.com/ClickHouse/ClickHouse/pull/53839) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix after [#51622](https://github.com/ClickHouse/ClickHouse/issues/51622) [#53840](https://github.com/ClickHouse/ClickHouse/pull/53840) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix segfault in `TableNameHints` (with `Lazy` database) [#53849](https://github.com/ClickHouse/ClickHouse/pull/53849) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Follow-up to [#53501](https://github.com/ClickHouse/ClickHouse/issues/53501) [#53851](https://github.com/ClickHouse/ClickHouse/pull/53851) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Follow-up to [#53528](https://github.com/ClickHouse/ClickHouse/issues/53528) [#53852](https://github.com/ClickHouse/ClickHouse/pull/53852) ([Alexander Tokmakov](https://github.com/tavplubix)).
* refactor some code [#53856](https://github.com/ClickHouse/ClickHouse/pull/53856) ([flynn](https://github.com/ucasfl)).
* Bump boost to 1.83 [#53859](https://github.com/ClickHouse/ClickHouse/pull/53859) ([Robert Schulze](https://github.com/rschu1ze)).
* Remove unused parallel replicas coordinator in query info [#53862](https://github.com/ClickHouse/ClickHouse/pull/53862) ([Igor Nikonov](https://github.com/devcrafter)).
* Update version_date.tsv and changelogs after v23.7.5.30-stable [#53870](https://github.com/ClickHouse/ClickHouse/pull/53870) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.6.3.87-stable [#53872](https://github.com/ClickHouse/ClickHouse/pull/53872) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.3.11.5-lts [#53873](https://github.com/ClickHouse/ClickHouse/pull/53873) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v23.5.5.92-stable [#53874](https://github.com/ClickHouse/ClickHouse/pull/53874) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Update version_date.tsv and changelogs after v22.8.21.38-lts [#53875](https://github.com/ClickHouse/ClickHouse/pull/53875) ([robot-clickhouse](https://github.com/robot-clickhouse)).
* Fix: USearch deserialize [#53876](https://github.com/ClickHouse/ClickHouse/pull/53876) ([Davit Vardanyan](https://github.com/davvard)).
* Improve schema inference for archives [#53880](https://github.com/ClickHouse/ClickHouse/pull/53880) ([Antonio Andelic](https://github.com/antonio2368)).
* Make UInt128TrivialHash endianness-independent [#53891](https://github.com/ClickHouse/ClickHouse/pull/53891) ([ltrk2](https://github.com/ltrk2)).
* Use iterators instead of std::ranges [#53893](https://github.com/ClickHouse/ClickHouse/pull/53893) ([ltrk2](https://github.com/ltrk2)).
* Finalize file descriptor in ~WriteBufferToFileSegment [#53895](https://github.com/ClickHouse/ClickHouse/pull/53895) ([vdimir](https://github.com/vdimir)).
* Fix: respect skip_unavailable_shards with parallel replicas [#53904](https://github.com/ClickHouse/ClickHouse/pull/53904) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix flakiness of 00514_interval_operators [#53906](https://github.com/ClickHouse/ClickHouse/pull/53906) ([Michael Kolupaev](https://github.com/al13n321)).
* Change IStorage interface by random walk, no goal in particular [#54009](https://github.com/ClickHouse/ClickHouse/pull/54009) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Refactor logic around async insert with deduplication [#54012](https://github.com/ClickHouse/ClickHouse/pull/54012) ([Antonio Andelic](https://github.com/antonio2368)).
* More assertive [#54044](https://github.com/ClickHouse/ClickHouse/pull/54044) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Correct doc for filesystem_prefetch_max_memory_usage [#54058](https://github.com/ClickHouse/ClickHouse/pull/54058) ([Raúl Marín](https://github.com/Algunenano)).
* Fix after [#52943](https://github.com/ClickHouse/ClickHouse/issues/52943) [#54064](https://github.com/ClickHouse/ClickHouse/pull/54064) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Parse IS NOT DISTINCT and <=> operators [#54067](https://github.com/ClickHouse/ClickHouse/pull/54067) ([vdimir](https://github.com/vdimir)).
* Replace dlcdn.apache.org by archive domain [#54081](https://github.com/ClickHouse/ClickHouse/pull/54081) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Increased log waiting timeout in test_profile_max_sessions_for_user [#54092](https://github.com/ClickHouse/ClickHouse/pull/54092) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
|
||||
* Update Dockerfile [#54118](https://github.com/ClickHouse/ClickHouse/pull/54118) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Small improvements in `getAlterMutationCommandsForPart` [#54126](https://github.com/ClickHouse/ClickHouse/pull/54126) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Fix some more analyzer tests [#54128](https://github.com/ClickHouse/ClickHouse/pull/54128) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Disable `01600_parts_types_metrics_long` for asan [#54132](https://github.com/ClickHouse/ClickHouse/pull/54132) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Fixing 01086_odbc_roundtrip with analyzer. [#54133](https://github.com/ClickHouse/ClickHouse/pull/54133) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||
* Add warnings about ingestion script speed and memory usage in Laion dataset instructions [#54153](https://github.com/ClickHouse/ClickHouse/pull/54153) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||
* tests: mark 02152_http_external_tables_memory_tracking as no-parallel [#54155](https://github.com/ClickHouse/ClickHouse/pull/54155) ([Azat Khuzhin](https://github.com/azat)).
|
||||
* The external logs have had colliding arguments [#54165](https://github.com/ClickHouse/ClickHouse/pull/54165) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Rename macro [#54169](https://github.com/ClickHouse/ClickHouse/pull/54169) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
|
@ -738,16 +738,16 @@ age('unit', startdate, enddate, [timezone])

- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
    Possible values:

    - `microsecond` (possible abbreviations: `us`, `u`)
    - `millisecond` (possible abbreviations: `ms`)
    - `second` (possible abbreviations: `ss`, `s`)
    - `minute` (possible abbreviations: `mi`, `n`)
    - `hour` (possible abbreviations: `hh`, `h`)
    - `day` (possible abbreviations: `dd`, `d`)
    - `week` (possible abbreviations: `wk`, `ww`)
    - `month` (possible abbreviations: `mm`, `m`)
    - `quarter` (possible abbreviations: `qq`, `q`)
    - `year` (possible abbreviations: `yyyy`, `yy`)
    - `microsecond` `microseconds` `us` `u`
    - `millisecond` `milliseconds` `ms`
    - `second` `seconds` `ss` `s`
    - `minute` `minutes` `mi` `n`
    - `hour` `hours` `hh` `h`
    - `day` `days` `dd` `d`
    - `week` `weeks` `wk` `ww`
    - `month` `months` `mm` `m`
    - `quarter` `quarters` `qq` `q`
    - `year` `years` `yyyy` `yy`

- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
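A quick, hedged illustration of the plural unit aliases added above (output computed by hand, assuming a server built with this change):

``` sql
SELECT age('months', toDate('2023-01-15'), toDate('2023-09-01'));
-- 'months' is now accepted as an alias of 'month'; should return 7
```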
@ -815,16 +815,16 @@ Aliases: `dateDiff`, `DATE_DIFF`, `timestampDiff`, `timestamp_diff`, `TIMESTAMP_

- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
    Possible values:

    - `microsecond` (possible abbreviations: `us`, `u`)
    - `millisecond` (possible abbreviations: `ms`)
    - `second` (possible abbreviations: `ss`, `s`)
    - `minute` (possible abbreviations: `mi`, `n`)
    - `hour` (possible abbreviations: `hh`, `h`)
    - `day` (possible abbreviations: `dd`, `d`)
    - `week` (possible abbreviations: `wk`, `ww`)
    - `month` (possible abbreviations: `mm`, `m`)
    - `quarter` (possible abbreviations: `qq`, `q`)
    - `year` (possible abbreviations: `yyyy`, `yy`)
    - `microsecond` `microseconds` `us` `u`
    - `millisecond` `milliseconds` `ms`
    - `second` `seconds` `ss` `s`
    - `minute` `minutes` `mi` `n`
    - `hour` `hours` `hh` `h`
    - `day` `days` `dd` `d`
    - `week` `weeks` `wk` `ww`
    - `month` `months` `mm` `m`
    - `quarter` `quarters` `qq` `q`
    - `year` `years` `yyyy` `yy`

- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
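The same plural aliases apply to `dateDiff`, which counts crossed unit boundaries rather than completed intervals (a sketch; output assumed):

``` sql
SELECT dateDiff('years', toDate('2021-12-31'), toDate('2022-01-01'));
-- alias of 'year'; should return 1, since a year boundary is crossed
```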
@ -1230,6 +1230,42 @@ Result:

< Σ >
```

## decodeHTMLComponent

Un-escapes substrings with special meaning in HTML. For example: `&hbar;` `&gt;` `&diamondsuit;` `&heartsuit;` `&lt;` etc.

This function also replaces numeric character references with Unicode characters. Both decimal (like `&#10003;`) and hexadecimal (`&#x2713;`) forms are supported.

**Syntax**

``` sql
decodeHTMLComponent(x)
```

**Arguments**

- `x` — An input string. [String](../../sql-reference/data-types/string.md).

**Returned value**

- The un-escaped string.

Type: [String](../../sql-reference/data-types/string.md).

**Example**

``` sql
SELECT decodeHTMLComponent('&apos;CH&apos;');
SELECT decodeHTMLComponent('I&heartsuit;ClickHouse');
```

Result:

```result
'CH'
I♥ClickHouse
```
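As a hedged check of the numeric character references described above (both forms should map to the same U+2713 check mark):

``` sql
SELECT decodeHTMLComponent('&#10003; &#x2713;');
```

Expected result: `✓ ✓`.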

## extractTextFromHTML

This function extracts plain text from HTML or XHTML.
@ -59,7 +59,7 @@ public:
        String relative_path_from = validatePathAndGetAsRelative(path_from);
        String relative_path_to = validatePathAndGetAsRelative(path_to);

        disk_from->copyDirectoryContent(relative_path_from, disk_to, relative_path_to);
        disk_from->copyDirectoryContent(relative_path_from, disk_to, relative_path_to, /* settings= */ {});
    }
};
}
@ -1038,41 +1038,6 @@ try
        fs::create_directories(path / "metadata_dropped/");
    }

#if USE_ROCKSDB
    /// Initialize merge tree metadata cache
    if (config().has("merge_tree_metadata_cache"))
    {
        global_context->addWarningMessage("The setting 'merge_tree_metadata_cache' is enabled."
            " But the feature of 'metadata cache in RocksDB' is experimental and is not ready for production."
            " The usage of this feature can lead to data corruption and loss. The setting should be disabled in production."
            " See the corresponding report at https://github.com/ClickHouse/ClickHouse/issues/51182");

        fs::create_directories(path / "rocksdb/");
        size_t size = config().getUInt64("merge_tree_metadata_cache.lru_cache_size", 256 << 20);
        bool continue_if_corrupted = config().getBool("merge_tree_metadata_cache.continue_if_corrupted", false);
        try
        {
            LOG_DEBUG(log, "Initializing MergeTree metadata cache, lru_cache_size: {} continue_if_corrupted: {}",
                ReadableSize(size), continue_if_corrupted);
            global_context->initializeMergeTreeMetadataCache(path_str + "/" + "rocksdb", size);
        }
        catch (...)
        {
            if (continue_if_corrupted)
            {
                /// Rename rocksdb directory and reinitialize merge tree metadata cache
                time_t now = time(nullptr);
                fs::rename(path / "rocksdb", path / ("rocksdb.old." + std::to_string(now)));
                global_context->initializeMergeTreeMetadataCache(path_str + "/" + "rocksdb", size);
            }
            else
            {
                throw;
            }
        }
    }
#endif

    if (config().has("interserver_http_port") && config().has("interserver_https_port"))
        throw Exception(ErrorCodes::EXCESSIVE_ELEMENT_IN_CONFIG, "Both http and https interserver ports are specified");
@ -301,10 +301,10 @@ bool SettingsConstraints::Checker::check(SettingChange & change,
                                         ReactionOnViolation reaction,
                                         SettingSource source) const
{
    if (!explain.empty())
    if (!explain.text.empty())
    {
        if (reaction == THROW_ON_VIOLATION)
            throw Exception::createDeprecated(explain, code);
            throw Exception(explain, code);
        else
            return false;
    }
@ -389,7 +389,8 @@ SettingsConstraints::Checker SettingsConstraints::getChecker(const Settings & cu
{
    auto resolved_name = resolveSettingNameWithCache(setting_name);
    if (!current_settings.allow_ddl && resolved_name == "allow_ddl")
        return Checker("Cannot modify 'allow_ddl' setting when DDL queries are prohibited for the user", ErrorCodes::QUERY_IS_PROHIBITED);
        return Checker(PreformattedMessage::create("Cannot modify 'allow_ddl' setting when DDL queries are prohibited for the user"),
                       ErrorCodes::QUERY_IS_PROHIBITED);

    /** The `readonly` value is understood as follows:
      * 0 - no read-only restrictions.
@ -398,13 +399,14 @@ SettingsConstraints::Checker SettingsConstraints::getChecker(const Settings & cu
      */

    if (current_settings.readonly > 1 && resolved_name == "readonly")
        return Checker("Cannot modify 'readonly' setting in readonly mode", ErrorCodes::READONLY);
        return Checker(PreformattedMessage::create("Cannot modify 'readonly' setting in readonly mode"), ErrorCodes::READONLY);

    auto it = constraints.find(resolved_name);
    if (current_settings.readonly == 1)
    {
        if (it == constraints.end() || it->second.writability != SettingConstraintWritability::CHANGEABLE_IN_READONLY)
            return Checker("Cannot modify '" + String(setting_name) + "' setting in readonly mode", ErrorCodes::READONLY);
            return Checker(PreformattedMessage::create("Cannot modify '{}' setting in readonly mode", setting_name),
                           ErrorCodes::READONLY);
    }
    else // For both readonly=0 and readonly=2
    {
@ -113,7 +113,7 @@ private:
        using NameResolver = std::function<std::string_view(std::string_view)>;
        NameResolver setting_name_resolver;

        String explain;
        PreformattedMessage explain;
        int code = 0;

        // Allows everything
@ -122,7 +122,7 @@ private:
        {}

        // Forbidden with explanation
        Checker(const String & explain_, int code_)
        Checker(const PreformattedMessage & explain_, int code_)
            : constraint{.writability = SettingConstraintWritability::CONST}
            , explain(explain_)
            , code(code_)
@ -109,24 +109,12 @@ private:
    inline size_t max_fill() const { return 1ULL << (size_degree - 1); } /// NOLINT
    inline size_t mask() const { return buf_size() - 1; }

    inline size_t place(HashValue x) const
    {
        if constexpr (std::endian::native == std::endian::little)
            return (x >> UNIQUES_HASH_BITS_FOR_SKIP) & mask();
        else
            return (std::byteswap(x) >> UNIQUES_HASH_BITS_FOR_SKIP) & mask();
    }
    inline size_t place(HashValue x) const { return (x >> UNIQUES_HASH_BITS_FOR_SKIP) & mask(); }

    /// The value is divided by 2 ^ skip_degree
    inline bool good(HashValue hash) const
    {
        return hash == ((hash >> skip_degree) << skip_degree);
    }
    inline bool good(HashValue hash) const { return hash == ((hash >> skip_degree) << skip_degree); }

    HashValue hash(Value key) const
    {
        return static_cast<HashValue>(Hash()(key));
    }
    HashValue hash(Value key) const { return static_cast<HashValue>(Hash()(key)); }

    /// Delete all values whose hashes do not divide by 2 ^ skip_degree
    void rehash()
@ -338,11 +326,7 @@ public:

    void ALWAYS_INLINE insert(Value x)
    {
        HashValue hash_value;
        if constexpr (std::endian::native == std::endian::little)
            hash_value = hash(x);
        else
            hash_value = std::byteswap(hash(x));
        const HashValue hash_value = hash(x);
        if (!good(hash_value))
            return;

@ -403,25 +387,25 @@ public:
        if (m_size > UNIQUES_HASH_MAX_SIZE)
            throw Poco::Exception("Cannot write UniquesHashSet: too large size_degree.");

        DB::writeIntBinary(skip_degree, wb);
        DB::writeBinaryLittleEndian(skip_degree, wb);
        DB::writeVarUInt(m_size, wb);

        if (has_zero)
        {
            HashValue x = 0;
            DB::writeIntBinary(x, wb);
            DB::writeBinaryLittleEndian(x, wb);
        }

        for (size_t i = 0; i < buf_size(); ++i)
            if (buf[i])
                DB::writeIntBinary(buf[i], wb);
                DB::writeBinaryLittleEndian(buf[i], wb);
    }

    void read(DB::ReadBuffer & rb)
    {
        has_zero = false;

        DB::readIntBinary(skip_degree, rb);
        DB::readBinaryLittleEndian(skip_degree, rb);
        DB::readVarUInt(m_size, rb);

        if (m_size > UNIQUES_HASH_MAX_SIZE)
@ -440,7 +424,7 @@ public:
        for (size_t i = 0; i < m_size; ++i)
        {
            HashValue x = 0;
            DB::readIntBinary(x, rb);
            DB::readBinaryLittleEndian(x, rb);
            if (x == 0)
                has_zero = true;
            else
@ -454,6 +438,7 @@ public:

        for (size_t i = 0; i < m_size; ++i)
        {
            DB::transformEndianness<std::endian::native, std::endian::little>(hs[i]);
            if (hs[i] == 0)
                has_zero = true;
            else
@ -465,7 +450,7 @@ public:
    void readAndMerge(DB::ReadBuffer & rb)
    {
        UInt8 rhs_skip_degree = 0;
        DB::readIntBinary(rhs_skip_degree, rb);
        DB::readBinaryLittleEndian(rhs_skip_degree, rb);

        if (rhs_skip_degree > skip_degree)
        {
@ -490,7 +475,7 @@ public:
        for (size_t i = 0; i < rhs_size; ++i)
        {
            HashValue x = 0;
            DB::readIntBinary(x, rb);
            DB::readBinaryLittleEndian(x, rb);
            insertHash(x);
        }
    }
@ -501,6 +486,7 @@ public:

        for (size_t i = 0; i < rhs_size; ++i)
        {
            DB::transformEndianness<std::endian::native, std::endian::little>(hs[i]);
            insertHash(hs[i]);
        }
    }
@ -429,17 +429,20 @@ PreformattedMessage getCurrentExceptionMessageAndPattern(bool with_stacktrace, b
        }
        catch (...) {}

// #ifdef ABORT_ON_LOGICAL_ERROR
//    try
//    {
//        throw;
//    }
//    catch (const std::logic_error &)
//    {
//        abortOnFailedAssertion(stream.str());
//    }
//    catch (...) {}
// #endif
#ifdef ABORT_ON_LOGICAL_ERROR
        try
        {
            throw;
        }
        catch (const std::logic_error &)
        {
            if (!with_stacktrace)
                stream << ", Stack trace:\n\n" << getExceptionStackTraceString(e);

            abortOnFailedAssertion(stream.str());
        }
        catch (...) {}
#endif
    }
    catch (...)
    {
@ -451,7 +451,8 @@ The server successfully detected this situation and will download merged part fr
    M(ThreadPoolReaderPageCacheMissBytes, "Number of bytes read inside ThreadPoolReader when read was not done from page cache and was hand off to thread pool.") \
    M(ThreadPoolReaderPageCacheMissElapsedMicroseconds, "Time spent reading data inside the asynchronous job in ThreadPoolReader - when read was not done from page cache.") \
    \
    M(AsynchronousReadWaitMicroseconds, "Time spent in waiting for asynchronous reads.") \
    M(AsynchronousReadWaitMicroseconds, "Time spent in waiting for asynchronous reads in asynchronous local read.") \
    M(SynchronousReadWaitMicroseconds, "Time spent in waiting for synchronous reads in asynchronous local read.") \
    M(AsynchronousRemoteReadWaitMicroseconds, "Time spent in waiting for asynchronous remote reads.") \
    M(SynchronousRemoteReadWaitMicroseconds, "Time spent in waiting for synchronous remote reads.") \
    \
@ -462,13 +463,6 @@ The server successfully detected this situation and will download merged part fr
    M(AggregationPreallocatedElementsInHashTables, "How many elements were preallocated in hash tables for aggregation.") \
    M(AggregationHashTablesInitializedAsTwoLevel, "How many hash tables were inited as two-level for aggregation.") \
    \
    M(MergeTreeMetadataCacheGet, "Number of rocksdb reads (used for merge tree metadata cache)") \
    M(MergeTreeMetadataCachePut, "Number of rocksdb puts (used for merge tree metadata cache)") \
    M(MergeTreeMetadataCacheDelete, "Number of rocksdb deletes (used for merge tree metadata cache)") \
    M(MergeTreeMetadataCacheSeek, "Number of rocksdb seeks (used for merge tree metadata cache)") \
    M(MergeTreeMetadataCacheHit, "Number of times the read of meta file was done from MergeTree metadata cache") \
    M(MergeTreeMetadataCacheMiss, "Number of times the read of meta file was not done from MergeTree metadata cache") \
    \
    M(KafkaRebalanceRevocations, "Number of partition revocations (the first stage of consumer group rebalance)") \
    M(KafkaRebalanceAssignments, "Number of partition assignments (the final stage of consumer group rebalance)") \
    M(KafkaRebalanceErrors, "Number of failed consumer group rebalances") \
@ -576,6 +570,23 @@ Counters global_counters(global_counters_array);
const Event Counters::num_counters = END;


Timer::Timer(Counters & counters_, Event timer_event_, Resolution resolution_)
    : counters(counters_), timer_event(timer_event_), resolution(resolution_)
{
}

Timer::Timer(Counters & counters_, Event timer_event_, Event counter_event, Resolution resolution_)
    : Timer(counters_, timer_event_, resolution_)
{
    counters.increment(counter_event);
}

void Timer::end()
{
    counters.increment(timer_event, watch.elapsedNanoseconds() / static_cast<UInt64>(resolution));
    watch.reset();
}

Counters::Counters(VariableContext level_, Counters * parent_)
    : counters_holder(new Counter[num_counters] {}),
    parent(parent_),
@ -1,6 +1,7 @@
#pragma once

#include <Common/VariableContext.h>
#include <Common/Stopwatch.h>
#include <base/types.h>
#include <base/strong_typedef.h>
#include <Poco/Message.h>
@ -26,6 +27,28 @@ namespace ProfileEvents
    /// Counters - how many times each event happened
    extern Counters global_counters;

    class Timer
    {
    public:
        enum class Resolution : UInt64
        {
            Nanoseconds = 1,
            Microseconds = 1000,
            Milliseconds = 1000000,
        };
        Timer(Counters & counters_, Event timer_event_, Resolution resolution_);
        Timer(Counters & counters_, Event timer_event_, Event counter_event, Resolution resolution_);
        ~Timer() { end(); }
        void cancel() { watch.reset(); }
        void end();

    private:
        Counters & counters;
        Event timer_event;
        Stopwatch watch;
        Resolution resolution;
    };

    class Counters
    {
    private:
@ -103,6 +126,24 @@ namespace ProfileEvents
        /// Set all counters to zero
        void resetCounters();

        /// Add elapsed time to `timer_event` when returned object goes out of scope.
        /// Use the template parameter to control timer resolution, the default
        /// is `Timer::Resolution::Microseconds`.
        template <Timer::Resolution resolution = Timer::Resolution::Microseconds>
        Timer timer(Event timer_event)
        {
            return Timer(*this, timer_event, resolution);
        }

        /// Increment `counter_event` and add elapsed time to `timer_event` when returned object goes out of scope.
        /// Use the template parameter to control timer resolution, the default
        /// is `Timer::Resolution::Microseconds`.
        template <Timer::Resolution resolution = Timer::Resolution::Microseconds>
        Timer timer(Event timer_event, Event counter_event)
        {
            return Timer(*this, timer_event, counter_event, resolution);
        }

        static const Event num_counters;
    };
@ -7,8 +7,6 @@
#include <Common/StringUtils/StringUtils.h>
#include <Common/logger_useful.h>

#include <Interpreters/Context.h>

namespace DB
{

@ -143,13 +141,9 @@ namespace
    }
}

std::shared_ptr<ProxyConfigurationResolver> ProxyConfigurationResolverProvider::get(Protocol protocol)
std::shared_ptr<ProxyConfigurationResolver> ProxyConfigurationResolverProvider::get(Protocol protocol, const Poco::Util::AbstractConfiguration & configuration)
{
    auto context = Context::getGlobalContextInstance();

    chassert(context);

    if (auto resolver = getFromSettings(protocol, "", context->getConfigRef()))
    if (auto resolver = getFromSettings(protocol, "", configuration))
    {
        return resolver;
    }
@ -202,7 +196,7 @@ std::shared_ptr<ProxyConfigurationResolver> ProxyConfigurationResolverProvider::
     * In case the combination of config_prefix and configuration does not provide a resolver, try to get it from general / new settings.
     * Falls back to Environment resolver if no configuration is found.
     * */
    return ProxyConfigurationResolverProvider::get(Protocol::ANY);
    return ProxyConfigurationResolverProvider::get(Protocol::ANY, configuration);
}

}
@ -18,7 +18,9 @@ public:
     * Returns appropriate ProxyConfigurationResolver based on current CH settings (Remote resolver or List resolver).
     * If no configuration is found, returns Environment Resolver.
     * */
    static std::shared_ptr<ProxyConfigurationResolver> get(Protocol protocol);
    static std::shared_ptr<ProxyConfigurationResolver> get(
        Protocol protocol,
        const Poco::Util::AbstractConfiguration & configuration);

    /*
     * This API exists exclusively for backward compatibility with old S3 storage specific proxy configuration.
@ -6,6 +6,10 @@
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>

namespace DB::ErrorCodes
{
    extern const int ALL_CONNECTION_TRIES_FAILED;
}

using namespace mysqlxx;

@ -191,10 +195,6 @@ PoolWithFailover::Entry PoolWithFailover::get()
    }

    DB::WriteBufferFromOwnString message;
    if (replicas_by_priority.size() > 1)
        message << "Connections to all mysql replicas failed: ";
    else
        message << "Connections to mysql failed: ";

    for (auto it = replicas_by_priority.begin(); it != replicas_by_priority.end(); ++it)
    {
@ -211,5 +211,10 @@ PoolWithFailover::Entry PoolWithFailover::get()
        }
    }

    throw Poco::Exception(message.str());

    if (replicas_by_priority.size() > 1)
        throw DB::Exception(DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED, "Connections to all mysql replicas failed: {}", message.str());
    else
        throw DB::Exception(DB::ErrorCodes::ALL_CONNECTION_TRIES_FAILED, "Connections to mysql failed: {}", message.str());
}
@ -32,9 +32,10 @@ Poco::URI https_list_proxy_server = Poco::URI("http://https_list_proxy:3128");
TEST_F(ProxyConfigurationResolverProviderTests, EnvironmentResolverShouldBeUsedIfNoSettings)
{
    EnvironmentProxySetter setter(http_env_proxy_server, https_env_proxy_server);
    const auto & config = getContext().context->getConfigRef();

    auto http_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP)->resolve();
    auto https_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTPS)->resolve();
    auto http_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP, config)->resolve();
    auto https_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTPS, config)->resolve();

    ASSERT_EQ(http_configuration.host, http_env_proxy_server.getHost());
    ASSERT_EQ(http_configuration.port, http_env_proxy_server.getPort());
@ -54,13 +55,13 @@ TEST_F(ProxyConfigurationResolverProviderTests, ListHTTPOnly)
    config->setString("proxy.http.uri", http_list_proxy_server.toString());
    context->setConfig(config);

    auto http_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP)->resolve();
    auto http_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP, *config)->resolve();

    ASSERT_EQ(http_proxy_configuration.host, http_list_proxy_server.getHost());
    ASSERT_EQ(http_proxy_configuration.port, http_list_proxy_server.getPort());
    ASSERT_EQ(http_proxy_configuration.protocol, DB::ProxyConfiguration::protocolFromString(http_list_proxy_server.getScheme()));

    auto https_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTPS)->resolve();
    auto https_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTPS, *config)->resolve();

    // No https configuration since it's not set
    ASSERT_EQ(https_proxy_configuration.host, "");
@ -76,12 +77,12 @@ TEST_F(ProxyConfigurationResolverProviderTests, ListHTTPSOnly)
    config->setString("proxy.https.uri", https_list_proxy_server.toString());
    context->setConfig(config);

    auto http_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP)->resolve();
    auto http_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP, *config)->resolve();

    ASSERT_EQ(http_proxy_configuration.host, "");
    ASSERT_EQ(http_proxy_configuration.port, 0);

    auto https_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTPS)->resolve();
    auto https_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTPS, *config)->resolve();

    ASSERT_EQ(https_proxy_configuration.host, https_list_proxy_server.getHost());

@ -104,13 +105,13 @@ TEST_F(ProxyConfigurationResolverProviderTests, ListBoth)

    context->setConfig(config);

    auto http_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP)->resolve();
    auto http_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTP, *config)->resolve();

    ASSERT_EQ(http_proxy_configuration.host, http_list_proxy_server.getHost());
    ASSERT_EQ(http_proxy_configuration.protocol, DB::ProxyConfiguration::protocolFromString(http_list_proxy_server.getScheme()));
    ASSERT_EQ(http_proxy_configuration.port, http_list_proxy_server.getPort());

    auto https_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTPS)->resolve();
    auto https_proxy_configuration = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::Protocol::HTTPS, *config)->resolve();

    ASSERT_EQ(https_proxy_configuration.host, https_list_proxy_server.getHost());
@ -469,7 +469,7 @@ void BaseSettings<TTraits>::write(WriteBuffer & out, SettingsWriteFormat format)
{
    const auto & accessor = Traits::Accessor::instance();

    for (auto field : *this)
    for (const auto & field : *this)
    {
        bool is_custom = field.isCustom();
        bool is_important = !is_custom && accessor.isImportant(field.index);
@ -870,7 +870,7 @@ NearestFieldType<std::decay_t<T>> & Field::get()
    // Disregard signedness when converting between int64 types.
    constexpr Field::Types::Which target = TypeToEnum<StoredType>::value;
    if (target != which
        && (!isInt64OrUInt64orBoolFieldType(target) || !isInt64OrUInt64orBoolFieldType(which)))
        && (!isInt64OrUInt64orBoolFieldType(target) || !isInt64OrUInt64orBoolFieldType(which)) && target != Field::Types::IPv4)
        throw Exception(ErrorCodes::LOGICAL_ERROR,
                        "Invalid Field get from type {} to type {}", which, target);
#endif
@ -25,7 +25,7 @@ void ServerSettings::loadSettingsFromConfig(const Poco::Util::AbstractConfigurat
        "max_remote_write_network_bandwidth_for_server",
    };

    for (auto setting : all())
    for (const auto & setting : all())
    {
        const auto & name = setting.getName();
        if (config.has(name))
@ -87,7 +87,7 @@ void Settings::checkNoSettingNamesAtTopLevel(const Poco::Util::AbstractConfigura
        return;

    Settings settings;
    for (auto setting : settings.all())
    for (const auto & setting : settings.all())
    {
        const auto & name = setting.getName();
        if (config.has(name) && !setting.isObsolete())
@ -712,15 +712,15 @@ class IColumn;
    \
    M(Bool, load_marks_asynchronously, false, "Load MergeTree marks asynchronously", 0) \
    M(Bool, enable_filesystem_read_prefetches_log, false, "Log to system.filesystem_prefetch_log during query. Should be used only for testing or debugging, not recommended to be turned on by default", 0) \
    M(Bool, allow_prefetched_read_pool_for_remote_filesystem, false, "Prefer prefetched threadpool if all parts are on remote filesystem", 0) \
    M(Bool, allow_prefetched_read_pool_for_remote_filesystem, true, "Prefer prefetched threadpool if all parts are on remote filesystem", 0) \
    M(Bool, allow_prefetched_read_pool_for_local_filesystem, false, "Prefer prefetched threadpool if all parts are on local filesystem", 0) \
    \
    M(UInt64, prefetch_buffer_size, DBMS_DEFAULT_BUFFER_SIZE, "The maximum size of the prefetch buffer to read from the filesystem.", 0) \
    M(UInt64, filesystem_prefetch_step_bytes, 0, "Prefetch step in bytes. Zero means `auto` - approximately the best prefetch step will be auto deduced, but might not be 100% the best. The actual value might be different because of setting filesystem_prefetch_min_bytes_for_single_read_task", 0) \
    M(UInt64, filesystem_prefetch_step_marks, 0, "Prefetch step in marks. Zero means `auto` - approximately the best prefetch step will be auto deduced, but might not be 100% the best. The actual value might be different because of setting filesystem_prefetch_min_bytes_for_single_read_task", 0) \
    M(UInt64, filesystem_prefetch_min_bytes_for_single_read_task, "8Mi", "Do not parallelize within one file read less than this amount of bytes. E.g. one reader will not receive a read task of size less than this amount. This setting is recommended to avoid spikes of time for aws getObject requests to aws", 0) \
    M(UInt64, filesystem_prefetch_max_memory_usage, "1Gi", "Maximum memory usage for prefetches", 0) \
    M(UInt64, filesystem_prefetches_limit, 0, "Maximum number of prefetches. Zero means unlimited. A setting `filesystem_prefetches_max_memory_usage` is more recommended if you want to limit the number of prefetches", 0) \
    M(UInt64, filesystem_prefetch_max_memory_usage, "1Gi", "Maximum memory usage for prefetches.", 0) \
    M(UInt64, filesystem_prefetches_limit, 200, "Maximum number of prefetches. Zero means unlimited. A setting `filesystem_prefetches_max_memory_usage` is more recommended if you want to limit the number of prefetches", 0) \
    \
    M(UInt64, use_structure_from_insertion_table_in_table_functions, 2, "Use structure from insertion table instead of schema inference from data. Possible values: 0 - disabled, 1 - enabled, 2 - auto", 0) \
    \
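The hunk above turns `allow_prefetched_read_pool_for_remote_filesystem` on by default and raises the default `filesystem_prefetches_limit` from 0 (unlimited) to 200. A hedged sketch of restoring the previous behaviour per session, using only setting names that appear in the diff:

``` sql
SET allow_prefetched_read_pool_for_remote_filesystem = 0;
SET filesystem_prefetches_limit = 0; -- 0 means unlimited, the former default
```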
@ -783,6 +783,7 @@ class IColumn;
    M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \
    M(Bool, allow_experimental_annoy_index, false, "Allows to use Annoy index. Disabled by default because this feature is experimental", 0) \
    M(Bool, allow_experimental_usearch_index, false, "Allows to use USearch index. Disabled by default because this feature is experimental", 0) \
    M(Bool, allow_experimental_s3queue, false, "Allows to use S3Queue engine. Disabled by default, because this feature is experimental", 0) \
    M(UInt64, max_limit_for_ann_queries, 1'000'000, "SELECT queries with LIMIT bigger than this setting cannot use ANN indexes. Helps to prevent memory overflows in ANN search indexes.", 0) \
    M(Int64, annoy_index_search_k_nodes, -1, "SELECT queries search up to this many nodes in Annoy indexes.", 0) \
    M(Bool, throw_on_unsupported_query_inside_transaction, true, "Throw exception if unsupported query is used inside transaction", 0) \
@ -143,16 +143,16 @@ void SerializationUUID::serializeBinaryBulk(const IColumn & column, WriteBuffer
    if (limit == 0)
        return;

    if constexpr (std::endian::native == std::endian::big)
    {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunreachable-code"
    if constexpr (std::endian::native == std::endian::big)
    {
        for (size_t i = offset; i < offset + limit; ++i)
            writeBinaryLittleEndian(x[i], ostr);
#pragma clang diagnostic pop
    }
    else
        ostr.write(reinterpret_cast<const char *>(&x[offset]), sizeof(UUID) * limit);
#pragma clang diagnostic pop
}

void SerializationUUID::deserializeBinaryBulk(IColumn & column, ReadBuffer & istr, size_t limit, double /*avg_value_size_hint*/) const
@ -324,7 +324,7 @@ ReservationPtr DiskEncrypted::reserve(UInt64 bytes)
}


void DiskEncrypted::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir)
void DiskEncrypted::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir, const WriteSettings & settings)
{
    /// Check if we can copy the file without deciphering.
    if (isSameDiskType(*this, *to_disk))
@ -340,14 +340,14 @@ void DiskEncrypted::copyDirectoryContent(const String & from_dir, const std::sha
            auto wrapped_from_path = wrappedPath(from_dir);
            auto to_delegate = to_disk_enc->delegate;
            auto wrapped_to_path = to_disk_enc->wrappedPath(to_dir);
            delegate->copyDirectoryContent(wrapped_from_path, to_delegate, wrapped_to_path);
            delegate->copyDirectoryContent(wrapped_from_path, to_delegate, wrapped_to_path, settings);
            return;
        }
    }
}

    /// Copy the file through buffers with deciphering.
    IDisk::copyDirectoryContent(from_dir, to_disk, to_dir);
    IDisk::copyDirectoryContent(from_dir, to_disk, to_dir, settings);
}

std::unique_ptr<ReadBufferFromFileBase> DiskEncrypted::readFile(
@ -112,7 +112,7 @@ public:
        delegate->listFiles(wrapped_path, file_names);
    }

    void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir) override;
    void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir, const WriteSettings & settings) override;

    std::unique_ptr<ReadBufferFromFileBase> readFile(
        const String & path,
@ -53,11 +53,11 @@ String DiskEncryptedSettings::findKeyByFingerprint(UInt128 key_fingerprint, cons
    return it->second;
}

void DiskEncryptedTransaction::copyFile(const std::string & from_file_path, const std::string & to_file_path)
void DiskEncryptedTransaction::copyFile(const std::string & from_file_path, const std::string & to_file_path, const WriteSettings & settings)
{
    auto wrapped_from_path = wrappedPath(from_file_path);
    auto wrapped_to_path = wrappedPath(to_file_path);
    delegate_transaction->copyFile(wrapped_from_path, wrapped_to_path);
    delegate_transaction->copyFile(wrapped_from_path, wrapped_to_path, settings);
}

std::unique_ptr<WriteBufferFromFileBase> DiskEncryptedTransaction::writeFile( // NOLINT
@ -116,7 +116,7 @@ public:
    /// but it's impossible to implement correctly in transactions because other disk can
    /// use different metadata storage.
    /// TODO: maybe remove it at all, we don't want copies
    void copyFile(const std::string & from_file_path, const std::string & to_file_path) override;
    void copyFile(const std::string & from_file_path, const std::string & to_file_path, const WriteSettings & settings) override;

    /// Open the file for write and return WriteBufferFromFileBase object.
    std::unique_ptr<WriteBufferFromFileBase> writeFile( /// NOLINT
@ -432,12 +432,13 @@ bool inline isSameDiskType(const IDisk & one, const IDisk & another)
    return typeid(one) == typeid(another);
}

void DiskLocal::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir)
void DiskLocal::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir, const WriteSettings & settings)
{
    if (isSameDiskType(*this, *to_disk))
    /// If throttling was configured we cannot use copying directly.
    if (isSameDiskType(*this, *to_disk) && !settings.local_throttler)
        fs::copy(fs::path(disk_path) / from_dir, fs::path(to_disk->getPath()) / to_dir, fs::copy_options::recursive | fs::copy_options::overwrite_existing); /// Use more optimal way.
    else
        IDisk::copyDirectoryContent(from_dir, to_disk, to_dir);
        IDisk::copyDirectoryContent(from_dir, to_disk, to_dir, settings);
}

SyncGuardPtr DiskLocal::getDirectorySyncGuard(const String & path) const
@ -65,7 +65,7 @@ public:

    void replaceFile(const String & from_path, const String & to_path) override;

    void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir) override;
    void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir, const WriteSettings & settings) override;

    void listFiles(const String & path, std::vector<String> & file_names) const override;

@ -54,9 +54,9 @@ public:
        disk.replaceFile(from_path, to_path);
    }

    void copyFile(const std::string & from_file_path, const std::string & to_file_path) override
    void copyFile(const std::string & from_file_path, const std::string & to_file_path, const WriteSettings & settings) override
    {
        disk.copyFile(from_file_path, disk, to_file_path);
        disk.copyFile(from_file_path, disk, to_file_path, settings);
    }

    std::unique_ptr<WriteBufferFromFileBase> writeFile( /// NOLINT
@ -3,6 +3,7 @@
#include <IO/WriteBufferFromFileBase.h>
#include <IO/copyData.h>
#include <Poco/Logger.h>
#include <Interpreters/Context.h>
#include <Common/logger_useful.h>
#include <Common/setThreadName.h>
#include <Core/ServerUUID.h>
@ -122,11 +123,10 @@ void asyncCopy(IDisk & from_disk, String from_path, IDisk & to_disk, String to_p
    }
}

void IDisk::copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path, bool copy_root_dir)
void IDisk::copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path, bool copy_root_dir, WriteSettings settings)
{
    ResultsCollector results;

    WriteSettings settings;
    /// Disable parallel write. We already copy in parallel.
    /// Avoid high memory usage. See test_s3_zero_copy_ttl/test.py::test_move_and_s3_memory_usage
    settings.s3_allow_parallel_part_upload = false;
@ -140,12 +140,12 @@ void IDisk::copyThroughBuffers(const String & from_path, const std::shared_ptr<I
}


void IDisk::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir)
void IDisk::copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir, const WriteSettings & settings)
{
    if (!to_disk->exists(to_dir))
        to_disk->createDirectories(to_dir);

    copyThroughBuffers(from_dir, to_disk, to_dir, /* copy_root_dir */ false);
    copyThroughBuffers(from_dir, to_disk, to_dir, /* copy_root_dir= */ false, settings);
}

void IDisk::truncateFile(const String &, size_t)
@ -193,7 +193,7 @@ public:
    virtual void replaceFile(const String & from_path, const String & to_path) = 0;

    /// Recursively copy files from from_dir to to_dir. Create to_dir if not exists.
    virtual void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir);
    virtual void copyDirectoryContent(const String & from_dir, const std::shared_ptr<IDisk> & to_disk, const String & to_dir, const WriteSettings & settings);

    /// Copy file `from_file_path` to `to_file_path` located at `to_disk`.
    virtual void copyFile( /// NOLINT
@ -470,7 +470,7 @@ protected:
    /// Base implementation of the function copy().
    /// It just opens two files, reads data by portions from the first file, and writes it to the second one.
    /// A derived class may override copy() to provide a faster implementation.
    void copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path, bool copy_root_dir = true);
    void copyThroughBuffers(const String & from_path, const std::shared_ptr<IDisk> & to_disk, const String & to_path, bool copy_root_dir, WriteSettings settings);

    virtual void checkAccessImpl(const String & path);

@ -59,7 +59,7 @@ public:
    /// but it's impossible to implement correctly in transactions because other disk can
    /// use different metadata storage.
    /// TODO: maybe remove it at all, we don't want copies
    virtual void copyFile(const std::string & from_file_path, const std::string & to_file_path) = 0;
    virtual void copyFile(const std::string & from_file_path, const std::string & to_file_path, const WriteSettings & settings = {}) = 0;

    /// Open the file for write and return WriteBufferFromFileBase object.
    virtual std::unique_ptr<WriteBufferFromFileBase> writeFile( /// NOLINT
@ -81,8 +81,7 @@ bool AsynchronousBoundedReadBuffer::hasPendingDataToRead()
    return true;
}

std::future<IAsynchronousReader::Result>
AsynchronousBoundedReadBuffer::asyncReadInto(char * data, size_t size, Priority priority)
std::future<IAsynchronousReader::Result> AsynchronousBoundedReadBuffer::readAsync(char * data, size_t size, Priority priority)
{
    IAsynchronousReader::Request request;
    request.descriptor = std::make_shared<RemoteFSFileDescriptor>(*impl, async_read_counters);
@ -94,6 +93,17 @@ AsynchronousBoundedReadBuffer::asyncReadInto(char * data, size_t size, Priority
    return reader.submit(request);
}

IAsynchronousReader::Result AsynchronousBoundedReadBuffer::readSync(char * data, size_t size)
{
    IAsynchronousReader::Request request;
    request.descriptor = std::make_shared<RemoteFSFileDescriptor>(*impl, async_read_counters);
    request.buf = data;
    request.size = size;
    request.offset = file_offset_of_buffer_end;
    request.ignore = bytes_to_ignore;
    return reader.execute(request);
}

void AsynchronousBoundedReadBuffer::prefetch(Priority priority)
{
    if (prefetch_future.valid())
@ -106,7 +116,7 @@ void AsynchronousBoundedReadBuffer::prefetch(Priority priority)
    last_prefetch_info.priority = priority;

    chassert(prefetch_buffer.size() == chooseBufferSizeForRemoteReading(read_settings, impl->getFileSize()));
    prefetch_future = asyncReadInto(prefetch_buffer.data(), prefetch_buffer.size(), priority);
    prefetch_future = readAsync(prefetch_buffer.data(), prefetch_buffer.size(), priority);
    ProfileEvents::increment(ProfileEvents::RemoteFSPrefetches);
}

@ -178,58 +188,55 @@ bool AsynchronousBoundedReadBuffer::nextImpl()

    chassert(file_offset_of_buffer_end <= impl->getFileSize());

    size_t size, offset;
    IAsynchronousReader::Result result;
    if (prefetch_future.valid())
    {
        ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::AsynchronousRemoteReadWaitMicroseconds);
        CurrentMetrics::Increment metric_increment{CurrentMetrics::AsynchronousReadWait};
        {
            ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::AsynchronousRemoteReadWaitMicroseconds);
            CurrentMetrics::Increment metric_increment{CurrentMetrics::AsynchronousReadWait};

            auto result = prefetch_future.get();
            size = result.size;
            offset = result.offset;
            result = prefetch_future.get();
        }

        prefetch_future = {};
        prefetch_buffer.swap(memory);

        if (read_settings.enable_filesystem_read_prefetches_log)
        {
            appendToPrefetchLog(FilesystemPrefetchState::USED, size, result.execution_watch);
        }
            appendToPrefetchLog(FilesystemPrefetchState::USED, result.size, result.execution_watch);

        last_prefetch_info = {};

        ProfileEvents::increment(ProfileEvents::RemoteFSPrefetchedReads);
        ProfileEvents::increment(ProfileEvents::RemoteFSPrefetchedBytes, size);
        ProfileEvents::increment(ProfileEvents::RemoteFSPrefetchedBytes, result.size);
    }
    else
    {
        ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::SynchronousRemoteReadWaitMicroseconds);

        chassert(memory.size() == chooseBufferSizeForRemoteReading(read_settings, impl->getFileSize()));
        std::tie(size, offset) = impl->readInto(memory.data(), memory.size(), file_offset_of_buffer_end, bytes_to_ignore);

        {
            ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::SynchronousRemoteReadWaitMicroseconds);
            result = readSync(memory.data(), memory.size());
        }

        ProfileEvents::increment(ProfileEvents::RemoteFSUnprefetchedReads);
        ProfileEvents::increment(ProfileEvents::RemoteFSUnprefetchedBytes, size);
        ProfileEvents::increment(ProfileEvents::RemoteFSUnprefetchedBytes, result.size);
    }

    bytes_to_ignore = 0;

    chassert(size >= offset);

    size_t bytes_read = size - offset;
    size_t bytes_read = result.size - result.offset;
    if (bytes_read)
    {
        /// Adjust the working buffer so that it ignores `offset` bytes.
        internal_buffer = Buffer(memory.data(), memory.data() + memory.size());
        working_buffer = Buffer(memory.data() + offset, memory.data() + size);
        working_buffer = Buffer(memory.data() + result.offset, memory.data() + result.size);
        pos = working_buffer.begin();
    }

    file_offset_of_buffer_end = impl->getFileOffsetOfBufferEnd();
    bytes_to_ignore = 0;

    /// In case of multiple files for the same file in clickhouse (i.e. log family)
    /// file_offset_of_buffer_end will not match getImplementationBufferOffset()
    /// so we use [impl->getImplementationBufferOffset(), impl->getFileSize()]
    chassert(file_offset_of_buffer_end >= impl->getFileOffsetOfBufferEnd());
    chassert(file_offset_of_buffer_end <= impl->getFileSize());

    return bytes_read;
@ -304,7 +311,8 @@ off_t AsynchronousBoundedReadBuffer::seek(off_t offset, int whence)

    if (read_until_position && new_pos > *read_until_position)
    {
        ProfileEvents::increment(ProfileEvents::RemoteFSSeeksWithReset);
        if (!impl->seekIsCheap())
            ProfileEvents::increment(ProfileEvents::RemoteFSSeeksWithReset);
        file_offset_of_buffer_end = new_pos = *read_until_position; /// read_until_position is a non-included boundary.
        impl->seek(file_offset_of_buffer_end, SEEK_SET);
        return new_pos;
@ -322,7 +330,8 @@ off_t AsynchronousBoundedReadBuffer::seek(off_t offset, int whence)
    }
    else
    {
        ProfileEvents::increment(ProfileEvents::RemoteFSSeeksWithReset);
        if (!impl->seekIsCheap())
            ProfileEvents::increment(ProfileEvents::RemoteFSSeeksWithReset);
        file_offset_of_buffer_end = new_pos;
        impl->seek(file_offset_of_buffer_end, SEEK_SET);
    }

@ -90,7 +90,9 @@ private:
        int64_t size,
        const std::unique_ptr<Stopwatch> & execution_watch);

    std::future<IAsynchronousReader::Result> asyncReadInto(char * data, size_t size, Priority priority);
    std::future<IAsynchronousReader::Result> readAsync(char * data, size_t size, Priority priority);

    IAsynchronousReader::Result readSync(char * data, size_t size);

    void resetPrefetch(FilesystemPrefetchState state);

@ -1205,13 +1205,6 @@ off_t CachedOnDiskReadBufferFromFile::getPosition()
    return file_offset_of_buffer_end - available();
}

void CachedOnDiskReadBufferFromFile::assertCorrectness() const
{
    if (!CachedObjectStorage::canUseReadThroughCache(settings)
        && !settings.read_from_filesystem_cache_if_exists_otherwise_bypass_cache)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Cache usage is not allowed (query_id: {})", query_id);
}

String CachedOnDiskReadBufferFromFile::getInfoForLog()
{
    String current_file_segment_info;
@ -64,7 +64,6 @@ private:
    using ImplementationBufferPtr = std::shared_ptr<ReadBufferFromFileBase>;

    void initialize(size_t offset, size_t size);
    void assertCorrectness() const;

    /**
     * Return a list of file segments ordered in ascending order. This list represents
@ -108,6 +108,10 @@ bool FileSegmentRangeWriter::write(const char * data, size_t size, size_t offset
        data += size_to_write;
    }

    size_t available_size = file_segment->range().size() - file_segment->getDownloadedSize();
    if (available_size == 0)
        completeFileSegment();

    return true;
}

@ -196,15 +200,16 @@ CachedOnDiskWriteBufferFromFile::CachedOnDiskWriteBufferFromFile(
    const String & source_path_,
    const FileCache::Key & key_,
    const String & query_id_,
    const WriteSettings & settings_)
    const WriteSettings & settings_,
    std::shared_ptr<FilesystemCacheLog> cache_log_)
    : WriteBufferFromFileDecorator(std::move(impl_))
    , log(&Poco::Logger::get("CachedOnDiskWriteBufferFromFile"))
    , cache(cache_)
    , source_path(source_path_)
    , key(key_)
    , query_id(query_id_)
    , enable_cache_log(!query_id_.empty() && settings_.enable_filesystem_cache_log)
    , throw_on_error_from_cache(settings_.throw_on_error_from_cache)
    , cache_log(!query_id_.empty() && settings_.enable_filesystem_cache_log ? cache_log_ : nullptr)
{
}

@ -241,10 +246,6 @@ void CachedOnDiskWriteBufferFromFile::cacheData(char * data, size_t size, bool t

    if (!cache_writer)
    {
        std::shared_ptr<FilesystemCacheLog> cache_log;
        if (enable_cache_log)
            cache_log = Context::getGlobalContextInstance()->getFilesystemCacheLog();

        cache_writer = std::make_unique<FileSegmentRangeWriter>(cache.get(), key, cache_log, query_id, source_path);
    }

@ -73,7 +73,8 @@ public:
    const String & source_path_,
    const FileCache::Key & key_,
    const String & query_id_,
    const WriteSettings & settings_);
    const WriteSettings & settings_,
    std::shared_ptr<FilesystemCacheLog> cache_log_);

    void nextImpl() override;

@ -91,12 +92,11 @@ private:
    size_t current_download_offset = 0;
    const String query_id;

    bool enable_cache_log;

    bool throw_on_error_from_cache;
    bool cache_in_error_state_or_disabled = false;

    std::unique_ptr<FileSegmentRangeWriter> cache_writer;
    std::shared_ptr<FilesystemCacheLog> cache_log;
};

}
@ -15,6 +15,10 @@ namespace Poco { class Logger; }

namespace DB
{
namespace ErrorCodes
{
    extern const int NOT_IMPLEMENTED;
}

class Exception;

@ -76,6 +80,7 @@ public:

    inline bool isSupported() { return is_supported; }
    std::future<Result> submit(Request request) override;
    Result execute(Request /* request */) override { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method `execute` not implemented for IOUringReader"); }

    void wait() override {}

@ -116,25 +116,6 @@ void ReadBufferFromRemoteFSGather::appendUncachedReadInfo()
    cache_log->add(std::move(elem));
}

IAsynchronousReader::Result ReadBufferFromRemoteFSGather::readInto(char * data, size_t size, size_t offset, size_t ignore)
{
    /**
     * Set `data` to current working and internal buffers.
     * Internal buffer with size `size`. Working buffer with size 0.
     */
    set(data, size);

    file_offset_of_buffer_end = offset;
    bytes_to_ignore = ignore;

    const auto result = nextImpl();

    if (result)
        return { working_buffer.size(), BufferBase::offset(), nullptr };

    return {0, 0, nullptr};
}

void ReadBufferFromRemoteFSGather::initialize()
{
    if (blobs_to_read.empty())
@ -204,39 +185,14 @@ bool ReadBufferFromRemoteFSGather::readImpl()
{
    SwapHelper swap(*this, *current_buf);

    bool result = false;

    /**
     * Lazy seek is performed here.
     * In asynchronous buffer when seeking to offset in range [pos, pos + min_bytes_for_seek]
     * we save how many bytes need to be ignored (new_offset - position() bytes).
     */
    if (bytes_to_ignore)
    {
        current_buf->ignore(bytes_to_ignore);
        result = current_buf->hasPendingData();
        file_offset_of_buffer_end += bytes_to_ignore;
        bytes_to_ignore = 0;
    }

    if (!result)
        result = current_buf->next();

    if (blobs_to_read.size() == 1)
    {
        file_offset_of_buffer_end = current_buf->getFileOffsetOfBufferEnd();
    }
    else
    {
        /// For log family engines there are multiple s3 files for the same clickhouse file
        file_offset_of_buffer_end += current_buf->available();
    }

    /// Required for non-async reads.
    bool result = current_buf->next();
    if (result)
    {
        assert(current_buf->available());
        file_offset_of_buffer_end += current_buf->available();
        nextimpl_working_buffer_offset = current_buf->offset();

        chassert(current_buf->available());
        chassert(blobs_to_read.size() != 1 || file_offset_of_buffer_end == current_buf->getFileOffsetOfBufferEnd());
    }

    return result;
@ -256,7 +212,6 @@ void ReadBufferFromRemoteFSGather::reset()
    current_object = {};
    current_buf_idx = {};
    current_buf.reset();
    bytes_to_ignore = 0;
}

off_t ReadBufferFromRemoteFSGather::seek(off_t offset, int whence)
@ -40,15 +40,13 @@ public:

    void setReadUntilEnd() override { return setReadUntilPosition(getFileSize()); }

    IAsynchronousReader::Result readInto(char * data, size_t size, size_t offset, size_t ignore) override;

    size_t getFileSize() override { return getTotalSize(blobs_to_read); }

    size_t getFileOffsetOfBufferEnd() const override { return file_offset_of_buffer_end; }

    off_t seek(off_t offset, int whence) override;

    off_t getPosition() override { return file_offset_of_buffer_end - available() + bytes_to_ignore; }
    off_t getPosition() override { return file_offset_of_buffer_end - available(); }

    bool seekIsCheap() override { return !current_buf; }

@ -77,7 +75,6 @@ private:

    size_t read_until_position = 0;
    size_t file_offset_of_buffer_end = 0;
    size_t bytes_to_ignore = 0;

    StoredObject current_object;
    size_t current_buf_idx = 0;
@ -8,6 +8,10 @@

namespace DB
{
namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
}

/** Perform reads from separate thread pool of specified size.
*
@ -36,6 +40,8 @@ public:

std::future<Result> submit(Request request) override;

Result execute(Request /* request */) override { throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Method `execute` not implemented for ThreadpoolReader"); }

void wait() override;

/// pool automatically waits for all tasks in destructor.
@ -56,14 +56,10 @@ namespace
};
}

IAsynchronousReader::Result RemoteFSFileDescriptor::readInto(char * data, size_t size, size_t offset, size_t ignore)
{
return reader.readInto(data, size, offset, ignore);
}


ThreadPoolRemoteFSReader::ThreadPoolRemoteFSReader(size_t pool_size, size_t queue_size_)
: pool(std::make_unique<ThreadPool>(CurrentMetrics::ThreadPoolRemoteFSReaderThreads, CurrentMetrics::ThreadPoolRemoteFSReaderThreadsActive, pool_size, pool_size, queue_size_))
: pool(std::make_unique<ThreadPool>(CurrentMetrics::ThreadPoolRemoteFSReaderThreads,
CurrentMetrics::ThreadPoolRemoteFSReaderThreadsActive,
pool_size, pool_size, queue_size_))
{
}

@ -71,23 +67,46 @@ ThreadPoolRemoteFSReader::ThreadPoolRemoteFSReader(size_t pool_size, size_t queu
std::future<IAsynchronousReader::Result> ThreadPoolRemoteFSReader::submit(Request request)
{
ProfileEventTimeIncrement<Microseconds> elapsed(ProfileEvents::ThreadpoolReaderSubmit);
return scheduleFromThreadPool<Result>([request]() -> Result
return scheduleFromThreadPool<Result>([request, this]() -> Result { return execute(request); },
*pool,
"VFSRead",
request.priority);
}

IAsynchronousReader::Result ThreadPoolRemoteFSReader::execute(Request request)
{
CurrentMetrics::Increment metric_increment{CurrentMetrics::RemoteRead};

auto * fd = assert_cast<RemoteFSFileDescriptor *>(request.descriptor.get());
auto & reader = fd->getReader();

auto read_counters = fd->getReadCounters();
std::optional<AsyncReadIncrement> increment = read_counters ? std::optional<AsyncReadIncrement>(read_counters) : std::nullopt;

auto watch = std::make_unique<Stopwatch>(CLOCK_REALTIME);

reader.set(request.buf, request.size);
reader.seek(request.offset, SEEK_SET);
if (request.ignore)
reader.ignore(request.ignore);

bool result = reader.available();
if (!result)
result = reader.next();

watch->stop();
ProfileEvents::increment(ProfileEvents::ThreadpoolReaderTaskMicroseconds, watch->elapsedMicroseconds());

IAsynchronousReader::Result read_result;
if (result)
{
CurrentMetrics::Increment metric_increment{CurrentMetrics::RemoteRead};
auto * remote_fs_fd = assert_cast<RemoteFSFileDescriptor *>(request.descriptor.get());
read_result.size = reader.buffer().size();
read_result.offset = reader.offset();
ProfileEvents::increment(ProfileEvents::ThreadpoolReaderReadBytes, read_result.size);
}

auto async_read_counters = remote_fs_fd->getReadCounters();
std::optional<AsyncReadIncrement> increment = async_read_counters ? std::optional<AsyncReadIncrement>(async_read_counters) : std::nullopt;

auto watch = std::make_unique<Stopwatch>(CLOCK_REALTIME);
Result result = remote_fs_fd->readInto(request.buf, request.size, request.offset, request.ignore);
watch->stop();

ProfileEvents::increment(ProfileEvents::ThreadpoolReaderTaskMicroseconds, watch->elapsedMicroseconds());
ProfileEvents::increment(ProfileEvents::ThreadpoolReaderReadBytes, result.size);

return Result{ .size = result.size, .offset = result.offset, .execution_watch = std::move(watch) };
}, *pool, "VFSRead", request.priority);
read_result.execution_watch = std::move(watch);
return read_result;
}

void ThreadPoolRemoteFSReader::wait()
@ -1,7 +1,7 @@
#pragma once

#include <IO/AsynchronousReader.h>
#include <IO/ReadBuffer.h>
#include <IO/SeekableReadBuffer.h>
#include <Common/ThreadPool_fwd.h>
#include <Interpreters/threadPoolCallbackRunner.h>

@ -16,6 +16,7 @@ public:
ThreadPoolRemoteFSReader(size_t pool_size, size_t queue_size_);

std::future<IAsynchronousReader::Result> submit(Request request) override;
IAsynchronousReader::Result execute(Request request) override;

void wait() override;

@ -27,17 +28,18 @@ class RemoteFSFileDescriptor : public IAsynchronousReader::IFileDescriptor
{
public:
explicit RemoteFSFileDescriptor(
ReadBuffer & reader_,
SeekableReadBuffer & reader_,
std::shared_ptr<AsyncReadCounters> async_read_counters_)
: reader(reader_)
, async_read_counters(async_read_counters_) {}

IAsynchronousReader::Result readInto(char * data, size_t size, size_t offset, size_t ignore = 0);
SeekableReadBuffer & getReader() { return reader; }

std::shared_ptr<AsyncReadCounters> getReadCounters() const { return async_read_counters; }

private:
ReadBuffer & reader;
/// Reader is used for reading only by RemoteFSFileDescriptor.
SeekableReadBuffer & reader;
std::shared_ptr<AsyncReadCounters> async_read_counters;
};

@ -113,7 +113,8 @@ std::unique_ptr<WriteBufferFromFileBase> CachedObjectStorage::writeObject( /// N
implementation_buffer->getFileName(),
key,
CurrentThread::isInitialized() && CurrentThread::get().getQueryContext() ? std::string(CurrentThread::getQueryId()) : "",
modified_write_settings);
modified_write_settings,
Context::getGlobalContextInstance()->getFilesystemCacheLog());
}

return implementation_buffer;
@ -509,13 +509,11 @@ std::unique_ptr<WriteBufferFromFileBase> DiskObjectStorage::writeFile(
LOG_TEST(log, "Write file: {}", path);

auto transaction = createObjectStorageTransaction();
auto result = transaction->writeFile(
return transaction->writeFile(
path,
buf_size,
mode,
object_storage->getAdjustedSettingsFromMetadataFile(settings, path));

return result;
}

Strings DiskObjectStorage::getBlobPath(const String & path) const
@ -5,6 +5,7 @@
#include <ranges>
#include <Common/logger_useful.h>
#include <Common/Exception.h>
#include <base/defines.h>

#include <Disks/ObjectStorages/MetadataStorageFromDisk.h>

@ -769,8 +770,11 @@ void DiskObjectStorageTransaction::createFile(const std::string & path)
}));
}

void DiskObjectStorageTransaction::copyFile(const std::string & from_file_path, const std::string & to_file_path)
void DiskObjectStorageTransaction::copyFile(const std::string & from_file_path, const std::string & to_file_path, const WriteSettings & settings)
{
/// NOTE: For native copy we can ignore throttling, so no need to use WriteSettings
UNUSED(settings);

operations_to_execute.emplace_back(
std::make_unique<CopyFileObjectStorageOperation>(object_storage, metadata_storage, from_file_path, to_file_path));
}
@ -86,7 +86,7 @@ public:

void createFile(const String & path) override;

void copyFile(const std::string & from_file_path, const std::string & to_file_path) override;
void copyFile(const std::string & from_file_path, const std::string & to_file_path, const WriteSettings & settings) override;

/// writeFile is a difficult function for transactions.
/// Now it's almost noop because metadata added to transaction in finalize method
@ -124,6 +124,27 @@ if (ENABLE_FUZZING)
add_compile_definitions(FUZZING_MODE=1)
endif ()

if (USE_GPERF)
# Only for regenerating
add_custom_target(generate-html-char-ref-gperf ./HTMLCharacterReference.sh
SOURCES ./HTMLCharacterReference.sh
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
BYPRODUCTS "${CMAKE_CURRENT_SOURCE_DIR}/HTMLCharacterReference.gperf"
)
add_custom_target(generate-html-char-ref ${GPERF} -t HTMLCharacterReference.gperf --output-file=HTMLCharacterReference.generated.cpp
&& clang-format -i HTMLCharacterReference.generated.cpp
# for clang-tidy, since string.h is deprecated
&& sed -i 's/\#include <string.h>/\#include <cstring>/g' HTMLCharacterReference.generated.cpp
SOURCES HTMLCharacterReference.gperf
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
)
add_dependencies(generate-html-char-ref generate-html-char-ref-gperf)
if (NOT TARGET generate-source)
add_custom_target(generate-source)
endif ()
add_dependencies(generate-source generate-html-char-ref)
endif ()

target_link_libraries(clickhouse_functions_obj PUBLIC ${PUBLIC_LIBS} PRIVATE ${PRIVATE_LIBS})

# Used to forward the linking information to the final binaries such as clickhouse / unit_tests_dbms,
17877 src/Functions/HTMLCharacterReference.generated.cpp (new file; diff suppressed because it is too large)
2253 src/Functions/HTMLCharacterReference.gperf (new file; diff suppressed because it is too large)
20 src/Functions/HTMLCharacterReference.h (new file)
@ -0,0 +1,20 @@
#pragma once

#include <cstdlib>

// Definition of the class generated by gperf
class HTMLCharacterHash
{
private:
static inline unsigned int hash(const char * str, size_t len);

public:
static const struct NameAndGlyph * Lookup(const char * str, size_t len);
};

// Definition of the struct generated by gperf
struct NameAndGlyph
{
const char * name;
const char * glyph;
};
27 src/Functions/HTMLCharacterReference.sh (new executable file)
@ -0,0 +1,27 @@
#!/usr/bin/env bash

echo '%language=C++
%define class-name HTMLCharacterHash
%define lookup-function-name Lookup
%readonly-tables
%includes
%compare-strncmp
%{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
#pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant"
#pragma GCC diagnostic ignored "-Wunused-macros"
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
#pragma GCC diagnostic ignored "-Wshorten-64-to-32"
// NOLINTBEGIN(google-runtime-int,hicpp-use-nullptr,modernize-use-nullptr)
%}
struct NameAndGlyph {
const char *name;
const char *glyph;
};
%%' > HTMLCharacterReference.gperf

# character reference as available at https://html.spec.whatwg.org/multipage/named-characters.html
curl -X GET https://html.spec.whatwg.org/entities.json | jq -r 'keys[] as $k | "\"\($k)\", \(.[$k] | .characters|tojson)"' | sed 's/^"&/"/' >> HTMLCharacterReference.gperf
echo '%%' >> HTMLCharacterReference.gperf
echo '// NOLINTEND(google-runtime-int,hicpp-use-nullptr,modernize-use-nullptr)' >> HTMLCharacterReference.gperf
@ -381,25 +381,25 @@ public:
const auto & timezone_x = extractTimeZoneFromFunctionArguments(arguments, 3, 1);
const auto & timezone_y = extractTimeZoneFromFunctionArguments(arguments, 3, 2);

if (unit == "year" || unit == "yy" || unit == "yyyy")
if (unit == "year" || unit == "years" || unit == "yy" || unit == "yyyy")
impl.template dispatchForColumns<ToRelativeYearNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "quarter" || unit == "qq" || unit == "q")
else if (unit == "quarter" || unit == "quarters" || unit == "qq" || unit == "q")
impl.template dispatchForColumns<ToRelativeQuarterNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "month" || unit == "mm" || unit == "m")
else if (unit == "month" || unit == "months" || unit == "mm" || unit == "m")
impl.template dispatchForColumns<ToRelativeMonthNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "week" || unit == "wk" || unit == "ww")
else if (unit == "week" || unit == "weeks" || unit == "wk" || unit == "ww")
impl.template dispatchForColumns<ToRelativeWeekNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "day" || unit == "dd" || unit == "d")
else if (unit == "day" || unit == "days" || unit == "dd" || unit == "d")
impl.template dispatchForColumns<ToRelativeDayNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "hour" || unit == "hh" || unit == "h")
else if (unit == "hour" || unit == "hours" || unit == "hh" || unit == "h")
impl.template dispatchForColumns<ToRelativeHourNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "minute" || unit == "mi" || unit == "n")
else if (unit == "minute" || unit == "minutes" || unit == "mi" || unit == "n")
impl.template dispatchForColumns<ToRelativeMinuteNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "second" || unit == "ss" || unit == "s")
else if (unit == "second" || unit == "seconds" || unit == "ss" || unit == "s")
impl.template dispatchForColumns<ToRelativeSecondNumImpl<ResultPrecision::Extended>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "millisecond" || unit == "ms")
else if (unit == "millisecond" || unit == "milliseconds" || unit == "ms")
impl.template dispatchForColumns<ToRelativeSubsecondNumImpl<millisecond_multiplier>>(x, y, timezone_x, timezone_y, res->getData());
else if (unit == "microsecond" || unit == "us" || unit == "u")
else if (unit == "microsecond" || unit == "microseconds" || unit == "us" || unit == "u")
impl.template dispatchForColumns<ToRelativeSubsecondNumImpl<microsecond_multiplier>>(x, y, timezone_x, timezone_y, res->getData());
else
throw Exception(ErrorCodes::BAD_ARGUMENTS,
229 src/Functions/decodeHTMLComponent.cpp (new file)
@ -0,0 +1,229 @@
#include <Columns/ColumnString.h>
#include <Functions/FunctionFactory.h>
#include <Functions/FunctionStringToString.h>
#include <Functions/HTMLCharacterReference.h>
#include <base/find_symbols.h>
#include <base/hex.h>
#include <Common/StringUtils/StringUtils.h>


namespace DB
{
namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}

namespace
{
struct DecodeHTMLComponentName
{
static constexpr auto name = "decodeHTMLComponent";
};

class FunctionDecodeHTMLComponentImpl
{
public:
static void vector(
const ColumnString::Chars & data,
const ColumnString::Offsets & offsets,
ColumnString::Chars & res_data,
ColumnString::Offsets & res_offsets)
{
/// The size of result is always not more than the size of source.
/// Because entities decodes to the shorter byte sequence.
/// Example: &#xx... &#xx... will decode to UTF-8 byte sequence not longer than 4 bytes.
res_data.resize(data.size());

size_t size = offsets.size();
res_offsets.resize(size);

size_t prev_offset = 0;
size_t res_offset = 0;

for (size_t i = 0; i < size; ++i)
{
const char * src_data = reinterpret_cast<const char *>(&data[prev_offset]);
size_t src_size = offsets[i] - prev_offset;
size_t dst_size = execute(src_data, src_size, reinterpret_cast<char *>(res_data.data() + res_offset));

res_offset += dst_size;
res_offsets[i] = res_offset;
prev_offset = offsets[i];
}

res_data.resize(res_offset);
}

[[noreturn]] static void vectorFixed(const ColumnString::Chars &, size_t, ColumnString::Chars &)
{
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Function decodeHTMLComponent cannot work with FixedString argument");
}

private:
static const int max_legal_unicode_value = 0x10FFFF;
static const int max_decimal_length_of_unicode_point = 7; /// 1114111


static size_t execute(const char * src, size_t src_size, char * dst)
{
const char * src_pos = src;
const char * src_end = src + src_size;
char * dst_pos = dst;
// perfect hashmap to lookup html character references
HTMLCharacterHash hash;
// to hold char seq for lookup, reuse it
std::vector<char> seq;
while (true)
{
const char * entity_pos = find_first_symbols<'&'>(src_pos, src_end);

/// Copy text between entities.
size_t bytes_to_copy = entity_pos - src_pos;
memcpySmallAllowReadWriteOverflow15(dst_pos, src_pos, bytes_to_copy);
dst_pos += bytes_to_copy;
src_pos = entity_pos;

++entity_pos;

const char * entity_end = find_first_symbols<';'>(entity_pos, src_end);

if (entity_end == src_end)
break;

bool parsed = false;

/// covers &#NNNN; or &#xNNNN hexadecimal values;
uint32_t code_point = 0;
if (isValidNumericEntity(entity_pos, entity_end, code_point))
{
codePointToUTF8(code_point, dst_pos);
parsed = true;
}
else /// covers html encoded character sequences
{
// seq_length should also include `;` at the end
size_t seq_length = (entity_end - entity_pos) + 1;
seq.assign(entity_pos, entity_pos + seq_length);
// null terminate the sequence
seq.push_back('\0');
// lookup the html sequence in the perfect hashmap.
const auto * res = hash.Lookup(seq.data(), strlen(seq.data()));
// reset so that it's reused in the next iteration
seq.clear();
if (res)
{
const auto * glyph = res->glyph;
for (size_t i = 0; i < strlen(glyph); ++i)
{
*dst_pos = glyph[i];
++dst_pos;
}
parsed = true;
}
else
parsed = false;
}

if (parsed)
{
/// Skip the parsed entity.
src_pos = entity_end + 1;
}
else
{
/// Copy one byte as is and skip it.
*dst_pos = *src_pos;
++dst_pos;
++src_pos;
}
}

/// Copy the rest of the string.
if (src_pos < src_end)
{
size_t bytes_to_copy = src_end - src_pos;
memcpySmallAllowReadWriteOverflow15(dst_pos, src_pos, bytes_to_copy);
dst_pos += bytes_to_copy;
}

return dst_pos - dst;
}

static size_t codePointToUTF8(uint32_t code_point, char *& dst_pos)
{
if (code_point < (1 << 7))
{
dst_pos[0] = (code_point & 0x7F);
++dst_pos;
return 1;
}
else if (code_point < (1 << 11))
{
dst_pos[0] = ((code_point >> 6) & 0x1F) + 0xC0;
dst_pos[1] = (code_point & 0x3F) + 0x80;
dst_pos += 2;
return 2;
}
else if (code_point < (1 << 16))
{
dst_pos[0] = ((code_point >> 12) & 0x0F) + 0xE0;
dst_pos[1] = ((code_point >> 6) & 0x3F) + 0x80;
dst_pos[2] = (code_point & 0x3F) + 0x80;
dst_pos += 3;
return 3;
}
else
{
dst_pos[0] = ((code_point >> 18) & 0x07) + 0xF0;
dst_pos[1] = ((code_point >> 12) & 0x3F) + 0x80;
dst_pos[2] = ((code_point >> 6) & 0x3F) + 0x80;
dst_pos[3] = (code_point & 0x3F) + 0x80;
dst_pos += 4;
return 4;
}
}

[[maybe_unused]] static bool isValidNumericEntity(const char * src, const char * end, uint32_t & code_point)
{
if (src + strlen("#") >= end)
return false;
if (src[0] != '#' || (end - src > 1 + max_decimal_length_of_unicode_point))
return false;

if (src + 2 < end && (src[1] == 'x' || src[1] == 'X'))
{
src += 2;
for (; src < end; ++src)
{
if (!isHexDigit(*src))
return false;
code_point *= 16;
code_point += unhex(*src);
}
}
else
{
src += 1;
for (; src < end; ++src)
{
if (!isNumericASCII(*src))
return false;
code_point *= 10;
code_point += *src - '0';
}
}

return code_point <= max_legal_unicode_value;
}
};

using FunctionDecodeHTMLComponent = FunctionStringToString<FunctionDecodeHTMLComponentImpl, DecodeHTMLComponentName>;

}

REGISTER_FUNCTION(DecodeHTMLComponent)
{
factory.registerFunction<FunctionDecodeHTMLComponent>();
}
}
@ -7,6 +7,7 @@
#include <Common/CurrentMetrics.h>
#include <Common/Throttler.h>
#include <Common/filesystemHelpers.h>
#include <Common/ElapsedTimeProfileEventIncrement.h>
#include <IO/AsynchronousReadBufferFromFileDescriptor.h>
#include <IO/WriteHelpers.h>

@ -14,6 +15,7 @@
namespace ProfileEvents
{
extern const Event AsynchronousReadWaitMicroseconds;
extern const Event SynchronousReadWaitMicroseconds;
extern const Event LocalReadThrottlerBytes;
extern const Event LocalReadThrottlerSleepMicroseconds;
}
@ -74,68 +76,43 @@ void AsynchronousReadBufferFromFileDescriptor::prefetch(Priority priority)

bool AsynchronousReadBufferFromFileDescriptor::nextImpl()
{
IAsynchronousReader::Result result;
if (prefetch_future.valid())
{
/// Read request already in flight. Wait for its completion.

size_t size = 0;
size_t offset = 0;
{
Stopwatch watch;
CurrentMetrics::Increment metric_increment{CurrentMetrics::AsynchronousReadWait};
auto result = prefetch_future.get();
ProfileEvents::increment(ProfileEvents::AsynchronousReadWaitMicroseconds, watch.elapsedMicroseconds());
size = result.size;
offset = result.offset;
assert(offset < size || size == 0);
}
CurrentMetrics::Increment metric_increment{CurrentMetrics::AsynchronousReadWait};
ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::AsynchronousReadWaitMicroseconds);

result = prefetch_future.get();
prefetch_future = {};
file_offset_of_buffer_end += size;

assert(offset <= size);
size_t bytes_read = size - offset;
if (throttler)
throttler->add(bytes_read, ProfileEvents::LocalReadThrottlerBytes, ProfileEvents::LocalReadThrottlerSleepMicroseconds);

if (bytes_read)
{
if (result.size - result.offset > 0)
prefetch_buffer.swap(memory);
/// Adjust the working buffer so that it ignores `offset` bytes.
internal_buffer = Buffer(memory.data(), memory.data() + memory.size());
working_buffer = Buffer(memory.data() + offset, memory.data() + size);
pos = working_buffer.begin();
return true;
}

return false;
}
else
{
/// No pending request. Do synchronous read.

Stopwatch watch;
auto [size, offset, _] = asyncReadInto(memory.data(), memory.size(), DEFAULT_PREFETCH_PRIORITY).get();
ProfileEvents::increment(ProfileEvents::AsynchronousReadWaitMicroseconds, watch.elapsedMicroseconds());

file_offset_of_buffer_end += size;

assert(offset <= size);
size_t bytes_read = size - offset;
if (throttler)
throttler->add(bytes_read, ProfileEvents::LocalReadThrottlerBytes, ProfileEvents::LocalReadThrottlerSleepMicroseconds);

if (bytes_read)
{
/// Adjust the working buffer so that it ignores `offset` bytes.
internal_buffer = Buffer(memory.data(), memory.data() + memory.size());
working_buffer = Buffer(memory.data() + offset, memory.data() + size);
pos = working_buffer.begin();
return true;
}

return false;
ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::SynchronousReadWaitMicroseconds);
result = asyncReadInto(memory.data(), memory.size(), DEFAULT_PREFETCH_PRIORITY).get();
}

chassert(result.size >= result.offset);
size_t bytes_read = result.size - result.offset;
file_offset_of_buffer_end += result.size;

if (throttler)
throttler->add(result.size, ProfileEvents::LocalReadThrottlerBytes, ProfileEvents::LocalReadThrottlerSleepMicroseconds);

if (bytes_read)
{
/// Adjust the working buffer so that it ignores `offset` bytes.
internal_buffer = Buffer(memory.data(), memory.data() + memory.size());
working_buffer = Buffer(memory.data() + result.offset, memory.data() + result.size);
pos = working_buffer.begin();
}

return bytes_read;
}

@ -74,6 +74,7 @@ public:
/// or destroy the whole reader before destroying the buffer for request.
/// The method can be called concurrently from multiple threads.
virtual std::future<Result> submit(Request request) = 0;
virtual Result execute(Request request) = 0;

virtual void wait() = 0;

@ -18,7 +18,6 @@ namespace ErrorCodes
{
extern const int ATTEMPT_TO_READ_AFTER_EOF;
extern const int CANNOT_READ_ALL_DATA;
extern const int NOT_IMPLEMENTED;
}

static constexpr auto DEFAULT_PREFETCH_PRIORITY = Priority{0};
@ -236,14 +235,6 @@ public:

virtual void setReadUntilEnd() {}

/// Read at most `size` bytes into data at specified offset `offset`. First ignore `ignore` bytes if `ignore` > 0.
/// Notice: this function only need to be implemented in synchronous read buffers to be wrapped in asynchronous read.
/// Such as ReadBufferFromRemoteFSGather and AsynchronousReadIndirectBufferFromRemoteFS.
virtual IAsynchronousReader::Result readInto(char * /*data*/, size_t /*size*/, size_t /*offset*/, size_t /*ignore*/)
{
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "readInto not implemented");
}

protected:
/// The number of bytes to ignore from the initial position of `working_buffer`
/// buffer. Apparently this is an additional out-parameter for nextImpl(),
@ -20,6 +20,7 @@
#include <IO/S3/PocoHTTPClientFactory.h>
#include <IO/S3/AWSLogger.h>
#include <IO/S3/Credentials.h>
#include <Interpreters/Context.h>

#include <Common/assert_cast.h>

@ -866,7 +867,9 @@ PocoHTTPClientConfiguration ClientFactory::createClientConfiguration( // NOLINT
const ThrottlerPtr & put_request_throttler,
const String & protocol)
{
auto proxy_configuration_resolver = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::protocolFromString(protocol));
auto context = Context::getGlobalContextInstance();
chassert(context);
auto proxy_configuration_resolver = DB::ProxyConfigurationResolverProvider::get(DB::ProxyConfiguration::protocolFromString(protocol), context->getConfigRef());

auto per_request_configuration = [=] () { return proxy_configuration_resolver->resolve(); };
auto error_report = [=] (const DB::ProxyConfiguration & req) { proxy_configuration_resolver->errorReport(req); };
@ -39,51 +39,56 @@ std::future<IAsynchronousReader::Result> SynchronousReader::submit(Request reque
/// If size is zero, then read() cannot be distinguished from EOF
assert(request.size);

int fd = assert_cast<const LocalFileDescriptor &>(*request.descriptor).fd;

#if defined(POSIX_FADV_WILLNEED)
int fd = assert_cast<const LocalFileDescriptor &>(*request.descriptor).fd;
if (0 != posix_fadvise(fd, request.offset, request.size, POSIX_FADV_WILLNEED))
throwFromErrno("Cannot posix_fadvise", ErrorCodes::CANNOT_ADVISE);
#endif

return std::async(std::launch::deferred, [fd, request]
return std::async(std::launch::deferred, [request, this]
{
ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorRead);
Stopwatch watch(CLOCK_MONOTONIC);

size_t bytes_read = 0;
while (!bytes_read)
{
ssize_t res = 0;

{
CurrentMetrics::Increment metric_increment{CurrentMetrics::Read};
res = ::pread(fd, request.buf, request.size, request.offset);
}
if (!res)
break;

if (-1 == res && errno != EINTR)
{
ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorReadFailed);
throwFromErrno(fmt::format("Cannot read from file {}", fd), ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR);
}

if (res > 0)
bytes_read += res;
}

ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorReadBytes, bytes_read);

/// It reports real time spent including the time spent while thread was preempted doing nothing.
/// And it is Ok for the purpose of this watch (it is used to lower the number of threads to read from tables).
/// Sometimes it is better to use taskstats::blkio_delay_total, but it is quite expensive to get it
/// (NetlinkMetricsProvider has about 500K RPS).
watch.stop();
ProfileEvents::increment(ProfileEvents::DiskReadElapsedMicroseconds, watch.elapsedMicroseconds());

return Result{ .size = bytes_read, .offset = request.ignore };
return execute(request);
});
}

IAsynchronousReader::Result SynchronousReader::execute(Request request)
{
ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorRead);
Stopwatch watch(CLOCK_MONOTONIC);

int fd = assert_cast<const LocalFileDescriptor &>(*request.descriptor).fd;
size_t bytes_read = 0;
while (!bytes_read)
{
ssize_t res = 0;

{
CurrentMetrics::Increment metric_increment{CurrentMetrics::Read};
res = ::pread(fd, request.buf, request.size, request.offset);
}
if (!res)
break;

if (-1 == res && errno != EINTR)
{
ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorReadFailed);
throwFromErrno(fmt::format("Cannot read from file {}", fd), ErrorCodes::CANNOT_READ_FROM_FILE_DESCRIPTOR);
}

if (res > 0)
bytes_read += res;
}

ProfileEvents::increment(ProfileEvents::ReadBufferFromFileDescriptorReadBytes, bytes_read);

/// It reports real time spent including the time spent while thread was preempted doing nothing.
/// And it is Ok for the purpose of this watch (it is used to lower the number of threads to read from tables).
/// Sometimes it is better to use taskstats::blkio_delay_total, but it is quite expensive to get it
/// (NetlinkMetricsProvider has about 500K RPS).
watch.stop();
ProfileEvents::increment(ProfileEvents::DiskReadElapsedMicroseconds, watch.elapsedMicroseconds());

return Result{ .size = bytes_read, .offset = request.ignore };
}

}
@ -14,6 +14,8 @@ class SynchronousReader final : public IAsynchronousReader
public:
std::future<Result> submit(Request request) override;

Result execute(Request request) override;

void wait() override {}
};

@ -30,7 +30,7 @@
#include <IO/ReadBufferFromS3.h>
#include <IO/S3/Client.h>

#include <Disks/IO/ThreadPoolReader.h>
#include <Disks/IO/ThreadPoolRemoteFSReader.h>
#include <Disks/IO/ReadBufferFromRemoteFSGather.h>
#include <Disks/IO/AsynchronousBoundedReadBuffer.h>

@ -1207,6 +1207,7 @@ TEST_F(WBS3Test, ReadBeyondLastOffset) {
wb.finalize();
}

auto reader = std::make_unique<ThreadPoolRemoteFSReader>(1, 1);
std::unique_ptr<ReadBufferFromEncryptedFile> encrypted_read_buffer;

{
@ -1214,7 +1215,6 @@ TEST_F(WBS3Test, ReadBeyondLastOffset) {

auto cache_log = std::shared_ptr<FilesystemCacheLog>();
const StoredObjects objects = { StoredObject(remote_file, data.size() + FileEncryption::Header::kSize) };
auto reader = std::make_unique<ThreadPoolReader>(1, 1);
auto async_read_counters = std::make_shared<AsyncReadCounters>();
auto prefetch_log = std::shared_ptr<FilesystemReadPrefetchesLog>();

@ -1253,7 +1253,7 @@ TEST_F(WBS3Test, ReadBeyondLastOffset) {
ASSERT_EQ(rb_async->getPosition(), FileEncryption::Header::kSize);
ASSERT_EQ(rb_async->getFileOffsetOfBufferEnd(), disk_read_settings.remote_fs_buffer_size);

/// ReadBufferFromEncryptedFile is constructed over an ReadBuffer which was already in use.
/// ReadBufferFromEncryptedFile is constructed over a ReadBuffer which was already in use.
/// The 'FileEncryption::Header' has been read from `rb_async`.
/// 'rb_async' will read the data from `rb_async` working buffer
encrypted_read_buffer = std::make_unique<ReadBufferFromEncryptedFile>(
@ -55,7 +55,7 @@ namespace ErrorCodes

FileCache::FileCache(const std::string & cache_name, const FileCacheSettings & settings)
: max_file_segment_size(settings.max_file_segment_size)
, bypass_cache_threshold(settings.enable_bypass_cache_with_threashold ? settings.bypass_cache_threashold : 0)
, bypass_cache_threshold(settings.enable_bypass_cache_with_threshold ? settings.bypass_cache_threshold : 0)
, boundary_alignment(settings.boundary_alignment)
, background_download_threads(settings.background_download_threads)
, metadata_download_threads(settings.load_metadata_threads)
@ -39,10 +39,10 @@ void FileCacheSettings::loadFromConfig(const Poco::Util::AbstractConfiguration &
enable_filesystem_query_cache_limit = config.getUInt64(config_prefix + ".enable_filesystem_query_cache_limit", false);
cache_hits_threshold = config.getUInt64(config_prefix + ".cache_hits_threshold", FILECACHE_DEFAULT_HITS_THRESHOLD);

enable_bypass_cache_with_threashold = config.getUInt64(config_prefix + ".enable_bypass_cache_with_threashold", false);
enable_bypass_cache_with_threshold = config.getUInt64(config_prefix + ".enable_bypass_cache_with_threshold", false);

if (config.has(config_prefix + ".bypass_cache_threashold"))
bypass_cache_threashold = parseWithSizeSuffix<uint64_t>(config.getString(config_prefix + ".bypass_cache_threashold"));
if (config.has(config_prefix + ".bypass_cache_threshold"))
bypass_cache_threshold = parseWithSizeSuffix<uint64_t>(config.getString(config_prefix + ".bypass_cache_threshold"));

if (config.has(config_prefix + ".boundary_alignment"))
boundary_alignment = parseWithSizeSuffix<uint64_t>(config.getString(config_prefix + ".boundary_alignment"));
@ -22,8 +22,8 @@ struct FileCacheSettings
size_t cache_hits_threshold = FILECACHE_DEFAULT_HITS_THRESHOLD;
bool enable_filesystem_query_cache_limit = false;

bool enable_bypass_cache_with_threashold = false;
size_t bypass_cache_threashold = FILECACHE_BYPASS_THRESHOLD;
bool enable_bypass_cache_with_threshold = false;
size_t bypass_cache_threshold = FILECACHE_BYPASS_THRESHOLD;

size_t boundary_alignment = FILECACHE_DEFAULT_FILE_SEGMENT_ALIGNMENT;
size_t background_download_threads = FILECACHE_DEFAULT_BACKGROUND_DOWNLOAD_THREADS;
@ -305,18 +305,19 @@ void executeQueryWithParallelReplicas(
LOG_DEBUG(&Poco::Logger::get("executeQueryWithParallelReplicas"), "Parallel replicas query in shard scope: shard_num={} cluster={}",
shard_num, not_optimized_cluster->getName());

const auto shard_replicas_num = not_optimized_cluster->getShardsAddresses()[shard_num - 1].size();
all_replicas_count = std::min(static_cast<size_t>(settings.max_parallel_replicas), shard_replicas_num);

/// shard_num is 1-based, but getClusterWithSingleShard expects 0-based index
new_cluster = not_optimized_cluster->getClusterWithSingleShard(shard_num - 1);
// get cluster for shard specified by shard_num
// shard_num is 1-based, but getClusterWithSingleShard expects 0-based index
auto single_shard_cluster = not_optimized_cluster->getClusterWithSingleShard(shard_num - 1);
// convert cluster to representation expected by parallel replicas
new_cluster = single_shard_cluster->getClusterWithReplicasAsShards(settings);
}
else
{
new_cluster = not_optimized_cluster->getClusterWithReplicasAsShards(settings);
all_replicas_count = std::min(static_cast<size_t>(settings.max_parallel_replicas), new_cluster->getShardCount());
}

all_replicas_count = std::min(static_cast<size_t>(settings.max_parallel_replicas), new_cluster->getShardCount());

auto coordinator = std::make_shared<ParallelReplicasReadingCoordinator>(all_replicas_count);
auto external_tables = new_context->getExternalTables();
auto read_from_remote = std::make_unique<ReadFromParallelRemoteReplicasStep>(
@ -90,7 +90,6 @@
#include <Interpreters/JIT/CompiledExpressionCache.h>
#include <Storages/MergeTree/BackgroundJobsAssignee.h>
#include <Storages/MergeTree/MergeTreeDataPartUUID.h>
#include <Storages/MergeTree/MergeTreeMetadataCache.h>
#include <Interpreters/SynonymsExtensions.h>
#include <Interpreters/Lemmatizers.h>
#include <Interpreters/ClusterDiscovery.h>
@ -349,11 +348,6 @@ struct ContextSharedPart : boost::noncopyable

bool is_server_completely_started = false;

#if USE_ROCKSDB
/// Global merge tree metadata cache, stored in rocksdb.
MergeTreeMetadataCachePtr merge_tree_metadata_cache;
#endif

ContextSharedPart()
: access_control(std::make_unique<AccessControl>())
, global_overcommit_tracker(&process_list)
@ -583,15 +577,6 @@ struct ContextSharedPart : boost::noncopyable
trace_collector.reset();
/// Stop zookeeper connection
zookeeper.reset();

#if USE_ROCKSDB
/// Shutdown merge tree metadata cache
if (merge_tree_metadata_cache)
{
merge_tree_metadata_cache->shutdown();
merge_tree_metadata_cache.reset();
}
#endif
}

/// Can be removed without context lock
@ -2958,13 +2943,6 @@ std::map<String, zkutil::ZooKeeperPtr> Context::getAuxiliaryZooKeepers() const
return shared->auxiliary_zookeepers;
}

#if USE_ROCKSDB
MergeTreeMetadataCachePtr Context::tryGetMergeTreeMetadataCache() const
{
return shared->merge_tree_metadata_cache;
}
#endif

void Context::resetZooKeeper() const
{
std::lock_guard lock(shared->zookeeper_mutex);
@ -3254,13 +3232,6 @@ void Context::initializeTraceCollector()
shared->initializeTraceCollector(getTraceLog());
}

#if USE_ROCKSDB
void Context::initializeMergeTreeMetadataCache(const String & dir, size_t size)
{
shared->merge_tree_metadata_cache = MergeTreeMetadataCache::create(dir, size);
}
#endif

/// Call after unexpected crash happen.
void Context::handleCrash() const
{
@ -197,11 +197,6 @@ using TemporaryDataOnDiskScopePtr = std::shared_ptr<TemporaryDataOnDiskScope>;
class ParallelReplicasReadingCoordinator;
using ParallelReplicasReadingCoordinatorPtr = std::shared_ptr<ParallelReplicasReadingCoordinator>;

#if USE_ROCKSDB
class MergeTreeMetadataCache;
using MergeTreeMetadataCachePtr = std::shared_ptr<MergeTreeMetadataCache>;
#endif

class PreparedSetsCache;
using PreparedSetsCachePtr = std::shared_ptr<PreparedSetsCache>;

@ -896,10 +891,6 @@ public:
UInt64 getClientProtocolVersion() const;
void setClientProtocolVersion(UInt64 version);

#if USE_ROCKSDB
MergeTreeMetadataCachePtr tryGetMergeTreeMetadataCache() const;
#endif

#if USE_NURAFT
std::shared_ptr<KeeperDispatcher> & getKeeperDispatcher() const;
std::shared_ptr<KeeperDispatcher> & tryGetKeeperDispatcher() const;
@ -1004,10 +995,6 @@ public:
/// Call after initialization before using trace collector.
void initializeTraceCollector();

#if USE_ROCKSDB
void initializeMergeTreeMetadataCache(const String & dir, size_t size);
#endif

/// Call after unexpected crash happen.
void handleCrash() const;

@ -897,19 +897,37 @@ DDLGuardPtr DatabaseCatalog::getDDLGuard(const String & database, const String &
/// TSA does not support unique_lock
auto db_guard_iter = TSA_SUPPRESS_WARNING_FOR_WRITE(ddl_guards).try_emplace(database).first;
DatabaseGuard & db_guard = db_guard_iter->second;
return std::make_unique<DDLGuard>(db_guard.first, db_guard.second, std::move(lock), table, database);
return std::make_unique<DDLGuard>(db_guard.table_guards, db_guard.database_ddl_mutex, std::move(lock), table, database);
}

std::unique_lock<SharedMutex> DatabaseCatalog::getExclusiveDDLGuardForDatabase(const String & database)
DatabaseCatalog::DatabaseGuard & DatabaseCatalog::getDatabaseGuard(const String & database)
{
DDLGuards::iterator db_guard_iter;
{
std::lock_guard lock(ddl_guards_mutex);
db_guard_iter = ddl_guards.try_emplace(database).first;
assert(db_guard_iter->second.first.contains(""));
}
DatabaseGuard & db_guard = db_guard_iter->second;
return std::unique_lock{db_guard.second};
return db_guard;
}

std::unique_lock<SharedMutex> DatabaseCatalog::getExclusiveDDLGuardForDatabase(const String & database)
{
return std::unique_lock{getDatabaseGuard(database).database_ddl_mutex};
}

std::unique_lock<SharedMutex> DatabaseCatalog::getLockForDropDatabase(const String & database)
{
return std::unique_lock{getDatabaseGuard(database).restart_replica_mutex};
}

std::optional<std::shared_lock<SharedMutex>> DatabaseCatalog::tryGetLockForRestartReplica(const String & database)
{
DatabaseGuard & db_guard = getDatabaseGuard(database);
std::shared_lock lock(db_guard.restart_replica_mutex, std::defer_lock);
if (lock.try_lock())
return lock;
return {};
}

bool DatabaseCatalog::isDictionaryExist(const StorageID & table_id) const
@ -174,6 +174,11 @@ public:
/// Get an object that protects the database from concurrent DDL queries all tables in the database
std::unique_lock<SharedMutex> getExclusiveDDLGuardForDatabase(const String & database);

/// We need special synchronization between DROP/DETACH DATABASE and SYSTEM RESTART REPLICA
/// because IStorage::flushAndPrepareForShutdown cannot be protected by DDLGuard (and a race with IStorage::startup is possible)
std::unique_lock<SharedMutex> getLockForDropDatabase(const String & database);
std::optional<std::shared_lock<SharedMutex>> tryGetLockForRestartReplica(const String & database);


void assertDatabaseExists(const String & database_name) const;
void assertDatabaseDoesntExist(const String & database_name) const;
@ -341,7 +346,15 @@ private:
/// For the duration of the operation, an element is placed here, and an object is returned,
/// which deletes the element in the destructor when counter becomes zero.
/// In case the element already exists, waits when query will be executed in other thread. See class DDLGuard below.
using DatabaseGuard = std::pair<DDLGuard::Map, SharedMutex>;
struct DatabaseGuard
{
SharedMutex database_ddl_mutex;
SharedMutex restart_replica_mutex;

DDLGuard::Map table_guards;
};
DatabaseGuard & getDatabaseGuard(const String & database);

using DDLGuards = std::map<String, DatabaseGuard>;
DDLGuards ddl_guards TSA_GUARDED_BY(ddl_guards_mutex);
/// If you capture mutex and ddl_guards_mutex, then you need to grab them strictly in this order.
@ -1513,14 +1513,16 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendOrderBy(ExpressionActionsChai
for (const auto & child : select_query->select()->children)
select.insert(child->getAliasOrColumnName());

NameSet required_by_interpolate;
/// collect columns required for interpolate expressions -
/// interpolate expression can use any available column
auto find_columns = [&step, &select](IAST * function)
auto find_columns = [&step, &select, &required_by_interpolate](IAST * function)
{
auto f_impl = [&step, &select](IAST * fn, auto fi)
auto f_impl = [&step, &select, &required_by_interpolate](IAST * fn, auto fi)
{
if (auto * ident = fn->as<ASTIdentifier>())
{
required_by_interpolate.insert(ident->getColumnName());
/// exclude columns from select expression - they are already available
if (!select.contains(ident->getColumnName()))
step.addRequiredOutput(ident->getColumnName());
@ -1536,6 +1538,14 @@ ActionsDAGPtr SelectQueryExpressionAnalyzer::appendOrderBy(ExpressionActionsChai

for (const auto & interpolate : interpolate_list->children)
find_columns(interpolate->as<ASTInterpolateElement>()->expr.get());

if (!required_result_columns.empty())
{
NameSet required_result_columns_set(required_result_columns.begin(), required_result_columns.end());
for (const auto & name : required_by_interpolate)
if (!required_result_columns_set.contains(name))
required_result_columns.push_back(name);
}
}

if (optimize_read_in_order)
@ -54,7 +54,7 @@ BlockIO InterpreterDescribeCacheQuery::execute()
res_columns[i++]->insert(cache->getFileSegmentsNum());
res_columns[i++]->insert(cache->getBasePath());
res_columns[i++]->insert(settings.background_download_threads);
res_columns[i++]->insert(settings.enable_bypass_cache_with_threashold);
res_columns[i++]->insert(settings.enable_bypass_cache_with_threshold);

BlockIO res;
size_t num_rows = res_columns[0]->size();
@ -345,6 +345,10 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query,

if (database->shouldBeEmptyOnDetach())
{
/// Cancel restarting replicas in that database, wait for remaining RESTART queries to finish.
/// So it will not startup tables concurrently with the flushAndPrepareForShutdown call below.
auto restart_replica_lock = DatabaseCatalog::instance().getLockForDropDatabase(database_name);

ASTDropQuery query_for_table;
query_for_table.kind = query.kind;
// For truncate operation on database, drop the tables
@ -363,8 +367,9 @@ BlockIO InterpreterDropQuery::executeToDatabaseImpl(const ASTDropQuery & query,
std::vector<std::pair<String, bool>> tables_to_drop;
for (auto iterator = database->getTablesIterator(table_context); iterator->isValid(); iterator->next())
{
iterator->table()->flushAndPrepareForShutdown();
tables_to_drop.push_back({iterator->name(), iterator->table()->isDictionary()});
auto table_ptr = iterator->table();
table_ptr->flushAndPrepareForShutdown();
tables_to_drop.push_back({iterator->name(), table_ptr->isDictionary()});
}

for (const auto & table : tables_to_drop)
@ -87,6 +87,7 @@ namespace ErrorCodes
extern const int NOT_IMPLEMENTED;
extern const int TIMEOUT_EXCEEDED;
extern const int TABLE_WAS_NOT_DROPPED;
extern const int ABORTED;
}

@ -683,12 +684,15 @@ void InterpreterSystemQuery::restoreReplica()
table_replicated_ptr->restoreMetadataInZooKeeper();
}

StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica, ContextMutablePtr system_context, bool need_ddl_guard)
StoragePtr InterpreterSystemQuery::tryRestartReplica(const StorageID & replica, ContextMutablePtr system_context)
{
LOG_TRACE(log, "Restarting replica {}", replica);
auto table_ddl_guard = need_ddl_guard
? DatabaseCatalog::instance().getDDLGuard(replica.getDatabaseName(), replica.getTableName())
: nullptr;
auto table_ddl_guard = DatabaseCatalog::instance().getDDLGuard(replica.getDatabaseName(), replica.getTableName());

auto restart_replica_lock = DatabaseCatalog::instance().tryGetLockForRestartReplica(replica.getDatabaseName());
if (!restart_replica_lock)
throw Exception(ErrorCodes::ABORTED, "Database {} is being dropped or detached, will not restart replica {}",
backQuoteIfNeed(replica.getDatabaseName()), replica.getNameForLogs());

auto [database, table] = DatabaseCatalog::instance().tryGetDatabaseAndTable(replica, getContext());
ASTPtr create_ast;
@ -767,21 +771,13 @@ void InterpreterSystemQuery::restartReplicas(ContextMutablePtr system_context)
if (replica_names.empty())
return;

TableGuards guards;

for (const auto & name : replica_names)
guards.emplace(UniqueTableName{name.database_name, name.table_name}, nullptr);

for (auto & guard : guards)
guard.second = catalog.getDDLGuard(guard.first.database_name, guard.first.table_name);

size_t threads = std::min(static_cast<size_t>(getNumberOfPhysicalCPUCores()), replica_names.size());
LOG_DEBUG(log, "Will restart {} replicas using {} threads", replica_names.size(), threads);
ThreadPool pool(CurrentMetrics::RestartReplicaThreads, CurrentMetrics::RestartReplicaThreadsActive, threads);

for (auto & replica : replica_names)
{
pool.scheduleOrThrowOnError([&]() { tryRestartReplica(replica, system_context, false); });
pool.scheduleOrThrowOnError([&]() { tryRestartReplica(replica, system_context); });
}
pool.wait();
}
@ -52,7 +52,7 @@ private:

/// Tries to get a replicated table and restart it
/// Returns pointer to a newly created table if the restart was successful
StoragePtr tryRestartReplica(const StorageID & replica, ContextMutablePtr context, bool need_ddl_guard = true);
StoragePtr tryRestartReplica(const StorageID & replica, ContextMutablePtr context);

void restartReplica(const StorageID & replica, ContextMutablePtr system_context);
void restartReplicas(ContextMutablePtr system_context);
@ -20,15 +20,10 @@ namespace

constexpr auto dummy_subquery_name_prefix = "_subquery";

String wrongAliasMessage(const ASTPtr & ast, const ASTPtr & prev_ast, const String & alias)
PreformattedMessage wrongAliasMessage(const ASTPtr & ast, const ASTPtr & prev_ast, const String & alias)
{
WriteBufferFromOwnString message;
message << "Different expressions with the same alias " << backQuoteIfNeed(alias) << ":\n";
formatAST(*ast, message, false, true);
message << "\nand\n";
formatAST(*prev_ast, message, false, true);
message << '\n';
return message.str();
return PreformattedMessage::create("Different expressions with the same alias {}:\n{}\nand\n{}\n",
backQuoteIfNeed(alias), serializeAST(*ast), serializeAST(*prev_ast));
}

}
@ -127,7 +122,7 @@ void QueryAliasesMatcher<T>::visitOther(const ASTPtr & ast, Data & data)
if (!alias.empty())
{
if (aliases.contains(alias) && ast->getTreeHash() != aliases[alias]->getTreeHash())
throw Exception::createDeprecated(wrongAliasMessage(ast, aliases[alias], alias), ErrorCodes::MULTIPLE_EXPRESSIONS_FOR_ALIAS);
throw Exception(wrongAliasMessage(ast, aliases[alias], alias), ErrorCodes::MULTIPLE_EXPRESSIONS_FOR_ALIAS);

aliases[alias] = ast;
}
@ -15,7 +15,6 @@
#include <IO/MMappedFileCache.h>

#include <Storages/MergeTree/MergeTreeData.h>
#include <Storages/MergeTree/MergeTreeMetadataCache.h>
#include <Storages/StorageMergeTree.h>
#include <Storages/StorageReplicatedMergeTree.h>
#include <Storages/MarkCache.h>
@ -125,14 +124,6 @@ void ServerAsynchronousMetrics::updateImpl(AsynchronousMetricValues & new_values
"Total number of cached file segments in the `cache` virtual filesystem. This cache is hold on disk." };
}

#if USE_ROCKSDB
if (auto metadata_cache = getContext()->tryGetMergeTreeMetadataCache())
{
new_values["MergeTreeMetadataCacheSize"] = { metadata_cache->getEstimateNumKeys(),
"The size of the metadata cache for tables. This cache is experimental and not used in production." };
}
#endif

#if USE_EMBEDDED_COMPILER
if (auto * compiled_expression_cache = CompiledExpressionCacheFactory::instance().tryGetCache())
{
@ -388,6 +388,44 @@ void removeUnneededColumnsFromSelectClause(ASTSelectQuery * select_query, const
    else
        return;

    NameSet required_by_interpolate;

    if (select_query->interpolate())
    {
        auto & children = select_query->interpolate()->children;
        if (!children.empty())
        {
            NameToNameSetMap expressions;

            auto interpolate_visitor = [](const ASTPtr ast, NameSet & columns) -> void
            {
                auto interpolate_visitor_impl = [](const ASTPtr node, NameSet & cols, auto self) -> void
                {
                    if (const auto * ident = node->as<ASTIdentifier>())
                        cols.insert(ident->name());
                    else if (const auto * func = node->as<ASTFunction>())
                        for (const auto & elem : func->arguments->children)
                            self(elem, cols, self);
                };
                interpolate_visitor_impl(ast, columns, interpolate_visitor_impl);
            };

            for (const auto & elem : children)
            {
                if (auto * interpolate = elem->as<ASTInterpolateElement>())
                {
                    NameSet needed_columns;
                    interpolate_visitor(interpolate->expr, needed_columns);
                    expressions.emplace(interpolate->column, std::move(needed_columns));
                }
            }

            for (const auto & name : required_result_columns)
                if (const auto it = expressions.find(name); it != expressions.end())
                    required_by_interpolate.insert(it->second.begin(), it->second.end());
        }
    }

    ASTs new_elements;
    new_elements.reserve(elements.size());

@ -403,6 +441,11 @@ void removeUnneededColumnsFromSelectClause(ASTSelectQuery * select_query, const
            new_elements.push_back(elem);
            --it->second;
        }
        else if (required_by_interpolate.contains(name))
        {
            /// Columns required by interpolate expression are not always in the required_result_columns
            new_elements.push_back(elem);
        }
        else if (select_query->distinct || hasArrayJoin(elem))
        {
            /// ARRAY JOIN cannot be optimized out since it may change number of rows,
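A detail worth noting in the new interpolate handling above: `interpolate_visitor_impl` recurses by receiving itself as a generic `auto self` parameter, since a lambda cannot refer to its own name. A self-contained sketch of the same idiom over a toy expression tree (the `Node` type is invented for illustration):

```cpp
#include <iostream>
#include <set>
#include <string>
#include <vector>

struct Node
{
    std::string name;
    std::vector<Node> children;  /// empty for leaves, i.e. "identifiers"
};

int main()
{
    /// plus(x, multiply(y, 2)) as a toy AST
    Node two{"2", {}};
    Node y{"y", {}};
    Node x{"x", {}};
    Node multiply{"multiply", {y, two}};
    Node expr{"plus", {x, multiply}};

    std::set<std::string> columns;
    auto visit_impl = [](const Node & node, std::set<std::string> & cols, auto self) -> void
    {
        if (node.children.empty())
            cols.insert(node.name);       /// leaf: collect the name
        else
            for (const auto & child : node.children)
                self(child, cols, self);  /// recurse by passing the lambda to itself
    };
    visit_impl(expr, columns, visit_impl);

    for (const auto & name : columns)
        std::cout << name << '\n';        /// prints 2, x, y (set iteration order)
}
```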
@ -283,6 +283,11 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID
            /// Already in needed type.
            return src;
        }
        if (which_type.isIPv4() && src.getType() == Field::Types::UInt64)
        {
            /// convert to UInt32 which is the underlying type for native IPv4
            return convertNumericType<UInt32>(src, type);
        }
    }
    else if (which_type.isUUID() && src.getType() == Field::Types::UUID)
    {
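The new branch narrows a `UInt64` field to the `UInt32` that backs the native IPv4 type, presumably so integer literals can be compared with or inserted into IPv4 columns. A rough standalone illustration of the narrowing with a range check; `toIPv4` below is a hypothetical helper, not ClickHouse's `convertNumericType`:

```cpp
#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>

/// Hypothetical helper: narrow a UInt64 field value to the UInt32
/// that backs the native IPv4 type, rejecting out-of-range values.
std::optional<uint32_t> toIPv4(uint64_t value)
{
    if (value > std::numeric_limits<uint32_t>::max())
        return std::nullopt;  /// out of IPv4 range
    return static_cast<uint32_t>(value);
}

int main()
{
    if (auto ip = toIPv4(3232235521ULL))  /// 0xC0A80001
        std::cout << ((*ip >> 24) & 0xFF) << '.' << ((*ip >> 16) & 0xFF) << '.'
                  << ((*ip >> 8) & 0xFF) << '.' << (*ip & 0xFF) << '\n';  /// 192.168.0.1
}
```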
@ -46,7 +46,7 @@ static std::string renderFileNameTemplate(time_t now, const std::string & file_p
    std::tm buf;
    localtime_r(&now, &buf);
    std::ostringstream ss; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
    ss << std::put_time(&buf, file_path.c_str());
    ss << std::put_time(&buf, path.filename().c_str());
    return path.replace_filename(ss.str());
}

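The fix applies `std::put_time` only to the filename component, presumably so that `%` specifiers in directory names are left untouched. A standalone sketch of the resulting behaviour (the log path is just an example):

```cpp
#include <ctime>
#include <filesystem>
#include <iomanip>
#include <iostream>
#include <sstream>

int main()
{
    std::time_t now = std::time(nullptr);
    std::tm buf;
    localtime_r(&now, &buf);  /// POSIX, as in the patched code

    std::filesystem::path path = "/var/log/clickhouse-server/clickhouse-%Y-%m.log";
    std::ostringstream ss;
    ss << std::put_time(&buf, path.filename().c_str());  /// format only the filename
    std::cout << path.replace_filename(ss.str()).string() << '\n';
    /// e.g. /var/log/clickhouse-server/clickhouse-2023-08.log
}
```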
@ -719,7 +719,7 @@ static ColumnWithTypeAndName readColumnFromArrowColumn(
                /// ORC doesn't support Decimal256 as separate type. We read and write it as binary data.
                case TypeIndex::Decimal256:
                    return readColumnWithBigNumberFromBinaryData<ColumnDecimal<Decimal256>>(arrow_column, column_name, type_hint);
                default:;
                default:
            }
        }
        return readColumnWithStringData<arrow::BinaryArray>(arrow_column, column_name);
@ -738,7 +738,7 @@ static ColumnWithTypeAndName readColumnFromArrowColumn(
                    return readColumnWithBigIntegerFromFixedBinaryData<Int256>(arrow_column, column_name, type_hint);
                case TypeIndex::UInt256:
                    return readColumnWithBigIntegerFromFixedBinaryData<UInt256>(arrow_column, column_name, type_hint);
                default:;
                default:
            }
        }

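The recurring `default:;` to `default:` cleanups in this commit lean on a C++23 relaxation (P2324): a label, including `case` and `default`, may now appear directly before the closing brace of a compound statement, so the placeholder empty statement is no longer needed. A minimal illustration (compile with `-std=c++23`):

```cpp
#include <iostream>

int classify(int v)
{
    switch (v)
    {
        case 0:
            return 1;
        default:  // C++23: a label may end the compound statement, no ';' needed
    }
    return 0;
}

int main()
{
    std::cout << classify(0) << classify(7) << '\n';  // prints "10"
}
```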
@ -897,7 +897,7 @@ DataTypePtr BSONEachRowSchemaReader::getDataTypeFromBSONField(BSONType type, boo
            in.ignore(size);
            return std::make_shared<DataTypeString>();
        }
        case BSONType::OBJECT_ID:;
        case BSONType::OBJECT_ID:
        {
            in.ignore(BSON_OBJECT_ID_SIZE);
            return makeNullable(std::make_shared<DataTypeFixedString>(BSON_OBJECT_ID_SIZE));
@ -244,7 +244,7 @@ static void insertString(IColumn & column, DataTypePtr type, const char * value,
        case TypeIndex::Decimal256:
            insertFromBinaryRepresentation<ColumnDecimal<Decimal256>>(column, type, value, size);
            return;
        default:;
        default:
    }
}

@ -432,7 +432,7 @@ void ORCBlockOutputFormat::writeColumn(
                [scale](UInt64 value){ return (value % UInt64(std::pow(10, scale))) * std::pow(10, 9 - scale); });
            break;
        }
        case TypeIndex::Decimal32:;
        case TypeIndex::Decimal32:
        {
            writeDecimals<Decimal32, orc::Decimal64VectorBatch>(
                orc_column,
@ -250,20 +250,6 @@ size_t ReadBufferFromHDFS::getFileOffsetOfBufferEnd() const
    return impl->getPosition();
}

IAsynchronousReader::Result ReadBufferFromHDFS::readInto(char * data, size_t size, size_t offset, size_t /*ignore*/)
{
    /// TODO: we don't need to copy if there is no pending data
    seek(offset, SEEK_SET);
    if (eof())
        return {0, 0, nullptr};

    /// Make sure returned size no greater than available bytes in working_buffer
    size_t count = std::min(size, available());
    memcpy(data, position(), count);
    position() += count;
    return {count, 0, nullptr};
}

String ReadBufferFromHDFS::getFileName() const
{
    return impl->hdfs_file_path;
@ -44,8 +44,6 @@ public:

    size_t getFileOffsetOfBufferEnd() const override;

    IAsynchronousReader::Result readInto(char * data, size_t size, size_t offset, size_t ignore) override;

    String getFileName() const override;

private:

@ -416,6 +416,7 @@ void DataPartStorageOnDiskBase::backup(
MutableDataPartStoragePtr DataPartStorageOnDiskBase::freeze(
    const std::string & to,
    const std::string & dir_path,
    const WriteSettings & settings,
    std::function<void(const DiskPtr &)> save_metadata_callback,
    const ClonePartParams & params) const
{
@ -425,8 +426,16 @@ MutableDataPartStoragePtr DataPartStorageOnDiskBase::freeze(
    else
        disk->createDirectories(to);

    localBackup(disk, getRelativePath(), fs::path(to) / dir_path, params.make_source_readonly, {}, params.copy_instead_of_hardlink,
                params.files_to_copy_instead_of_hardlinks, params.external_transaction);
    localBackup(
        disk,
        getRelativePath(),
        fs::path(to) / dir_path,
        settings,
        params.make_source_readonly,
        /* max_level= */ {},
        params.copy_instead_of_hardlink,
        params.files_to_copy_instead_of_hardlinks,
        params.external_transaction);

    if (save_metadata_callback)
        save_metadata_callback(disk);
@ -457,6 +466,7 @@ MutableDataPartStoragePtr DataPartStorageOnDiskBase::clonePart(
    const std::string & to,
    const std::string & dir_path,
    const DiskPtr & dst_disk,
    const WriteSettings & write_settings,
    Poco::Logger * log) const
{
    String path_to_clone = fs::path(to) / dir_path / "";
@ -472,7 +482,7 @@ MutableDataPartStoragePtr DataPartStorageOnDiskBase::clonePart(
    try
    {
        dst_disk->createDirectories(to);
        src_disk->copyDirectoryContent(getRelativePath(), dst_disk, path_to_clone);
        src_disk->copyDirectoryContent(getRelativePath(), dst_disk, path_to_clone, write_settings);
    }
    catch (...)
    {

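The common thread in the `freeze` and `clonePart` changes here and in the hunks below is that a `WriteSettings` argument is threaded from the caller down to the code that actually writes data, instead of each layer reading global defaults. A toy version of the pattern; `WriteSettings`, `clonePart` and `copyDirectoryContent` are simplified stand-ins, not the real ClickHouse signatures:

```cpp
#include <iostream>
#include <string>

/// Simplified stand-in for ClickHouse's WriteSettings.
struct WriteSettings
{
    bool throttle = false;
};

/// The lowest layer that actually writes data honours the settings...
void copyDirectoryContent(const std::string & from, const std::string & to, const WriteSettings & settings)
{
    std::cout << "copy " << from << " -> " << to
              << (settings.throttle ? " (throttled)" : " (unthrottled)") << '\n';
}

/// ...and every intermediate layer just forwards them.
void clonePart(const std::string & part, const WriteSettings & settings)
{
    copyDirectoryContent(part, "detached/" + part, settings);
}

int main()
{
    /// Per-query settings reach the write path explicitly, not via globals.
    clonePart("all_1_1_0", WriteSettings{.throttle = true});
}
```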
@ -63,6 +63,7 @@ public:
    MutableDataPartStoragePtr freeze(
        const std::string & to,
        const std::string & dir_path,
        const WriteSettings & settings,
        std::function<void(const DiskPtr &)> save_metadata_callback,
        const ClonePartParams & params) const override;

@ -70,6 +71,7 @@ public:
        const std::string & to,
        const std::string & dir_path,
        const DiskPtr & dst_disk,
        const WriteSettings & write_settings,
        Poco::Logger * log) const override;

    void rename(

@ -250,6 +250,7 @@ public:
    virtual std::shared_ptr<IDataPartStorage> freeze(
        const std::string & to,
        const std::string & dir_path,
        const WriteSettings & settings,
        std::function<void(const DiskPtr &)> save_metadata_callback,
        const ClonePartParams & params) const = 0;

@ -258,6 +259,7 @@ public:
        const std::string & to,
        const std::string & dir_path,
        const DiskPtr & disk,
        const WriteSettings & write_settings,
        Poco::Logger * log) const = 0;

    /// Change part's root. from_root should be a prefix path of current root path.

@ -16,7 +16,6 @@
#include <Storages/MergeTree/checkDataPart.h>
#include <Storages/StorageReplicatedMergeTree.h>
#include <Storages/MergeTree/PartMetadataManagerOrdinary.h>
#include <Storages/MergeTree/PartMetadataManagerWithCache.h>
#include <Core/NamesAndTypes.h>
#include <Storages/ColumnsDescription.h>
#include <Common/StringUtils/StringUtils.h>
@ -320,7 +319,6 @@ IMergeTreeDataPart::IMergeTreeDataPart(
    , part_type(part_type_)
    , parent_part(parent_part_)
    , parent_part_name(parent_part ? parent_part->name : "")
    , use_metadata_cache(storage.use_metadata_cache)
{
    if (parent_part)
    {
@ -1673,14 +1671,7 @@ std::pair<bool, NameSet> IMergeTreeDataPart::canRemovePart() const

void IMergeTreeDataPart::initializePartMetadataManager()
{
#if USE_ROCKSDB
    if (auto metadata_cache = storage.getContext()->tryGetMergeTreeMetadataCache(); metadata_cache && use_metadata_cache)
        metadata_manager = std::make_shared<PartMetadataManagerWithCache>(this, metadata_cache);
    else
        metadata_manager = std::make_shared<PartMetadataManagerOrdinary>(this);
#else
    metadata_manager = std::make_shared<PartMetadataManagerOrdinary>(this);
#endif
    metadata_manager = std::make_shared<PartMetadataManagerOrdinary>(this);
}

void IMergeTreeDataPart::initializeIndexGranularityInfo()
@ -1802,11 +1793,12 @@ DataPartStoragePtr IMergeTreeDataPart::makeCloneInDetached(const String & prefix
    return getDataPartStorage().freeze(
        storage.relative_data_path,
        *maybe_path_in_detached,
        /*save_metadata_callback=*/ {},
        Context::getGlobalContextInstance()->getWriteSettings(),
        /* save_metadata_callback= */ {},
        params);
}

MutableDataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const
MutableDataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & disk, const String & directory_name, const WriteSettings & write_settings) const
{
    assertOnDisk();

@ -1816,7 +1808,7 @@ MutableDataPartStoragePtr IMergeTreeDataPart::makeCloneOnDisk(const DiskPtr & di
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Can not clone data part {} to empty directory.", name);

    String path_to_clone = fs::path(storage.relative_data_path) / directory_name / "";
    return getDataPartStorage().clonePart(path_to_clone, getDataPartStorage().getPartDirectory(), disk, storage.log);
    return getDataPartStorage().clonePart(path_to_clone, getDataPartStorage().getPartDirectory(), disk, write_settings, storage.log);
}

UInt64 IMergeTreeDataPart::getIndexSizeFromFile() const
@ -2064,34 +2056,6 @@ String IMergeTreeDataPart::getZeroLevelPartBlockID(std::string_view token) const
    return info.partition_id + "_" + toString(hash_value.items[0]) + "_" + toString(hash_value.items[1]);
}

IMergeTreeDataPart::uint128 IMergeTreeDataPart::getActualChecksumByFile(const String & file_name) const
{
    assert(use_metadata_cache);

    const auto filenames_without_checksums = getFileNamesWithoutChecksums();
    auto it = checksums.files.find(file_name);
    if (!filenames_without_checksums.contains(file_name) && it != checksums.files.end())
    {
        return it->second.file_hash;
    }

    if (!getDataPartStorage().exists(file_name))
    {
        return {};
    }
    std::unique_ptr<ReadBufferFromFileBase> in_file = getDataPartStorage().readFile(file_name, {}, std::nullopt, std::nullopt);
    HashingReadBuffer in_hash(*in_file);

    String value;
    readStringUntilEOF(value, in_hash);
    return in_hash.getHash();
}

std::unordered_map<String, IMergeTreeDataPart::uint128> IMergeTreeDataPart::checkMetadata() const
{
    return metadata_manager->check();
}

bool isCompactPart(const MergeTreeDataPartPtr & data_part)
{
    return (data_part && data_part->getType() == MergeTreeDataPartType::Compact);
@ -377,7 +377,7 @@ public:
        const DiskTransactionPtr & disk_transaction) const;

    /// Makes full clone of part in specified subdirectory (relative to storage data directory, e.g. "detached") on another disk
    MutableDataPartStoragePtr makeCloneOnDisk(const DiskPtr & disk, const String & directory_name) const;
    MutableDataPartStoragePtr makeCloneOnDisk(const DiskPtr & disk, const String & directory_name, const WriteSettings & write_settings) const;

    /// Checks that .bin and .mrk files exist.
    ///
@ -481,12 +481,6 @@ public:
    /// Required for keep data on remote FS when part has shadow copies.
    UInt32 getNumberOfRefereneces() const;

    /// Get checksums of metadata file in part directory
    IMergeTreeDataPart::uint128 getActualChecksumByFile(const String & file_name) const;

    /// Check metadata in cache is consistent with actual metadata on disk(if use_metadata_cache is true)
    std::unordered_map<String, uint128> checkMetadata() const;

    /// True if the part supports lightweight delete mutate.
    bool supportLightweightDeleteMutate() const;

@ -536,9 +530,6 @@ protected:

    std::map<String, std::shared_ptr<IMergeTreeDataPart>> projection_parts;

    /// Disabled when USE_ROCKSDB is OFF or use_metadata_cache is set to false in merge tree settings
    bool use_metadata_cache = false;

    mutable PartMetadataManagerPtr metadata_manager;

    void removeIfNeeded();

@ -20,7 +20,6 @@ using DiskPtr = std::shared_ptr<IDisk>;
/// - PartMetadataManagerOrdinary: manage metadata from disk directly. deleteAll/assertAllDeleted/updateAll/check
///   are all empty implementations because they are not needed for PartMetadataManagerOrdinary(those operations
///   are done implicitly when removing or renaming part directory).
/// - PartMetadataManagerWithCache: manage metadata from RocksDB cache and disk.
class IPartMetadataManager
{
public:

@ -354,7 +354,6 @@ MergeTreeData::MergeTreeData(
    , parts_mover(this)
    , background_operations_assignee(*this, BackgroundJobsAssignee::Type::DataProcessing, getContext())
    , background_moves_assignee(*this, BackgroundJobsAssignee::Type::Moving, getContext())
    , use_metadata_cache(getSettings()->use_metadata_cache)
{
    context_->getGlobalContext()->initializeBackgroundExecutorsIfNeeded();

@ -405,11 +404,6 @@ MergeTreeData::MergeTreeData(
    if (!canUsePolymorphicParts(*settings, reason) && !reason.empty())
        LOG_WARNING(log, "{} Settings 'min_rows_for_wide_part'and 'min_bytes_for_wide_part' will be ignored.", reason);

#if !USE_ROCKSDB
    if (use_metadata_cache)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Can't use merge tree metadata cache if clickhouse was compiled without rocksdb");
#endif

    common_assignee_trigger = [this] (bool delay) noexcept
    {
        if (delay)
@ -4985,7 +4979,7 @@ void MergeTreeData::movePartitionToDisk(const ASTPtr & partition, const String &
        throw Exception(ErrorCodes::UNKNOWN_DISK, "All parts of partition '{}' are already on disk '{}'", partition_id, disk->getName());
    }

    MovePartsOutcome moves_outcome = movePartsToSpace(parts, std::static_pointer_cast<Space>(disk));
    MovePartsOutcome moves_outcome = movePartsToSpace(parts, std::static_pointer_cast<Space>(disk), local_context->getWriteSettings());
    switch (moves_outcome)
    {
        case MovePartsOutcome::MovesAreCancelled:
@ -5048,7 +5042,7 @@ void MergeTreeData::movePartitionToVolume(const ASTPtr & partition, const String
        throw Exception(ErrorCodes::UNKNOWN_DISK, "All parts of partition '{}' are already on volume '{}'", partition_id, volume->getName());
    }

    MovePartsOutcome moves_outcome = movePartsToSpace(parts, std::static_pointer_cast<Space>(volume));
    MovePartsOutcome moves_outcome = movePartsToSpace(parts, std::static_pointer_cast<Space>(volume), local_context->getWriteSettings());
    switch (moves_outcome)
    {
        case MovePartsOutcome::MovesAreCancelled:
@ -7401,7 +7395,8 @@ std::pair<MergeTreeData::MutableDataPartPtr, scope_guard> MergeTreeData::cloneAn
    const String & tmp_part_prefix,
    const MergeTreePartInfo & dst_part_info,
    const StorageMetadataPtr & metadata_snapshot,
    const IDataPartStorage::ClonePartParams & params)
    const IDataPartStorage::ClonePartParams & params,
    const WriteSettings & write_settings)
{
    /// Check that the storage policy contains the disk where the src_part is located.
    bool does_storage_policy_allow_same_disk = false;
@ -7458,7 +7453,8 @@ std::pair<MergeTreeData::MutableDataPartPtr, scope_guard> MergeTreeData::cloneAn
    auto dst_part_storage = src_part_storage->freeze(
        relative_data_path,
        tmp_dst_part_name,
        /*save_metadata_callback=*/ {},
        write_settings,
        /* save_metadata_callback= */ {},
        params);

    if (params.metadata_version_to_write.has_value())
@ -7715,6 +7711,7 @@ PartitionCommandsResultInfo MergeTreeData::freezePartitionsByMatcher(
        auto new_storage = data_part_storage->freeze(
            backup_part_path,
            part->getDataPartStorage().getPartDirectory(),
            local_context->getWriteSettings(),
            callback,
            params);

@ -7913,7 +7910,8 @@ bool MergeTreeData::scheduleDataMovingJob(BackgroundJobsAssignee & assignee)
    assignee.scheduleMoveTask(std::make_shared<ExecutableLambdaAdapter>(
        [this, moving_tagger] () mutable
        {
            return moveParts(moving_tagger) == MovePartsOutcome::PartsMoved;
            WriteSettings write_settings = Context::getGlobalContextInstance()->getWriteSettings();
            return moveParts(moving_tagger, write_settings, /* wait_for_move_if_zero_copy= */ false) == MovePartsOutcome::PartsMoved;
        }, moves_assignee_trigger, getStorageID()));
    return true;
}
@ -7928,7 +7926,7 @@ bool MergeTreeData::areBackgroundMovesNeeded() const
    return policy->getVolumes().size() == 1 && policy->getVolumes()[0]->getDisks().size() > 1;
}

MovePartsOutcome MergeTreeData::movePartsToSpace(const DataPartsVector & parts, SpacePtr space)
MovePartsOutcome MergeTreeData::movePartsToSpace(const DataPartsVector & parts, SpacePtr space, const WriteSettings & write_settings)
{
    if (parts_mover.moves_blocker.isCancelled())
        return MovePartsOutcome::MovesAreCancelled;
@ -7937,7 +7935,7 @@ MovePartsOutcome MergeTreeData::movePartsToSpace(const DataPartsVector & parts,
    if (moving_tagger->parts_to_move.empty())
        return MovePartsOutcome::NothingToMove;

    return moveParts(moving_tagger, true);
    return moveParts(moving_tagger, write_settings, /* wait_for_move_if_zero_copy= */ true);
}

MergeTreeData::CurrentlyMovingPartsTaggerPtr MergeTreeData::selectPartsForMove()
@ -7992,7 +7990,7 @@ MergeTreeData::CurrentlyMovingPartsTaggerPtr MergeTreeData::checkPartsForMove(co
    return std::make_shared<CurrentlyMovingPartsTagger>(std::move(parts_to_move), *this);
}

MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagger, bool wait_for_move_if_zero_copy)
MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagger, const WriteSettings & write_settings, bool wait_for_move_if_zero_copy)
{
    LOG_INFO(log, "Got {} parts to move.", moving_tagger->parts_to_move.size());

@ -8053,7 +8051,7 @@ MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr &
            {
                if (lock->isLocked())
                {
                    cloned_part = parts_mover.clonePart(moving_part);
                    cloned_part = parts_mover.clonePart(moving_part, write_settings);
                    parts_mover.swapClonedPart(cloned_part);
                    break;
                }
@ -8080,7 +8078,7 @@ MovePartsOutcome MergeTreeData::moveParts(const CurrentlyMovingPartsTaggerPtr &
        }
        else /// Ordinary move as it should be
        {
            cloned_part = parts_mover.clonePart(moving_part);
            cloned_part = parts_mover.clonePart(moving_part, write_settings);
            parts_mover.swapClonedPart(cloned_part);
        }
        write_part_log({});

@ -63,6 +63,8 @@ using BackupEntries = std::vector<std::pair<String, std::shared_ptr<const IBacku
class MergeTreeTransaction;
using MergeTreeTransactionPtr = std::shared_ptr<MergeTreeTransaction>;

struct WriteSettings;

/// Auxiliary struct holding information about the future merged or mutated part.
struct EmergingPartInfo
{
@ -841,9 +843,12 @@ public:
    MergeTreeData & checkStructureAndGetMergeTreeData(IStorage & source_table, const StorageMetadataPtr & src_snapshot, const StorageMetadataPtr & my_snapshot) const;

    std::pair<MergeTreeData::MutableDataPartPtr, scope_guard> cloneAndLoadDataPartOnSameDisk(
        const MergeTreeData::DataPartPtr & src_part, const String & tmp_part_prefix,
        const MergeTreePartInfo & dst_part_info, const StorageMetadataPtr & metadata_snapshot,
        const IDataPartStorage::ClonePartParams & params);
        const MergeTreeData::DataPartPtr & src_part,
        const String & tmp_part_prefix,
        const MergeTreePartInfo & dst_part_info,
        const StorageMetadataPtr & metadata_snapshot,
        const IDataPartStorage::ClonePartParams & params,
        const WriteSettings & write_settings);

    virtual std::vector<MergeTreeMutationStatus> getMutationsStatus() const = 0;

@ -1178,7 +1183,6 @@ protected:
    /// And for ReplicatedMergeTree we don't have LogEntry type for this operation.
    BackgroundJobsAssignee background_operations_assignee;
    BackgroundJobsAssignee background_moves_assignee;
    bool use_metadata_cache;

    /// Strongly connected with two fields above.
    /// Every task that is finished will ask to assign a new one into an executor.
@ -1336,7 +1340,7 @@ protected:
    /// MergeTree because they store mutations in different way.
    virtual std::map<int64_t, MutationCommands> getAlterMutationCommandsForPart(const DataPartPtr & part) const = 0;
    /// Moves part to specified space, used in ALTER ... MOVE ... queries
    MovePartsOutcome movePartsToSpace(const DataPartsVector & parts, SpacePtr space);
    MovePartsOutcome movePartsToSpace(const DataPartsVector & parts, SpacePtr space, const WriteSettings & write_settings);

    struct PartBackupEntries
    {
@ -1489,7 +1493,7 @@ private:
    using CurrentlyMovingPartsTaggerPtr = std::shared_ptr<CurrentlyMovingPartsTagger>;

    /// Move selected parts to corresponding disks
    MovePartsOutcome moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagger, bool wait_for_move_if_zero_copy=false);
    MovePartsOutcome moveParts(const CurrentlyMovingPartsTaggerPtr & moving_tagger, const WriteSettings & write_settings, bool wait_for_move_if_zero_copy);

    /// Select parts for move and disks for them. Used in background moving processes.
    CurrentlyMovingPartsTaggerPtr selectPartsForMove();
@ -737,7 +737,7 @@ void bloomFilterIndexValidator(const IndexDescription & index, bool /*attach*/)
        data_type = WhichDataType(low_cardinality.getDictionaryType());
    }

    if (!data_type.isString() && !data_type.isFixedString())
    if (!data_type.isString() && !data_type.isFixedString() && !data_type.isIPv6())
        throw Exception(ErrorCodes::INCORRECT_QUERY,
            "Ngram and token bloom filter indexes can only be used with column types `String`, `FixedString`, `LowCardinality(String)`, `LowCardinality(FixedString)`, `Array(String)` or `Array(FixedString)`");
}