Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-10 09:32:06 +00:00
Merge remote-tracking branch 'upstream/master' into HEAD
This commit is contained in: commit 579ec699bf
3 changes: .github/ISSUE_TEMPLATE/85_bug-report.md (vendored)
@@ -21,8 +21,7 @@ assignees: ''

 **Enable crash reporting**

-> If possible, change "enabled" to true in "send_crash_reports" section in `config.xml`:
+> Change "enabled" to true in "send_crash_reports" section in `config.xml`:

 ```
 <send_crash_reports>
     <!-- Changing <enabled> to true allows sending crash reports to -->
3 changes: .github/workflows/backport_branches.yml (vendored)
@@ -3,9 +3,6 @@ name: BackportPR
 env:
   # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1
-  # Export system tables to ClickHouse Cloud
-  CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
-  CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}

 on: # yamllint disable-line rule:truthy
   push:
3 changes: .github/workflows/master.yml (vendored)
@@ -3,9 +3,6 @@ name: MasterCI
 env:
   # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1
-  # Export system tables to ClickHouse Cloud
-  CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
-  CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}

 on: # yamllint disable-line rule:truthy
   push:
3 changes: .github/workflows/pull_request.yml (vendored)
@@ -3,9 +3,6 @@ name: PullRequestCI
 env:
   # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1
-  # Export system tables to ClickHouse Cloud
-  CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
-  CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}

 on: # yamllint disable-line rule:truthy
   pull_request:
3 changes: .github/workflows/release_branches.yml (vendored)
@@ -3,9 +3,6 @@ name: ReleaseBranchCI
 env:
   # Force the stdout and stderr streams to be unbuffered
   PYTHONUNBUFFERED: 1
-  # Export system tables to ClickHouse Cloud
-  CLICKHOUSE_CI_LOGS_HOST: ${{ secrets.CLICKHOUSE_CI_LOGS_HOST }}
-  CLICKHOUSE_CI_LOGS_PASSWORD: ${{ secrets.CLICKHOUSE_CI_LOGS_PASSWORD }}

 on: # yamllint disable-line rule:truthy
   push:
25 changes: .gitmodules (vendored)
@@ -3,7 +3,7 @@
 	url = https://github.com/facebook/zstd
 [submodule "contrib/lz4"]
 	path = contrib/lz4
-	url = https://github.com/lz4/lz4
+	url = https://github.com/ClickHouse/lz4
 [submodule "contrib/librdkafka"]
 	path = contrib/librdkafka
 	url = https://github.com/ClickHouse/librdkafka
@@ -13,7 +13,6 @@
 [submodule "contrib/zlib-ng"]
 	path = contrib/zlib-ng
 	url = https://github.com/ClickHouse/zlib-ng
-	branch = clickhouse-2.0.x
 [submodule "contrib/googletest"]
 	path = contrib/googletest
 	url = https://github.com/google/googletest
@@ -47,7 +46,6 @@
 [submodule "contrib/arrow"]
 	path = contrib/arrow
 	url = https://github.com/ClickHouse/arrow
-	branch = blessed/release-6.0.1
 [submodule "contrib/thrift"]
 	path = contrib/thrift
 	url = https://github.com/apache/thrift
@@ -93,7 +91,6 @@
 [submodule "contrib/grpc"]
 	path = contrib/grpc
 	url = https://github.com/ClickHouse/grpc
-	branch = v1.33.2
 [submodule "contrib/aws"]
 	path = contrib/aws
 	url = https://github.com/ClickHouse/aws-sdk-cpp
@@ -140,11 +137,9 @@
 [submodule "contrib/cassandra"]
 	path = contrib/cassandra
 	url = https://github.com/ClickHouse/cpp-driver
-	branch = clickhouse
 [submodule "contrib/libuv"]
 	path = contrib/libuv
 	url = https://github.com/ClickHouse/libuv
-	branch = clickhouse
 [submodule "contrib/fmtlib"]
 	path = contrib/fmtlib
 	url = https://github.com/fmtlib/fmt
@@ -157,11 +152,9 @@
 [submodule "contrib/cyrus-sasl"]
 	path = contrib/cyrus-sasl
 	url = https://github.com/ClickHouse/cyrus-sasl
-	branch = cyrus-sasl-2.1
 [submodule "contrib/croaring"]
 	path = contrib/croaring
 	url = https://github.com/RoaringBitmap/CRoaring
-	branch = v0.2.66
 [submodule "contrib/miniselect"]
 	path = contrib/miniselect
 	url = https://github.com/danlark1/miniselect
@@ -174,7 +167,6 @@
 [submodule "contrib/abseil-cpp"]
 	path = contrib/abseil-cpp
 	url = https://github.com/abseil/abseil-cpp
-	branch = lts_2021_11_02
 [submodule "contrib/dragonbox"]
 	path = contrib/dragonbox
 	url = https://github.com/ClickHouse/dragonbox
@@ -187,7 +179,6 @@
 [submodule "contrib/boringssl"]
 	path = contrib/boringssl
 	url = https://github.com/ClickHouse/boringssl
-	branch = unknown_branch_from_artur
 [submodule "contrib/NuRaft"]
 	path = contrib/NuRaft
 	url = https://github.com/ClickHouse/NuRaft
@@ -248,7 +239,6 @@
 [submodule "contrib/annoy"]
 	path = contrib/annoy
 	url = https://github.com/ClickHouse/annoy
-	branch = ClickHouse-master
 [submodule "contrib/qpl"]
 	path = contrib/qpl
 	url = https://github.com/intel/qpl
@@ -282,7 +272,6 @@
 [submodule "contrib/openssl"]
 	path = contrib/openssl
 	url = https://github.com/openssl/openssl
-	branch = openssl-3.0
 [submodule "contrib/google-benchmark"]
 	path = contrib/google-benchmark
 	url = https://github.com/google/benchmark
@@ -347,3 +336,15 @@
 [submodule "contrib/incbin"]
 	path = contrib/incbin
 	url = https://github.com/graphitemaster/incbin.git
+[submodule "contrib/usearch"]
+	path = contrib/usearch
+	url = https://github.com/unum-cloud/usearch.git
+[submodule "contrib/SimSIMD"]
+	path = contrib/SimSIMD
+	url = https://github.com/ashvardanian/SimSIMD.git
+[submodule "contrib/FP16"]
+	path = contrib/FP16
+	url = https://github.com/Maratyszcza/FP16.git
+[submodule "contrib/robin-map"]
+	path = contrib/robin-map
+	url = https://github.com/Tessil/robin-map.git
223 changes: CHANGELOG.md
@@ -1,4 +1,5 @@
 ### Table of Contents
+**[ClickHouse release v23.8 LTS, 2023-08-31](#238)**<br/>
 **[ClickHouse release v23.7, 2023-07-27](#237)**<br/>
 **[ClickHouse release v23.6, 2023-06-30](#236)**<br/>
 **[ClickHouse release v23.5, 2023-06-08](#235)**<br/>
@@ -10,6 +11,228 @@

# 2023 Changelog

### <a id="238"></a> ClickHouse release 23.8 LTS, 2023-08-31

#### Backward Incompatible Change
* If a dynamic disk contains a name, it should be specified as `disk = disk(name = 'disk_name', ...)` in disk function arguments. In previous versions it could be specified as `disk = disk_<disk_name>(...)`, which is no longer supported (see the sketch after this list). [#52820](https://github.com/ClickHouse/ClickHouse/pull/52820) ([Kseniia Sumarokova](https://github.com/kssenii)).
* `clickhouse-benchmark` will establish connections in parallel when invoked with `--concurrency` greater than one. Previously it was unusable if you ran it with 1000 concurrent connections from Europe to the US. Correct calculation of QPS for connections with high latency. Backward incompatible change: the option for JSON output of `clickhouse-benchmark` is removed. If you've used this option, you can also extract data from the `system.query_log` in JSON format as a workaround. [#53293](https://github.com/ClickHouse/ClickHouse/pull/53293) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The `microseconds` column is removed from the `system.text_log`, and the `milliseconds` column is removed from the `system.metric_log`, because they are redundant in the presence of the `event_time_microseconds` column. [#53601](https://github.com/ClickHouse/ClickHouse/pull/53601) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Deprecate the metadata cache feature. It is experimental and we have never used it. The feature is dangerous: [#51182](https://github.com/ClickHouse/ClickHouse/issues/51182). Remove the `system.merge_tree_metadata_cache` system table. The metadata cache is still available in this version but will be removed soon. This closes [#39197](https://github.com/ClickHouse/ClickHouse/issues/39197). [#51303](https://github.com/ClickHouse/ClickHouse/pull/51303) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable support for 3DES in TLS connections. [#52893](https://github.com/ClickHouse/ClickHouse/pull/52893) ([Kenji Noguchi](https://github.com/knoguchi)).
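A minimal hedged sketch of the new named-disk syntax from the first entry above; the disk name, type, and path are invented for illustration and are not taken from the PR:

```sql
-- Hypothetical named dynamic disk (name/type/path are placeholders):
CREATE TABLE t (x UInt64)
ENGINE = MergeTree
ORDER BY x
SETTINGS disk = disk(name = 'data_disk', type = 'local', path = '/var/lib/clickhouse/disks/data_disk/');
```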
#### New Feature
* Direct import from zip/7z/tar archives. Example: `file('*.zip :: *.csv')` (example after this list). [#50321](https://github.com/ClickHouse/ClickHouse/pull/50321) ([nikitakeba](https://github.com/nikitakeba)).
* Add column `ptr` to `system.trace_log` for `trace_type = 'MemorySample'`. This column contains the address of the allocation. Added function `flameGraph` which can build a flamegraph containing allocated and not-released memory. Reworking of [#38391](https://github.com/ClickHouse/ClickHouse/issues/38391). [#45322](https://github.com/ClickHouse/ClickHouse/pull/45322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Added table function `azureBlobStorageCluster`. The supported set of features is very similar to table function `s3Cluster`. [#50795](https://github.com/ClickHouse/ClickHouse/pull/50795) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Allow using `cluster`, `clusterAllReplicas`, `remote`, and `remoteSecure` without a table name. Closes [#50808](https://github.com/ClickHouse/ClickHouse/issues/50808). [#50848](https://github.com/ClickHouse/ClickHouse/pull/50848) ([Yangkuan Liu](https://github.com/LiuYangkuan)).
* Added a system table to monitor Kafka consumers. [#50999](https://github.com/ClickHouse/ClickHouse/pull/50999) ([Ilya Golshtein](https://github.com/ilejn)).
* Added `max_sessions_for_user` setting. [#51724](https://github.com/ClickHouse/ClickHouse/pull/51724) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* New functions `toUTCTimestamp`/`fromUTCTimestamp` that act the same as Spark's `to_utc_timestamp`/`from_utc_timestamp` (example after this list). [#52117](https://github.com/ClickHouse/ClickHouse/pull/52117) ([KevinyhZou](https://github.com/KevinyhZou)).
* Add new functions `structureToCapnProtoSchema`/`structureToProtobufSchema` that convert a ClickHouse table structure to a CapnProto/Protobuf format schema. Allow input/output of data in CapnProto/Protobuf format without an external format schema, using an autogenerated schema from the table structure (controlled by settings `format_capn_proto_use_autogenerated_schema`/`format_protobuf_use_autogenerated_schema`). Allow exporting the autogenerated schema during input/output using the setting `output_format_schema`. [#52278](https://github.com/ClickHouse/ClickHouse/pull/52278) ([Kruglov Pavel](https://github.com/Avogar)).
* A new field `query_cache_usage` in `system.query_log` now shows if and how the query cache was used. [#52384](https://github.com/ClickHouse/ClickHouse/pull/52384) ([Robert Schulze](https://github.com/rschu1ze)).
* Add new functions `startsWithUTF8` and `endsWithUTF8` (example after this list). [#52555](https://github.com/ClickHouse/ClickHouse/pull/52555) ([李扬](https://github.com/taiyang-li)).
* Allow a variable number of columns in TSV/CustomSeparated/JSONCompactEachRow, and make schema inference work with a variable number of columns. Add settings `input_format_tsv_allow_variable_number_of_columns`, `input_format_custom_allow_variable_number_of_columns`, `input_format_json_compact_allow_variable_number_of_columns`. [#52692](https://github.com/ClickHouse/ClickHouse/pull/52692) ([Kruglov Pavel](https://github.com/Avogar)).
* Added `SYSTEM STOP/START PULLING REPLICATION LOG` queries (for testing `ReplicatedMergeTree`). [#52881](https://github.com/ClickHouse/ClickHouse/pull/52881) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Allow executing constant non-deterministic functions in mutations on the initiator. [#53129](https://github.com/ClickHouse/ClickHouse/pull/53129) ([Anton Popov](https://github.com/CurtizJ)).
* Add input format `One` that doesn't read any data and always returns a single row with column `dummy` of type `UInt8` and value `0`, like `system.one`. It can be used together with the `_file`/`_path` virtual columns to list files in file/s3/url/hdfs/etc table functions without reading any data (example after this list). [#53209](https://github.com/ClickHouse/ClickHouse/pull/53209) ([Kruglov Pavel](https://github.com/Avogar)).
* Add `tupleConcat` function (example after this list). Closes [#52759](https://github.com/ClickHouse/ClickHouse/issues/52759). [#53239](https://github.com/ClickHouse/ClickHouse/pull/53239) ([Nikolay Degterinsky](https://github.com/evillique)).
* Support `TRUNCATE DATABASE` operation. [#53261](https://github.com/ClickHouse/ClickHouse/pull/53261) ([Bharat Nallan](https://github.com/bharatnc)).
* Add `max_threads_for_indexes` setting to limit the number of threads used for primary key processing. [#53313](https://github.com/ClickHouse/ClickHouse/pull/53313) ([jorisgio](https://github.com/jorisgio)).
* Re-add SipHash keyed functions. [#53525](https://github.com/ClickHouse/ClickHouse/pull/53525) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Added functions `arrayRotateLeft`, `arrayRotateRight`, `arrayShiftLeft`, `arrayShiftRight` (example after this list). ([#52755](https://github.com/ClickHouse/ClickHouse/issues/52755), [#52895](https://github.com/ClickHouse/ClickHouse/issues/52895)). [#53557](https://github.com/ClickHouse/ClickHouse/pull/53557) ([Mikhail Koviazin](https://github.com/mkmkme)).
* Add column `name` to `system.clusters` as an alias to `cluster`. [#53605](https://github.com/ClickHouse/ClickHouse/pull/53605) ([irenjj](https://github.com/irenjj)).
* The advanced dashboard now allows mass editing (save/load). [#53608](https://github.com/ClickHouse/ClickHouse/pull/53608) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The advanced dashboard now has an option to maximize charts and move them around. [#53622](https://github.com/ClickHouse/ClickHouse/pull/53622) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add support for plural units (for example, `5 minutes` in addition to `5 minute`). [#53641](https://github.com/ClickHouse/ClickHouse/pull/53641) ([irenjj](https://github.com/irenjj)).
* Added server setting `validate_tcp_client_information` that determines whether validation of client information is enabled when a query packet is received. [#53907](https://github.com/ClickHouse/ClickHouse/pull/53907) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Added support for adding and subtracting arrays: `[5,2] + [1,7]` (example after this list). Division and multiplication were not implemented due to confusion between pointwise multiplication and the scalar product of arguments. Closes [#49939](https://github.com/ClickHouse/ClickHouse/issues/49939). [#52625](https://github.com/ClickHouse/ClickHouse/pull/52625) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add support for string literals as table names (example after this list). Closes [#52178](https://github.com/ClickHouse/ClickHouse/issues/52178). [#52635](https://github.com/ClickHouse/ClickHouse/pull/52635) ([hendrik-m](https://github.com/hendrik-m)).
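A few hedged sketches of the new features above. File, table, and column names are illustrative placeholders, and the results in comments follow from the documented semantics rather than captured output:

```sql
-- Direct import from an archive (assumes a local archive.zip containing CSV files):
SELECT * FROM file('archive.zip :: *.csv');

-- Spark-style UTC conversions:
SELECT toUTCTimestamp(toDateTime('2023-08-31 12:00:00'), 'Asia/Shanghai');

-- UTF-8-aware prefix/suffix checks:
SELECT startsWithUTF8('中国', '中'), endsWithUTF8('中国', '国');  -- 1, 1

-- List matching files without reading them, via the One input format:
SELECT _path, _file FROM file('data/*.csv', One);

-- Tuple concatenation:
SELECT tupleConcat((1, 'a'), (2, 'b'));  -- (1,'a',2,'b')

-- Array rotation and shifting (the third argument of arrayShiftLeft fills vacated slots):
SELECT arrayRotateLeft([1, 2, 3, 4], 1), arrayShiftLeft([1, 2, 3, 4], 1, 0);  -- [2,3,4,1], [2,3,4,0]

-- Element-wise array addition and subtraction:
SELECT [5, 2] + [1, 7], [5, 2] - [1, 7];  -- [6,9], [4,-5]

-- A string literal as a table name:
SELECT * FROM 'my_table';
```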
#### Experimental Feature
* Add new table engine `S3Queue` for streaming data import from S3. Closes [#37012](https://github.com/ClickHouse/ClickHouse/issues/37012). [#49086](https://github.com/ClickHouse/ClickHouse/pull/49086) ([s-kat](https://github.com/s-kat)). It is not ready to use. Do not use it.
* Enable parallel reading from replicas over a distributed table. Related to [#49708](https://github.com/ClickHouse/ClickHouse/issues/49708). [#53005](https://github.com/ClickHouse/ClickHouse/pull/53005) ([Igor Nikonov](https://github.com/devcrafter)).
* Add experimental support for HNSW as an approximate nearest neighbor search method. [#53447](https://github.com/ClickHouse/ClickHouse/pull/53447) ([Davit Vardanyan](https://github.com/davvard)). This is currently intended for those who continue working on the implementation. Do not use it.
#### Performance Improvement
* Parquet filter pushdown. I.e. when reading Parquet files, row groups (chunks of the file) are skipped based on the WHERE condition and the min/max values in each column. In particular, if the file is roughly sorted by some column, queries that filter by a short range of that column will be much faster. [#52951](https://github.com/ClickHouse/ClickHouse/pull/52951) ([Michael Kolupaev](https://github.com/al13n321)).
* Optimize reading small row groups by batching them together in Parquet. Closes [#53069](https://github.com/ClickHouse/ClickHouse/issues/53069). [#53281](https://github.com/ClickHouse/ClickHouse/pull/53281) ([Kruglov Pavel](https://github.com/Avogar)).
* Optimize count from files in most input formats. Closes [#44334](https://github.com/ClickHouse/ClickHouse/issues/44334). [#53637](https://github.com/ClickHouse/ClickHouse/pull/53637) ([Kruglov Pavel](https://github.com/Avogar)).
* Use a filter by file/path before reading in `url`/`file`/`hdfs` table functions. [#53529](https://github.com/ClickHouse/ClickHouse/pull/53529) ([Kruglov Pavel](https://github.com/Avogar)).
* Enable JIT compilation for AArch64, PowerPC, SystemZ, RISC-V. [#38217](https://github.com/ClickHouse/ClickHouse/pull/38217) ([Maksim Kita](https://github.com/kitaisreal)).
* Add setting `rewrite_count_distinct_if_with_count_distinct_implementation` to rewrite `countDistinctIf` with `count_distinct_implementation` (example after this list). Closes [#30642](https://github.com/ClickHouse/ClickHouse/issues/30642). [#46051](https://github.com/ClickHouse/ClickHouse/pull/46051) ([flynn](https://github.com/ucasfl)).
* Provide a method to deal with all the hash sets in parallel before the merge stage of aggregation. [#50748](https://github.com/ClickHouse/ClickHouse/pull/50748) ([Jiebin Sun](https://github.com/jiebinn)).
* Optimize aggregation performance of nullable string key when using a large number of variable length keys. [#51399](https://github.com/ClickHouse/ClickHouse/pull/51399) ([LiuNeng](https://github.com/liuneng1994)).
* Add a pass in Analyzer for time filter optimization with preimage. The performance experiments of SSB on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) show that this change could bring an improvement of 8.5% to the geomean QPS when the experimental analyzer is enabled. [#52091](https://github.com/ClickHouse/ClickHouse/pull/52091) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Optimize the merge if all hash sets are single-level in the `uniqExact` (COUNT DISTINCT) function. [#52973](https://github.com/ClickHouse/ClickHouse/pull/52973) ([Jiebin Sun](https://github.com/jiebinn)).
* `Join` table engine: do not clone hash join data structure with all columns. [#53046](https://github.com/ClickHouse/ClickHouse/pull/53046) ([Duc Canh Le](https://github.com/canhld94)).
* Implement a native `ORC` input format without the Apache Arrow library to improve performance. [#53324](https://github.com/ClickHouse/ClickHouse/pull/53324) ([李扬](https://github.com/taiyang-li)).
* The dashboard will tell the server to compress the data, which is useful for large time frames over slow internet connections. For example, one chart with 86400 points can be 1.5 MB uncompressed and 60 KB compressed with `br`. [#53569](https://github.com/ClickHouse/ClickHouse/pull/53569) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Better utilization of thread pool for BACKUPs and RESTOREs. [#53649](https://github.com/ClickHouse/ClickHouse/pull/53649) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Load filesystem cache metadata on startup in parallel. Configured by the `load_metadata_threads` (default: 1) cache config setting. Related to [#52037](https://github.com/ClickHouse/ClickHouse/issues/52037). [#52943](https://github.com/ClickHouse/ClickHouse/pull/52943) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Improve `move_primary_key_columns_to_end_of_prewhere`. [#53337](https://github.com/ClickHouse/ClickHouse/pull/53337) ([Han Fei](https://github.com/hanfei1991)).
* This optimizes the interaction with ClickHouse Keeper. Previously the caller could register the same watch callback multiple times. In that case each entry consumed memory and the same callback was called multiple times, which didn't make much sense. To avoid this, the caller could have some logic to not add the same watch multiple times. With this change, the deduplication is done internally if the watch callback is passed via `shared_ptr`. [#53452](https://github.com/ClickHouse/ClickHouse/pull/53452) ([Alexander Gololobov](https://github.com/davenger)).
* Cache the number of rows in files for count in file/s3/url/hdfs/azure functions. The cache can be enabled/disabled by the setting `use_cache_for_count_from_files` (enabled by default). Continuation of https://github.com/ClickHouse/ClickHouse/pull/53637. [#53692](https://github.com/ClickHouse/ClickHouse/pull/53692) ([Kruglov Pavel](https://github.com/Avogar)).
* More careful thread management will improve the speed of the S3 table function over a large number of files by more than ~25%. [#53668](https://github.com/ClickHouse/ClickHouse/pull/53668) ([pufit](https://github.com/pufit)).
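A hedged sketch of the `countDistinctIf` rewrite above; the table `events` and its columns are placeholders:

```sql
SET count_distinct_implementation = 'uniqCombined';
SET rewrite_count_distinct_if_with_count_distinct_implementation = 1;

-- With the settings above, the following is rewritten internally to
-- uniqCombinedIf(user_id, event = 'click'):
SELECT countDistinctIf(user_id, event = 'click') FROM events;
```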
#### Improvement
* Add `stderr_reaction` configuration/setting to control the reaction (none, log or throw) when an external command's stderr has data. This helps make debugging external commands easier. [#43210](https://github.com/ClickHouse/ClickHouse/pull/43210) ([Amos Bird](https://github.com/amosbird)).
* Add `partition` column to the `system.part_log` table and the merge table. [#48990](https://github.com/ClickHouse/ClickHouse/pull/48990) ([Jianfei Hu](https://github.com/incfly)).
* The sizes of the (index) uncompressed/mark, mmap and query caches can now be configured dynamically at runtime (without server restart). [#51446](https://github.com/ClickHouse/ClickHouse/pull/51446) ([Robert Schulze](https://github.com/rschu1ze)).
* If a dictionary is created with a complex key, automatically choose the "complex key" layout variant. [#49587](https://github.com/ClickHouse/ClickHouse/pull/49587) ([xiebin](https://github.com/xbthink)).
* Add setting `use_concurrency_control` for better testing of the new concurrency control feature. [#49618](https://github.com/ClickHouse/ClickHouse/pull/49618) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added suggestions for mistyped names for databases and tables. [#49801](https://github.com/ClickHouse/ClickHouse/pull/49801) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Reading small files from HDFS via Gluten took more time than querying them directly with Spark; this has been improved. [#50063](https://github.com/ClickHouse/ClickHouse/pull/50063) ([KevinyhZou](https://github.com/KevinyhZou)).
* There were too many worthless error logs after session expiration; these have been cleaned up. [#50171](https://github.com/ClickHouse/ClickHouse/pull/50171) ([helifu](https://github.com/helifu)).
* Introduce fallback ZooKeeper sessions which are time-bound. Fixed the `index` column in `system.zookeeper_connection` for DNS addresses. [#50424](https://github.com/ClickHouse/ClickHouse/pull/50424) ([Anton Kozlov](https://github.com/tonickkozlov)).
* Add the ability to log when `max_partitions_per_insert_block` is reached. [#50948](https://github.com/ClickHouse/ClickHouse/pull/50948) ([Sean Haynes](https://github.com/seandhaynes)).
* Added a bunch of custom commands to clickhouse-keeper-client (mostly to make ClickHouse debugging easier). [#51117](https://github.com/ClickHouse/ClickHouse/pull/51117) ([pufit](https://github.com/pufit)).
* Updated the check for the connection string in the `azureBlobStorage` table function, as a connection string with "sas" does not always begin with the default endpoint; updated the connection URL to include the "sas" token after adding Azure's container to the URL. [#51141](https://github.com/ClickHouse/ClickHouse/pull/51141) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix the description for filtering sets in the `full_sorting_merge` JOIN algorithm. [#51329](https://github.com/ClickHouse/ClickHouse/pull/51329) ([Tanay Tummalapalli](https://github.com/ttanay)).
* Fixed memory consumption in `Aggregator` when `max_block_size` is huge. [#51566](https://github.com/ClickHouse/ClickHouse/pull/51566) ([Nikita Taranov](https://github.com/nickitat)).
* Add `SYSTEM SYNC FILESYSTEM CACHE` command. It will compare the in-memory state of the filesystem cache with what it has on disk and fix the in-memory state if needed. This is only needed if you are making manual interventions in on-disk data, which is highly discouraged. [#51622](https://github.com/ClickHouse/ClickHouse/pull/51622) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Attempt to create a generic proxy resolver for ClickHouse while keeping backwards compatibility with the existing S3 storage configuration proxy resolver. [#51749](https://github.com/ClickHouse/ClickHouse/pull/51749) ([Arthur Passos](https://github.com/arthurpassos)).
* Support reading tuple subcolumns from file/s3/hdfs/url/azureBlobStorage table functions. [#51806](https://github.com/ClickHouse/ClickHouse/pull/51806) ([Kruglov Pavel](https://github.com/Avogar)).
* Function `arrayIntersect` now returns the values in the order corresponding to the first argument. Closes [#27622](https://github.com/ClickHouse/ClickHouse/issues/27622). [#51850](https://github.com/ClickHouse/ClickHouse/pull/51850) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Add new queries which allow creating/dropping access entities in a specified access storage, or moving access entities from one access storage to another. [#51912](https://github.com/ClickHouse/ClickHouse/pull/51912) ([pufit](https://github.com/pufit)).
* Make `ALTER TABLE FREEZE` queries not replicated in the Replicated database engine. [#52064](https://github.com/ClickHouse/ClickHouse/pull/52064) ([Mike Kot](https://github.com/myrrc)).
* Added possibility to flush system tables on unexpected shutdown. [#52174](https://github.com/ClickHouse/ClickHouse/pull/52174) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Fix the case when the `s3` table function refused to work with pre-signed URLs. Closes [#50846](https://github.com/ClickHouse/ClickHouse/issues/50846). [#52310](https://github.com/ClickHouse/ClickHouse/pull/52310) ([chen](https://github.com/xiedeyantu)).
* Add column `name` as an alias to `event` and `metric` in the `system.events` and `system.metrics` tables. Closes [#51257](https://github.com/ClickHouse/ClickHouse/issues/51257). [#52315](https://github.com/ClickHouse/ClickHouse/pull/52315) ([chen](https://github.com/xiedeyantu)).
* Added support for the syntax `CREATE UNIQUE INDEX` in the parser as a no-op for better SQL compatibility. `UNIQUE` index is not supported. Set `create_index_ignore_unique = 1` to ignore the UNIQUE keyword in queries (example after this list). [#52320](https://github.com/ClickHouse/ClickHouse/pull/52320) ([Ilya Yatsishin](https://github.com/qoega)).
* Add support for predefined macros (`{database}` and `{table}`) in some Kafka engine settings: topic, consumer, client_id, etc. [#52386](https://github.com/ClickHouse/ClickHouse/pull/52386) ([Yury Bogomolov](https://github.com/ybogo)).
* Disable updating the filesystem cache during backup/restore. The filesystem cache must not be updated during backup/restore; it seems it just slows down the process without any profit (because the BACKUP command can read a lot of data, and it's no use putting all that data into the filesystem cache just to evict it immediately). [#52402](https://github.com/ClickHouse/ClickHouse/pull/52402) ([Vitaly Baranov](https://github.com/vitlibar)).
* The S3 endpoint configuration now allows using it from the root, and '/' is appended automatically if needed. [#47809](https://github.com/ClickHouse/ClickHouse/issues/47809). [#52600](https://github.com/ClickHouse/ClickHouse/pull/52600) ([xiaolei565](https://github.com/xiaolei565)).
* For clickhouse-local, allow positional options and populate the global UDF settings (`user_scripts_path` and `user_defined_executable_functions_config`). [#52643](https://github.com/ClickHouse/ClickHouse/pull/52643) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* `system.asynchronous_metrics` now includes metrics "QueryCacheEntries" and "QueryCacheBytes" to inspect the query cache. [#52650](https://github.com/ClickHouse/ClickHouse/pull/52650) ([Robert Schulze](https://github.com/rschu1ze)).
* Added possibility to use the `s3_storage_class` parameter in the `SETTINGS` clause of the `BACKUP` statement for backups to S3. [#52658](https://github.com/ClickHouse/ClickHouse/pull/52658) ([Roman Vasin](https://github.com/rvasin)).
* Add utility `print-backup-info.py` which parses a backup metadata file and prints information about the backup. [#52690](https://github.com/ClickHouse/ClickHouse/pull/52690) ([Vitaly Baranov](https://github.com/vitlibar)).
* Closes [#49510](https://github.com/ClickHouse/ClickHouse/issues/49510). Currently we have database and table names case-sensitive, but BI tools query `information_schema` sometimes in lowercase, sometimes in uppercase. For this reason we have `information_schema` database, containing lowercase tables, such as `information_schema.tables` and `INFORMATION_SCHEMA` database, containing uppercase tables, such as `INFORMATION_SCHEMA.TABLES`. But some tools are querying `INFORMATION_SCHEMA.tables` and `information_schema.TABLES`. The proposed solution is to duplicate both lowercase and uppercase tables in lowercase and uppercase `information_schema` database. [#52695](https://github.com/ClickHouse/ClickHouse/pull/52695) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* The query `CHECK TABLE` has better performance and usability (sends progress updates, is cancellable). [#52745](https://github.com/ClickHouse/ClickHouse/pull/52745) ([vdimir](https://github.com/vdimir)).
* Add support for `modulo`, `intDiv`, `intDivOrZero` for tuples by distributing them across the tuple's elements (example after this list). [#52758](https://github.com/ClickHouse/ClickHouse/pull/52758) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Search for default `yaml` and `yml` configs in clickhouse-client after `xml`. [#52767](https://github.com/ClickHouse/ClickHouse/pull/52767) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* When merging into a configuration not rooted at 'clickhouse', configs with a different root node name are now bypassed without an exception. [#52770](https://github.com/ClickHouse/ClickHouse/pull/52770) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Now it's possible to specify the min (`memory_profiler_sample_min_allocation_size`) and max (`memory_profiler_sample_max_allocation_size`) size of allocations to be tracked with the sampling memory profiler. [#52779](https://github.com/ClickHouse/ClickHouse/pull/52779) ([alesapin](https://github.com/alesapin)).
* Add `precise_float_parsing` setting to switch float parsing methods (fast/precise). [#52791](https://github.com/ClickHouse/ClickHouse/pull/52791) ([Andrey Zvonov](https://github.com/zvonand)).
* Use the same default paths for `clickhouse-keeper` (symlink) as for `clickhouse-keeper` (executable). [#52861](https://github.com/ClickHouse/ClickHouse/pull/52861) ([Vitaly Baranov](https://github.com/vitlibar)).
* Improve the error message for table function `remote`. Closes [#40220](https://github.com/ClickHouse/ClickHouse/issues/40220). [#52959](https://github.com/ClickHouse/ClickHouse/pull/52959) ([jiyoungyoooo](https://github.com/jiyoungyoooo)).
* Added the possibility to specify a custom storage policy in the `SETTINGS` clause of `RESTORE` queries. [#52970](https://github.com/ClickHouse/ClickHouse/pull/52970) ([Victor Krasnov](https://github.com/sirvickr)).
* Add the ability to throttle the S3 requests on backup operations (`BACKUP` and `RESTORE` commands now honor `s3_max_[get/put]_[rps/burst]`). [#52974](https://github.com/ClickHouse/ClickHouse/pull/52974) ([Daniel Pozo Escalona](https://github.com/danipozo)).
* Add settings to ignore the ON CLUSTER clause in queries for management of replicated user-defined functions or access control entities with replicated storage. [#52975](https://github.com/ClickHouse/ClickHouse/pull/52975) ([Aleksei Filatov](https://github.com/aalexfvk)).
* EXPLAIN actions for the JOIN step. [#53006](https://github.com/ClickHouse/ClickHouse/pull/53006) ([Maksim Kita](https://github.com/kitaisreal)).
* Make `hasTokenOrNull` and `hasTokenCaseInsensitiveOrNull` return null for empty needles. [#53059](https://github.com/ClickHouse/ClickHouse/pull/53059) ([ltrk2](https://github.com/ltrk2)).
* Allow restricting allowed paths for filesystem caches. Mainly useful for dynamic disks. If `filesystem_caches_path` is specified in the server config, all filesystem caches' paths will be restricted to this directory. E.g. if the `path` in the cache config is relative, it will be put in `filesystem_caches_path`; if the `path` in the cache config is absolute, it will be required to lie inside `filesystem_caches_path`. If `filesystem_caches_path` is not specified in the config, then the behaviour will be the same as in earlier versions. [#53124](https://github.com/ClickHouse/ClickHouse/pull/53124) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added a bunch of custom commands (mostly to make ClickHouse debugging easier). [#53127](https://github.com/ClickHouse/ClickHouse/pull/53127) ([pufit](https://github.com/pufit)).
* Add diagnostic info about the file name during schema inference; it helps when you process multiple files with globs. [#53135](https://github.com/ClickHouse/ClickHouse/pull/53135) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The client will load suggestions using the main connection if the second connection is not allowed to create a session. [#53177](https://github.com/ClickHouse/ClickHouse/pull/53177) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Add an EXCEPT clause to the `SYSTEM STOP/START LISTEN QUERIES [ALL/DEFAULT/CUSTOM]` query, for example `SYSTEM STOP LISTEN QUERIES ALL EXCEPT TCP, HTTP`. [#53280](https://github.com/ClickHouse/ClickHouse/pull/53280) ([Nikolay Degterinsky](https://github.com/evillique)).
* Change the default of `max_concurrent_queries` from 100 to 1000. It's ok to have many concurrent queries if they are not heavy, and mostly waiting for the network. Note: don't confuse concurrent queries and QPS: for example, ClickHouse server can do tens of thousands of QPS with less than 100 concurrent queries. [#53285](https://github.com/ClickHouse/ClickHouse/pull/53285) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Limit the number of concurrent background partition optimize merges. [#53405](https://github.com/ClickHouse/ClickHouse/pull/53405) ([Duc Canh Le](https://github.com/canhld94)).
* Added a setting `allow_moving_table_directory_to_trash` that allows ignoring the `Directory for table data already exists` error when replicating/recovering a `Replicated` database. [#53425](https://github.com/ClickHouse/ClickHouse/pull/53425) ([Alexander Tokmakov](https://github.com/tavplubix)).
* If server settings `asynchronous_metrics_update_period_s` and `asynchronous_heavy_metrics_update_period_s` are misconfigured to 0, it will now fail gracefully instead of terminating the application. [#53428](https://github.com/ClickHouse/ClickHouse/pull/53428) ([Robert Schulze](https://github.com/rschu1ze)).
* The ClickHouse server now respects memory limits changed via cgroups when reloading its configuration. [#53455](https://github.com/ClickHouse/ClickHouse/pull/53455) ([Robert Schulze](https://github.com/rschu1ze)).
* Add the ability to turn off the flush of Distributed tables on `DETACH`, `DROP`, or server shutdown. [#53501](https://github.com/ClickHouse/ClickHouse/pull/53501) ([Azat Khuzhin](https://github.com/azat)).
* The `domainRFC` function now supports IPv6 in square brackets. [#53506](https://github.com/ClickHouse/ClickHouse/pull/53506) ([Chen768959](https://github.com/Chen768959)).
* Use a longer timeout for S3 CopyObject requests, which are used in backups. [#53533](https://github.com/ClickHouse/ClickHouse/pull/53533) ([Michael Kolupaev](https://github.com/al13n321)).
* Added server setting `aggregate_function_group_array_max_element_size`. This setting is used to limit the array size for the `groupArray` function at serialization. The default value is `16777215`. [#53550](https://github.com/ClickHouse/ClickHouse/pull/53550) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* `SCHEMA()` was added as an alias for `DATABASE()` to improve MySQL compatibility (example after this list). [#53587](https://github.com/ClickHouse/ClickHouse/pull/53587) ([Daniël van Eeden](https://github.com/dveeden)).
* Add asynchronous metrics about tables in the system database. For example, `TotalBytesOfMergeTreeTablesSystem`. This closes [#53603](https://github.com/ClickHouse/ClickHouse/issues/53603). [#53604](https://github.com/ClickHouse/ClickHouse/pull/53604) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The SQL editor in the Play UI and Dashboard will not use Grammarly. [#53614](https://github.com/ClickHouse/ClickHouse/pull/53614) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* As expert-level settings, it is now possible to (1) configure the `size_ratio` (i.e. the relative size of the protected queue) of the [index] mark/uncompressed caches, (2) configure the cache policy of the index mark and index uncompressed caches. [#53657](https://github.com/ClickHouse/ClickHouse/pull/53657) ([Robert Schulze](https://github.com/rschu1ze)).
* Added client info validation to the query packet in TCPHandler. [#53673](https://github.com/ClickHouse/ClickHouse/pull/53673) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Retry loading parts in case of network errors while interacting with Microsoft Azure. [#53750](https://github.com/ClickHouse/ClickHouse/pull/53750) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Stack traces for exceptions are propagated, including materialized view exceptions. [#53766](https://github.com/ClickHouse/ClickHouse/pull/53766) ([Ilya Golshtein](https://github.com/ilejn)).
* If no hostname or port were specified, the keeper client will try to search for a connection string in ClickHouse's config.xml. [#53769](https://github.com/ClickHouse/ClickHouse/pull/53769) ([pufit](https://github.com/pufit)).
* Add profile event `PartsLockMicroseconds` which shows the number of microseconds we hold the data parts lock in the MergeTree table engine family. [#53797](https://github.com/ClickHouse/ClickHouse/pull/53797) ([alesapin](https://github.com/alesapin)).
* Make the reconnect limit in Raft configurable for Keeper. This configuration can help make Keeper rebuild connections with peers quicker if the current connection is broken. [#53817](https://github.com/ClickHouse/ClickHouse/pull/53817) ([Pengyuan Bian](https://github.com/bianpengyuan)).
* Ignore foreign keys in table definitions to improve compatibility with MySQL, so a user wouldn't need to rewrite the foreign-key part of their SQL; ref [#53380](https://github.com/ClickHouse/ClickHouse/issues/53380). [#53864](https://github.com/ClickHouse/ClickHouse/pull/53864) ([jsc0218](https://github.com/jsc0218)).
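Hedged sketches for three of the improvements above; `t`, `x`, and `idx` are placeholders:

```sql
-- CREATE UNIQUE INDEX accepted as a no-op for SQL compatibility:
SET create_index_ignore_unique = 1;
CREATE UNIQUE INDEX idx ON t (x) TYPE minmax GRANULARITY 1;  -- UNIQUE itself is ignored

-- modulo and intDiv distributed across tuple elements:
SELECT modulo((10, 21), (3, 5)), intDiv((10, 21), (3, 5));  -- (1,1), (3,4)

-- SCHEMA() as a MySQL-compatible alias for DATABASE():
SELECT SCHEMA();
```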
#### Build/Testing/Packaging Improvement
* Don't expose symbols from the ClickHouse binary to the dynamic linker. It might fix [#43933](https://github.com/ClickHouse/ClickHouse/issues/43933). [#47475](https://github.com/ClickHouse/ClickHouse/pull/47475) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add `clickhouse-keeper-client` symlink to the clickhouse-server package. [#51882](https://github.com/ClickHouse/ClickHouse/pull/51882) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add https://github.com/elliotchance/sqltest to CI to report SQL 2016 conformance. [#52293](https://github.com/ClickHouse/ClickHouse/pull/52293) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Upgrade PRQL to 0.9.3. [#53060](https://github.com/ClickHouse/ClickHouse/pull/53060) ([Maximilian Roos](https://github.com/max-sixty)).
* System tables from CI checks are exported to ClickHouse Cloud. [#53086](https://github.com/ClickHouse/ClickHouse/pull/53086) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud. [#53100](https://github.com/ClickHouse/ClickHouse/pull/53100) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up Debug and Tidy builds. [#53178](https://github.com/ClickHouse/ClickHouse/pull/53178) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Speed up the build by removing tons and tonnes of garbage. One of the frequently included headers was poisoned by boost. [#53180](https://github.com/ClickHouse/ClickHouse/pull/53180) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Remove even more garbage. [#53182](https://github.com/ClickHouse/ClickHouse/pull/53182) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The function `arrayAUC` was using heavy C++ templates; ditched them. [#53183](https://github.com/ClickHouse/ClickHouse/pull/53183) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Some translation units were always rebuilt regardless of ccache. The culprit is found and fixed. [#53184](https://github.com/ClickHouse/ClickHouse/pull/53184) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud; the second attempt after [#53100](https://github.com/ClickHouse/ClickHouse/issues/53100). [#53213](https://github.com/ClickHouse/ClickHouse/pull/53213) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Export logs from CI in stateful tests to ClickHouse Cloud. [#53351](https://github.com/ClickHouse/ClickHouse/pull/53351) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Export logs from CI in stress tests. [#53353](https://github.com/ClickHouse/ClickHouse/pull/53353) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Export logs from CI in fuzzer. [#53354](https://github.com/ClickHouse/ClickHouse/pull/53354) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Preserve environment parameters in the `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Follow up for [#53418](https://github.com/ClickHouse/ClickHouse/issues/53418). Small improvements for install_check.py, adding tests for proper ENV parameter passing to the main process on `init.d start`. [#53457](https://github.com/ClickHouse/ClickHouse/pull/53457) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Reorganize file management in CMake to prevent potential duplications. For instance, `indexHint.cpp` is duplicated in both `dbms_sources` and `clickhouse_functions_sources`. [#53621](https://github.com/ClickHouse/ClickHouse/pull/53621) ([Amos Bird](https://github.com/amosbird)).
* Upgrade snappy to 1.1.10. [#53672](https://github.com/ClickHouse/ClickHouse/pull/53672) ([李扬](https://github.com/taiyang-li)).
* Slightly improve the cmake build by sanitizing some dependencies and removing some duplicates. Each commit includes a short description of the changes made. [#53759](https://github.com/ClickHouse/ClickHouse/pull/53759) ([Amos Bird](https://github.com/amosbird)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Do not reset (experimental) Annoy index during build-up with more than one mark [#51325](https://github.com/ClickHouse/ClickHouse/pull/51325) ([Tian Xinhui](https://github.com/xinhuitian)).
* Fix usage of temporary directories during RESTORE [#51493](https://github.com/ClickHouse/ClickHouse/pull/51493) ([Azat Khuzhin](https://github.com/azat)).
* Fix binary arithmetic for Nullable(IPv4) [#51642](https://github.com/ClickHouse/ClickHouse/pull/51642) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Support IPv4 and IPv6 data types as dictionary attributes [#51756](https://github.com/ClickHouse/ClickHouse/pull/51756) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* A fix for the checksum of compressed marks [#51777](https://github.com/ClickHouse/ClickHouse/pull/51777) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix a comma being mistakenly parsed as part of a datetime in CSV best-effort parsing [#51950](https://github.com/ClickHouse/ClickHouse/pull/51950) ([Kruglov Pavel](https://github.com/Avogar)).
* Don't throw an exception when an executable UDF has parameters [#51961](https://github.com/ClickHouse/ClickHouse/pull/51961) ([Nikita Taranov](https://github.com/nickitat)).
* Fix recalculation of skip indexes and projections in `ALTER DELETE` queries [#52530](https://github.com/ClickHouse/ClickHouse/pull/52530) ([Anton Popov](https://github.com/CurtizJ)).
* MaterializedMySQL: Fix an infinite loop in `ReadBuffer::read` [#52621](https://github.com/ClickHouse/ClickHouse/pull/52621) ([Val Doroshchuk](https://github.com/valbok)).
* Load suggestions only with the `clickhouse` dialect [#52628](https://github.com/ClickHouse/ClickHouse/pull/52628) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)).
* Init and destroy the ares channel on demand [#52634](https://github.com/ClickHouse/ClickHouse/pull/52634) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix filtering by virtual columns with OR expression [#52653](https://github.com/ClickHouse/ClickHouse/pull/52653) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash in function `tuple` with one sparse column argument [#52659](https://github.com/ClickHouse/ClickHouse/pull/52659) ([Anton Popov](https://github.com/CurtizJ)).
* Fix named collections on cluster [#52687](https://github.com/ClickHouse/ClickHouse/pull/52687) ([Al Korgun](https://github.com/alkorgun)).
* Fix reading of an unnecessary column in case of multistage `PREWHERE` [#52689](https://github.com/ClickHouse/ClickHouse/pull/52689) ([Anton Popov](https://github.com/CurtizJ)).
* Fix unexpected sort result on multiple columns with nulls-first direction [#52761](https://github.com/ClickHouse/ClickHouse/pull/52761) ([copperybean](https://github.com/copperybean)).
* Fix data race in Keeper reconfiguration [#52804](https://github.com/ClickHouse/ClickHouse/pull/52804) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix sorting of sparse columns with large limit [#52827](https://github.com/ClickHouse/ClickHouse/pull/52827) ([Anton Popov](https://github.com/CurtizJ)).
* clickhouse-keeper: fix implementation of server with poll [#52833](https://github.com/ClickHouse/ClickHouse/pull/52833) ([Andy Fiddaman](https://github.com/citrus-it)).
* Make the regexp analyzer recognize named capturing groups [#52840](https://github.com/ClickHouse/ClickHouse/pull/52840) ([Han Fei](https://github.com/hanfei1991)).
* Fix possible assert in `~PushingAsyncPipelineExecutor` in clickhouse-local [#52862](https://github.com/ClickHouse/ClickHouse/pull/52862) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix reading of empty `Nested(Array(LowCardinality(...)))` [#52949](https://github.com/ClickHouse/ClickHouse/pull/52949) ([Anton Popov](https://github.com/CurtizJ)).
* Added new tests for session_log and fixed the inconsistency between login and logout [#52958](https://github.com/ClickHouse/ClickHouse/pull/52958) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Fix password leak in `SHOW CREATE` for a MySQL table [#52962](https://github.com/ClickHouse/ClickHouse/pull/52962) ([Duc Canh Le](https://github.com/canhld94)).
* Convert sparse column format to full in `CreateSetAndFilterOnTheFlyStep` [#53000](https://github.com/ClickHouse/ClickHouse/pull/53000) ([vdimir](https://github.com/vdimir)).
* Fix rare race condition with empty key prefix directory deletion in fs cache [#53055](https://github.com/ClickHouse/ClickHouse/pull/53055) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix `ZstdDeflatingWriteBuffer` sometimes truncating the output [#53064](https://github.com/ClickHouse/ClickHouse/pull/53064) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix `query_id` in `part_log` with async flush queries [#53103](https://github.com/ClickHouse/ClickHouse/pull/53103) ([Raúl Marín](https://github.com/Algunenano)).
* Fix possible error from cache: "Read unexpected size" [#53121](https://github.com/ClickHouse/ClickHouse/pull/53121) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Disable the new parquet encoder [#53130](https://github.com/ClickHouse/ClickHouse/pull/53130) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix the "Not-ready Set" exception [#53162](https://github.com/ClickHouse/ClickHouse/pull/53162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix character escaping in the PostgreSQL engine [#53250](https://github.com/ClickHouse/ClickHouse/pull/53250) ([Nikolay Degterinsky](https://github.com/evillique)).
* Experimental session_log table: Added new tests for session_log and fixed the inconsistency between login and logout. [#53255](https://github.com/ClickHouse/ClickHouse/pull/53255) ([Alexey Gerasimchuck](https://github.com/Demilivor)). Fixed inconsistency between login success and logout [#53302](https://github.com/ClickHouse/ClickHouse/pull/53302) ([Alexey Gerasimchuck](https://github.com/Demilivor)).
* Fix adding sub-second intervals to DateTime [#53309](https://github.com/ClickHouse/ClickHouse/pull/53309) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix the "Context has expired" error in dictionaries [#53342](https://github.com/ClickHouse/ClickHouse/pull/53342) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
* Forbid `use_structure_from_insertion_table_in_table_functions` when executing a scalar [#53348](https://github.com/ClickHouse/ClickHouse/pull/53348) ([flynn](https://github.com/ucasfl)).
* Fix loading a lazy database during a `system.tables` select query [#53372](https://github.com/ClickHouse/ClickHouse/pull/53372) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fixed `system.data_skipping_indices` for MaterializedMySQL [#53381](https://github.com/ClickHouse/ClickHouse/pull/53381) ([Filipp Ozinov](https://github.com/bakwc)).
* Fix processing of a single carriage return in the TSV file segmentation engine [#53407](https://github.com/ClickHouse/ClickHouse/pull/53407) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix the `Context has expired` error properly [#53433](https://github.com/ClickHouse/ClickHouse/pull/53433) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix `timeout_overflow_mode` when having a subquery in the rhs of IN [#53439](https://github.com/ClickHouse/ClickHouse/pull/53439) ([Duc Canh Le](https://github.com/canhld94)).
* Fix an unexpected behavior in [#53152](https://github.com/ClickHouse/ClickHouse/issues/53152) [#53440](https://github.com/ClickHouse/ClickHouse/pull/53440) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
* Fix `JSON_QUERY` function parse error when the path is all numbers [#53470](https://github.com/ClickHouse/ClickHouse/pull/53470) ([KevinyhZou](https://github.com/KevinyhZou)).
* Fix wrong column order for queries with parallel FINAL [#53489](https://github.com/ClickHouse/ClickHouse/pull/53489) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed SELECTing from ReplacingMergeTree with `do_not_merge_across_partitions_select_final` [#53511](https://github.com/ClickHouse/ClickHouse/pull/53511) ([Vasily Nemkov](https://github.com/Enmk)).
* Flush the async insert queue first on shutdown [#53547](https://github.com/ClickHouse/ClickHouse/pull/53547) ([joelynch](https://github.com/joelynch)).
* Fix crash in JOIN on a sparse column [#53548](https://github.com/ClickHouse/ClickHouse/pull/53548) ([vdimir](https://github.com/vdimir)).
* Fix possible UB in the Set skipping index for functions with incorrect args [#53559](https://github.com/ClickHouse/ClickHouse/pull/53559) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible UB in inverted indexes (experimental feature) [#53560](https://github.com/ClickHouse/ClickHouse/pull/53560) ([Azat Khuzhin](https://github.com/azat)).
* Fix: the interpolate expression took the source column instead of the same-named alias from the select expression [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix the number of dropped granules in `EXPLAIN PLAN index = 1` [#53616](https://github.com/ClickHouse/ClickHouse/pull/53616) ([wangxiaobo](https://github.com/wzb5212)).
* Correctly handle totals and extremes with `DelayedSource` [#53644](https://github.com/ClickHouse/ClickHouse/pull/53644) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix the prepared set cache in the mutation pipeline getting stuck [#53645](https://github.com/ClickHouse/ClickHouse/pull/53645) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix bug in mutations with subcolumns of type JSON in predicates of UPDATE and DELETE queries [#53677](https://github.com/ClickHouse/ClickHouse/pull/53677) ([VanDarkholme7](https://github.com/VanDarkholme7)).
* Fix filter pushdown for `full_sorting_merge` join [#53699](https://github.com/ClickHouse/ClickHouse/pull/53699) ([vdimir](https://github.com/vdimir)).
* Try to fix a bug with `NULL::LowCardinality(Nullable(...)) NOT IN` [#53706](https://github.com/ClickHouse/ClickHouse/pull/53706) ([Andrey Zvonov](https://github.com/zvonand)).
* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).
* `transform`: correctly handle a default column with multiple rows [#53742](https://github.com/ClickHouse/ClickHouse/pull/53742) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Fix fuzzer crash in `parseDateTime` [#53764](https://github.com/ClickHouse/ClickHouse/pull/53764) ([Robert Schulze](https://github.com/rschu1ze)).
* MaterializedPostgreSQL: fix uncaught exception in `getCreateTableQueryImpl` [#53832](https://github.com/ClickHouse/ClickHouse/pull/53832) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix possible segfault while using the PostgreSQL engine [#53847](https://github.com/ClickHouse/ClickHouse/pull/53847) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix `named_collection_admin` alias [#54066](https://github.com/ClickHouse/ClickHouse/pull/54066) ([Kseniia Sumarokova](https://github.com/kssenii)).
### <a id="237"></a> ClickHouse release 23.7, 2023-07-27

#### Backward Incompatible Change
@@ -11,6 +11,7 @@
 #include <base/defines.h>
 #include <base/types.h>
 #include <base/unaligned.h>
+#include <base/simd.h>

 #include <city.h>
@@ -29,6 +30,11 @@
 #define CRC_INT __crc32cd
 #endif

+#if defined(__aarch64__) && defined(__ARM_NEON)
+#include <arm_neon.h>
+#pragma clang diagnostic ignored "-Wreserved-identifier"
+#endif
+

 /**
  * The std::string_view-like container to avoid creating strings to find substrings in the hash table.
@ -74,14 +80,14 @@ using StringRefs = std::vector<StringRef>;
|
||||
* For more information, see hash_map_string_2.cpp
|
||||
*/
|
||||
|
||||
inline bool compareSSE2(const char * p1, const char * p2)
|
||||
inline bool compare8(const char * p1, const char * p2)
|
||||
{
|
||||
return 0xFFFF == _mm_movemask_epi8(_mm_cmpeq_epi8(
|
||||
_mm_loadu_si128(reinterpret_cast<const __m128i *>(p1)),
|
||||
_mm_loadu_si128(reinterpret_cast<const __m128i *>(p2))));
|
||||
}
|
||||
|
||||
inline bool compareSSE2x4(const char * p1, const char * p2)
|
||||
inline bool compare64(const char * p1, const char * p2)
|
||||
{
|
||||
return 0xFFFF == _mm_movemask_epi8(
|
||||
_mm_and_si128(
|
||||
@ -101,7 +107,30 @@ inline bool compareSSE2x4(const char * p1, const char * p2)
|
||||
_mm_loadu_si128(reinterpret_cast<const __m128i *>(p2) + 3)))));
|
||||
}
|
||||
|
||||
inline bool memequalSSE2Wide(const char * p1, const char * p2, size_t size)
|
||||
#elif defined(__aarch64__) && defined(__ARM_NEON)
|
||||
|
||||
inline bool compare8(const char * p1, const char * p2)
|
||||
{
|
||||
uint64_t mask = getNibbleMask(vceqq_u8(
|
||||
vld1q_u8(reinterpret_cast<const unsigned char *>(p1)), vld1q_u8(reinterpret_cast<const unsigned char *>(p2))));
|
||||
return 0xFFFFFFFFFFFFFFFF == mask;
|
||||
}
|
||||
|
||||
inline bool compare64(const char * p1, const char * p2)
|
||||
{
|
||||
uint64_t mask = getNibbleMask(vandq_u8(
|
||||
vandq_u8(vceqq_u8(vld1q_u8(reinterpret_cast<const unsigned char *>(p1)), vld1q_u8(reinterpret_cast<const unsigned char *>(p2))),
|
||||
vceqq_u8(vld1q_u8(reinterpret_cast<const unsigned char *>(p1 + 16)), vld1q_u8(reinterpret_cast<const unsigned char *>(p2 + 16)))),
|
||||
vandq_u8(vceqq_u8(vld1q_u8(reinterpret_cast<const unsigned char *>(p1 + 32)), vld1q_u8(reinterpret_cast<const unsigned char *>(p2 + 32))),
|
||||
vceqq_u8(vld1q_u8(reinterpret_cast<const unsigned char *>(p1 + 48)), vld1q_u8(reinterpret_cast<const unsigned char *>(p2 + 48))))));
|
||||
return 0xFFFFFFFFFFFFFFFF == mask;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(__SSE2__) || (defined(__aarch64__) && defined(__ARM_NEON))
|
||||
|
||||
inline bool memequalWide(const char * p1, const char * p2, size_t size)
|
||||
{
|
||||
/** The order of branches and the trick with overlapping comparisons
|
||||
* are the same as in memcpy implementation.
|
||||
@ -138,7 +167,7 @@ inline bool memequalSSE2Wide(const char * p1, const char * p2, size_t size)
|
||||
|
||||
while (size >= 64)
|
||||
{
|
||||
if (compareSSE2x4(p1, p2))
|
||||
if (compare64(p1, p2))
|
||||
{
|
||||
p1 += 64;
|
||||
p2 += 64;
|
||||
@ -150,17 +179,16 @@ inline bool memequalSSE2Wide(const char * p1, const char * p2, size_t size)
|
||||
|
||||
switch (size / 16)
|
||||
{
|
||||
case 3: if (!compareSSE2(p1 + 32, p2 + 32)) return false; [[fallthrough]];
|
||||
case 2: if (!compareSSE2(p1 + 16, p2 + 16)) return false; [[fallthrough]];
|
||||
case 1: if (!compareSSE2(p1, p2)) return false;
|
||||
case 3: if (!compare8(p1 + 32, p2 + 32)) return false; [[fallthrough]];
|
||||
case 2: if (!compare8(p1 + 16, p2 + 16)) return false; [[fallthrough]];
|
||||
case 1: if (!compare8(p1, p2)) return false;
|
||||
}
|
||||
|
||||
return compareSSE2(p1 + size - 16, p2 + size - 16);
|
||||
return compare8(p1 + size - 16, p2 + size - 16);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
inline bool operator== (StringRef lhs, StringRef rhs)
|
||||
{
|
||||
if (lhs.size != rhs.size)
|
||||
@ -169,8 +197,8 @@ inline bool operator== (StringRef lhs, StringRef rhs)
|
||||
if (lhs.size == 0)
|
||||
return true;
|
||||
|
||||
#if defined(__SSE2__)
|
||||
return memequalSSE2Wide(lhs.data, rhs.data, lhs.size);
|
||||
#if defined(__SSE2__) || (defined(__aarch64__) && defined(__ARM_NEON))
|
||||
return memequalWide(lhs.data, rhs.data, lhs.size);
|
||||
#else
|
||||
return 0 == memcmp(lhs.data, rhs.data, lhs.size);
|
||||
#endif
|
||||
|
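Note on the renamed helpers: `compare8`/`compare64` compare 16 and 64 bytes at a time, and `memequalWide` avoids a scalar tail loop by re-comparing the last 16 bytes with a load that may overlap bytes already checked. Below is a minimal standalone sketch of that overlapping-tail idea for the SSE2 path; the names `equal16` and `equal_16_to_32` are illustrative, not part of the patch.

```cpp
// Sketch of the overlapping-tail comparison used by memequalWide (SSE2 path).
// Helper names are illustrative; only the technique matches the patch above.
#include <emmintrin.h>
#include <cstddef>

static inline bool equal16(const char * p1, const char * p2)
{
    // All 16 byte-compare results must be 0xFF for the 16-bit movemask to equal 0xFFFF.
    return 0xFFFF == _mm_movemask_epi8(_mm_cmpeq_epi8(
        _mm_loadu_si128(reinterpret_cast<const __m128i *>(p1)),
        _mm_loadu_si128(reinterpret_cast<const __m128i *>(p2))));
}

// For 16 <= size <= 32: compare the first and the last 16 bytes.
// The two loads may overlap in the middle, so every byte is covered
// without any scalar tail loop.
static inline bool equal_16_to_32(const char * p1, const char * p2, size_t size)
{
    return equal16(p1, p2) && equal16(p1 + size - 16, p2 + size - 16);
}
```

The same trick carries over unchanged to the NEON path, since only the 16-byte comparison primitive differs.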
14
base/base/simd.h
Normal file
@ -0,0 +1,14 @@
#pragma once

#if defined(__aarch64__) && defined(__ARM_NEON)

#    include <arm_neon.h>
#    pragma clang diagnostic ignored "-Wreserved-identifier"

/// Returns a 64 bit mask of nibbles (4 bits for each byte).
inline uint64_t getNibbleMask(uint8x16_t res)
{
    return vget_lane_u64(vreinterpret_u64_u8(vshrn_n_u16(vreinterpretq_u16_u8(res), 4)), 0);
}

#endif
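NEON has no direct equivalent of SSE2's `_mm_movemask_epi8`, so the new header compresses a 128-bit byte mask into a 64-bit scalar with a single narrowing shift (`shrn #4`): every byte of the compare result contributes exactly one nibble. A standalone sketch of how the header is meant to be used follows; the `allBytesEqual16` wrapper is hypothetical, for illustration only.

```cpp
// Assumes an aarch64 target with NEON; mirrors getNibbleMask from base/base/simd.h.
#include <arm_neon.h>
#include <cstdint>

inline uint64_t nibbleMask(uint8x16_t res)
{
    // Each 16-bit lane is shifted right by 4 and narrowed to a byte, so every
    // input byte contributes exactly one nibble to the 64-bit result.
    return vget_lane_u64(vreinterpret_u64_u8(vshrn_n_u16(vreinterpretq_u16_u8(res), 4)), 0);
}

inline bool allBytesEqual16(const unsigned char * a, const unsigned char * b)
{
    // vceqq_u8 yields 0xFF per equal byte; 16 bytes of 0xFF compress to all ones.
    return nibbleMask(vceqq_u8(vld1q_u8(a), vld1q_u8(b))) == 0xFFFFFFFFFFFFFFFF;
}
```

Checking the result against all ones is then the NEON counterpart of the SSE2 `mask == 0xFFFF` test.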
@ -4,10 +4,19 @@ macro(add_glob cur_list)
endmacro()

macro(add_headers_and_sources prefix common_path)
    add_glob(${prefix}_headers ${CMAKE_CURRENT_SOURCE_DIR} ${common_path}/*.h)
    add_glob(${prefix}_sources ${common_path}/*.cpp ${common_path}/*.c ${common_path}/*.h)
    add_glob(${prefix}_headers ${common_path}/*.h)
    add_glob(${prefix}_sources ${common_path}/*.cpp ${common_path}/*.c)
endmacro()

macro(add_headers_only prefix common_path)
    add_glob(${prefix}_headers ${CMAKE_CURRENT_SOURCE_DIR} ${common_path}/*.h)
    add_glob(${prefix}_headers ${common_path}/*.h)
endmacro()

macro(extract_into_parent_list src_list dest_list)
    list(REMOVE_ITEM ${src_list} ${ARGN})
    get_filename_component(__dir_name ${CMAKE_CURRENT_SOURCE_DIR} NAME)
    foreach(file IN ITEMS ${ARGN})
        list(APPEND ${dest_list} ${__dir_name}/${file})
    endforeach()
    set(${dest_list} "${${dest_list}}" PARENT_SCOPE)
endmacro()
@ -35,10 +35,6 @@ find_package(Threads REQUIRED)
include (cmake/unwind.cmake)
include (cmake/cxx.cmake)

# Delay the call to link the global interface after the libc++ libraries are included to avoid circular dependencies
# which are ok with static libraries but not with dynamic ones
link_libraries(global-group)

if (NOT OS_ANDROID)
    if (NOT USE_MUSL)
        # Our compatibility layer doesn't build under Android, many errors in musl.
@ -47,6 +43,8 @@ if (NOT OS_ANDROID)
    add_subdirectory(base/harmful)
endif ()

link_libraries(global-group)

target_link_libraries(global-group INTERFACE
    -Wl,--start-group
    $<TARGET_PROPERTY:global-libs,INTERFACE_LINK_LIBRARIES>
@ -19,6 +19,19 @@ else ()
message (FATAL_ERROR "Platform ${CMAKE_SYSTEM_NAME} is not supported")
endif ()

# Since we always use toolchain files to generate hermetic builds, cmake will
# always think it's a cross-compilation, see
# https://cmake.org/cmake/help/latest/variable/CMAKE_CROSSCOMPILING.html
#
# This will slow down cmake configuration and compilation. For instance, LLVM
# will try to configure NATIVE LLVM targets with all tests enabled (you'll see
# "Building native llvm-tblgen...").
#
# Here, we set it manually by checking the system name and processor.
if (${CMAKE_SYSTEM_NAME} STREQUAL ${CMAKE_HOST_SYSTEM_NAME} AND ${CMAKE_SYSTEM_PROCESSOR} STREQUAL ${CMAKE_HOST_SYSTEM_PROCESSOR})
    set (CMAKE_CROSSCOMPILING 0)
endif ()

if (CMAKE_CROSSCOMPILING)
    if (OS_DARWIN)
        # FIXME: broken dependencies
13
contrib/CMakeLists.txt
vendored
@ -136,9 +136,7 @@ add_contrib (aws-cmake
)

add_contrib (base64-cmake base64)
if (NOT ARCH_S390X)
add_contrib (simdjson-cmake simdjson)
endif()
add_contrib (rapidjson-cmake rapidjson)
add_contrib (fastops-cmake fastops)
add_contrib (libuv-cmake libuv)
@ -196,6 +194,17 @@ if (ARCH_S390X)
add_contrib(crc32-s390x-cmake crc32-s390x)
endif()
add_contrib (annoy-cmake annoy)

option(ENABLE_USEARCH "Enable USearch (Approximate Neighborhood Search, HNSW) support" ${ENABLE_LIBRARIES})
if (ENABLE_USEARCH)
    add_contrib (FP16-cmake FP16)
    add_contrib (robin-map-cmake robin-map)
    add_contrib (SimSIMD-cmake SimSIMD)
    add_contrib (usearch-cmake usearch) # requires: FP16, robin-map, SimSIMD
else ()
    message(STATUS "Not using USearch")
endif ()

add_contrib (xxHash-cmake xxHash)

add_contrib (libbcrypt-cmake libbcrypt)
1
contrib/FP16
vendored
Submodule
@ -0,0 +1 @@
Subproject commit 0a92994d729ff76a58f692d3028ca1b64b145d91
1
contrib/FP16-cmake/CMakeLists.txt
Normal file
@ -0,0 +1 @@
# See contrib/usearch-cmake/CMakeLists.txt
1
contrib/SimSIMD
vendored
Submodule
@ -0,0 +1 @@
Subproject commit de2cb75b9e9e3389d5e1e51fd9f8ed151f3c17cf
1
contrib/SimSIMD-cmake/CMakeLists.txt
Normal file
@ -0,0 +1 @@
# See contrib/usearch-cmake/CMakeLists.txt
2
contrib/arrow
vendored
@ -1 +1 @@
Subproject commit 1f1b3d35fb6eb73e6492d3afd8a85cde848d174f
Subproject commit 1d93838f69a802639ca144ea5704a98e2481810d
@ -334,20 +334,36 @@ set(ARROW_SRCS
    "${LIBRARY_DIR}/compute/api_vector.cc"
    "${LIBRARY_DIR}/compute/cast.cc"
    "${LIBRARY_DIR}/compute/exec.cc"
    "${LIBRARY_DIR}/compute/exec/accumulation_queue.cc"
    "${LIBRARY_DIR}/compute/exec/accumulation_queue.h"
    "${LIBRARY_DIR}/compute/exec/aggregate.cc"
    "${LIBRARY_DIR}/compute/exec/aggregate_node.cc"
    "${LIBRARY_DIR}/compute/exec/asof_join_node.cc"
    "${LIBRARY_DIR}/compute/exec/bloom_filter.cc"
    "${LIBRARY_DIR}/compute/exec/exec_plan.cc"
    "${LIBRARY_DIR}/compute/exec/expression.cc"
    "${LIBRARY_DIR}/compute/exec/filter_node.cc"
    "${LIBRARY_DIR}/compute/exec/project_node.cc"
    "${LIBRARY_DIR}/compute/exec/source_node.cc"
    "${LIBRARY_DIR}/compute/exec/sink_node.cc"
    "${LIBRARY_DIR}/compute/exec/hash_join.cc"
    "${LIBRARY_DIR}/compute/exec/hash_join_dict.cc"
    "${LIBRARY_DIR}/compute/exec/hash_join_node.cc"
    "${LIBRARY_DIR}/compute/exec/key_hash.cc"
    "${LIBRARY_DIR}/compute/exec/key_map.cc"
    "${LIBRARY_DIR}/compute/exec/map_node.cc"
    "${LIBRARY_DIR}/compute/exec/options.cc"
    "${LIBRARY_DIR}/compute/exec/order_by_impl.cc"
    "${LIBRARY_DIR}/compute/exec/partition_util.cc"
    "${LIBRARY_DIR}/compute/exec/project_node.cc"
    "${LIBRARY_DIR}/compute/exec/query_context.cc"
    "${LIBRARY_DIR}/compute/exec/sink_node.cc"
    "${LIBRARY_DIR}/compute/exec/source_node.cc"
    "${LIBRARY_DIR}/compute/exec/swiss_join.cc"
    "${LIBRARY_DIR}/compute/exec/task_util.cc"
    "${LIBRARY_DIR}/compute/exec/tpch_node.cc"
    "${LIBRARY_DIR}/compute/exec/union_node.cc"
    "${LIBRARY_DIR}/compute/exec/util.cc"
    "${LIBRARY_DIR}/compute/function.cc"
    "${LIBRARY_DIR}/compute/function_internal.cc"
    "${LIBRARY_DIR}/compute/kernel.cc"
    "${LIBRARY_DIR}/compute/light_array.cc"
    "${LIBRARY_DIR}/compute/registry.cc"
    "${LIBRARY_DIR}/compute/kernels/aggregate_basic.cc"
    "${LIBRARY_DIR}/compute/kernels/aggregate_mode.cc"
    "${LIBRARY_DIR}/compute/kernels/aggregate_quantile.cc"
@ -355,49 +371,43 @@ set(ARROW_SRCS
    "${LIBRARY_DIR}/compute/kernels/aggregate_var_std.cc"
    "${LIBRARY_DIR}/compute/kernels/codegen_internal.cc"
    "${LIBRARY_DIR}/compute/kernels/hash_aggregate.cc"
    "${LIBRARY_DIR}/compute/kernels/row_encoder.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_arithmetic.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_boolean.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_boolean.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_dictionary.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_internal.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_extension.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_internal.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_nested.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_numeric.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_string.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_cast_temporal.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_compare.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_if_else.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_nested.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_random.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_round.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_set_lookup.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_string_ascii.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_string_utf8.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_temporal_binary.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_temporal_unary.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_validity.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_if_else.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_string_ascii.cc"
    "${LIBRARY_DIR}/compute/kernels/scalar_string_utf8.cc"
    "${LIBRARY_DIR}/compute/kernels/util_internal.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_array_sort.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_cumulative_ops.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_hash.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_rank.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_select_k.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_nested.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_rank.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_replace.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_select_k.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_selection.cc"
    "${LIBRARY_DIR}/compute/kernels/vector_sort.cc"
    "${LIBRARY_DIR}/compute/kernels/row_encoder.cc"
    "${LIBRARY_DIR}/compute/exec/union_node.cc"
    "${LIBRARY_DIR}/compute/exec/key_hash.cc"
    "${LIBRARY_DIR}/compute/exec/key_map.cc"
    "${LIBRARY_DIR}/compute/exec/util.cc"
    "${LIBRARY_DIR}/compute/exec/hash_join_dict.cc"
    "${LIBRARY_DIR}/compute/exec/hash_join.cc"
    "${LIBRARY_DIR}/compute/exec/hash_join_node.cc"
    "${LIBRARY_DIR}/compute/exec/task_util.cc"
    "${LIBRARY_DIR}/compute/light_array.cc"
    "${LIBRARY_DIR}/compute/registry.cc"
    "${LIBRARY_DIR}/compute/row/compare_internal.cc"
    "${LIBRARY_DIR}/compute/row/encode_internal.cc"
    "${LIBRARY_DIR}/compute/row/grouper.cc"
    "${LIBRARY_DIR}/compute/row/compare_internal.cc"
    "${LIBRARY_DIR}/compute/row/row_internal.cc"

    "${LIBRARY_DIR}/ipc/dictionary.cc"
2
contrib/base64
vendored
@ -1 +1 @@
Subproject commit 9499e0c4945589973b9ea1bc927377cfbc84aa46
Subproject commit 8628e258090f9eb76d90ac3c91e1ab4690e9aa11
2
contrib/boost
vendored
@ -1 +1 @@
Subproject commit aec12eea7fc762721ae16943d1361340c66c9c17
Subproject commit ae94606a70f1e298ce2a5718db858079185c4d9c
@ -19,18 +19,22 @@ add_library (_boost_filesystem ${SRCS_FILESYSTEM})
add_library (boost::filesystem ALIAS _boost_filesystem)
target_include_directories (_boost_filesystem SYSTEM BEFORE PUBLIC ${LIBRARY_DIR})

# headers-only
if (OS_LINUX)
    target_compile_definitions (_boost_filesystem PRIVATE
        BOOST_FILESYSTEM_HAS_POSIX_AT_APIS=1
    )
endif ()

# headers-only
add_library (_boost_headers_only INTERFACE)
add_library (boost::headers_only ALIAS _boost_headers_only)
target_include_directories (_boost_headers_only SYSTEM BEFORE INTERFACE ${LIBRARY_DIR})

# asio

target_compile_definitions (_boost_headers_only INTERFACE
    BOOST_ASIO_STANDALONE=1
    # Avoid using of deprecated in c++ > 17 std::result_of
    BOOST_ASIO_HAS_STD_INVOKE_RESULT=1
    BOOST_ASIO_HAS_STD_INVOKE_RESULT=1 # Avoid using of deprecated in c++ > 17 std::result_of
    BOOST_TIMER_ENABLE_DEPRECATED=1 # wordnet-blast (enabled via USE_NLP) uses Boost legacy timer classes
)

# iostreams
@ -172,9 +176,9 @@ endif()
# coroutine

set (SRCS_COROUTINE
    "${LIBRARY_DIR}/libs/coroutine/detail/coroutine_context.cpp"
    "${LIBRARY_DIR}/libs/coroutine/exceptions.cpp"
    "${LIBRARY_DIR}/libs/coroutine/posix/stack_traits.cpp"
    "${LIBRARY_DIR}/libs/coroutine/src/detail/coroutine_context.cpp"
    "${LIBRARY_DIR}/libs/coroutine/src/exceptions.cpp"
    "${LIBRARY_DIR}/libs/coroutine/src/posix/stack_traits.cpp"
)
add_library (_boost_coroutine ${SRCS_COROUTINE})
add_library (boost::coroutine ALIAS _boost_coroutine)
@ -1,6 +1,7 @@
option(ENABLE_ISAL_LIBRARY "Enable ISA-L library" ${ENABLE_LIBRARIES})
if (ARCH_AARCH64)
    # Disable ISA-L libray on aarch64.

# ISA-L is only available for x86-64, so it shall be disabled for other platforms
if (NOT ARCH_AMD64)
    set (ENABLE_ISAL_LIBRARY OFF)
endif ()
2
contrib/krb5
vendored
@ -1 +1 @@
Subproject commit b56ce6ba690e1f320df1a64afa34980c3e462617
Subproject commit 71b06c2276009ae649c7703019f3b4605f66fd3d
@ -147,7 +147,7 @@ target_compile_definitions(_libarchive PUBLIC
target_compile_options(_libarchive PRIVATE "-Wno-reserved-macro-identifier")

if (TARGET ch_contrib::xz)
    target_compile_definitions(_libarchive PUBLIC HAVE_LZMA_H=1)
    target_compile_definitions(_libarchive PUBLIC HAVE_LZMA_H=1 HAVE_LIBLZMA=1)
    target_link_libraries(_libarchive PRIVATE ch_contrib::xz)
endif()

@ -156,6 +156,16 @@ if (TARGET ch_contrib::zlib)
    target_link_libraries(_libarchive PRIVATE ch_contrib::zlib)
endif()

if (TARGET ch_contrib::zstd)
    target_compile_definitions(_libarchive PUBLIC HAVE_ZSTD_H=1 HAVE_LIBZSTD=1)
    target_link_libraries(_libarchive PRIVATE ch_contrib::zstd)
endif()

if (TARGET ch_contrib::bzip2)
    target_compile_definitions(_libarchive PUBLIC HAVE_BZLIB_H=1)
    target_link_libraries(_libarchive PRIVATE ch_contrib::bzip2)
endif()

if (OS_LINUX)
    target_compile_definitions(
        _libarchive PUBLIC
2
contrib/libpqxx
vendored
@ -1 +1 @@
Subproject commit bdd6540fb95ff56c813691ceb5da5a3266cf235d
Subproject commit 791d68fd89902835133c50435e380ec7a73271b7
2
contrib/llvm-project
vendored
@ -1 +1 @@
Subproject commit d857c707fccd50423bea1c4710dc469cf89607a9
Subproject commit e7b8befca85c8b847614432dba250c22d35fbae0
@ -1,18 +1,16 @@
if (APPLE OR NOT ARCH_AMD64 OR SANITIZE STREQUAL "undefined")
if (APPLE OR SANITIZE STREQUAL "undefined")
    set (ENABLE_EMBEDDED_COMPILER_DEFAULT OFF)
else()
    set (ENABLE_EMBEDDED_COMPILER_DEFAULT ON)
endif()

option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})
option (ENABLE_EMBEDDED_COMPILER "Enable support for JIT compilation during query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})

if (NOT ENABLE_EMBEDDED_COMPILER)
    message(STATUS "Not using LLVM")
    return()
endif()

# TODO: Enable compilation on AArch64

set (LLVM_VERSION "15.0.0bundled")
set (LLVM_INCLUDE_DIRS
    "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/llvm/include"
@ -58,18 +56,30 @@ set (REQUIRED_LLVM_LIBRARIES
    LLVMDemangle
)

# if (ARCH_AMD64)
if (ARCH_AMD64)
    set (LLVM_TARGETS_TO_BUILD "X86" CACHE INTERNAL "")
    list(APPEND REQUIRED_LLVM_LIBRARIES LLVMX86Info LLVMX86Desc LLVMX86CodeGen)
# elseif (ARCH_AARCH64)
#     list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
# endif ()
elseif (ARCH_AARCH64)
    set (LLVM_TARGETS_TO_BUILD "AArch64" CACHE INTERNAL "")
    list(APPEND REQUIRED_LLVM_LIBRARIES LLVMAArch64Info LLVMAArch64Desc LLVMAArch64CodeGen)
elseif (ARCH_PPC64LE)
    set (LLVM_TARGETS_TO_BUILD "PowerPC" CACHE INTERNAL "")
    list(APPEND REQUIRED_LLVM_LIBRARIES LLVMPowerPCInfo LLVMPowerPCDesc LLVMPowerPCCodeGen)
elseif (ARCH_S390X)
    set (LLVM_TARGETS_TO_BUILD "SystemZ" CACHE INTERNAL "")
    list(APPEND REQUIRED_LLVM_LIBRARIES LLVMSystemZInfo LLVMSystemZDesc LLVMSystemZCodeGen)
elseif (ARCH_RISCV64)
    set (LLVM_TARGETS_TO_BUILD "RISCV" CACHE INTERNAL "")
    list(APPEND REQUIRED_LLVM_LIBRARIES LLVMRISCVInfo LLVMRISCVDesc LLVMRISCVCodeGen)
endif ()

message (STATUS "LLVM TARGETS TO BUILD ${LLVM_TARGETS_TO_BUILD}")

set (CMAKE_INSTALL_RPATH "ON") # Do not adjust RPATH in llvm, since then it will not be able to find libcxx/libcxxabi/libunwind
set (LLVM_COMPILER_CHECKED 1 CACHE INTERNAL "") # Skip internal compiler selection
set (LLVM_ENABLE_EH 1 CACHE INTERNAL "") # With exception handling
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
set (LLVM_TARGETS_TO_BUILD "X86" CACHE STRING "") # for x86 + ARM: "X86;AArch64"

# Omit unnecessary stuff (just the options which are ON by default)
set(LLVM_ENABLE_BACKTRACES 0 CACHE INTERNAL "")
@ -99,15 +109,12 @@ set(LLVM_ENABLE_BINDINGS 0 CACHE INTERNAL "")
set (LLVM_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/llvm")
set (LLVM_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/llvm-project/llvm")

# Since we always use toolchain files to generate hermetic builds, cmake will
# think it's a cross compilation, and LLVM will try to configure NATIVE LLVM
# targets with all tests enabled, which will slow down cmake configuration and
# compilation (you'll see "Building native llvm-tblgen..."). Let's disable the
# cross compiling indicator for now.
#
# TODO We should let cmake know whether it's indeed a cross compilation in the
# first place.
set (CMAKE_CROSSCOMPILING 0)
message (STATUS "LLVM CMAKE CROSS COMPILING ${CMAKE_CROSSCOMPILING}")
if (CMAKE_CROSSCOMPILING)
    set (LLVM_HOST_TRIPLE "${CMAKE_C_COMPILER_TARGET}" CACHE INTERNAL "")
    message (STATUS "CROSS COMPILING SET LLVM HOST TRIPLE ${LLVM_HOST_TRIPLE}")
endif()

add_subdirectory ("${LLVM_SOURCE_DIR}" "${LLVM_BINARY_DIR}")

set_directory_properties (PROPERTIES
2
contrib/lz4
vendored
@ -1 +1 @@
Subproject commit e82198428c8061372d5adef1f9bfff4203f6081e
Subproject commit 92ebf1870b9acbefc0e7970409a181954a10ff40
@ -13,6 +13,11 @@ add_library (ch_contrib::lz4 ALIAS _lz4)

target_compile_definitions (_lz4 PUBLIC LZ4_DISABLE_DEPRECATE_WARNINGS=1)
target_compile_definitions (_lz4 PUBLIC LZ4_FAST_DEC_LOOP=1)

if(ARCH_S390X)
    target_compile_definitions(_lz4 PRIVATE LZ4_STATIC_LINKING_ONLY_ENDIANNESS_INDEPENDENT_OUTPUT)
endif()

if (SANITIZE STREQUAL "undefined")
    target_compile_options (_lz4 PRIVATE -fno-sanitize=undefined)
endif ()
2
contrib/openldap
vendored
@ -1 +1 @@
Subproject commit 8688afe6bc95ebcd20edf4578c536362218cb70a
Subproject commit 5671b80e369df2caf5f34e02924316205a43c895
@ -96,71 +96,82 @@ target_compile_definitions(_lber
)

set(_ldap_srcs
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/bind.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/open.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/result.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/error.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/compare.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/search.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/controls.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/messages.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/references.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/extended.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/cyrus.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/modify.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/add.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/modrdn.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/delete.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/abandon.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sasl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sbind.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/unbind.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/add.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/addentry.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/assertion.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/avl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/bind.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/cancel.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/charray.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/compare.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/controls.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/cyrus.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/dds.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/delete.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/deref.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/dnssrv.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/error.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/extended.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/fetch.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/filter.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/free.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sort.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/passwd.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/whoami.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/vc.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getattr.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getdn.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getentry.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getattr.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/getvalues.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/addentry.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/request.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-ip.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/url.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/pagectrl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sortctrl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/vlvctrl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/init.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/options.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/print.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/string.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/util-int.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/schema.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/charray.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-local.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/dnssrv.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8-conv.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls2.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_o.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_g.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/turn.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ppolicy.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/dds.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/txn.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldap_sync.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/stctrl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/assertion.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/deref.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldifutil.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldif.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/fetch.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/lbase64.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldap_sync.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldif.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ldifutil.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/messages.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/modify.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/modrdn.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/msctrl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/open.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/options.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-ip.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/os-local.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/pagectrl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/passwd.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/ppolicy.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/print.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/psearchctrl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/rdwr.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/references.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/request.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/result.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/rq.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sasl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sbind.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/schema.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/search.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sort.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/sortctrl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/stctrl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/string.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tavl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/thr_debug.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/thr_nt.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/thr_posix.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/thr_pth.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/thr_thr.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/threads.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls2.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_g.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tls_o.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/tpool.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/turn.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/txn.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/unbind.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/url.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8-conv.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/utf-8.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/util-int.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/vc.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/vlvctrl.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap/whoami.c"
)

mkversion(ldap)
@ -185,43 +196,5 @@ target_compile_definitions(_ldap
    PRIVATE LDAP_LIBRARY
)

set(_ldap_r_specific_srcs
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/threads.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/rdwr.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/tpool.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/rq.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_posix.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_thr.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_nt.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_pth.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_stub.c"
    "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r/thr_debug.c"
)

mkversion(ldap_r)

add_library(_ldap_r
    ${_ldap_r_specific_srcs}
    ${_ldap_srcs}
    "${CMAKE_CURRENT_BINARY_DIR}/ldap_r-version.c"
)

target_link_libraries(_ldap_r
    PRIVATE _lber
    PRIVATE OpenSSL::Crypto OpenSSL::SSL
)

target_include_directories(_ldap_r SYSTEM
    PUBLIC ${_extra_build_dir}/include
    PUBLIC "${OPENLDAP_SOURCE_DIR}/include"
    PRIVATE "${OPENLDAP_SOURCE_DIR}/libraries/libldap_r"
    PRIVATE "${OPENLDAP_SOURCE_DIR}/libraries/libldap"
)

target_compile_definitions(_ldap_r
    PRIVATE LDAP_R_COMPILE
    PRIVATE LDAP_LIBRARY
)

add_library(ch_contrib::ldap ALIAS _ldap_r)
add_library(ch_contrib::ldap ALIAS _ldap)
add_library(ch_contrib::lber ALIAS _lber)
2
contrib/openssl
vendored
@ -1 +1 @@
Subproject commit 19cc035b6c6f2283573d29c7ea7f7d675cf750ce
Subproject commit 245cb0291e0db99d9ccf3692fa76f440b2b054c2
@ -1,8 +1,8 @@
/*
 * WARNING: do not edit!
 * Generated by Makefile from ../include/openssl/cmp.h.in
 * Generated by Makefile from include/openssl/cmp.h.in
 *
 * Copyright 2007-2021 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 2007-2023 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright Nokia 2007-2019
 * Copyright Siemens AG 2015-2019
 *
@ -193,6 +193,9 @@ typedef ASN1_BIT_STRING OSSL_CMP_PKIFAILUREINFO;
 * -- CertReqMsg
 * }
 */
# define OSSL_CMP_PKISTATUS_request -3
# define OSSL_CMP_PKISTATUS_trans -2
# define OSSL_CMP_PKISTATUS_unspecified -1
# define OSSL_CMP_PKISTATUS_accepted 0
# define OSSL_CMP_PKISTATUS_grantedWithMods 1
# define OSSL_CMP_PKISTATUS_rejection 2
@ -439,11 +442,12 @@ int OSSL_CMP_CTX_build_cert_chain(OSSL_CMP_CTX *ctx, X509_STORE *own_trusted,
int OSSL_CMP_CTX_set1_pkey(OSSL_CMP_CTX *ctx, EVP_PKEY *pkey);
int OSSL_CMP_CTX_set1_referenceValue(OSSL_CMP_CTX *ctx,
                                     const unsigned char *ref, int len);
int OSSL_CMP_CTX_set1_secretValue(OSSL_CMP_CTX *ctx, const unsigned char *sec,
                                  const int len);
int OSSL_CMP_CTX_set1_secretValue(OSSL_CMP_CTX *ctx,
                                  const unsigned char *sec, int len);
/* CMP message header and extra certificates: */
int OSSL_CMP_CTX_set1_recipient(OSSL_CMP_CTX *ctx, const X509_NAME *name);
int OSSL_CMP_CTX_push0_geninfo_ITAV(OSSL_CMP_CTX *ctx, OSSL_CMP_ITAV *itav);
int OSSL_CMP_CTX_reset_geninfo_ITAVs(OSSL_CMP_CTX *ctx);
int OSSL_CMP_CTX_set1_extraCertsOut(OSSL_CMP_CTX *ctx,
                                    STACK_OF(X509) *extraCertsOut);
/* certificate template: */
@ -499,6 +503,7 @@ ASN1_OCTET_STRING *OSSL_CMP_HDR_get0_recipNonce(const OSSL_CMP_PKIHEADER *hdr);
OSSL_CMP_PKIHEADER *OSSL_CMP_MSG_get0_header(const OSSL_CMP_MSG *msg);
int OSSL_CMP_MSG_get_bodytype(const OSSL_CMP_MSG *msg);
int OSSL_CMP_MSG_update_transactionID(OSSL_CMP_CTX *ctx, OSSL_CMP_MSG *msg);
int OSSL_CMP_MSG_update_recipNonce(OSSL_CMP_CTX *ctx, OSSL_CMP_MSG *msg);
OSSL_CRMF_MSG *OSSL_CMP_CTX_setup_CRM(OSSL_CMP_CTX *ctx, int for_KUR, int rid);
OSSL_CMP_MSG *OSSL_CMP_MSG_read(const char *file, OSSL_LIB_CTX *libctx,
                                const char *propq);
@ -1,6 +1,6 @@
/*
 * WARNING: do not edit!
 * Generated by Makefile from ../include/openssl/opensslv.h.in
 * Generated by Makefile from include/openssl/opensslv.h.in
 *
 * Copyright 1999-2020 The OpenSSL Project Authors. All Rights Reserved.
 *
@ -29,7 +29,7 @@ extern "C" {
 */
# define OPENSSL_VERSION_MAJOR 3
# define OPENSSL_VERSION_MINOR 0
# define OPENSSL_VERSION_PATCH 7
# define OPENSSL_VERSION_PATCH 10

/*
 * Additional version information
@ -74,21 +74,21 @@ extern "C" {
 * longer variant with OPENSSL_VERSION_PRE_RELEASE_STR and
 * OPENSSL_VERSION_BUILD_METADATA_STR appended.
 */
# define OPENSSL_VERSION_STR "3.0.7"
# define OPENSSL_FULL_VERSION_STR "3.0.7"
# define OPENSSL_VERSION_STR "3.0.10"
# define OPENSSL_FULL_VERSION_STR "3.0.10"

/*
 * SECTION 3: ADDITIONAL METADATA
 *
 * These strings are defined separately to allow them to be parsable.
 */
# define OPENSSL_RELEASE_DATE "1 Nov 2022"
# define OPENSSL_RELEASE_DATE "1 Aug 2023"

/*
 * SECTION 4: BACKWARD COMPATIBILITY
 */

# define OPENSSL_VERSION_TEXT "OpenSSL 3.0.7 1 Nov 2022"
# define OPENSSL_VERSION_TEXT "OpenSSL 3.0.10 1 Aug 2023"

/* Synthesize OPENSSL_VERSION_NUMBER with the layout 0xMNN00PPSL */
# ifdef OPENSSL_VERSION_PRE_RELEASE
@ -1,8 +1,8 @@
/*
 * WARNING: do not edit!
 * Generated by Makefile from ../include/openssl/x509v3.h.in
 * Generated by Makefile from include/openssl/x509v3.h.in
 *
 * Copyright 1999-2021 The OpenSSL Project Authors. All Rights Reserved.
 * Copyright 1999-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
@ -177,7 +177,7 @@ typedef struct GENERAL_NAME_st {
    OTHERNAME *otherName; /* otherName */
    ASN1_IA5STRING *rfc822Name;
    ASN1_IA5STRING *dNSName;
    ASN1_TYPE *x400Address;
    ASN1_STRING *x400Address;
    X509_NAME *directoryName;
    EDIPARTYNAME *ediPartyName;
    ASN1_IA5STRING *uniformResourceIdentifier;
|
||||
/*
|
||||
* WARNING: do not edit!
|
||||
* Generated by Makefile from ../include/openssl/cmp.h.in
|
||||
* Generated by Makefile from include/openssl/cmp.h.in
|
||||
*
|
||||
* Copyright 2007-2021 The OpenSSL Project Authors. All Rights Reserved.
|
||||
* Copyright 2007-2023 The OpenSSL Project Authors. All Rights Reserved.
|
||||
* Copyright Nokia 2007-2019
|
||||
* Copyright Siemens AG 2015-2019
|
||||
*
|
||||
@ -193,6 +193,9 @@ typedef ASN1_BIT_STRING OSSL_CMP_PKIFAILUREINFO;
|
||||
* -- CertReqMsg
|
||||
* }
|
||||
*/
|
||||
# define OSSL_CMP_PKISTATUS_request -3
|
||||
# define OSSL_CMP_PKISTATUS_trans -2
|
||||
# define OSSL_CMP_PKISTATUS_unspecified -1
|
||||
# define OSSL_CMP_PKISTATUS_accepted 0
|
||||
# define OSSL_CMP_PKISTATUS_grantedWithMods 1
|
||||
# define OSSL_CMP_PKISTATUS_rejection 2
|
||||
@ -439,11 +442,12 @@ int OSSL_CMP_CTX_build_cert_chain(OSSL_CMP_CTX *ctx, X509_STORE *own_trusted,
|
||||
int OSSL_CMP_CTX_set1_pkey(OSSL_CMP_CTX *ctx, EVP_PKEY *pkey);
|
||||
int OSSL_CMP_CTX_set1_referenceValue(OSSL_CMP_CTX *ctx,
|
||||
const unsigned char *ref, int len);
|
||||
int OSSL_CMP_CTX_set1_secretValue(OSSL_CMP_CTX *ctx, const unsigned char *sec,
|
||||
const int len);
|
||||
int OSSL_CMP_CTX_set1_secretValue(OSSL_CMP_CTX *ctx,
|
||||
const unsigned char *sec, int len);
|
||||
/* CMP message header and extra certificates: */
|
||||
int OSSL_CMP_CTX_set1_recipient(OSSL_CMP_CTX *ctx, const X509_NAME *name);
|
||||
int OSSL_CMP_CTX_push0_geninfo_ITAV(OSSL_CMP_CTX *ctx, OSSL_CMP_ITAV *itav);
|
||||
int OSSL_CMP_CTX_reset_geninfo_ITAVs(OSSL_CMP_CTX *ctx);
|
||||
int OSSL_CMP_CTX_set1_extraCertsOut(OSSL_CMP_CTX *ctx,
|
||||
STACK_OF(X509) *extraCertsOut);
|
||||
/* certificate template: */
|
||||
@ -499,6 +503,7 @@ ASN1_OCTET_STRING *OSSL_CMP_HDR_get0_recipNonce(const OSSL_CMP_PKIHEADER *hdr);
|
||||
OSSL_CMP_PKIHEADER *OSSL_CMP_MSG_get0_header(const OSSL_CMP_MSG *msg);
|
||||
int OSSL_CMP_MSG_get_bodytype(const OSSL_CMP_MSG *msg);
|
||||
int OSSL_CMP_MSG_update_transactionID(OSSL_CMP_CTX *ctx, OSSL_CMP_MSG *msg);
|
||||
int OSSL_CMP_MSG_update_recipNonce(OSSL_CMP_CTX *ctx, OSSL_CMP_MSG *msg);
|
||||
OSSL_CRMF_MSG *OSSL_CMP_CTX_setup_CRM(OSSL_CMP_CTX *ctx, int for_KUR, int rid);
|
||||
OSSL_CMP_MSG *OSSL_CMP_MSG_read(const char *file, OSSL_LIB_CTX *libctx,
|
||||
const char *propq);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* WARNING: do not edit!
|
||||
* Generated by Makefile from ../include/openssl/opensslv.h.in
|
||||
* Generated by Makefile from include/openssl/opensslv.h.in
|
||||
*
|
||||
* Copyright 1999-2020 The OpenSSL Project Authors. All Rights Reserved.
|
||||
*
|
||||
@ -29,7 +29,7 @@ extern "C" {
|
||||
*/
|
||||
# define OPENSSL_VERSION_MAJOR 3
|
||||
# define OPENSSL_VERSION_MINOR 0
|
||||
# define OPENSSL_VERSION_PATCH 7
|
||||
# define OPENSSL_VERSION_PATCH 10
|
||||
|
||||
/*
|
||||
* Additional version information
|
||||
@ -74,21 +74,21 @@ extern "C" {
|
||||
* longer variant with OPENSSL_VERSION_PRE_RELEASE_STR and
|
||||
* OPENSSL_VERSION_BUILD_METADATA_STR appended.
|
||||
*/
|
||||
# define OPENSSL_VERSION_STR "3.0.7"
|
||||
# define OPENSSL_FULL_VERSION_STR "3.0.7"
|
||||
# define OPENSSL_VERSION_STR "3.0.10"
|
||||
# define OPENSSL_FULL_VERSION_STR "3.0.10"
|
||||
|
||||
/*
|
||||
* SECTION 3: ADDITIONAL METADATA
|
||||
*
|
||||
* These strings are defined separately to allow them to be parsable.
|
||||
*/
|
||||
# define OPENSSL_RELEASE_DATE "1 Nov 2022"
|
||||
# define OPENSSL_RELEASE_DATE "1 Aug 2023"
|
||||
|
||||
/*
|
||||
* SECTION 4: BACKWARD COMPATIBILITY
|
||||
*/
|
||||
|
||||
# define OPENSSL_VERSION_TEXT "OpenSSL 3.0.7 1 Nov 2022"
|
||||
# define OPENSSL_VERSION_TEXT "OpenSSL 3.0.10 1 Aug 2023"
|
||||
|
||||
/* Synthesize OPENSSL_VERSION_NUMBER with the layout 0xMNN00PPSL */
|
||||
# ifdef OPENSSL_VERSION_PRE_RELEASE
|
||||
|
@ -1,8 +1,8 @@
|
||||
/*
|
||||
* WARNING: do not edit!
|
||||
* Generated by Makefile from ../include/openssl/x509v3.h.in
|
||||
* Generated by Makefile from include/openssl/x509v3.h.in
|
||||
*
|
||||
* Copyright 1999-2021 The OpenSSL Project Authors. All Rights Reserved.
|
||||
* Copyright 1999-2023 The OpenSSL Project Authors. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License 2.0 (the "License"). You may not use
|
||||
* this file except in compliance with the License. You can obtain a copy
|
||||
@ -177,7 +177,7 @@ typedef struct GENERAL_NAME_st {
|
||||
OTHERNAME *otherName; /* otherName */
|
||||
ASN1_IA5STRING *rfc822Name;
|
||||
ASN1_IA5STRING *dNSName;
|
||||
ASN1_TYPE *x400Address;
|
||||
ASN1_STRING *x400Address;
|
||||
X509_NAME *directoryName;
|
||||
EDIPARTYNAME *ediPartyName;
|
||||
ASN1_IA5STRING *uniformResourceIdentifier;
|
||||
|
@ -1,8 +1,8 @@
|
||||
/*
|
||||
* WARNING: do not edit!
|
||||
* Generated by Makefile from ../include/openssl/cmp.h.in
|
||||
* Generated by Makefile from include/openssl/cmp.h.in
|
||||
*
|
||||
* Copyright 2007-2021 The OpenSSL Project Authors. All Rights Reserved.
|
||||
* Copyright 2007-2023 The OpenSSL Project Authors. All Rights Reserved.
|
||||
* Copyright Nokia 2007-2019
|
||||
* Copyright Siemens AG 2015-2019
|
||||
*
|
||||
@ -193,6 +193,9 @@ typedef ASN1_BIT_STRING OSSL_CMP_PKIFAILUREINFO;
|
||||
* -- CertReqMsg
|
||||
* }
|
||||
*/
|
||||
# define OSSL_CMP_PKISTATUS_request -3
|
||||
# define OSSL_CMP_PKISTATUS_trans -2
|
||||
# define OSSL_CMP_PKISTATUS_unspecified -1
|
||||
# define OSSL_CMP_PKISTATUS_accepted 0
|
||||
# define OSSL_CMP_PKISTATUS_grantedWithMods 1
|
||||
# define OSSL_CMP_PKISTATUS_rejection 2
|
||||
@ -439,11 +442,12 @@ int OSSL_CMP_CTX_build_cert_chain(OSSL_CMP_CTX *ctx, X509_STORE *own_trusted,
|
||||
int OSSL_CMP_CTX_set1_pkey(OSSL_CMP_CTX *ctx, EVP_PKEY *pkey);
|
||||
int OSSL_CMP_CTX_set1_referenceValue(OSSL_CMP_CTX *ctx,
|
||||
const unsigned char *ref, int len);
|
||||
int OSSL_CMP_CTX_set1_secretValue(OSSL_CMP_CTX *ctx, const unsigned char *sec,
|
||||
const int len);
|
||||
int OSSL_CMP_CTX_set1_secretValue(OSSL_CMP_CTX *ctx,
|
||||
const unsigned char *sec, int len);
|
||||
/* CMP message header and extra certificates: */
|
||||
int OSSL_CMP_CTX_set1_recipient(OSSL_CMP_CTX *ctx, const X509_NAME *name);
|
||||
int OSSL_CMP_CTX_push0_geninfo_ITAV(OSSL_CMP_CTX *ctx, OSSL_CMP_ITAV *itav);
|
||||
int OSSL_CMP_CTX_reset_geninfo_ITAVs(OSSL_CMP_CTX *ctx);
|
||||
int OSSL_CMP_CTX_set1_extraCertsOut(OSSL_CMP_CTX *ctx,
|
||||
STACK_OF(X509) *extraCertsOut);
|
||||
/* certificate template: */
|
||||
@ -499,6 +503,7 @@ ASN1_OCTET_STRING *OSSL_CMP_HDR_get0_recipNonce(const OSSL_CMP_PKIHEADER *hdr);
|
||||
OSSL_CMP_PKIHEADER *OSSL_CMP_MSG_get0_header(const OSSL_CMP_MSG *msg);
|
||||
int OSSL_CMP_MSG_get_bodytype(const OSSL_CMP_MSG *msg);
|
||||
int OSSL_CMP_MSG_update_transactionID(OSSL_CMP_CTX *ctx, OSSL_CMP_MSG *msg);
|
||||
int OSSL_CMP_MSG_update_recipNonce(OSSL_CMP_CTX *ctx, OSSL_CMP_MSG *msg);
|
||||
OSSL_CRMF_MSG *OSSL_CMP_CTX_setup_CRM(OSSL_CMP_CTX *ctx, int for_KUR, int rid);
|
||||
OSSL_CMP_MSG *OSSL_CMP_MSG_read(const char *file, OSSL_LIB_CTX *libctx,
|
||||
const char *propq);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* WARNING: do not edit!
|
||||
* Generated by Makefile from ../include/openssl/opensslv.h.in
|
||||
* Generated by Makefile from include/openssl/opensslv.h.in
|
||||
*
|
||||
* Copyright 1999-2020 The OpenSSL Project Authors. All Rights Reserved.
|
||||
*
|
||||
@ -29,7 +29,7 @@ extern "C" {
|
||||
*/
|
||||
# define OPENSSL_VERSION_MAJOR 3
|
||||
# define OPENSSL_VERSION_MINOR 0
|
||||
# define OPENSSL_VERSION_PATCH 7
|
||||
# define OPENSSL_VERSION_PATCH 10
|
||||
|
||||
/*
|
||||
* Additional version information
|
||||
@ -74,21 +74,21 @@ extern "C" {
|
||||
* longer variant with OPENSSL_VERSION_PRE_RELEASE_STR and
|
||||
* OPENSSL_VERSION_BUILD_METADATA_STR appended.
|
||||
*/
|
||||
# define OPENSSL_VERSION_STR "3.0.7"
|
||||
# define OPENSSL_FULL_VERSION_STR "3.0.7"
|
||||
# define OPENSSL_VERSION_STR "3.0.10"
|
||||
# define OPENSSL_FULL_VERSION_STR "3.0.10"
|
||||
|
||||
/*
|
||||
* SECTION 3: ADDITIONAL METADATA
|
||||
*
|
||||
* These strings are defined separately to allow them to be parsable.
|
||||
*/
|
||||
# define OPENSSL_RELEASE_DATE "1 Nov 2022"
|
||||
# define OPENSSL_RELEASE_DATE "1 Aug 2023"
|
||||
|
||||
/*
|
||||
* SECTION 4: BACKWARD COMPATIBILITY
|
||||
*/
|
||||
|
||||
# define OPENSSL_VERSION_TEXT "OpenSSL 3.0.7 1 Nov 2022"
|
||||
# define OPENSSL_VERSION_TEXT "OpenSSL 3.0.10 1 Aug 2023"
|
||||
|
||||
/* Synthesize OPENSSL_VERSION_NUMBER with the layout 0xMNN00PPSL */
|
||||
# ifdef OPENSSL_VERSION_PRE_RELEASE
|
||||
|
@ -1,8 +1,8 @@
|
||||
/*
|
||||
* WARNING: do not edit!
|
||||
* Generated by Makefile from ../include/openssl/x509v3.h.in
|
||||
* Generated by Makefile from include/openssl/x509v3.h.in
|
||||
*
|
||||
* Copyright 1999-2021 The OpenSSL Project Authors. All Rights Reserved.
|
||||
* Copyright 1999-2023 The OpenSSL Project Authors. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License 2.0 (the "License"). You may not use
|
||||
* this file except in compliance with the License. You can obtain a copy
|
||||
@ -177,7 +177,7 @@ typedef struct GENERAL_NAME_st {
|
||||
OTHERNAME *otherName; /* otherName */
|
||||
ASN1_IA5STRING *rfc822Name;
|
||||
ASN1_IA5STRING *dNSName;
|
||||
ASN1_TYPE *x400Address;
|
||||
ASN1_STRING *x400Address;
|
||||
X509_NAME *directoryName;
|
||||
EDIPARTYNAME *ediPartyName;
|
||||
ASN1_IA5STRING *uniformResourceIdentifier;
|
||||
|
@ -1,8 +1,8 @@
|
||||
/*
|
||||
* WARNING: do not edit!
|
||||
* Generated by Makefile from ../include/openssl/cmp.h.in
|
||||
* Generated by Makefile from include/openssl/cmp.h.in
|
||||
*
|
||||
* Copyright 2007-2021 The OpenSSL Project Authors. All Rights Reserved.
|
||||
* Copyright 2007-2023 The OpenSSL Project Authors. All Rights Reserved.
|
||||
* Copyright Nokia 2007-2019
|
||||
* Copyright Siemens AG 2015-2019
|
||||
*
|
||||
@ -193,6 +193,9 @@ typedef ASN1_BIT_STRING OSSL_CMP_PKIFAILUREINFO;
|
||||
* -- CertReqMsg
|
||||
* }
|
||||
*/
|
||||
# define OSSL_CMP_PKISTATUS_request -3
|
||||
# define OSSL_CMP_PKISTATUS_trans -2
|
||||
# define OSSL_CMP_PKISTATUS_unspecified -1
|
||||
# define OSSL_CMP_PKISTATUS_accepted 0
|
||||
# define OSSL_CMP_PKISTATUS_grantedWithMods 1
|
||||
# define OSSL_CMP_PKISTATUS_rejection 2
|
||||
@ -439,11 +442,12 @@ int OSSL_CMP_CTX_build_cert_chain(OSSL_CMP_CTX *ctx, X509_STORE *own_trusted,
|
||||
int OSSL_CMP_CTX_set1_pkey(OSSL_CMP_CTX *ctx, EVP_PKEY *pkey);
|
||||
int OSSL_CMP_CTX_set1_referenceValue(OSSL_CMP_CTX *ctx,
|
||||
const unsigned char *ref, int len);
|
||||
int OSSL_CMP_CTX_set1_secretValue(OSSL_CMP_CTX *ctx, const unsigned char *sec,
|
||||
const int len);
|
||||
int OSSL_CMP_CTX_set1_secretValue(OSSL_CMP_CTX *ctx,
|
||||
const unsigned char *sec, int len);
|
||||
/* CMP message header and extra certificates: */
|
||||
int OSSL_CMP_CTX_set1_recipient(OSSL_CMP_CTX *ctx, const X509_NAME *name);
|
||||
int OSSL_CMP_CTX_push0_geninfo_ITAV(OSSL_CMP_CTX *ctx, OSSL_CMP_ITAV *itav);
|
||||
int OSSL_CMP_CTX_reset_geninfo_ITAVs(OSSL_CMP_CTX *ctx);
|
||||
int OSSL_CMP_CTX_set1_extraCertsOut(OSSL_CMP_CTX *ctx,
|
||||
STACK_OF(X509) *extraCertsOut);
|
||||
/* certificate template: */
|
||||
@ -499,6 +503,7 @@ ASN1_OCTET_STRING *OSSL_CMP_HDR_get0_recipNonce(const OSSL_CMP_PKIHEADER *hdr);
|
||||
OSSL_CMP_PKIHEADER *OSSL_CMP_MSG_get0_header(const OSSL_CMP_MSG *msg);
|
||||
int OSSL_CMP_MSG_get_bodytype(const OSSL_CMP_MSG *msg);
|
||||
int OSSL_CMP_MSG_update_transactionID(OSSL_CMP_CTX *ctx, OSSL_CMP_MSG *msg);
|
||||
int OSSL_CMP_MSG_update_recipNonce(OSSL_CMP_CTX *ctx, OSSL_CMP_MSG *msg);
|
||||
OSSL_CRMF_MSG *OSSL_CMP_CTX_setup_CRM(OSSL_CMP_CTX *ctx, int for_KUR, int rid);
|
||||
OSSL_CMP_MSG *OSSL_CMP_MSG_read(const char *file, OSSL_LIB_CTX *libctx,
|
||||
const char *propq);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* WARNING: do not edit!
|
||||
* Generated by Makefile from ../include/openssl/opensslv.h.in
|
||||
* Generated by Makefile from include/openssl/opensslv.h.in
|
||||
*
|
||||
* Copyright 1999-2020 The OpenSSL Project Authors. All Rights Reserved.
|
||||
*
|
||||
@ -29,7 +29,7 @@ extern "C" {
|
||||
*/
|
||||
# define OPENSSL_VERSION_MAJOR 3
|
||||
# define OPENSSL_VERSION_MINOR 0
|
||||
# define OPENSSL_VERSION_PATCH 7
|
||||
# define OPENSSL_VERSION_PATCH 10
|
||||
|
||||
/*
|
||||
* Additional version information
|
||||
@ -74,21 +74,21 @@ extern "C" {
|
||||
* longer variant with OPENSSL_VERSION_PRE_RELEASE_STR and
|
||||
* OPENSSL_VERSION_BUILD_METADATA_STR appended.
|
||||
*/
|
||||
# define OPENSSL_VERSION_STR "3.0.7"
|
||||
# define OPENSSL_FULL_VERSION_STR "3.0.7"
|
||||
# define OPENSSL_VERSION_STR "3.0.10"
|
||||
# define OPENSSL_FULL_VERSION_STR "3.0.10"
|
||||
|
||||
/*
|
||||
* SECTION 3: ADDITIONAL METADATA
|
||||
*
|
||||
* These strings are defined separately to allow them to be parsable.
|
||||
*/
|
||||
# define OPENSSL_RELEASE_DATE "1 Nov 2022"
|
||||
# define OPENSSL_RELEASE_DATE "1 Aug 2023"
|
||||
|
||||
/*
|
||||
* SECTION 4: BACKWARD COMPATIBILITY
|
||||
*/
|
||||
|
||||
# define OPENSSL_VERSION_TEXT "OpenSSL 3.0.7 1 Nov 2022"
|
||||
# define OPENSSL_VERSION_TEXT "OpenSSL 3.0.10 1 Aug 2023"
|
||||
|
||||
/* Synthesize OPENSSL_VERSION_NUMBER with the layout 0xMNN00PPSL */
|
||||
# ifdef OPENSSL_VERSION_PRE_RELEASE
|
||||
|
@ -1,8 +1,8 @@
|
||||
/*
|
||||
* WARNING: do not edit!
|
||||
* Generated by Makefile from ../include/openssl/x509v3.h.in
|
||||
* Generated by Makefile from include/openssl/x509v3.h.in
|
||||
*
|
||||
* Copyright 1999-2021 The OpenSSL Project Authors. All Rights Reserved.
|
||||
* Copyright 1999-2023 The OpenSSL Project Authors. All Rights Reserved.
|
||||
*
|
||||
* Licensed under the Apache License 2.0 (the "License"). You may not use
|
||||
* this file except in compliance with the License. You can obtain a copy
|
||||
@ -177,7 +177,7 @@ typedef struct GENERAL_NAME_st {
|
||||
OTHERNAME *otherName; /* otherName */
|
||||
ASN1_IA5STRING *rfc822Name;
|
||||
ASN1_IA5STRING *dNSName;
|
||||
ASN1_TYPE *x400Address;
|
||||
ASN1_STRING *x400Address;
|
||||
X509_NAME *directoryName;
|
||||
EDIPARTYNAME *ediPartyName;
|
||||
ASN1_IA5STRING *uniformResourceIdentifier;
|
||||
|
2
contrib/orc
vendored
@ -1 +1 @@
Subproject commit 568d1d60c250af1890f226c182bc15bd8cc94cf1
Subproject commit a20d1d9d7ad4a4be7b7ba97588e16ca8b9abb2b6
@ -32,7 +32,6 @@ set(RE2_SOURCES
    ${SRC_DIR}/re2/tostring.cc
    ${SRC_DIR}/re2/unicode_casefold.cc
    ${SRC_DIR}/re2/unicode_groups.cc
    ${SRC_DIR}/util/pcre.cc
    ${SRC_DIR}/util/rune.cc
    ${SRC_DIR}/util/strutil.cc
)
1
contrib/robin-map
vendored
Submodule
@ -0,0 +1 @@
Subproject commit 851a59e0e3063ee0e23089062090a73fd3de482d
1
contrib/robin-map-cmake/CMakeLists.txt
Normal file
@ -0,0 +1 @@
# See contrib/usearch-cmake/CMakeLists.txt
2
contrib/snappy
vendored
@ -1 +1 @@
Subproject commit fb057edfed820212076239fd32cb2ff23e9016bf
Subproject commit 6ebb5b1ab8801ea3fde103c5c29f5ab86df5fe7a
@ -20,6 +20,7 @@ echo '/boost/context/*' >> $FILES_TO_CHECKOUT
echo '/boost/convert/*' >> $FILES_TO_CHECKOUT
echo '/boost/coroutine/*' >> $FILES_TO_CHECKOUT
echo '/boost/core/*' >> $FILES_TO_CHECKOUT
echo '/boost/describe/*' >> $FILES_TO_CHECKOUT
echo '/boost/detail/*' >> $FILES_TO_CHECKOUT
echo '/boost/dynamic_bitset/*' >> $FILES_TO_CHECKOUT
echo '/boost/exception/*' >> $FILES_TO_CHECKOUT
1
contrib/usearch
vendored
Submodule
@ -0,0 +1 @@
Subproject commit f942b6f334b31716f9bdb02eb6a25fa6b222f5ba
17
contrib/usearch-cmake/CMakeLists.txt
Normal file
@ -0,0 +1,17 @@
set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch")
set(USEARCH_SOURCE_DIR "${USEARCH_PROJECT_DIR}/include")

set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16")
set(ROBIN_MAP_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/robin-map")
set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")

add_library(_usearch INTERFACE)

target_include_directories(_usearch SYSTEM INTERFACE
    ${FP16_PROJECT_DIR}/include
    ${ROBIN_MAP_PROJECT_DIR}/include
    ${SIMSIMD_PROJECT_DIR}/include
    ${USEARCH_SOURCE_DIR})

add_library(ch_contrib::usearch ALIAS _usearch)
target_compile_definitions(_usearch INTERFACE ENABLE_USEARCH)
@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
    esac

ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release"
ARG VERSION="23.7.4.5"
ARG VERSION="23.7.5.30"
ARG PACKAGES="clickhouse-keeper"

# user/group precreated explicitly with fixed uid/gid on purpose.

@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="23.7.4.5"
ARG VERSION="23.7.5.30"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# user/group precreated explicitly with fixed uid/gid on purpose.

@ -23,7 +23,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="23.7.4.5"
ARG VERSION="23.7.5.30"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

# set non-empty deb_location_url url to create a docker image
@ -5,19 +5,124 @@
# and their names will contain a hash of the table structure,
# which allows exporting tables from servers of different versions.

# Config file contains KEY=VALUE pairs with any necessary parameters like:
# CLICKHOUSE_CI_LOGS_HOST - remote host
# CLICKHOUSE_CI_LOGS_USER - user for the connection
# CLICKHOUSE_CI_LOGS_PASSWORD - password for user
CLICKHOUSE_CI_LOGS_CREDENTIALS=${CLICKHOUSE_CI_LOGS_CREDENTIALS:-/tmp/export-logs-config.sh}
CLICKHOUSE_CI_LOGS_USER=${CLICKHOUSE_CI_LOGS_USER:-ci}
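A minimal sketch of such a credentials file, with placeholder values only (the real host, user, and password are supplied by the CI environment):

```sh
# Hypothetical /tmp/export-logs-config.sh -- every value below is a placeholder
CLICKHOUSE_CI_LOGS_HOST=logs.example.clickhouse.cloud
CLICKHOUSE_CI_LOGS_USER=ci
CLICKHOUSE_CI_LOGS_PASSWORD=example-password
```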
# Pre-configured destination cluster, where to export the data
CLUSTER=${CLUSTER:=system_logs_export}
CLICKHOUSE_CI_LOGS_CLUSTER=${CLICKHOUSE_CI_LOGS_CLUSTER:-system_logs_export}

EXTRA_COLUMNS=${EXTRA_COLUMNS:="pull_request_number UInt32, commit_sha String, check_start_time DateTime, check_name LowCardinality(String), instance_type LowCardinality(String), "}
EXTRA_COLUMNS_EXPRESSION=${EXTRA_COLUMNS_EXPRESSION:="0 AS pull_request_number, '' AS commit_sha, now() AS check_start_time, '' AS check_name, '' AS instance_type"}
EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:="check_name, "}
EXTRA_COLUMNS=${EXTRA_COLUMNS:-"pull_request_number UInt32, commit_sha String, check_start_time DateTime, check_name LowCardinality(String), instance_type LowCardinality(String), "}
EXTRA_COLUMNS_EXPRESSION=${EXTRA_COLUMNS_EXPRESSION:-"0 AS pull_request_number, '' AS commit_sha, now() AS check_start_time, '' AS check_name, '' AS instance_type"}
EXTRA_ORDER_BY_COLUMNS=${EXTRA_ORDER_BY_COLUMNS:-"check_name, "}

CONNECTION_PARAMETERS=${CONNECTION_PARAMETERS:=""}
function __set_connection_args
{
    # It's impossible to use a generic $CONNECTION_ARGS string, it's unsafe from a word-splitting perspective.
    # That's why we must stick to the generated option
    CONNECTION_ARGS=(
        --receive_timeout=10 --send_timeout=10 --secure
        --user "${CLICKHOUSE_CI_LOGS_USER}" --host "${CLICKHOUSE_CI_LOGS_HOST}"
        --password "${CLICKHOUSE_CI_LOGS_PASSWORD}"
    )
}

# Create all configured system logs:
function __shadow_credentials
{
    # The function completely screws the output, it shouldn't be used in normal functions, only in ()
    # The only way to substitute the env as a plain text is using perl 's/\Qsomething\E/another/
    exec &> >(perl -pe '
        s(\Q$ENV{CLICKHOUSE_CI_LOGS_HOST}\E)[CLICKHOUSE_CI_LOGS_HOST]g;
        s(\Q$ENV{CLICKHOUSE_CI_LOGS_USER}\E)[CLICKHOUSE_CI_LOGS_USER]g;
        s(\Q$ENV{CLICKHOUSE_CI_LOGS_PASSWORD}\E)[CLICKHOUSE_CI_LOGS_PASSWORD]g;
    ')
}

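A usage sketch of the masking above, with made-up values; after sourcing this script in a subshell, any occurrence of a credential in the output is replaced by its variable name:

```sh
# Demonstration only -- assumes /setup_export_logs.sh has been sourced
(
    export CLICKHOUSE_CI_LOGS_HOST=logs.example.clickhouse.cloud
    export CLICKHOUSE_CI_LOGS_USER=ci
    export CLICKHOUSE_CI_LOGS_PASSWORD=example-password
    __shadow_credentials
    echo "connecting to logs.example.clickhouse.cloud as ci"
    # prints: connecting to CLICKHOUSE_CI_LOGS_HOST as CLICKHOUSE_CI_LOGS_USER
)
```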
function check_logs_credentials
(
    # The function connects with given credentials, and if it's unable to execute the simplest query, returns the exit code

    # First check if all necessary parameters are set
    set +x
    for parameter in CLICKHOUSE_CI_LOGS_HOST CLICKHOUSE_CI_LOGS_USER CLICKHOUSE_CI_LOGS_PASSWORD; do
        export -p | grep -q "$parameter" || {
            echo "Credentials parameter $parameter is unset"
            return 1
        }
    done

    __shadow_credentials
    __set_connection_args
    local code
    # Catch both success and error to not fail on `set -e`
    clickhouse-client "${CONNECTION_ARGS[@]}" -q 'SELECT 1 FORMAT Null' && return 0 || code=$?
    if [ "$code" != 0 ]; then
        echo 'Failed to connect to CI Logs cluster'
        return $code
    fi
)

function config_logs_export_cluster
(
    # The function is launched in a separate shell instance to not expose the
    # exported values from CLICKHOUSE_CI_LOGS_CREDENTIALS
    set +x
    if ! [ -r "${CLICKHOUSE_CI_LOGS_CREDENTIALS}" ]; then
        echo "File $CLICKHOUSE_CI_LOGS_CREDENTIALS does not exist, do not setup"
        return
    fi
    set -a
    # shellcheck disable=SC1090
    source "${CLICKHOUSE_CI_LOGS_CREDENTIALS}"
    set +a
    __shadow_credentials
    echo "Checking if the credentials work"
    check_logs_credentials || return 0
    cluster_config="${1:-/etc/clickhouse-server/config.d/system_logs_export.yaml}"
    mkdir -p "$(dirname "$cluster_config")"
    echo "remote_servers:
  ${CLICKHOUSE_CI_LOGS_CLUSTER}:
    shard:
      replica:
        secure: 1
        user: '${CLICKHOUSE_CI_LOGS_USER}'
        host: '${CLICKHOUSE_CI_LOGS_HOST}'
        port: 9440
        password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > "$cluster_config"
    echo "Cluster ${CLICKHOUSE_CI_LOGS_CLUSTER} is configured in ${cluster_config}"
)

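The intended call sequence, as the test images below use it, is roughly:

```sh
# shellcheck disable=SC1091
source /setup_export_logs.sh
# Before the server starts: write the remote_servers config
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml
# ... start clickhouse-server ...
# After the server is up: create the destination tables, senders and watchers
setup_logs_replication
```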
function setup_logs_replication
(
    # The function is launched in a separate shell instance to not expose the
    # exported values from CLICKHOUSE_CI_LOGS_CREDENTIALS
    set +x
    # disable output
    if ! [ -r "${CLICKHOUSE_CI_LOGS_CREDENTIALS}" ]; then
        echo "File $CLICKHOUSE_CI_LOGS_CREDENTIALS does not exist, do not setup"
        return 0
    fi
    set -a
    # shellcheck disable=SC1090
    source "${CLICKHOUSE_CI_LOGS_CREDENTIALS}"
    set +a
    __shadow_credentials
    echo "Checking if the credentials work"
    check_logs_credentials || return 0
    __set_connection_args

    echo 'Create all configured system logs'
    clickhouse-client --query "SYSTEM FLUSH LOGS"

    # It doesn't make sense to try creating tables if SYNC fails
    echo "SYSTEM SYNC DATABASE REPLICA default" | clickhouse-client --receive_timeout 180 "${CONNECTION_ARGS[@]}" || return 0

    # For each system log table:
    echo 'Create %_log tables'
    clickhouse-client --query "SHOW TABLES FROM system LIKE '%\\_log'" | while read -r table
    do
        # Calculate hash of its structure:
@ -36,20 +141,23 @@ do
        /^TTL /d
    ')

    echo "Creating destination table ${table}_${hash}" >&2
    echo -e "Creating remote destination table ${table}_${hash} with statement:\n${statement}" >&2

    echo "$statement" | clickhouse-client $CONNECTION_PARAMETERS
    echo "$statement" | clickhouse-client --database_replicated_initial_query_timeout_sec=10 \
        --distributed_ddl_task_timeout=10 --receive_timeout=10 --send_timeout=10 \
        "${CONNECTION_ARGS[@]}" || continue

    echo "Creating table system.${table}_sender" >&2

    # Create Distributed table and materialized view to watch on the original table:
    clickhouse-client --query "
        CREATE TABLE system.${table}_sender
        ENGINE = Distributed(${CLUSTER}, default, ${table}_${hash})
        ENGINE = Distributed(${CLICKHOUSE_CI_LOGS_CLUSTER}, default, ${table}_${hash})
        SETTINGS flush_on_detach=0
        EMPTY AS
        SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
        FROM system.${table}
    "
    " || continue

    echo "Creating materialized view system.${table}_watcher" >&2

@ -57,5 +165,6 @@ do
        CREATE MATERIALIZED VIEW system.${table}_watcher TO system.${table}_sender AS
        SELECT ${EXTRA_COLUMNS_EXPRESSION}, *
        FROM system.${table}
    "
    " || continue
done
)
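For a single table such as `system.query_log`, the loop above therefore issues statements along these lines (the structure hash `abc123` and the default extra columns stand in for the generated values):

```sh
# Sketch only -- the real statements are generated from the table structure
clickhouse-client --query "
    CREATE TABLE system.query_log_sender
    ENGINE = Distributed(system_logs_export, default, query_log_abc123)
    SETTINGS flush_on_detach=0
    EMPTY AS
    SELECT 0 AS pull_request_number, '' AS commit_sha, now() AS check_start_time,
        '' AS check_name, '' AS instance_type, *
    FROM system.query_log
"
clickhouse-client --query "
    CREATE MATERIALIZED VIEW system.query_log_watcher TO system.query_log_sender AS
    SELECT 0 AS pull_request_number, '' AS commit_sha, now() AS check_start_time,
        '' AS check_name, '' AS instance_type, *
    FROM system.query_log
"
```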
@ -1,6 +1,8 @@
#!/bin/bash
# shellcheck disable=SC2086,SC2001,SC2046,SC2030,SC2031,SC2010,SC2015

# shellcheck disable=SC1091
source /setup_export_logs.sh
set -x

# core.COMM.PID-TID
@ -123,22 +125,7 @@ EOL
</clickhouse>
EOL

# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
    echo "
remote_servers:
  system_logs_export:
    shard:
      replica:
        secure: 1
        user: ci
        host: '${CLICKHOUSE_CI_LOGS_HOST}'
        port: 9440
        password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > db/config.d/system_logs_export.yaml
fi
config_logs_export_cluster db/config.d/system_logs_export.yaml
}

function filter_exists_and_template
@ -242,20 +229,7 @@ quit
kill -0 $server_pid # This checks that it is our server that is started and not some other one
echo 'Server started and responded'

# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
    export EXTRA_COLUMNS_EXPRESSION="$PR_TO_TEST AS pull_request_number, '$SHA_TO_TEST' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
    # TODO: Check if the password will appear in the logs.
    export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"

    /setup_export_logs.sh

    # Unset variables after use
    export CONNECTION_PARAMETERS=''
    export CLICKHOUSE_CI_LOGS_HOST=''
    export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi
setup_logs_replication

# SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric.
# SC2046: Quote this to prevent word splitting. Actually I need word splitting.
@ -6,7 +6,7 @@ RUN apt-get install -y wget openjdk-8-jre

RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-3.1.0/hadoop-3.1.0.tar.gz && \
    tar -xf hadoop-3.1.0.tar.gz && rm -rf hadoop-3.1.0.tar.gz
RUN wget https://dlcdn.apache.org/hive/hive-2.3.9/apache-hive-2.3.9-bin.tar.gz && \
RUN wget https://archive.apache.org/dist/hive/hive-2.3.9/apache-hive-2.3.9-bin.tar.gz && \
    tar -xf apache-hive-2.3.9-bin.tar.gz && rm -rf apache-hive-2.3.9-bin.tar.gz
RUN apt install -y vim

@ -103,7 +103,7 @@ RUN python3 -m pip install --no-cache-dir \
    urllib3

# Hudi supports only spark 3.3.*, not 3.4
RUN curl -fsSL -O https://dlcdn.apache.org/spark/spark-3.3.2/spark-3.3.2-bin-hadoop3.tgz \
RUN curl -fsSL -O https://archive.apache.org/dist/spark/spark-3.3.2/spark-3.3.2-bin-hadoop3.tgz \
    && tar xzvf spark-3.3.2-bin-hadoop3.tgz -C / \
    && rm spark-3.3.2-bin-hadoop3.tgz

@ -20,6 +20,9 @@ services:
            - type: ${keeper_fs:-tmpfs}
              source: ${keeper_db_dir1:-}
              target: /var/lib/clickhouse-keeper
            - type: ${keeper_fs:-tmpfs}
              source: ${keeper_db_dir1:-}
              target: /var/lib/clickhouse
        entrypoint: "${keeper_cmd_prefix:-clickhouse keeper} --config=/etc/clickhouse-keeper/keeper_config1.xml --log-file=/var/log/clickhouse-keeper/clickhouse-keeper.log --errorlog-file=/var/log/clickhouse-keeper/clickhouse-keeper.err.log"
        cap_add:
            - SYS_PTRACE
@ -53,6 +56,9 @@ services:
            - type: ${keeper_fs:-tmpfs}
              source: ${keeper_db_dir2:-}
              target: /var/lib/clickhouse-keeper
            - type: ${keeper_fs:-tmpfs}
              source: ${keeper_db_dir1:-}
              target: /var/lib/clickhouse
        entrypoint: "${keeper_cmd_prefix:-clickhouse keeper} --config=/etc/clickhouse-keeper/keeper_config2.xml --log-file=/var/log/clickhouse-keeper/clickhouse-keeper.log --errorlog-file=/var/log/clickhouse-keeper/clickhouse-keeper.err.log"
        cap_add:
            - SYS_PTRACE
@ -86,6 +92,9 @@ services:
            - type: ${keeper_fs:-tmpfs}
              source: ${keeper_db_dir3:-}
              target: /var/lib/clickhouse-keeper
            - type: ${keeper_fs:-tmpfs}
              source: ${keeper_db_dir1:-}
              target: /var/lib/clickhouse
        entrypoint: "${keeper_cmd_prefix:-clickhouse keeper} --config=/etc/clickhouse-keeper/keeper_config3.xml --log-file=/var/log/clickhouse-keeper/clickhouse-keeper.log --errorlog-file=/var/log/clickhouse-keeper/clickhouse-keeper.err.log"
        cap_add:
            - SYS_PTRACE
@ -644,7 +644,7 @@ function report
rm -r report ||:
mkdir report report/tmp ||:

rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.tsv unstable-query-metrics.tsv changed-perf.tsv unstable-tests.tsv unstable-queries.tsv bad-tests.tsv slow-on-client.tsv all-queries.tsv run-errors.tsv ||:
rm ./*.{rep,svg} test-times.tsv test-dump.tsv unstable.tsv unstable-query-ids.tsv unstable-query-metrics.tsv changed-perf.tsv unstable-tests.tsv unstable-queries.tsv bad-tests.tsv all-queries.tsv run-errors.tsv ||:

cat analyze/errors.log >> report/errors.log ||:
cat profile-errors.log >> report/errors.log ||:
@ -810,12 +810,6 @@ create view total_client_time_per_query as select *
    from file('analyze/client-times.tsv', TSV,
        'test text, query_index int, client float, server float');

create table slow_on_client_report engine File(TSV, 'report/slow-on-client.tsv')
    as select client, server, round(client/server, 3) p,
        test, query_display_name
    from total_client_time_per_query left join query_display_names using (test, query_index)
    where p > round(1.02, 3) order by p desc;

create table wall_clock_time_per_test engine Memory as select *
    from file('wall-clock-times.tsv', TSV, 'test text, real float, user float, system float');

@ -21,6 +21,7 @@
    <!-- disable JIT for perf tests -->
    <compile_expressions>0</compile_expressions>
    <compile_aggregate_expressions>0</compile_aggregate_expressions>
    <compile_sort_description>0</compile_sort_description>

    <!-- Don't fail some prewarm queries too early -->
    <timeout_before_checking_execution_speed>60</timeout_before_checking_execution_speed>
@ -364,20 +364,6 @@ if args.report == "main":
|
||||
]
|
||||
)
|
||||
|
||||
slow_on_client_rows = tsvRows("report/slow-on-client.tsv")
|
||||
error_tests += len(slow_on_client_rows)
|
||||
addSimpleTable(
|
||||
"Slow on Client",
|
||||
["Client time, s", "Server time, s", "Ratio", "Test", "Query"],
|
||||
slow_on_client_rows,
|
||||
)
|
||||
if slow_on_client_rows:
|
||||
errors_explained.append(
|
||||
[
|
||||
f'<a href="#{currentTableAnchor()}">Some queries are taking noticeable time client-side (missing `FORMAT Null`?)</a>'
|
||||
]
|
||||
)
|
||||
|
||||
def add_backward_incompatible():
|
||||
rows = tsvRows("report/partial-queries-report.tsv")
|
||||
if not rows:
|
||||
|
@ -1,5 +1,7 @@
#!/bin/bash

# shellcheck disable=SC1091
source /setup_export_logs.sh
set -e -x

# Choose random timezone for this test run
@ -20,21 +22,7 @@ ln -s /usr/share/clickhouse-test/clickhouse-test /usr/bin/clickhouse-test
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateful

# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
    echo "
remote_servers:
  system_logs_export:
    shard:
      replica:
        secure: 1
        user: ci
        host: '${CLICKHOUSE_CI_LOGS_HOST}'
        password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
fi
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

function start()
{
@ -82,20 +70,7 @@ function start()

start

# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
    export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
    # TODO: Check if the password will appear in the logs.
    export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"

    ./setup_export_logs.sh

    # Unset variables after use
    export CONNECTION_PARAMETERS=''
    export CLICKHOUSE_CI_LOGS_HOST=''
    export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi
setup_logs_replication

# shellcheck disable=SC2086 # No quotes because I want to split it into words.
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
@ -73,7 +73,7 @@ RUN arch=${TARGETARCH:-amd64} \
    && chmod +x ./mc ./minio


RUN wget --no-verbose 'https://dlcdn.apache.org/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \
RUN wget --no-verbose 'https://archive.apache.org/dist/hadoop/common/hadoop-3.3.1/hadoop-3.3.1.tar.gz' \
    && tar -xvf hadoop-3.3.1.tar.gz \
    && rm -rf hadoop-3.3.1.tar.gz

@ -1,5 +1,8 @@
#!/bin/bash

# shellcheck disable=SC1091
source /setup_export_logs.sh

# fail on errors, verbose and export all env variables
set -e -x -a

@ -36,21 +39,7 @@ fi
./setup_minio.sh stateless
./setup_hdfs_minicluster.sh

# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
    echo "
remote_servers:
  system_logs_export:
    shard:
      replica:
        secure: 1
        user: ci
        host: '${CLICKHOUSE_CI_LOGS_HOST}'
        password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
fi
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

# For flaky check we also enable thread fuzzer
if [ "$NUM_TRIES" -gt "1" ]; then
@ -116,20 +105,7 @@ do
    sleep 1
done

# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
    export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
    # TODO: Check if the password will appear in the logs.
    export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"

    ./setup_export_logs.sh

    # Unset variables after use
    export CONNECTION_PARAMETERS=''
    export CLICKHOUSE_CI_LOGS_HOST=''
    export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi
setup_logs_replication

attach_gdb_to_clickhouse || true # FIXME: to not break old builds, clean on 2023-09-01

@ -5,6 +5,8 @@

# Avoid overlaps with previous runs
dmesg --clear
# shellcheck disable=SC1091
source /setup_export_logs.sh

set -x

@ -51,38 +53,11 @@ configure
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
./setup_minio.sh stateless # to have a proper environment

# Setup a cluster for logs export to ClickHouse Cloud
# Note: these variables are provided to the Docker run command by the Python script in tests/ci
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
    echo "
remote_servers:
  system_logs_export:
    shard:
      replica:
        secure: 1
        user: ci
        host: '${CLICKHOUSE_CI_LOGS_HOST}'
        password: '${CLICKHOUSE_CI_LOGS_PASSWORD}'
" > /etc/clickhouse-server/config.d/system_logs_export.yaml
fi
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

start

# Initialize export of system logs to ClickHouse Cloud
if [ -n "${CLICKHOUSE_CI_LOGS_HOST}" ]
then
    export EXTRA_COLUMNS_EXPRESSION="$PULL_REQUEST_NUMBER AS pull_request_number, '$COMMIT_SHA' AS commit_sha, '$CHECK_START_TIME' AS check_start_time, '$CHECK_NAME' AS check_name, '$INSTANCE_TYPE' AS instance_type"
    # TODO: Check if the password will appear in the logs.
    export CONNECTION_PARAMETERS="--secure --user ci --host ${CLICKHOUSE_CI_LOGS_HOST} --password ${CLICKHOUSE_CI_LOGS_PASSWORD}"

    ./setup_export_logs.sh

    # Unset variables after use
    export CONNECTION_PARAMETERS=''
    export CLICKHOUSE_CI_LOGS_HOST=''
    export CLICKHOUSE_CI_LOGS_PASSWORD=''
fi
setup_logs_replication

# shellcheck disable=SC2086 # No quotes because I want to split it into words.
/s3downloader --url-prefix "$S3_URL" --dataset-names $DATASETS
@ -211,6 +186,11 @@ mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/cli
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml

sudo cat /etc/clickhouse-server/config.d/logger_trace.xml \
    | sed "s|<level>trace</level>|<level>test</level>|" \
    > /etc/clickhouse-server/config.d/logger_trace.xml.tmp
mv /etc/clickhouse-server/config.d/logger_trace.xml.tmp /etc/clickhouse-server/config.d/logger_trace.xml

start

stress --hung-check --drop-databases --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION" --global-time-limit 1200 \
@ -129,6 +129,7 @@ sudo cat /etc/clickhouse-server/config.d/lost_forever_check.xml \
    | sed "s|>1<|>0<|g" \
    > /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp
sudo mv /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp /etc/clickhouse-server/config.d/lost_forever_check.xml
rm /etc/clickhouse-server/config.d/filesystem_caches_path.xml

start 500
clickhouse-client --query "SELECT 'Server successfully started', 'OK', NULL, ''" >> /test_output/test_results.tsv \
36
docs/changelogs/v22.8.21.38-lts.md
Normal file
36
docs/changelogs/v22.8.21.38-lts.md
Normal file
@ -0,0 +1,36 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v22.8.21.38-lts (70872e9859e) FIXME as compared to v22.8.20.11-lts (c9ca79e24e8)

#### Build/Testing/Packaging Improvement
* Backported in [#53017](https://github.com/ClickHouse/ClickHouse/issues/53017): Packing inline cache into docker images sometimes causes strange special effects. Since we don't use it at all, it's good to go. [#53008](https://github.com/ClickHouse/ClickHouse/pull/53008) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#53459](https://github.com/ClickHouse/ClickHouse/issues/53459): Preserve environment parameters in `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix Block structure mismatch in Pipe::unitePipes for FINAL [#51492](https://github.com/ClickHouse/ClickHouse/pull/51492) ([Nikita Taranov](https://github.com/nickitat)).
* Fix ORDER BY tuple of WINDOW functions [#52145](https://github.com/ClickHouse/ClickHouse/pull/52145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix `countSubstrings()` hang with empty needle and a column haystack [#52409](https://github.com/ClickHouse/ClickHouse/pull/52409) ([Sergei Trifonov](https://github.com/serxa)).
* The implementation of AnyHash was non-conformant. [#52448](https://github.com/ClickHouse/ClickHouse/pull/52448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Init and destroy ares channel on demand. [#52634](https://github.com/ClickHouse/ClickHouse/pull/52634) ([Arthur Passos](https://github.com/arthurpassos)).
* clickhouse-keeper: fix implementation of server with poll() [#52833](https://github.com/ClickHouse/ClickHouse/pull/52833) ([Andy Fiddaman](https://github.com/citrus-it)).
* Not-ready Set [#53162](https://github.com/ClickHouse/ClickHouse/pull/53162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
* Fix: interpolate expression takes source column instead of same name aliased from select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Correctly handle totals and extremes with `DelayedSource` [#53644](https://github.com/ClickHouse/ClickHouse/pull/53644) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Fix crash in comparison functions due to incorrect query analysis [#52172](https://github.com/ClickHouse/ClickHouse/pull/52172) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix deadlocks in StorageTableFunctionProxy [#52626](https://github.com/ClickHouse/ClickHouse/pull/52626) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix broken `02862_sorted_distinct_sparse_fix` [#53738](https://github.com/ClickHouse/ClickHouse/pull/53738) ([Antonio Andelic](https://github.com/antonio2368)).
* Get rid of describe_parameters for the best robot token [#53833](https://github.com/ClickHouse/ClickHouse/pull/53833) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
14
docs/changelogs/v23.3.10.5-lts.md
Normal file
14
docs/changelogs/v23.3.10.5-lts.md
Normal file
@ -0,0 +1,14 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.3.10.5-lts (d8737007f9e) FIXME as compared to v23.3.9.55-lts (b9c5c8622d3)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Not-ready Set [#53162](https://github.com/ClickHouse/ClickHouse/pull/53162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Correctly handle totals and extremes with `DelayedSource` [#53644](https://github.com/ClickHouse/ClickHouse/pull/53644) ([Antonio Andelic](https://github.com/antonio2368)).
17
docs/changelogs/v23.3.11.5-lts.md
Normal file
17
docs/changelogs/v23.3.11.5-lts.md
Normal file
@ -0,0 +1,17 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.3.11.5-lts (5762a23a76d) FIXME as compared to v23.3.10.5-lts (d8737007f9e)

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Get rid of describe_parameters for the best robot token [#53833](https://github.com/ClickHouse/ClickHouse/pull/53833) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
45
docs/changelogs/v23.3.9.55-lts.md
Normal file
45
docs/changelogs/v23.3.9.55-lts.md
Normal file
@ -0,0 +1,45 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.3.9.55-lts (b9c5c8622d3) FIXME as compared to v23.3.8.21-lts (1675f2264f3)

#### Performance Improvement
* Backported in [#52213](https://github.com/ClickHouse/ClickHouse/issues/52213): Do not store blocks in `ANY` hash join if nothing is inserted. [#48633](https://github.com/ClickHouse/ClickHouse/pull/48633) ([vdimir](https://github.com/vdimir)).
* Backported in [#52826](https://github.com/ClickHouse/ClickHouse/issues/52826): Fix incorrect projection analysis which invalidates primary keys. This issue only exists when `query_plan_optimize_primary_key = 1, query_plan_optimize_projection = 1`. This fixes [#48823](https://github.com/ClickHouse/ClickHouse/issues/48823). This fixes [#51173](https://github.com/ClickHouse/ClickHouse/issues/51173). [#52308](https://github.com/ClickHouse/ClickHouse/pull/52308) ([Amos Bird](https://github.com/amosbird)).

#### Build/Testing/Packaging Improvement
* Backported in [#53019](https://github.com/ClickHouse/ClickHouse/issues/53019): Packing inline cache into docker images sometimes causes strange special effects. Since we don't use it at all, it's good to go. [#53008](https://github.com/ClickHouse/ClickHouse/pull/53008) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#53288](https://github.com/ClickHouse/ClickHouse/issues/53288): The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud, the second attempt after [#53100](https://github.com/ClickHouse/ClickHouse/issues/53100). [#53213](https://github.com/ClickHouse/ClickHouse/pull/53213) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#53461](https://github.com/ClickHouse/ClickHouse/issues/53461): Preserve environment parameters in `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix optimization to move functions before sorting. [#51481](https://github.com/ClickHouse/ClickHouse/pull/51481) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix Block structure mismatch in Pipe::unitePipes for FINAL [#51492](https://github.com/ClickHouse/ClickHouse/pull/51492) ([Nikita Taranov](https://github.com/nickitat)).
* Fix binary arithmetic for Nullable(IPv4) [#51642](https://github.com/ClickHouse/ClickHouse/pull/51642) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Support IPv4 and IPv6 as dictionary attributes [#51756](https://github.com/ClickHouse/ClickHouse/pull/51756) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix ORDER BY tuple of WINDOW functions [#52145](https://github.com/ClickHouse/ClickHouse/pull/52145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable expression templates for time intervals [#52335](https://github.com/ClickHouse/ClickHouse/pull/52335) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `countSubstrings()` hang with empty needle and a column haystack [#52409](https://github.com/ClickHouse/ClickHouse/pull/52409) ([Sergei Trifonov](https://github.com/serxa)).
* Fixed inserting into Buffer engine [#52440](https://github.com/ClickHouse/ClickHouse/pull/52440) ([Vasily Nemkov](https://github.com/Enmk)).
* The implementation of AnyHash was non-conformant. [#52448](https://github.com/ClickHouse/ClickHouse/pull/52448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Init and destroy ares channel on demand. [#52634](https://github.com/ClickHouse/ClickHouse/pull/52634) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix crash in function `tuple` with one sparse column argument [#52659](https://github.com/ClickHouse/ClickHouse/pull/52659) ([Anton Popov](https://github.com/CurtizJ)).
* clickhouse-keeper: fix implementation of server with poll() [#52833](https://github.com/ClickHouse/ClickHouse/pull/52833) ([Andy Fiddaman](https://github.com/citrus-it)).
* Fix password leak in show create mysql table [#52962](https://github.com/ClickHouse/ClickHouse/pull/52962) ([Duc Canh Le](https://github.com/canhld94)).
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
* Fix loading lazy database during system.table select query [#53372](https://github.com/ClickHouse/ClickHouse/pull/53372) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix wrong columns order for queries with parallel FINAL. [#53489](https://github.com/ClickHouse/ClickHouse/pull/53489) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix: interpolate expression takes source column instead of same name aliased from select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Fix crash in comparison functions due to incorrect query analysis [#52172](https://github.com/ClickHouse/ClickHouse/pull/52172) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix deadlocks in StorageTableFunctionProxy [#52626](https://github.com/ClickHouse/ClickHouse/pull/52626) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).
62
docs/changelogs/v23.5.5.92-stable.md
Normal file
62
docs/changelogs/v23.5.5.92-stable.md
Normal file
@ -0,0 +1,62 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.5.5.92-stable (557edaddace) FIXME as compared to v23.5.4.25-stable (190f962abcf)

#### Performance Improvement
* Backported in [#52749](https://github.com/ClickHouse/ClickHouse/issues/52749): Fix incorrect projection analysis which invalidates primary keys. This issue only exists when `query_plan_optimize_primary_key = 1, query_plan_optimize_projection = 1`. This fixes [#48823](https://github.com/ClickHouse/ClickHouse/issues/48823). This fixes [#51173](https://github.com/ClickHouse/ClickHouse/issues/51173). [#52308](https://github.com/ClickHouse/ClickHouse/pull/52308) ([Amos Bird](https://github.com/amosbird)).

#### Build/Testing/Packaging Improvement
* Backported in [#51886](https://github.com/ClickHouse/ClickHouse/issues/51886): Update cargo dependencies. [#51721](https://github.com/ClickHouse/ClickHouse/pull/51721) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#52909](https://github.com/ClickHouse/ClickHouse/issues/52909): Add `clickhouse-keeper-client` symlink to the clickhouse-server package. [#51882](https://github.com/ClickHouse/ClickHouse/pull/51882) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#53021](https://github.com/ClickHouse/ClickHouse/issues/53021): Packing inline cache into docker images sometimes causes strange special effects. Since we don't use it at all, it's good to go. [#53008](https://github.com/ClickHouse/ClickHouse/pull/53008) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#53289](https://github.com/ClickHouse/ClickHouse/issues/53289): The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud, the second attempt after [#53100](https://github.com/ClickHouse/ClickHouse/issues/53100). [#53213](https://github.com/ClickHouse/ClickHouse/pull/53213) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#53463](https://github.com/ClickHouse/ClickHouse/issues/53463): Preserve environment parameters in `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix backward compatibility for IP types hashing in aggregate functions [#50551](https://github.com/ClickHouse/ClickHouse/pull/50551) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix segfault in MathUnary [#51499](https://github.com/ClickHouse/ClickHouse/pull/51499) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
* Fix binary arithmetic for Nullable(IPv4) [#51642](https://github.com/ClickHouse/ClickHouse/pull/51642) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Support IPv4 and IPv6 as dictionary attributes [#51756](https://github.com/ClickHouse/ClickHouse/pull/51756) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix reading from empty column in `parseSipHashKey` [#51804](https://github.com/ClickHouse/ClickHouse/pull/51804) ([Nikita Taranov](https://github.com/nickitat)).
* Fix async connect to hosts with multiple ips [#51934](https://github.com/ClickHouse/ClickHouse/pull/51934) ([Kruglov Pavel](https://github.com/Avogar)).
* Allow parametric UDFs [#51964](https://github.com/ClickHouse/ClickHouse/pull/51964) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix ORDER BY tuple of WINDOW functions [#52145](https://github.com/ClickHouse/ClickHouse/pull/52145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix error in `groupArrayMoving` functions [#52161](https://github.com/ClickHouse/ClickHouse/pull/52161) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable expression templates for time intervals [#52335](https://github.com/ClickHouse/ClickHouse/pull/52335) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `countSubstrings()` hang with empty needle and a column haystack [#52409](https://github.com/ClickHouse/ClickHouse/pull/52409) ([Sergei Trifonov](https://github.com/serxa)).
* Fixed inserting into Buffer engine [#52440](https://github.com/ClickHouse/ClickHouse/pull/52440) ([Vasily Nemkov](https://github.com/Enmk)).
* The implementation of AnyHash was non-conformant. [#52448](https://github.com/ClickHouse/ClickHouse/pull/52448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix possible error "Cannot drain connections: cancel first" [#52585](https://github.com/ClickHouse/ClickHouse/pull/52585) ([Kruglov Pavel](https://github.com/Avogar)).
* Init and destroy ares channel on demand. [#52634](https://github.com/ClickHouse/ClickHouse/pull/52634) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix crash in function `tuple` with one sparse column argument [#52659](https://github.com/ClickHouse/ClickHouse/pull/52659) ([Anton Popov](https://github.com/CurtizJ)).
* clickhouse-keeper: fix implementation of server with poll() [#52833](https://github.com/ClickHouse/ClickHouse/pull/52833) ([Andy Fiddaman](https://github.com/citrus-it)).
* Fix password leak in show create mysql table [#52962](https://github.com/ClickHouse/ClickHouse/pull/52962) ([Duc Canh Le](https://github.com/canhld94)).
* Not-ready Set [#53162](https://github.com/ClickHouse/ClickHouse/pull/53162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
* Fix loading lazy database during system.table select query [#53372](https://github.com/ClickHouse/ClickHouse/pull/53372) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix: interpolate expression takes source column instead of same name aliased from select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Correctly handle totals and extremes with `DelayedSource` [#53644](https://github.com/ClickHouse/ClickHouse/pull/53644) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix fuzzer crash in parseDateTime() [#53764](https://github.com/ClickHouse/ClickHouse/pull/53764) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Decoupled commits from [#51180](https://github.com/ClickHouse/ClickHouse/issues/51180) for backports [#51561](https://github.com/ClickHouse/ClickHouse/pull/51561) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix MergeTreeMarksLoader segfaulting if marks file is longer than expected [#51636](https://github.com/ClickHouse/ClickHouse/pull/51636) ([Michael Kolupaev](https://github.com/al13n321)).
* Fix source image for sqllogic [#51728](https://github.com/ClickHouse/ClickHouse/pull/51728) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Pin rust nightly (to make it stable) [#51903](https://github.com/ClickHouse/ClickHouse/pull/51903) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash in comparison functions due to incorrect query analysis [#52172](https://github.com/ClickHouse/ClickHouse/pull/52172) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Less replication errors [#52382](https://github.com/ClickHouse/ClickHouse/pull/52382) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Improve logging macros [#52519](https://github.com/ClickHouse/ClickHouse/pull/52519) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix deadlocks in StorageTableFunctionProxy [#52626](https://github.com/ClickHouse/ClickHouse/pull/52626) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Get rid of describe_parameters for the best robot token [#53833](https://github.com/ClickHouse/ClickHouse/pull/53833) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
58
docs/changelogs/v23.6.3.87-stable.md
Normal file
58
docs/changelogs/v23.6.3.87-stable.md
Normal file
@ -0,0 +1,58 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.6.3.87-stable (36911c17d0f) FIXME as compared to v23.6.2.18-stable (89f39a7ccfe)

#### Performance Improvement
* Backported in [#52751](https://github.com/ClickHouse/ClickHouse/issues/52751): Fix incorrect projection analysis which invalidates primary keys. This issue only exists when `query_plan_optimize_primary_key = 1, query_plan_optimize_projection = 1`. This fixes [#48823](https://github.com/ClickHouse/ClickHouse/issues/48823). This fixes [#51173](https://github.com/ClickHouse/ClickHouse/issues/51173). [#52308](https://github.com/ClickHouse/ClickHouse/pull/52308) ([Amos Bird](https://github.com/amosbird)).

#### Build/Testing/Packaging Improvement
* Backported in [#52911](https://github.com/ClickHouse/ClickHouse/issues/52911): Add `clickhouse-keeper-client` symlink to the clickhouse-server package. [#51882](https://github.com/ClickHouse/ClickHouse/pull/51882) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#53023](https://github.com/ClickHouse/ClickHouse/issues/53023): Packing inline cache into docker images sometimes causes strange special effects. Since we don't use it at all, it's good to go. [#53008](https://github.com/ClickHouse/ClickHouse/pull/53008) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#53290](https://github.com/ClickHouse/ClickHouse/issues/53290): The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud, the second attempt after [#53100](https://github.com/ClickHouse/ClickHouse/issues/53100). [#53213](https://github.com/ClickHouse/ClickHouse/pull/53213) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#53465](https://github.com/ClickHouse/ClickHouse/issues/53465): Preserve environment parameters in `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Fix for moving 'IN' conditions to PREWHERE [#51610](https://github.com/ClickHouse/ClickHouse/pull/51610) ([Alexander Gololobov](https://github.com/davenger)).
* Fix binary arithmetic for Nullable(IPv4) [#51642](https://github.com/ClickHouse/ClickHouse/pull/51642) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Support IPv4 and IPv6 as dictionary attributes [#51756](https://github.com/ClickHouse/ClickHouse/pull/51756) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Check refcount in `RemoveManyObjectStorageOperation::finalize` instead of `execute` [#51954](https://github.com/ClickHouse/ClickHouse/pull/51954) ([vdimir](https://github.com/vdimir)).
* Fix ORDER BY tuple of WINDOW functions [#52145](https://github.com/ClickHouse/ClickHouse/pull/52145) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix error in `groupArrayMoving` functions [#52161](https://github.com/ClickHouse/ClickHouse/pull/52161) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable expression templates for time intervals [#52335](https://github.com/ClickHouse/ClickHouse/pull/52335) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix `countSubstrings()` hang with empty needle and a column haystack [#52409](https://github.com/ClickHouse/ClickHouse/pull/52409) ([Sergei Trifonov](https://github.com/serxa)).
* Fixed inserting into Buffer engine [#52440](https://github.com/ClickHouse/ClickHouse/pull/52440) ([Vasily Nemkov](https://github.com/Enmk)).
* The implementation of AnyHash was non-conformant. [#52448](https://github.com/ClickHouse/ClickHouse/pull/52448) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix abort in function `transform` [#52513](https://github.com/ClickHouse/ClickHouse/pull/52513) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix possible error "Cannot drain connections: cancel first" [#52585](https://github.com/ClickHouse/ClickHouse/pull/52585) ([Kruglov Pavel](https://github.com/Avogar)).
* Init and destroy ares channel on demand. [#52634](https://github.com/ClickHouse/ClickHouse/pull/52634) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix crash in function `tuple` with one sparse column argument [#52659](https://github.com/ClickHouse/ClickHouse/pull/52659) ([Anton Popov](https://github.com/CurtizJ)).
* clickhouse-keeper: fix implementation of server with poll() [#52833](https://github.com/ClickHouse/ClickHouse/pull/52833) ([Andy Fiddaman](https://github.com/citrus-it)).
* Fix password leak in show create mysql table [#52962](https://github.com/ClickHouse/ClickHouse/pull/52962) ([Duc Canh Le](https://github.com/canhld94)).
* Not-ready Set [#53162](https://github.com/ClickHouse/ClickHouse/pull/53162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
* Fix loading lazy database during system.table select query [#53372](https://github.com/ClickHouse/ClickHouse/pull/53372) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix: interpolate expression takes source column instead of same name aliased from select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Correctly handle totals and extremes with `DelayedSource` [#53644](https://github.com/ClickHouse/ClickHouse/pull/53644) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix fuzzer crash in parseDateTime() [#53764](https://github.com/ClickHouse/ClickHouse/pull/53764) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Fix: logical error in grace hash join [#51737](https://github.com/ClickHouse/ClickHouse/pull/51737) ([Igor Nikonov](https://github.com/devcrafter)).
* Pin rust nightly (to make it stable) [#51903](https://github.com/ClickHouse/ClickHouse/pull/51903) ([Azat Khuzhin](https://github.com/azat)).
* Fix crash in comparison functions due to incorrect query analysis [#52172](https://github.com/ClickHouse/ClickHouse/pull/52172) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Less replication errors [#52382](https://github.com/ClickHouse/ClickHouse/pull/52382) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Improve logging macros [#52519](https://github.com/ClickHouse/ClickHouse/pull/52519) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix deadlocks in StorageTableFunctionProxy [#52626](https://github.com/ClickHouse/ClickHouse/pull/52626) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Increase min protocol version for sparse serialization [#52835](https://github.com/ClickHouse/ClickHouse/pull/52835) ([Anton Popov](https://github.com/CurtizJ)).
* Docker improvements [#52869](https://github.com/ClickHouse/ClickHouse/pull/52869) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Get rid of describe_parameters for the best robot token [#53833](https://github.com/ClickHouse/ClickHouse/pull/53833) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
31
docs/changelogs/v23.7.5.30-stable.md
Normal file
31
docs/changelogs/v23.7.5.30-stable.md
Normal file
@ -0,0 +1,31 @@
---
sidebar_position: 1
sidebar_label: 2023
---

# 2023 Changelog

### ClickHouse release v23.7.5.30-stable (e86c21fb922) FIXME as compared to v23.7.4.5-stable (bd2fcd44553)

#### Build/Testing/Packaging Improvement
* Backported in [#53291](https://github.com/ClickHouse/ClickHouse/issues/53291): The compiler's profile data (`-ftime-trace`) is uploaded to ClickHouse Cloud, the second attempt after [#53100](https://github.com/ClickHouse/ClickHouse/issues/53100). [#53213](https://github.com/ClickHouse/ClickHouse/pull/53213) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#53467](https://github.com/ClickHouse/ClickHouse/issues/53467): Preserve environment parameters in `clickhouse start` command. Fixes [#51962](https://github.com/ClickHouse/ClickHouse/issues/51962). [#53418](https://github.com/ClickHouse/ClickHouse/pull/53418) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in an official stable release)

* Not-ready Set [#53162](https://github.com/ClickHouse/ClickHouse/pull/53162) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix incorrect normal projection AST format [#53347](https://github.com/ClickHouse/ClickHouse/pull/53347) ([Amos Bird](https://github.com/amosbird)).
* Fix loading lazy database during system.table select query [#53372](https://github.com/ClickHouse/ClickHouse/pull/53372) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Fix wrong columns order for queries with parallel FINAL. [#53489](https://github.com/ClickHouse/ClickHouse/pull/53489) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix: interpolate expression takes source column instead of same name aliased from select expression. [#53572](https://github.com/ClickHouse/ClickHouse/pull/53572) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Correctly handle totals and extremes with `DelayedSource` [#53644](https://github.com/ClickHouse/ClickHouse/pull/53644) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix: sorted distinct with sparse columns [#53711](https://github.com/ClickHouse/ClickHouse/pull/53711) ([Igor Nikonov](https://github.com/devcrafter)).
* Fix fuzzer crash in parseDateTime() [#53764](https://github.com/ClickHouse/ClickHouse/pull/53764) ([Robert Schulze](https://github.com/rschu1ze)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Disable test_reverse_dns_query/test.py [#53195](https://github.com/ClickHouse/ClickHouse/pull/53195) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Disable test_host_regexp_multiple_ptr_records/test.py [#53211](https://github.com/ClickHouse/ClickHouse/pull/53211) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix broken `02862_sorted_distinct_sparse_fix` [#53738](https://github.com/ClickHouse/ClickHouse/pull/53738) ([Antonio Andelic](https://github.com/antonio2368)).
* Get rid of describe_parameters for the best robot token [#53833](https://github.com/ClickHouse/ClickHouse/pull/53833) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@ -90,34 +90,117 @@ Process 1 stopped

## Visual Studio Code integration

- [CodeLLDB extension](https://github.com/vadimcn/vscode-lldb) is required for visual debugging, the [Command Variable](https://github.com/rioj7/command-variable) extension can help dynamic launches if using [cmake variants](https://github.com/microsoft/vscode-cmake-tools/blob/main/docs/variants.md).
- Make sure to set the backend to your llvm installation eg. `"lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"`
- Launcher:
- The [CodeLLDB](https://github.com/vadimcn/vscode-lldb) extension is required for visual debugging.
- The [Command Variable](https://github.com/rioj7/command-variable) extension can help with dynamic launches if using [CMake Variants](https://github.com/microsoft/vscode-cmake-tools/blob/main/docs/variants.md).
- Make sure to set the backend to your LLVM installation, e.g. `"lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"`
- Make sure to run the clickhouse executable in debug mode prior to launch. (It is also possible to create a `preLaunchTask` that automates this.)
### Example configurations
#### cmake-variants.yaml
```yaml
buildType:
  default: relwithdebinfo
  choices:
    debug:
      short: Debug
      long: Emit debug information
      buildType: Debug
    release:
      short: Release
      long: Optimize generated code
      buildType: Release
    relwithdebinfo:
      short: RelWithDebInfo
      long: Release with Debug Info
      buildType: RelWithDebInfo
    tsan:
      short: MinSizeRel
      long: Minimum Size Release
      buildType: MinSizeRel

toolchain:
  default: default
  description: Select toolchain
  choices:
    default:
      short: x86_64
      long: x86_64
    s390x:
      short: s390x
      long: s390x
      settings:
        CMAKE_TOOLCHAIN_FILE: cmake/linux/toolchain-s390x.cmake
```

#### launch.json
|
||||
```json
|
||||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Debug",
|
||||
"type": "lldb",
|
||||
"request": "custom",
|
||||
"targetCreateCommands": ["target create ${command:cmake.launchTargetDirectory}/clickhouse"],
|
||||
"processCreateCommands": ["settings set target.source-map ${input:targetdir} ${workspaceFolder}", "gdb-remote 31338"],
|
||||
"sourceMap": { "${input:targetdir}": "${workspaceFolder}" },
|
||||
}
|
||||
],
|
||||
"inputs": [
|
||||
{
|
||||
"id": "targetdir",
|
||||
"type": "command",
|
||||
"command": "extension.commandvariable.transform",
|
||||
"args": {
|
||||
"text": "${command:cmake.launchTargetDirectory}",
|
||||
"find": ".*/([^/]+)/[^/]+$",
|
||||
"replace": "$1"
|
||||
}
|
||||
"name": "(lldb) Launch s390x with qemu",
|
||||
"targetCreateCommands": ["target create ${command:cmake.launchTargetPath}"],
|
||||
"processCreateCommands": ["gdb-remote 2159"],
|
||||
"preLaunchTask": "Run ClickHouse"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||

#### settings.json

This would also put different builds under different subfolders of the `build` folder.

```json
{
    "cmake.buildDirectory": "${workspaceFolder}/build/${buildKitVendor}-${buildKitVersion}-${variant:toolchain}-${variant:buildType}",
    "lldb.library": "/usr/lib/x86_64-linux-gnu/liblldb-15.so"
}
```

#### run-debug.sh

```sh
#! /bin/sh
echo 'Starting debugger session'
cd "$1"
qemu-s390x-static -g 2159 -L /usr/s390x-linux-gnu "$2" "$3" "$4"
```

#### tasks.json

Defines a task to run the compiled executable in `server` mode under a `tmp` folder next to the binaries, with configuration from under `programs/server/config.xml`.

```json
{
    "version": "2.0.0",
    "tasks": [
        {
            "label": "Run ClickHouse",
            "type": "shell",
            "isBackground": true,
            "command": "${workspaceFolder}/.vscode/run-debug.sh",
            "args": [
                "${command:cmake.launchTargetDirectory}/tmp",
                "${command:cmake.launchTargetPath}",
                "server",
                "--config-file=${workspaceFolder}/programs/server/config.xml"
            ],
            "problemMatcher": [
                {
                    "pattern": [
                        {
                            "regexp": ".",
                            "file": 1,
                            "location": 2,
                            "message": 3
                        }
                    ],
                    "background": {
                        "activeOnStart": true,
                        "beginsPattern": "^Starting debugger session",
                        "endsPattern": ".*"
                    }
                }
            ]
        }
    ]
}
```

@@ -173,6 +173,7 @@ Similar to GraphiteMergeTree, the Kafka engine supports extended configuration u

    <!-- Global configuration options for all tables of Kafka engine type -->
    <debug>cgrp</debug>
    <auto_offset_reset>smallest</auto_offset_reset>
    <statistics_interval_ms>600</statistics_interval_ms>

    <!-- Configuration specific to topics "logs" and "stats" -->
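The hunk ends before the topic-specific part. For context, topic-level overrides live in elements named after the topic (`kafka_<topic_name>`); a rough sketch of the shape, with illustrative setting values:

```xml
<kafka>
    <!-- Applies to every Kafka engine table -->
    <debug>cgrp</debug>
    <auto_offset_reset>smallest</auto_offset_reset>
</kafka>

<!-- Illustrative overrides for the topics "logs" and "stats" -->
<kafka_logs>
    <retry_backoff_ms>250</retry_backoff_ms>
</kafka_logs>
<kafka_stats>
    <retry_backoff_ms>400</retry_backoff_ms>
</kafka_stats>
```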

@@ -260,3 +261,4 @@ The number of rows in one Kafka message depends on whether the format is row-bas

- [Virtual columns](../../../engines/table-engines/index.md#table_engines-virtual_columns)
- [background_message_broker_schedule_pool_size](../../../operations/server-configuration-parameters/settings.md#background_message_broker_schedule_pool_size)
- [system.kafka_consumers](../../../operations/system-tables/kafka_consumers.md)

@@ -13,7 +13,7 @@ If more than one table is required, it is highly recommended to use the [Materia

``` sql
CREATE TABLE postgresql_db.postgresql_replica (key UInt64, value UInt64)
ENGINE = MaterializedPostgreSQL('postgres1:5432', 'postgres_database', 'postgresql_table', 'postgres_user', 'postgres_password')
PRIMARY KEY key;
```

@@ -39,7 +39,7 @@ CREATE TABLE s3_queue_engine_table (name String, value UInt32)
CREATE TABLE s3queue_engine_table (name String, value UInt32)
ENGINE=S3Queue('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/*', 'CSV', 'gzip')
SETTINGS
    mode = 'ordered';
```

Using named collections:

@@ -60,7 +60,7 @@ Using named collections:
CREATE TABLE s3queue_engine_table (name String, value UInt32)
ENGINE=S3Queue(s3queue_conf, format = 'CSV', compression_method = 'gzip')
SETTINGS
    mode = 'ordered';
```

## Settings {#s3queue-settings}

@@ -188,7 +188,7 @@ Example:

CREATE TABLE s3queue_engine_table (name String, value UInt32)
ENGINE=S3Queue('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/*', 'CSV', 'gzip')
SETTINGS
    mode = 'unordered',
    keeper_path = '/clickhouse/s3queue/';

CREATE TABLE stats (name String, value UInt32)
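The hunk cuts the example off inside the second `CREATE TABLE`. The usual consumption pattern completes it with a MergeTree target plus a materialized view that drains the queue; a sketch that restates the cut-off statement and reuses the names from the example above:

```sql
CREATE TABLE stats (name String, value UInt32)
ENGINE = MergeTree() ORDER BY name;

CREATE MATERIALIZED VIEW consumer TO stats
AS SELECT name, value FROM s3queue_engine_table;

SELECT * FROM stats ORDER BY name;
```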

@@ -142,13 +142,15 @@ was specified for ANN indexes, the default value is 100 million.

- [Annoy](/docs/en/engines/table-engines/mergetree-family/annindexes.md#annoy-annoy)
- [USearch](/docs/en/engines/table-engines/mergetree-family/annindexes.md#usearch-usearch)

## Annoy {#annoy}

Annoy indexes are currently experimental; to use them you first need to `SET allow_experimental_annoy_index = 1`. They are also currently disabled on ARM due to memory safety problems with the algorithm.

This type of ANN index is based on the [Annoy library](https://github.com/spotify/annoy) which recursively divides the space into random linear surfaces (lines in 2D, planes in 3D etc.).

<div class='vimeo-container'>
<iframe src="//www.youtube.com/embed/QkCCyLW0ehU"
@@ -216,3 +218,64 @@ ORDER BY L2Distance(vectors, Point)
LIMIT N
SETTINGS annoy_index_search_k_nodes=100;
```

:::note
The Annoy index currently does not work with per-table, non-default `index_granularity` settings (see [here](https://github.com/ClickHouse/ClickHouse/pull/51325#issuecomment-1605920475)). If necessary, the value must be changed in config.xml.
:::
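The hunk skips the index-creation syntax for Annoy; it mirrors the USearch syntax shown below. A sketch (the tree-count parameter of 1000 follows the Laion example later in this diff):

```sql
SET allow_experimental_annoy_index = 1;

CREATE TABLE table_with_annoy_index
(
    id Int64,
    vectors Array(Float32),
    INDEX ann_index_name vectors TYPE annoy(1000)
)
ENGINE = MergeTree
ORDER BY id;
```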
## USearch {#usearch}

This type of ANN index is based on the [USearch library](https://github.com/unum-cloud/usearch), which implements the [HNSW algorithm](https://arxiv.org/abs/1603.09320), i.e., builds a hierarchical graph where each point represents a vector and the edges represent similarity. Such hierarchical structures can be very efficient on large collections. They may often fetch 0.05% or less data from the overall dataset, while still providing 99% recall. This is especially useful when working with high-dimensional vectors that are expensive to load and compare. The library also has several hardware-specific SIMD optimizations to further accelerate distance computations on modern Arm (NEON and SVE) and x86 (AVX2 and AVX-512) CPUs, and OS-specific optimizations that allow efficient navigation around immutable persistent files without loading them into RAM.

<div class='vimeo-container'>
<iframe src="//www.youtube.com/embed/UMrhB3icP9w"
    width="640"
    height="360"
    frameborder="0"
    allow="autoplay;
    fullscreen;
    picture-in-picture"
    allowfullscreen>
</iframe>
</div>
Syntax to create a USearch index over an [Array](../../../sql-reference/data-types/array.md) column:

```sql
CREATE TABLE table_with_usearch_index
(
    id Int64,
    vectors Array(Float32),
    INDEX [ann_index_name] vectors TYPE usearch([Distance]) [GRANULARITY N]
)
ENGINE = MergeTree
ORDER BY id;
```
Syntax to create an ANN index over a [Tuple](../../../sql-reference/data-types/tuple.md) column:

```sql
CREATE TABLE table_with_usearch_index
(
    id Int64,
    vectors Tuple(Float32[, Float32[, ...]]),
    INDEX [ann_index_name] vectors TYPE usearch([Distance]) [GRANULARITY N]
)
ENGINE = MergeTree
ORDER BY id;
```
USearch currently supports two distance functions:
- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space ([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)).
- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors ([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)).

For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no distance function was specified during index creation, `L2Distance` is used as default.
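For reference, a search against such an index follows the usual ANN pattern (ORDER BY distance, LIMIT N); a sketch reusing the table above, with a placeholder reference vector:

```sql
SELECT id
FROM table_with_usearch_index
ORDER BY L2Distance(vectors, [0.1, 0.2, 0.3])
LIMIT 10;
```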

@@ -1,23 +1,21 @@
# Laion-400M dataset

The [Laion-400M dataset](https://laion.ai/blog/laion-400-open-dataset/) contains 400 million images with English image captions. Laion nowadays also provides [an even larger dataset](https://laion.ai/blog/laion-5b/), but working with it will be similar.

The dataset contains the image URL, embeddings for both the image and the image caption, a similarity score between the image and the image caption, as well as metadata, e.g. the image width/height, the license and an NSFW flag. We can use the dataset to demonstrate [approximate nearest neighbor search](../../engines/table-engines/mergetree-family/annindexes.md) in ClickHouse.
## Data preparation

The embeddings and the metadata are stored in separate files in the raw data. A data preparation step downloads the data, merges the files, converts them to CSV and imports them into ClickHouse. You can use the following `download.sh` script for that:

```bash
wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/img_emb/img_emb_${1}.npy          # download image embedding
wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/text_emb/text_emb_${1}.npy        # download text embedding
wget --tries=100 https://deploy.laion.ai/8f83b608504d46bb81708ec86e912220/embeddings/metadata/metadata_${1}.parquet    # download metadata
python3 process.py ${1}                                                                                                # merge files and convert to CSV
```
The script `process.py` is defined as follows:

```python
import pandas as pd
@@ -35,11 +33,11 @@ im_emb = np.load(npy_file)
text_emb = np.load(text_npy)
data = pd.read_parquet(metadata_file)

# combine files
data = pd.concat([data, pd.DataFrame({"image_embedding" : [*im_emb]}), pd.DataFrame({"text_embedding" : [*text_emb]})], axis=1, copy=False)

# columns to be imported into ClickHouse
data = data[['url', 'caption', 'NSFW', 'similarity', "image_embedding", "text_embedding"]]

# transform np.arrays to lists
data['image_embedding'] = data['image_embedding'].apply(lambda x: list(x))
@@ -48,30 +46,32 @@ data['text_embedding'] = data['text_embedding'].apply(lambda x: list(x))

# this small hack is needed because captions sometimes contain all kinds of quotes
data['caption'] = data['caption'].apply(lambda x: x.replace("'", " ").replace('"', " "))

# export data as CSV file
data.to_csv(str_i + '.csv', header=False)

# remove raw data files
os.system(f"rm {npy_file} {metadata_file} {text_npy}")
```
To start the data preparation pipeline, run:

```bash
seq 0 409 | xargs -P100 -I{} bash -c './download.sh {}'
```

The dataset is split into 410 files, each containing ca. 1 million rows. If you would like to work with a smaller subset of the data, simply adjust the limits, e.g. `seq 0 9 | ...`.
## Create table

To create a table without indexes, run:

```sql
CREATE TABLE laion
(
    `id` Int64,
    `url` String,
    `caption` String,
    `NSFW` String,
    `similarity` Float32,
    `image_embedding` Array(Float32),
    `text_embedding` Array(Float32)
@@ -81,23 +81,23 @@ ORDER BY id
SETTINGS index_granularity = 8192
```

To import the CSV files into ClickHouse:

```sql
INSERT INTO laion FROM INFILE '{path_to_csv_files}/*.csv'
```
## Run a brute-force ANN search (without ANN index)

To run a brute-force approximate nearest neighbor search, run:

```sql
SELECT url, caption FROM laion WHERE similarity > 0.2 ORDER BY L2Distance(image_embedding, {target:Array(Float32)}) LIMIT 30
```

The filter on `similarity` makes sure that the images correspond to the image captions in the query results. `target` is an array of 512 elements and a client parameter. A convenient way to obtain such arrays will be presented at the end of the article. For now, we can use the embedding of a random cat picture as `target`.

**Result**

```
┌─url───────────────────────────────────────────────────────────────────────────────────────────────────────────┬─caption────────────────────────────────────────────────────────────────┐
@@ -114,32 +114,32 @@ Since the embeddings for images and texts may not match, let's also require a ce
8 rows in set. Elapsed: 6.432 sec. Processed 19.65 million rows, 43.96 GB (3.06 million rows/s., 6.84 GB/s.)
```
## Run an ANN search with an ANN index

Either create a new table or use [ALTER TABLE ADD INDEX](../../sql-reference/statements/alter/skipping-index.md) to add an ANN index:

```sql
CREATE TABLE laion
(
    `id` Int64,
    `url` String,
    `caption` String,
    `NSFW` String,
    `similarity` Float32,
    `image_embedding` Array(Float32),
    `text_embedding` Array(Float32),
    INDEX annoy_image image_embedding TYPE annoy(1000),
    INDEX annoy_text text_embedding TYPE annoy(1000)
)
ENGINE = MergeTree
ORDER BY id
SETTINGS index_granularity = 8192
```

By default, Annoy indexes use the L2 distance as metric. Further tuning knobs for index creation and search are described in the Annoy index [documentation](../../engines/table-engines/mergetree-family/annindexes.md). Let's now check again with the same query:
```sql
SELECT url, caption FROM laion WHERE similarity > 0.2 ORDER BY L2Distance(image_embedding, {target:Array(Float32)}) LIMIT 8
```

**Result**

@@ -159,15 +159,18 @@ select url, caption from test_indexes_laion where similarity > 0.2 order by L2Di
8 rows in set. Elapsed: 0.641 sec. Processed 22.06 thousand rows, 49.36 MB (91.53 thousand rows/s., 204.81 MB/s.)
```
The speed increased significantly at the cost of less accurate results. This is because the ANN index only provides approximate search results. Note that the example searched for similar image embeddings, but it is also possible to search the image caption embeddings instead.

## Creating embeddings with UDFs

One usually wants to create embeddings for new images or new image captions and search for similar image / image caption pairs in the data. We can use [UDF](../../sql-reference/functions/index.md#sql-user-defined-functions) to create the `target` vector without leaving the client. It is important to use the same model to create the data and new embeddings for searches. The following scripts utilize the `ViT-B/32` model which also underlies the dataset.

### Text embeddings

First, store the following Python script in the `user_scripts/` directory of your ClickHouse data path and make it executable (`chmod +x encode_text.py`).

`encode_text.py`:

```python
#!/usr/bin/python3
import clip
@@ -182,10 +185,12 @@ if __name__ == '__main__':
        inputs = clip.tokenize(text)
        with torch.no_grad():
            text_features = model.encode_text(inputs)[0].tolist()
        print(text_features)
        sys.stdout.flush()
```

Then create `encode_text_function.xml` in a location referenced by `<user_defined_executable_functions_config>/path/to/*_function.xml</user_defined_executable_functions_config>` in your ClickHouse server configuration file.

```xml
<functions>
    <function>
@@ -203,19 +208,19 @@ if __name__ == '__main__':
</functions>
```

You can now simply use:

```sql
SELECT encode_text('cat');
```

The first run will be slow because it loads the model, but repeated runs will be fast. We can then copy the output to `SET param_target=...` and easily write queries.
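To make that workflow concrete, a sketch (the three-element vector is just a placeholder for the real 512-element embedding printed by `encode_text`):

```sql
SET param_target = [0.017, -0.023, 0.041];

SELECT url, caption
FROM laion
WHERE similarity > 0.2
ORDER BY L2Distance(text_embedding, {target:Array(Float32)})
LIMIT 10;
```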

### Image embeddings

Image embeddings can be created similarly, but we will provide the Python script with the path to a local image instead of the image caption text.

`encode_image.py`

```python
#!/usr/bin/python3
import clip
@@ -235,25 +240,27 @@ if __name__ == '__main__':
        sys.stdout.flush()
```

`encode_image_function.xml`
```xml
<functions>
    <function>
        <type>executable_pool</type>
        <name>encode_image</name>
        <return_type>Array(Float32)</return_type>
        <argument>
            <type>String</type>
            <name>path</name>
        </argument>
        <format>TabSeparated</format>
        <command>encode_image.py</command>
        <command_read_timeout>1000000</command_read_timeout>
    </function>
</functions>
```

Then run this query:

```sql
SELECT encode_image('/path/to/your/image');
```

@@ -196,6 +196,7 @@ SELECT * FROM nestedt FORMAT TSV

- [input_format_tsv_skip_first_lines](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_skip_first_lines) - skip specified number of lines at the beginning of data. Default value - `0`.
- [input_format_tsv_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_detect_header) - automatically detect header with names and types in TSV format. Default value - `true`.
- [input_format_tsv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
- [input_format_tsv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_tsv_allow_variable_number_of_columns) - allow variable number of columns in TSV format, ignore extra columns and use default values on missing columns. Default value - `false`.

## TabSeparatedRaw {#tabseparatedraw}

@@ -473,7 +474,7 @@ The CSV format supports the output of totals and extremes the same way as `TabSe

- [input_format_csv_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_csv_skip_trailing_empty_lines) - skip trailing empty lines at the end of data. Default value - `false`.
- [input_format_csv_trim_whitespaces](/docs/en/operations/settings/settings-formats.md/#input_format_csv_trim_whitespaces) - trim spaces and tabs in non-quoted CSV strings. Default value - `true`.
- [input_format_csv_allow_whitespace_or_tab_as_delimiter](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_whitespace_or_tab_as_delimiter) - allow to use whitespace or tab as field delimiter in CSV strings. Default value - `false`.
- [input_format_csv_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_csv_allow_variable_number_of_columns) - allow variable number of columns in CSV format, ignore extra columns and use default values on missing columns. Default value - `false`.
- [input_format_csv_use_default_on_bad_values](/docs/en/operations/settings/settings-formats.md/#input_format_csv_use_default_on_bad_values) - allow to set default value to column when CSV field deserialization failed on bad value. Default value - `false`.

## CSVWithNames {#csvwithnames}

@@ -502,9 +503,10 @@ the types from input data will be compared with the types of the corresponding c

Similar to [Template](#format-template), but it prints or reads all names and types of columns and uses escaping rule from [format_custom_escaping_rule](/docs/en/operations/settings/settings-formats.md/#format_custom_escaping_rule) setting and delimiters from [format_custom_field_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_field_delimiter), [format_custom_row_before_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_before_delimiter), [format_custom_row_after_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_after_delimiter), [format_custom_row_between_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_row_between_delimiter), [format_custom_result_before_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_result_before_delimiter) and [format_custom_result_after_delimiter](/docs/en/operations/settings/settings-formats.md/#format_custom_result_after_delimiter) settings, not from format strings.

Additional settings:
- [input_format_custom_detect_header](/docs/en/operations/settings/settings-formats.md/#input_format_custom_detect_header) - enables automatic detection of header with names and types if any. Default value - `true`.
- [input_format_custom_skip_trailing_empty_lines](/docs/en/operations/settings/settings-formats.md/#input_format_custom_skip_trailing_empty_lines) - skip trailing empty lines at the end of file. Default value - `false`.
- [input_format_custom_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_custom_allow_variable_number_of_columns) - allow variable number of columns in CustomSeparated format, ignore extra columns and use default values on missing columns. Default value - `false`.

There is also `CustomSeparatedIgnoreSpaces` format, which is similar to [TemplateIgnoreSpaces](#templateignorespaces).

@@ -1262,6 +1264,7 @@ SELECT * FROM json_each_row_nested

- [input_format_json_named_tuples_as_objects](/docs/en/operations/settings/settings-formats.md/#input_format_json_named_tuples_as_objects) - parse named tuple columns as JSON objects. Default value - `true`.
- [input_format_json_defaults_for_missing_elements_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_defaults_for_missing_elements_in_named_tuple) - insert default values for missing elements in JSON object while parsing named tuple. Default value - `true`.
- [input_format_json_ignore_unknown_keys_in_named_tuple](/docs/en/operations/settings/settings-formats.md/#input_format_json_ignore_unknown_keys_in_named_tuple) - ignore unknown keys in JSON object for named tuples. Default value - `false`.
- [input_format_json_compact_allow_variable_number_of_columns](/docs/en/operations/settings/settings-formats.md/#input_format_json_compact_allow_variable_number_of_columns) - allow variable number of columns in JSONCompact/JSONCompactEachRow format, ignore extra columns and use default values on missing columns. Default value - `false`.
- [output_format_json_quote_64bit_integers](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_integers) - controls quoting of 64-bit integers in JSON output format. Default value - `true`.
- [output_format_json_quote_64bit_floats](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_64bit_floats) - controls quoting of 64-bit floats in JSON output format. Default value - `false`.
- [output_format_json_quote_denormals](/docs/en/operations/settings/settings-formats.md/#output_format_json_quote_denormals) - enables '+nan', '-nan', '+inf', '-inf' outputs in JSON output format. Default value - `false`.

@@ -2136,6 +2139,7 @@ To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/t

- [input_format_parquet_case_insensitive_column_matching](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_case_insensitive_column_matching) - ignore case when matching Parquet columns with ClickHouse columns. Default value - `false`.
- [input_format_parquet_allow_missing_columns](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_allow_missing_columns) - allow missing columns while reading Parquet data. Default value - `false`.
- [input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_skip_columns_with_unsupported_types_in_schema_inference) - allow skipping columns with unsupported types while schema inference for Parquet format. Default value - `false`.
- [input_format_parquet_local_file_min_bytes_for_seek](/docs/en/operations/settings/settings-formats.md/#input_format_parquet_local_file_min_bytes_for_seek) - min bytes required for local read (file) to do seek, instead of read with ignore in Parquet input format. Default value - `8192`.
- [output_format_parquet_fixed_string_as_fixed_byte_array](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_fixed_string_as_fixed_byte_array) - use Parquet FIXED_LENGTH_BYTE_ARRAY type instead of Binary/String for FixedString columns. Default value - `true`.
- [output_format_parquet_version](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_version) - the version of Parquet format used in output format. Default value - `2.latest`.
- [output_format_parquet_compression_method](/docs/en/operations/settings/settings-formats.md/#output_format_parquet_compression_method) - compression method used in output Parquet format. Default value - `snappy`.
BIN docs/en/interfaces/images/mysql1.png (new binary file, not shown; 232 KiB)
BIN docs/en/interfaces/images/mysql2.png (new binary file, not shown; 102 KiB)
BIN docs/en/interfaces/images/mysql3.png (new binary file, not shown; 37 KiB)
BIN docs/en/interfaces/images/mysql4.png (new binary file, not shown; 88 KiB)
BIN docs/en/interfaces/images/mysql5.png (new binary file, not shown; 246 KiB)

@@ -6,7 +6,38 @@ sidebar_label: MySQL Interface

# MySQL Interface

ClickHouse supports the MySQL wire protocol. This allows tools that are MySQL-compatible to interact with ClickHouse seamlessly (e.g. [Looker Studio](../integrations/data-visualization/looker-studio-and-clickhouse.md)).

## Enabling the MySQL Interface On ClickHouse Cloud

:::note
The MySQL interface for ClickHouse Cloud is currently in private preview. Please contact support@clickhouse.com to enable this feature for your ClickHouse Cloud service.
:::

1. After creating your ClickHouse Cloud Service, on the credentials screen, select the MySQL tab.

![Credentials screen - Prompt](./images/mysql1.png)

2. Toggle the switch to enable the MySQL interface for this specific service. This will expose port `3306` for this service and prompt you with your MySQL connection screen that includes your unique MySQL username. The password will be the same as the service's default user password.

![Credentials screen - Enabled MySQL](./images/mysql2.png)

Alternatively, in order to enable the MySQL interface for an existing service:

1. Ensure your service is in `Running` state, then click on the "View connection string" button for the service you want to enable the MySQL interface for.

![Connection screen - Prompt MySQL](./images/mysql3.png)

2. Toggle the switch to enable the MySQL interface for this specific service. This will prompt you to enter the default password.

![Connection screen - Prompt MySQL](./images/mysql4.png)

3. After entering the password, you will be shown the MySQL connection string for this service.

![Connection screen - MySQL Enabled](./images/mysql5.png)

## Enabling the MySQL Interface On Self-managed ClickHouse

Add the [mysql_port](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-mysql_port) setting to your server's configuration file. For example, you could define the port in a new XML file in your `config.d/` [folder](../operations/configuration-files):

``` xml
<clickhouse>
@@ -20,7 +51,7 @@ Startup your ClickHouse server and look for a log message similar to the followi
{} <Information> Application: Listening for MySQL compatibility protocol: 127.0.0.1:9004
```

## Connect MySQL to ClickHouse

The following command demonstrates how to connect the MySQL client `mysql` to ClickHouse:
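The command itself falls outside the hunk; a typical invocation against the port from the log line above (hostname, user, and port are placeholders to adapt):

```bash
mysql --protocol tcp -h 127.0.0.1 -u default -P 9004 --password
```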

@@ -21,6 +21,11 @@ In most cases it is recommended to use an appropriate tool or library instead of

- [ODBC driver](../interfaces/odbc.md)
- [C++ client library](../interfaces/cpp.md)

ClickHouse server provides embedded visual interfaces for power users:

- Play UI: open `/play` in the browser;
- Advanced Dashboard: open `/dashboard` in the browser;

There is also a wide range of third-party libraries for working with ClickHouse:

- [Client libraries](../interfaces/third-party/client-libraries.md)

@@ -228,8 +228,8 @@ For most input formats schema inference reads some data to determine its structu

To prevent inferring the same schema every time ClickHouse reads the data from the same file, the inferred schema is cached and when accessing the same file again, ClickHouse will use the schema from the cache.

There are special settings that control this cache:
- `schema_inference_cache_max_elements_for_{file/s3/hdfs/url/azure}` - the maximum number of cached schemas for the corresponding table function. The default value is `4096`. These settings should be set in the server config.
- `schema_inference_use_cache_for_{file,s3,hdfs,url,azure}` - allows turning on/off using cache for schema inference. These settings can be used in queries.

The schema of the file can be changed by modifying the data or by changing format settings.
For this reason, the schema inference cache identifies the schema by file source, format name, used format settings, and the last modification time of the file.
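As a quick illustration, the per-query switch can be set like this (the bucket path is a placeholder):

```sql
SELECT count()
FROM s3('https://example-bucket.s3.amazonaws.com/data/*.parquet')
SETTINGS schema_inference_use_cache_for_s3 = 0;
```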

@@ -65,7 +65,7 @@ XML substitution example:

Substitutions can also be performed from ZooKeeper. To do this, specify the attribute `from_zk = "/path/to/node"`. The element value is replaced with the contents of the node at `/path/to/node` in ZooKeeper. You can also put an entire XML subtree on the ZooKeeper node and it will be fully inserted into the source element.

## Encrypting and Hiding Configuration {#encryption}

You can use symmetric encryption to encrypt a configuration element, for example, a password field. To do so, first configure the [encryption codec](../sql-reference/statements/create/table.md#encryption-codecs), then add attribute `encrypted_by` with the name of the encryption codec as value to the element to encrypt.

@@ -102,6 +102,21 @@ Example:

961F000000040000000000EEDDEF4F453CFE6457C4234BD7C09258BD651D85
```
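The complete example, of which the hunk only shows the tail, has roughly this shape (the key is illustrative, the codec name `AES_128_GCM_SIV` is an assumption; the ciphertext is the value shown above):

```xml
<clickhouse>
    <encryption_codecs>
        <aes_128_gcm_siv>
            <key_hex>00112233445566778899aabbccddeeff</key_hex>
        </aes_128_gcm_siv>
    </encryption_codecs>
    <interserver_http_credentials>
        <user>admin</user>
        <password encrypted_by="AES_128_GCM_SIV">961F000000040000000000EEDDEF4F453CFE6457C4234BD7C09258BD651D85</password>
    </interserver_http_credentials>
</clickhouse>
```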

Even with encryption applied, elements in the preprocessed file are still saved in plain text. If this is a problem, we suggest two alternatives: either set the file permissions of the preprocessed file to 600, or use the `hide_in_preprocessed` attribute.

Example:

```xml
<clickhouse>

    <interserver_http_credentials hide_in_preprocessed="true">
        <user>admin</user>
        <password>secret</password>
    </interserver_http_credentials>

</clickhouse>
```

## User Settings {#user-settings}

The `config.xml` file can specify a separate config with user settings, profiles, and quotas. The relative path to this config is set in the `users_config` element. By default, it is `users.xml`. If `users_config` is omitted, the user settings, profiles, and quotas are specified directly in `config.xml`.

@@ -217,23 +217,61 @@ Type: UInt32

Default: 1024

## index_mark_cache_policy

Index mark cache policy name.

Type: String

Default: SLRU

## index_mark_cache_size

Size of cache for index marks. Zero means disabled.

:::note
This setting can be modified at runtime and will take effect immediately.
:::

Type: UInt64

Default: 0

## index_mark_cache_size_ratio

The size of the protected queue in the index mark cache relative to the cache's total size.

Type: Double

Default: 0.5

## index_uncompressed_cache_policy

Index uncompressed cache policy name.

Type: String

Default: SLRU

## index_uncompressed_cache_size

Size of cache for uncompressed blocks of MergeTree indices. Zero means disabled.

:::note
This setting can be modified at runtime and will take effect immediately.
:::

Type: UInt64

Default: 0

## index_uncompressed_cache_size_ratio

The size of the protected queue in the index uncompressed cache relative to the cache's total size.

Type: Double

Default: 0.5

## io_thread_pool_queue_size

@@ -255,10 +293,22 @@ Default: SLRU

Size of cache for marks (index of MergeTree family of tables).

:::note
This setting can be modified at runtime and will take effect immediately.
:::

Type: UInt64

Default: 5368709120

## mark_cache_size_ratio

The size of the protected queue in the mark cache relative to the cache's total size.

Type: Double

Default: 0.5
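A minimal `config.d` override sketch for the two mark cache knobs described above (the values shown are just the documented defaults):

```xml
<clickhouse>
    <mark_cache_size>5368709120</mark_cache_size>
    <mark_cache_size_ratio>0.5</mark_cache_size_ratio>
</clickhouse>
```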

## max_backup_bandwidth_for_server

The maximum read speed in bytes per second for all backups on server. Zero means unlimited.

@@ -288,7 +338,7 @@ Default: 1000

Limit on total number of concurrently executed queries. Zero means unlimited. Note that limits on insert and select queries, and on the maximum number of queries for users must also be considered. See also max_concurrent_insert_queries, max_concurrent_select_queries, max_concurrent_queries_for_all_users.

:::note
This setting can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
:::

Type: UInt64

@@ -300,7 +350,7 @@ Default: 0

Limit on total number of concurrent insert queries. Zero means unlimited.

:::note
This setting can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
:::

Type: UInt64

@@ -312,7 +362,7 @@ Default: 0

Limit on total number of concurrent select queries. Zero means unlimited.

:::note
This setting can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
:::

Type: UInt64

@@ -456,6 +506,10 @@ Sets the cache size (in bytes) for mapped files. This setting allows avoiding fr

Note that the amount of data in mapped files does not consume memory directly and is not accounted for in query or server memory usage, because this memory can be discarded similar to the OS page cache. The cache is dropped (the files are closed) automatically on the removal of old parts in tables of the MergeTree family; it can also be dropped manually by the `SYSTEM DROP MMAP CACHE` query.

:::note
This setting can be modified at runtime and will take effect immediately.
:::

Type: UInt64

Default: 1000

@@ -605,10 +659,22 @@ There is one shared cache for the server. Memory is allocated on demand. The cac

The uncompressed cache is advantageous for very short queries in individual cases.

:::note
This setting can be modified at runtime and will take effect immediately.
:::

Type: UInt64

Default: 0

## uncompressed_cache_size_ratio

The size of the protected queue in the uncompressed cache relative to the cache's total size.

Type: Double

Default: 0.5

## builtin_dictionaries_reload_interval {#builtin-dictionaries-reload-interval}

The interval in seconds before reloading built-in dictionaries.

@@ -56,11 +56,11 @@ Possible values:

- Any positive integer.

Default value: 3000.

To achieve maximum performance of `SELECT` queries, it is necessary to minimize the number of parts processed, see [Merge Tree](../../development/architecture.md#merge-tree).

Prior to 23.6 this setting was set to 300. You can set a higher value; it will reduce the probability of the `Too many parts` error, but at the same time `SELECT` performance might degrade. Also, in case of a merge issue (for example, due to insufficient disk space) you will notice it later than you would with the original 300.
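If a table needs the stricter pre-23.6 behaviour, the threshold can be tightened per table; a sketch (the table name is a placeholder):

```sql
ALTER TABLE events MODIFY SETTING parts_to_throw_insert = 300;
```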

## parts_to_delay_insert {#parts-to-delay-insert}

@@ -623,6 +623,19 @@ Possible values:

Default value: false

## number_of_free_entries_in_pool_to_execute_optimize_entire_partition {#number_of_free_entries_in_pool_to_execute_optimize_entire_partition}

When there are fewer than the specified number of free entries in the pool, do not execute optimization of an entire partition in the background (this task is generated when `min_age_to_force_merge_seconds` is set and `min_age_to_force_merge_on_partition_only` is enabled). This is to leave free threads for regular merges and avoid "Too many parts" errors.

Possible values:

- Positive integer.

Default value: 25

The value of the `number_of_free_entries_in_pool_to_execute_optimize_entire_partition` setting should be less than the value of [background_pool_size](/docs/en/operations/server-configuration-parameters/settings.md/#background_pool_size) * [background_merges_mutations_concurrency_ratio](/docs/en/operations/server-configuration-parameters/settings.md/#background_merges_mutations_concurrency_ratio). Otherwise, ClickHouse throws an exception.

## allow_floating_point_partition_key {#allow_floating_point_partition_key}

Enables the use of a floating-point number as a partition key.

@@ -627,6 +627,13 @@ Column type should be String. If value is empty, default names `row_{i}` will be

Default value: ''.

### input_format_json_compact_allow_variable_number_of_columns {#input_format_json_compact_allow_variable_number_of_columns}

Allow variable number of columns in rows in JSONCompact/JSONCompactEachRow input formats.
Ignore extra columns in rows with more columns than expected and treat missing columns as default values.

Disabled by default.

## TSV format settings {#tsv-format-settings}

### input_format_tsv_empty_as_default {#input_format_tsv_empty_as_default}

@@ -764,6 +771,13 @@ When enabled, trailing empty lines at the end of TSV file will be skipped.

Disabled by default.

### input_format_tsv_allow_variable_number_of_columns {#input_format_tsv_allow_variable_number_of_columns}

Allow variable number of columns in rows in TSV input format.
Ignore extra columns in rows with more columns than expected and treat missing columns as default values.

Disabled by default.

## CSV format settings {#csv-format-settings}

### format_csv_delimiter {#format_csv_delimiter}

@@ -955,9 +969,11 @@ Result

```text
" string "
```

### input_format_csv_allow_variable_number_of_columns {#input_format_csv_allow_variable_number_of_columns}

Allow variable number of columns in rows in CSV input format.
Ignore extra columns in rows with more columns than expected and treat missing columns as default values.

Disabled by default.
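A quick sketch of the behaviour with `clickhouse-local` (assuming the binary is on PATH); the second row is missing a column and the third has an extra one, and both are accepted:

```bash
printf 'a,1\nb\nc,3,extra\n' | clickhouse-local \
    --input-format CSV \
    --structure 'name String, value UInt32' \
    --query 'SELECT * FROM table' \
    --input_format_csv_allow_variable_number_of_columns=1
```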

@@ -1223,6 +1239,12 @@ Allow skipping columns with unsupported types while schema inference for format

Disabled by default.

### input_format_parquet_local_file_min_bytes_for_seek {#input_format_parquet_local_file_min_bytes_for_seek}

Min bytes required for a local file read to do a seek, instead of reading with ignore, in Parquet input format.

Default value - `8192`.

### output_format_parquet_string_as_string {#output_format_parquet_string_as_string}

Use Parquet String type instead of Binary for String columns.

@@ -1565,6 +1587,13 @@ When enabled, trailing empty lines at the end of file in CustomSeparated format

Disabled by default.

### input_format_custom_allow_variable_number_of_columns {#input_format_custom_allow_variable_number_of_columns}

Allow variable number of columns in rows in CustomSeparated input format.
Ignore extra columns in rows with more columns than expected and treat missing columns as default values.

Disabled by default.

## Regexp format settings {#regexp-format-settings}

### format_regexp_escaping_rule {#format_regexp_escaping_rule}

@@ -98,6 +98,18 @@ Default value: 0.

</profiles>
```

## mutations_execute_nondeterministic_on_initiator {#mutations_execute_nondeterministic_on_initiator}

If true, constant nondeterministic functions (e.g. the function `now()`) are executed on the initiator and replaced with literals in `UPDATE` and `DELETE` queries. This helps to keep data in sync on replicas while executing mutations with constant nondeterministic functions. Default value: `false`.

## mutations_execute_subqueries_on_initiator {#mutations_execute_subqueries_on_initiator}

If true, scalar subqueries are executed on the initiator and replaced with literals in `UPDATE` and `DELETE` queries. Default value: `false`.

## mutations_max_literal_size_to_replace {#mutations_max_literal_size_to_replace}

The maximum size of a serialized literal in bytes to replace in `UPDATE` and `DELETE` queries. Takes effect only if at least one of the two settings above is enabled. Default value: 16384 (16 KiB).
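A sketch of the effect (the table and column names are placeholders): with the first setting enabled, `now()` is evaluated once on the initiator and shipped to the replicas as a literal.

```sql
SET mutations_execute_nondeterministic_on_initiator = 1;

ALTER TABLE events UPDATE updated_at = now() WHERE id = 42;
```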
## distributed_product_mode {#distributed-product-mode}

Changes the behaviour of [distributed subqueries](../../sql-reference/operators/in.md).

@@ -2371,6 +2383,23 @@ See also:

- [optimize_functions_to_subcolumns](#optimize-functions-to-subcolumns)

## optimize_count_from_files {#optimize_count_from_files}

Enables or disables the optimization of counting the number of rows from files in different input formats. It applies to table functions/engines `file`/`s3`/`url`/`hdfs`/`azureBlobStorage`.

Possible values:

- 0 — Optimization disabled.
- 1 — Optimization enabled.

Default value: `1`.

## use_cache_for_count_from_files {#use_cache_for_count_from_files}

Enables caching of the row count while counting from files in table functions `file`/`s3`/`url`/`hdfs`/`azureBlobStorage`.

Enabled by default.
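The two settings combine naturally for metadata-only counts; a sketch (the glob path is a placeholder):

```sql
SELECT count()
FROM file('data/*.parquet')
SETTINGS optimize_count_from_files = 1, use_cache_for_count_from_files = 1;
```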

## distributed_replica_error_half_life {#settings-distributed_replica_error_half_life}

- Type: seconds

@@ -4614,3 +4643,19 @@ SELECT toFloat64('1.7091'), toFloat64('1.5008753E7') SETTINGS precise_float_pars

│ 1.7091 │ 15008753 │
└─────────────────────┴──────────────────────────┘
```

## validate_tcp_client_information {#validate-tcp-client-information}

Determines whether validation of client information is enabled when a query packet is received from a client using a TCP connection.

If `true`, an exception will be thrown on invalid client information from the TCP client.

If `false`, the data will not be validated. The server will work with clients of all versions.

The default value is `false`.

**Example**

``` xml
<validate_tcp_client_information>true</validate_tcp_client_information>
```

@@ -114,7 +114,11 @@ Example of disk configuration:

## Using local cache {#using-local-cache}

It is possible to configure local cache over disks in storage configuration starting from version 22.3.
For versions 22.3 - 22.7 cache is supported only for `s3` disk type. For versions >= 22.8 cache is supported for any disk type: S3, Azure, Local, Encrypted, etc.
For versions >= 23.5 cache is supported only for remote disk types: S3, Azure, HDFS.
Cache uses `LRU` cache policy.

Example of configuration for versions later or equal to 22.8:
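The configuration block itself lies outside the hunk; per the referenced storage docs it looks roughly like this (the endpoint, path, and size are placeholders):

```xml
<clickhouse>
    <storage_configuration>
        <disks>
            <s3>
                <type>s3</type>
                <endpoint>https://s3.example.com/bucket/</endpoint>
            </s3>
            <cache>
                <type>cache</type>
                <disk>s3</disk>
                <path>/var/lib/clickhouse/s3_cache/</path>
                <max_size>10Gi</max_size>
            </cache>
        </disks>
    </storage_configuration>
</clickhouse>
```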

@@ -23,6 +23,7 @@ Columns:

- `database_shard_name` ([String](../../sql-reference/data-types/string.md)) — The name of the `Replicated` database shard (for clusters that belong to a `Replicated` database).
- `database_replica_name` ([String](../../sql-reference/data-types/string.md)) — The name of the `Replicated` database replica (for clusters that belong to a `Replicated` database).
- `is_active` ([Nullable(UInt8)](../../sql-reference/data-types/int-uint.md)) — The status of the `Replicated` database replica (for clusters that belong to a `Replicated` database): 1 means "replica is online", 0 means "replica is offline", `NULL` means "unknown".
- `name` ([String](../../sql-reference/data-types/string.md)) - An alias to cluster.

**Example**
docs/en/operations/system-tables/kafka_consumers.md (new file, 58 lines)

@@ -0,0 +1,58 @@
---
slug: /en/operations/system-tables/kafka_consumers
---
# kafka_consumers

Contains information about Kafka consumers.
Applicable for the [Kafka table engine](../../engines/table-engines/integrations/kafka) (native ClickHouse integration).

Columns:

- `database` (String) - database of the table with Kafka Engine.
- `table` (String) - name of the table with Kafka Engine.
- `consumer_id` (String) - Kafka consumer identifier. Note that a table can have many consumers. Specified by the `kafka_num_consumers` parameter.
- `assignments.topic` (Array(String)) - Kafka topic.
- `assignments.partition_id` (Array(Int32)) - Kafka partition id. Note that only one consumer can be assigned to a partition.
- `assignments.current_offset` (Array(Int64)) - current offset.
- `exceptions.time` (Array(DateTime)) - timestamps when the 10 most recent exceptions were generated.
- `exceptions.text` (Array(String)) - text of the 10 most recent exceptions.
- `last_poll_time` (DateTime) - timestamp of the most recent poll.
- `num_messages_read` (UInt64) - number of messages read by the consumer.
- `last_commit_time` (DateTime) - timestamp of the most recent commit.
- `num_commits` (UInt64) - total number of commits for the consumer.
- `last_rebalance_time` (DateTime) - timestamp of the most recent Kafka rebalance.
- `num_rebalance_revocations` (UInt64) - number of times the consumer had its partitions revoked.
- `num_rebalance_assignments` (UInt64) - number of times the consumer was assigned to the Kafka cluster.
- `is_currently_used` (UInt8) - whether the consumer is in use.
- `rdkafka_stat` (String) - library-internal statistics. See https://github.com/ClickHouse/librdkafka/blob/master/STATISTICS.md . Set `statistics_interval_ms` to 0 to disable; the default is 3000 (once every three seconds).

Example:

``` sql
SELECT *
FROM system.kafka_consumers
FORMAT Vertical
```

``` text
Row 1:
──────
database:                   test
table:                      kafka
consumer_id:                ClickHouse-instance-test-kafka-1caddc7f-f917-4bb1-ac55-e28bd103a4a0
assignments.topic:          ['system_kafka_cons']
assignments.partition_id:   [0]
assignments.current_offset: [18446744073709550615]
exceptions.time:            []
exceptions.text:            []
last_poll_time:             2006-11-09 18:47:47
num_messages_read:          4
last_commit_time:           2006-11-10 04:39:40
num_commits:                1
last_rebalance_time:        1970-01-01 00:00:00
num_rebalance_revocations:  0
num_rebalance_assignments:  1
is_currently_used:          1
rdkafka_stat:               {...}

```

@@ -30,7 +30,7 @@ curl https://clickhouse.com/ | sh

The binary you just downloaded can run all sorts of ClickHouse tools and utilities. If you want to run ClickHouse as a database server, check out the [Quick Start](../../quick-start.mdx).
:::

## Query data in a file using SQL {#query_data_in_file}

A common use of `clickhouse-local` is to run ad-hoc queries on files where you don't have to insert the data into a table. `clickhouse-local` can stream the data from a file into a temporary table and execute your SQL.

@@ -57,6 +57,19 @@ The `file` table function creates a table, and you can use `DESCRIBE` to see the

./clickhouse local -q "DESCRIBE file('reviews.tsv')"
```

:::tip
You are allowed to use globs in file names (see [glob substitutions](/docs/en/sql-reference/table-functions/file.md/#globs-in-path)).

Examples:

```bash
./clickhouse local -q "SELECT * FROM 'reviews*.jsonl'"
./clickhouse local -q "SELECT * FROM 'review_?.csv'"
./clickhouse local -q "SELECT * FROM 'review_{1..3}.csv'"
```
:::

```response
marketplace	Nullable(String)
customer_id	Nullable(Int64)
Some files were not shown because too many files have changed in this diff.