mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-23 16:12:01 +00:00
Merge branch 'master' into annoy_cosine
This commit is contained in:
commit
21c024b043
29
.github/ISSUE_TEMPLATE/96_installation-issues.md
vendored
Normal file
29
.github/ISSUE_TEMPLATE/96_installation-issues.md
vendored
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
---
|
||||||
|
name: Installation issue
|
||||||
|
about: Issue with ClickHouse installation from https://clickhouse.com/docs/en/install/
|
||||||
|
title: ''
|
||||||
|
labels: comp-install
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Installation type**
|
||||||
|
|
||||||
|
Packages, docker, single binary, curl?
|
||||||
|
|
||||||
|
**Source of the ClickHouse**
|
||||||
|
|
||||||
|
A link to the source. Or the command you've tried
|
||||||
|
|
||||||
|
**Expected result**
|
||||||
|
|
||||||
|
What you expected
|
||||||
|
|
||||||
|
**The actual result**
|
||||||
|
|
||||||
|
What you get
|
||||||
|
|
||||||
|
**How to reproduce**
|
||||||
|
|
||||||
|
* For Linux-based operating systems: provide a script for clear docker container from the official image
|
||||||
|
* For anything else: steps to reproduce on as much as possible clear system
|
2
.github/workflows/backport_branches.yml
vendored
2
.github/workflows/backport_branches.yml
vendored
@ -466,6 +466,7 @@ jobs:
|
|||||||
- BuilderDebTsan
|
- BuilderDebTsan
|
||||||
- BuilderDebDebug
|
- BuilderDebDebug
|
||||||
runs-on: [self-hosted, style-checker]
|
runs-on: [self-hosted, style-checker]
|
||||||
|
if: ${{ success() || failure() }}
|
||||||
steps:
|
steps:
|
||||||
- name: Set envs
|
- name: Set envs
|
||||||
run: |
|
run: |
|
||||||
@ -504,6 +505,7 @@ jobs:
|
|||||||
- BuilderBinDarwin
|
- BuilderBinDarwin
|
||||||
- BuilderBinDarwinAarch64
|
- BuilderBinDarwinAarch64
|
||||||
runs-on: [self-hosted, style-checker]
|
runs-on: [self-hosted, style-checker]
|
||||||
|
if: ${{ success() || failure() }}
|
||||||
steps:
|
steps:
|
||||||
- name: Set envs
|
- name: Set envs
|
||||||
run: |
|
run: |
|
||||||
|
2
.github/workflows/master.yml
vendored
2
.github/workflows/master.yml
vendored
@ -974,6 +974,7 @@ jobs:
|
|||||||
- BuilderDebTsan
|
- BuilderDebTsan
|
||||||
- BuilderDebUBsan
|
- BuilderDebUBsan
|
||||||
runs-on: [self-hosted, style-checker]
|
runs-on: [self-hosted, style-checker]
|
||||||
|
if: ${{ success() || failure() }}
|
||||||
steps:
|
steps:
|
||||||
- name: Set envs
|
- name: Set envs
|
||||||
run: |
|
run: |
|
||||||
@ -1021,6 +1022,7 @@ jobs:
|
|||||||
- BuilderBinClangTidy
|
- BuilderBinClangTidy
|
||||||
- BuilderDebShared
|
- BuilderDebShared
|
||||||
runs-on: [self-hosted, style-checker]
|
runs-on: [self-hosted, style-checker]
|
||||||
|
if: ${{ success() || failure() }}
|
||||||
steps:
|
steps:
|
||||||
- name: Set envs
|
- name: Set envs
|
||||||
run: |
|
run: |
|
||||||
|
2
.github/workflows/pull_request.yml
vendored
2
.github/workflows/pull_request.yml
vendored
@ -112,7 +112,7 @@ jobs:
|
|||||||
StyleCheck:
|
StyleCheck:
|
||||||
needs: DockerHubPush
|
needs: DockerHubPush
|
||||||
runs-on: [self-hosted, style-checker]
|
runs-on: [self-hosted, style-checker]
|
||||||
if: ${{ success() || failure() }}
|
if: ${{ success() || failure() || always() }}
|
||||||
steps:
|
steps:
|
||||||
- name: Set envs
|
- name: Set envs
|
||||||
run: |
|
run: |
|
||||||
|
2
.github/workflows/release_branches.yml
vendored
2
.github/workflows/release_branches.yml
vendored
@ -541,6 +541,7 @@ jobs:
|
|||||||
- BuilderDebMsan
|
- BuilderDebMsan
|
||||||
- BuilderDebDebug
|
- BuilderDebDebug
|
||||||
runs-on: [self-hosted, style-checker]
|
runs-on: [self-hosted, style-checker]
|
||||||
|
if: ${{ success() || failure() }}
|
||||||
steps:
|
steps:
|
||||||
- name: Set envs
|
- name: Set envs
|
||||||
run: |
|
run: |
|
||||||
@ -580,6 +581,7 @@ jobs:
|
|||||||
- BuilderBinDarwin
|
- BuilderBinDarwin
|
||||||
- BuilderBinDarwinAarch64
|
- BuilderBinDarwinAarch64
|
||||||
runs-on: [self-hosted, style-checker]
|
runs-on: [self-hosted, style-checker]
|
||||||
|
if: ${{ success() || failure() }}
|
||||||
steps:
|
steps:
|
||||||
- name: Set envs
|
- name: Set envs
|
||||||
run: |
|
run: |
|
||||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -80,6 +80,7 @@ core
|
|||||||
vgcore*
|
vgcore*
|
||||||
|
|
||||||
*.deb
|
*.deb
|
||||||
|
*.tar.zst
|
||||||
*.build
|
*.build
|
||||||
*.upload
|
*.upload
|
||||||
*.changes
|
*.changes
|
||||||
|
3
.gitmodules
vendored
3
.gitmodules
vendored
@ -287,3 +287,6 @@
|
|||||||
[submodule "contrib/corrosion"]
|
[submodule "contrib/corrosion"]
|
||||||
path = contrib/corrosion
|
path = contrib/corrosion
|
||||||
url = https://github.com/corrosion-rs/corrosion.git
|
url = https://github.com/corrosion-rs/corrosion.git
|
||||||
|
[submodule "contrib/morton-nd"]
|
||||||
|
path = contrib/morton-nd
|
||||||
|
url = https://github.com/morton-nd/morton-nd
|
||||||
|
183
CHANGELOG.md
183
CHANGELOG.md
@ -1,6 +1,7 @@
|
|||||||
### Table of Contents
|
### Table of Contents
|
||||||
|
**[ClickHouse release v22.10, 2022-10-25](#2210)**<br/>
|
||||||
**[ClickHouse release v22.9, 2022-09-22](#229)**<br/>
|
**[ClickHouse release v22.9, 2022-09-22](#229)**<br/>
|
||||||
**[ClickHouse release v22.8, 2022-08-18](#228)**<br/>
|
**[ClickHouse release v22.8-lts, 2022-08-18](#228)**<br/>
|
||||||
**[ClickHouse release v22.7, 2022-07-21](#227)**<br/>
|
**[ClickHouse release v22.7, 2022-07-21](#227)**<br/>
|
||||||
**[ClickHouse release v22.6, 2022-06-16](#226)**<br/>
|
**[ClickHouse release v22.6, 2022-06-16](#226)**<br/>
|
||||||
**[ClickHouse release v22.5, 2022-05-19](#225)**<br/>
|
**[ClickHouse release v22.5, 2022-05-19](#225)**<br/>
|
||||||
@ -10,10 +11,143 @@
|
|||||||
**[ClickHouse release v22.1, 2022-01-18](#221)**<br/>
|
**[ClickHouse release v22.1, 2022-01-18](#221)**<br/>
|
||||||
**[Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021/)**<br/>
|
**[Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021/)**<br/>
|
||||||
|
|
||||||
|
### <a id="2210"></a> ClickHouse release 22.10, 2022-10-26
|
||||||
|
|
||||||
|
#### Backward Incompatible Change
|
||||||
|
* Rename cache commands: `show caches` -> `show filesystem caches`, `describe cache` -> `describe filesystem cache`. [#41508](https://github.com/ClickHouse/ClickHouse/pull/41508) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Remove support for the `WITH TIMEOUT` section for `LIVE VIEW`. This closes [#40557](https://github.com/ClickHouse/ClickHouse/issues/40557). [#42173](https://github.com/ClickHouse/ClickHouse/pull/42173) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Remove support for the `{database}` macro from the client's prompt. It was displayed incorrectly if the database was unspecified and it was not updated on `USE` statements. This closes [#25891](https://github.com/ClickHouse/ClickHouse/issues/25891). [#42508](https://github.com/ClickHouse/ClickHouse/pull/42508) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
* Composable protocol configuration is added. Now different protocols can be set up with different listen hosts. Protocol wrappers such as PROXYv1 can be set up over any other protocols (TCP, TCP secure, MySQL, Postgres). [#41198](https://github.com/ClickHouse/ClickHouse/pull/41198) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Add `S3` as a new type of the destination of backups. Support BACKUP to S3 with as-is path/data structure. [#42333](https://github.com/ClickHouse/ClickHouse/pull/42333) ([Vitaly Baranov](https://github.com/vitlibar)), [#42232](https://github.com/ClickHouse/ClickHouse/pull/42232) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Added functions (`randUniform`, `randNormal`, `randLogNormal`, `randExponential`, `randChiSquared`, `randStudentT`, `randFisherF`, `randBernoulli`, `randBinomial`, `randNegativeBinomial`, `randPoisson`) to generate random values according to the specified distributions. This closes [#21834](https://github.com/ClickHouse/ClickHouse/issues/21834). [#42411](https://github.com/ClickHouse/ClickHouse/pull/42411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* An improvement for ClickHouse Keeper: add support for uploading snapshots to S3. S3 information can be defined inside `keeper_server.s3_snapshot`. [#41342](https://github.com/ClickHouse/ClickHouse/pull/41342) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Added an aggregate function `analysisOfVariance` (`anova`) to perform a statistical test over several groups of normally distributed observations to find out whether all groups have the same mean or not. Original PR [#37872](https://github.com/ClickHouse/ClickHouse/issues/37872). [#42131](https://github.com/ClickHouse/ClickHouse/pull/42131) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Support limiting of temporary data stored on disk using settings `max_temporary_data_on_disk_size_for_user`/`max_temporary_data_on_disk_size_for_query` . [#40893](https://github.com/ClickHouse/ClickHouse/pull/40893) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Add setting `format_json_object_each_row_column_for_object_name` to write/parse object name as column value in JSONObjectEachRow format. [#41703](https://github.com/ClickHouse/ClickHouse/pull/41703) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Add BLAKE3 hash-function to SQL. [#33435](https://github.com/ClickHouse/ClickHouse/pull/33435) ([BoloniniD](https://github.com/BoloniniD)).
|
||||||
|
* The function `javaHash` has been extended to integers. [#41131](https://github.com/ClickHouse/ClickHouse/pull/41131) ([JackyWoo](https://github.com/JackyWoo)).
|
||||||
|
* Add OpenTelemetry support to ON CLUSTER DDL (require `distributed_ddl_entry_format_version` to be set to 4). [#41484](https://github.com/ClickHouse/ClickHouse/pull/41484) ([Frank Chen](https://github.com/FrankChen021)).
|
||||||
|
* Added system table `asynchronous_insert_log`. It contains information about asynchronous inserts (including results of queries in fire-and-forget mode (with `wait_for_async_insert=0`)) for better introspection. [#42040](https://github.com/ClickHouse/ClickHouse/pull/42040) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Add support for methods `lz4`, `bz2`, `snappy` in HTTP's `Accept-Encoding` which is a non-standard extension to HTTP protocol. [#42071](https://github.com/ClickHouse/ClickHouse/pull/42071) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Adds Morton Coding (ZCurve) encode/decode functions. [#41753](https://github.com/ClickHouse/ClickHouse/pull/41753) ([Constantine Peresypkin](https://github.com/pkit)).
|
||||||
|
* Add support for `SET setting_name = DEFAULT`. [#42187](https://github.com/ClickHouse/ClickHouse/pull/42187) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
|
||||||
|
|
||||||
|
#### Experimental Feature
|
||||||
|
* Added new infrastructure for query analysis and planning under the `allow_experimental_analyzer` setting. [#31796](https://github.com/ClickHouse/ClickHouse/pull/31796) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Initial implementation of Kusto Query Language. Please don't use it. [#37961](https://github.com/ClickHouse/ClickHouse/pull/37961) ([Yong Wang](https://github.com/kashwy)).
|
||||||
|
|
||||||
|
#### Performance Improvement
|
||||||
|
* Relax the "Too many parts" threshold. This closes [#6551](https://github.com/ClickHouse/ClickHouse/issues/6551). Now ClickHouse will allow more parts in a partition if the average part size is large enough (at least 10 GiB). This allows to have up to petabytes of data in a single partition of a single table on a single server, which is possible using disk shelves or object storage. [#42002](https://github.com/ClickHouse/ClickHouse/pull/42002) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Implement operator precedence element parser to make the required stack size smaller. [#34892](https://github.com/ClickHouse/ClickHouse/pull/34892) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* DISTINCT in order optimization leverage sorting properties of data streams. This improvement will enable reading in order for DISTINCT if applicable (before it was necessary to provide ORDER BY for columns in DISTINCT). [#41014](https://github.com/ClickHouse/ClickHouse/pull/41014) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* ColumnVector: optimize UInt8 index with AVX512VBMI. [#41247](https://github.com/ClickHouse/ClickHouse/pull/41247) ([Guo Wangyang](https://github.com/guowangy)).
|
||||||
|
* Optimize the lock contentions for `ThreadGroupStatus::mutex`. The performance experiments of **SSB** (Star Schema Benchmark) on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) shows that this change could bring a **2.95x** improvement of the geomean of all subcases' QPS. [#41675](https://github.com/ClickHouse/ClickHouse/pull/41675) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
|
||||||
|
* Add `ldapr` capabilities to AArch64 builds. This is supported from Graviton 2+, Azure and GCP instances. Only appeared in clang-15 [not so long ago](https://github.com/llvm/llvm-project/commit/9609b5daffe9fd28d83d83da895abc5113f76c24). [#41778](https://github.com/ClickHouse/ClickHouse/pull/41778) ([Daniel Kutenin](https://github.com/danlark1)).
|
||||||
|
* Improve performance when comparing strings and one argument is an empty constant string. [#41870](https://github.com/ClickHouse/ClickHouse/pull/41870) ([Jiebin Sun](https://github.com/jiebinn)).
|
||||||
|
* Optimize `insertFrom` of ColumnAggregateFunction to share Aggregate State in some cases. [#41960](https://github.com/ClickHouse/ClickHouse/pull/41960) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Make writing to `azure_blob_storage` disks faster (respect `max_single_part_upload_size` instead of writing a block per each buffer size). Inefficiency mentioned in [#41754](https://github.com/ClickHouse/ClickHouse/issues/41754). [#42041](https://github.com/ClickHouse/ClickHouse/pull/42041) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Make thread ids in the process list and query_log unique to avoid waste. [#42180](https://github.com/ClickHouse/ClickHouse/pull/42180) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Support skipping cache completely (both download to cache and reading cached data) in case the requested read range exceeds the threshold defined by cache setting `bypass_cache_threashold`, requires to be enabled with `enable_bypass_cache_with_threshold`). [#42418](https://github.com/ClickHouse/ClickHouse/pull/42418) ([Han Shukai](https://github.com/KinderRiven)). This helps on slow local disks.
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* Add setting `allow_implicit_no_password`: in combination with `allow_no_password` it forbids creating a user with no password unless `IDENTIFIED WITH no_password` is explicitly specified. [#41341](https://github.com/ClickHouse/ClickHouse/pull/41341) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Embedded Keeper will always start in the background allowing ClickHouse to start without achieving quorum. [#40991](https://github.com/ClickHouse/ClickHouse/pull/40991) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Made reestablishing a new connection to ZooKeeper more reactive in case of expiration of the previous one. Previously there was a task which spawns every minute by default and thus a table could be in readonly state for about this time. [#41092](https://github.com/ClickHouse/ClickHouse/pull/41092) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Now projections can be used with zero copy replication (zero-copy replication is a non-production feature). [#41147](https://github.com/ClickHouse/ClickHouse/pull/41147) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Support expression `(EXPLAIN SELECT ...)` in a subquery. Queries like `SELECT * FROM (EXPLAIN PIPELINE SELECT col FROM TABLE ORDER BY col)` became valid. [#40630](https://github.com/ClickHouse/ClickHouse/pull/40630) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Allow changing `async_insert_max_data_size` or `async_insert_busy_timeout_ms` in scope of query. E.g. user wants to insert data rarely and she doesn't have access to the server config to tune default settings. [#40668](https://github.com/ClickHouse/ClickHouse/pull/40668) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Improvements for reading from remote filesystems, made threadpool size for reads/writes configurable. Closes [#41070](https://github.com/ClickHouse/ClickHouse/issues/41070). [#41011](https://github.com/ClickHouse/ClickHouse/pull/41011) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Support all combinators combination in WindowTransform/arratReduce*/initializeAggregation/aggregate functions versioning. Previously combinators like `ForEach/Resample/Map` didn't work in these places, using them led to exception like`State function ... inserts results into non-state column`. [#41107](https://github.com/ClickHouse/ClickHouse/pull/41107) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Add function `tryDecrypt` that returns NULL when decrypt fails (e.g. decrypt with incorrect key) instead of throwing an exception. [#41206](https://github.com/ClickHouse/ClickHouse/pull/41206) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Add the `unreserved_space` column to the `system.disks` table to check how much space is not taken by reservations per disk. [#41254](https://github.com/ClickHouse/ClickHouse/pull/41254) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Support s3 authorization headers in table function arguments. [#41261](https://github.com/ClickHouse/ClickHouse/pull/41261) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Add support for MultiRead in Keeper and internal ZooKeeper client (this is an extension to ZooKeeper protocol, only available in ClickHouse Keeper). [#41410](https://github.com/ClickHouse/ClickHouse/pull/41410) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Add support for decimal type comparing with floating point literal in IN operator. [#41544](https://github.com/ClickHouse/ClickHouse/pull/41544) ([liang.huang](https://github.com/lhuang09287750)).
|
||||||
|
* Allow readable size values (like `1TB`) in cache config. [#41688](https://github.com/ClickHouse/ClickHouse/pull/41688) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* ClickHouse could cache stale DNS entries for some period of time (15 seconds by default) until the cache won't be updated asynchronously. During these periods ClickHouse can nevertheless try to establish a connection and produce errors. This behavior is fixed. [#41707](https://github.com/ClickHouse/ClickHouse/pull/41707) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Add interactive history search with fzf-like utility (fzf/sk) for `clickhouse-client`/`clickhouse-local` (note you can use `FZF_DEFAULT_OPTS`/`SKIM_DEFAULT_OPTIONS` to additionally configure the behavior). [#41730](https://github.com/ClickHouse/ClickHouse/pull/41730) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Only allow clients connecting to a secure server with an invalid certificate only to proceed with the '--accept-certificate' flag. [#41743](https://github.com/ClickHouse/ClickHouse/pull/41743) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Add function `tryBase58Decode`, similar to the existing function `tryBase64Decode`. [#41824](https://github.com/ClickHouse/ClickHouse/pull/41824) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Improve feedback when replacing partition with different primary key. Fixes [#34798](https://github.com/ClickHouse/ClickHouse/issues/34798). [#41838](https://github.com/ClickHouse/ClickHouse/pull/41838) ([Salvatore](https://github.com/tbsal)).
|
||||||
|
* Fix parallel parsing: segmentator now checks `max_block_size`. This fixed memory overallocation in case of parallel parsing and small LIMIT. [#41852](https://github.com/ClickHouse/ClickHouse/pull/41852) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Don't add "TABLE_IS_DROPPED" exception to `system.errors` if it's happened during SELECT from a system table and was ignored. [#41908](https://github.com/ClickHouse/ClickHouse/pull/41908) ([AlfVII](https://github.com/AlfVII)).
|
||||||
|
* Improve option `enable_extended_results_for_datetime_functions` to return results of type DateTime64 for functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute` and `timeSlot`. [#41910](https://github.com/ClickHouse/ClickHouse/pull/41910) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
|
* Improve `DateTime` type inference for text formats. Now it respects setting `date_time_input_format` and doesn't try to infer datetimes from numbers as timestamps. Closes [#41389](https://github.com/ClickHouse/ClickHouse/issues/41389) Closes [#42206](https://github.com/ClickHouse/ClickHouse/issues/42206). [#41912](https://github.com/ClickHouse/ClickHouse/pull/41912) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Remove confusing warning when inserting with `perform_ttl_move_on_insert` = false. [#41980](https://github.com/ClickHouse/ClickHouse/pull/41980) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Allow user to write `countState(*)` similar to `count(*)`. This closes [#9338](https://github.com/ClickHouse/ClickHouse/issues/9338). [#41983](https://github.com/ClickHouse/ClickHouse/pull/41983) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Fix `rankCorr` size overflow. [#42020](https://github.com/ClickHouse/ClickHouse/pull/42020) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Added an option to specify an arbitrary string as an environment name in the Sentry's config for more handy reports. [#42037](https://github.com/ClickHouse/ClickHouse/pull/42037) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fix parsing out-of-range Date from CSV. [#42044](https://github.com/ClickHouse/ClickHouse/pull/42044) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||||
|
* `parseDataTimeBestEffort` now supports comma between date and time. Closes [#42038](https://github.com/ClickHouse/ClickHouse/issues/42038). [#42049](https://github.com/ClickHouse/ClickHouse/pull/42049) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Improved stale replica recovery process for `ReplicatedMergeTree`. If a lost replica has some parts which are absent from a healthy replica, but these parts should appear in the future according to the replication queue of the healthy replica, then the lost replica will keep such parts instead of detaching them. [#42134](https://github.com/ClickHouse/ClickHouse/pull/42134) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Add a possibility to use `Date32` arguments for date_diff function. Fix issue in date_diff function when using DateTime64 arguments with a start date before Unix epoch and end date after Unix epoch. [#42308](https://github.com/ClickHouse/ClickHouse/pull/42308) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
|
* When uploading big parts to Minio, 'Complete Multipart Upload' can take a long time. Minio sends heartbeats every 10 seconds (see https://github.com/minio/minio/pull/7198). But clickhouse times out earlier, because the default send/receive timeout is [set](https://github.com/ClickHouse/ClickHouse/blob/cc24fcd6d5dfb67f5f66f5483e986bd1010ad9cf/src/IO/S3/PocoHTTPClient.cpp#L123) to 5 seconds. [#42321](https://github.com/ClickHouse/ClickHouse/pull/42321) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Fix rarely invalid cast of aggregate state types with complex types such as Decimal. This fixes [#42408](https://github.com/ClickHouse/ClickHouse/issues/42408). [#42417](https://github.com/ClickHouse/ClickHouse/pull/42417) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Allow to use `Date32` arguments for `dateName` function. [#42554](https://github.com/ClickHouse/ClickHouse/pull/42554) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
|
* Now filters with NULL literals will be used during index analysis. [#34063](https://github.com/ClickHouse/ClickHouse/issues/34063). [#41842](https://github.com/ClickHouse/ClickHouse/pull/41842) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Merge parts if every part in the range is older than a certain threshold. The threshold can be set by using `min_age_to_force_merge_seconds`. This closes [#35836](https://github.com/ClickHouse/ClickHouse/issues/35836). [#42423](https://github.com/ClickHouse/ClickHouse/pull/42423) ([Antonio Andelic](https://github.com/antonio2368)). This is continuation of [#39550i](https://github.com/ClickHouse/ClickHouse/pull/39550) by [@fastio](https://github.com/fastio) who implemented most of the logic.
|
||||||
|
* Improve the time to recover lost keeper connections. [#42541](https://github.com/ClickHouse/ClickHouse/pull/42541) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
* Add fuzzer for table definitions [#40096](https://github.com/ClickHouse/ClickHouse/pull/40096) ([Anton Popov](https://github.com/CurtizJ)). This represents the biggest advancement for ClickHouse testing in this year so far.
|
||||||
|
* Beta version of the ClickHouse Cloud service is released: [https://clickhouse.cloud/](https://clickhouse.cloud/). It provides the easiest way to use ClickHouse (even slightly easier than the single-command installation).
|
||||||
|
* Added support of WHERE clause generation to AST Fuzzer and possibility to add or remove ORDER BY and WHERE clause. [#38519](https://github.com/ClickHouse/ClickHouse/pull/38519) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Aarch64 binaries now require at least ARMv8.2, released in 2016. Most notably, this enables use of ARM LSE, i.e. native atomic operations. Also, CMake build option "NO_ARMV81_OR_HIGHER" has been added to allow compilation of binaries for older ARMv8.0 hardware, e.g. Raspberry Pi 4. [#41610](https://github.com/ClickHouse/ClickHouse/pull/41610) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Allow building ClickHouse with Musl (small changes after it was already supported but broken). [#41987](https://github.com/ClickHouse/ClickHouse/pull/41987) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add the `$CLICKHOUSE_CRONFILE` file checking to avoid running the `sed` command to get the file not found error on install. [#42081](https://github.com/ClickHouse/ClickHouse/pull/42081) ([Chun-Sheng, Li](https://github.com/peter279k)).
|
||||||
|
* Update cctz to `2022e` to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)). [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
|
||||||
|
* Add Rust code support into ClickHouse with BLAKE3 hash-function library as an example. [#33435](https://github.com/ClickHouse/ClickHouse/pull/33435) ([BoloniniD](https://github.com/BoloniniD)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
||||||
|
|
||||||
|
* Choose correct aggregation method for `LowCardinality` with big integer types. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Several fixes for `web` disk. [#41652](https://github.com/ClickHouse/ClickHouse/pull/41652) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fixes an issue that causes docker run to fail if `https_port` is not present in config. [#41693](https://github.com/ClickHouse/ClickHouse/pull/41693) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Mutations were not cancelled properly on server shutdown or `SYSTEM STOP MERGES` query and cancellation might take long time, it's fixed. [#41699](https://github.com/ClickHouse/ClickHouse/pull/41699) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix wrong result of queries with `ORDER BY` or `GROUP BY` by columns from prefix of sorting key, wrapped into monotonic functions, with enable "read in order" optimization (settings `optimize_read_in_order` and `optimize_aggregation_in_order`). [#41701](https://github.com/ClickHouse/ClickHouse/pull/41701) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fix possible crash in `SELECT` from `Merge` table with enabled `optimize_monotonous_functions_in_order_by` setting. Fixes [#41269](https://github.com/ClickHouse/ClickHouse/issues/41269). [#41740](https://github.com/ClickHouse/ClickHouse/pull/41740) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fixed "Part ... intersects part ..." error that might happen in extremely rare cases if replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Don't allow to create or alter merge tree tables with column name `_row_exists`, which is reserved for lightweight delete. Fixed [#41716](https://github.com/ClickHouse/ClickHouse/issues/41716). [#41763](https://github.com/ClickHouse/ClickHouse/pull/41763) ([Jianmei Zhang](https://github.com/zhangjmruc)).
|
||||||
|
* Fix a bug that CORS headers are missing in some HTTP responses. [#41792](https://github.com/ClickHouse/ClickHouse/pull/41792) ([Frank Chen](https://github.com/FrankChen021)).
|
||||||
|
* 22.9 might fail to startup `ReplicatedMergeTree` table if that table was created by 20.3 or older version and was never altered, it's fixed. Fixes [#41742](https://github.com/ClickHouse/ClickHouse/issues/41742). [#41796](https://github.com/ClickHouse/ClickHouse/pull/41796) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* When the batch sending fails for some reason, it cannot be automatically recovered, and if it is not processed in time, it will lead to accumulation, and the printed error message will become longer and longer, which will cause the http thread to block. [#41813](https://github.com/ClickHouse/ClickHouse/pull/41813) ([zhongyuankai](https://github.com/zhongyuankai)).
|
||||||
|
* Fix compact parts with compressed marks setting. Fixes [#41783](https://github.com/ClickHouse/ClickHouse/issues/41783) and [#41746](https://github.com/ClickHouse/ClickHouse/issues/41746). [#41823](https://github.com/ClickHouse/ClickHouse/pull/41823) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Old versions of Replicated database don't have a special marker in [Zoo]Keeper. We need to check only whether the node contains come obscure data instead of special mark. [#41875](https://github.com/ClickHouse/ClickHouse/pull/41875) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fix possible exception in fs cache. [#41884](https://github.com/ClickHouse/ClickHouse/pull/41884) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix `use_environment_credentials` for s3 table function. [#41970](https://github.com/ClickHouse/ClickHouse/pull/41970) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fixed "Directory already exists and is not empty" error on detaching broken part that might prevent `ReplicatedMergeTree` table from starting replication. Fixes [#40957](https://github.com/ClickHouse/ClickHouse/issues/40957). [#41981](https://github.com/ClickHouse/ClickHouse/pull/41981) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* `toDateTime64` now returns the same output with negative integer and float arguments. [#42025](https://github.com/ClickHouse/ClickHouse/pull/42025) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix write into `azure_blob_storage`. Partially closes [#41754](https://github.com/ClickHouse/ClickHouse/issues/41754). [#42034](https://github.com/ClickHouse/ClickHouse/pull/42034) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix the `bzip2` decoding issue for specific `bzip2` files. [#42046](https://github.com/ClickHouse/ClickHouse/pull/42046) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Fix SQL function `toLastDayOfMonth` with setting "enable_extended_results_for_datetime_functions = 1" at the beginning of the extended range (January 1900). - Fix SQL function "toRelativeWeekNum()" with setting "enable_extended_results_for_datetime_functions = 1" at the end of extended range (December 2299). - Improve the performance of SQL functions "toISOYear()", "toFirstDayNumOfISOYearIndex()" and "toYearWeekOfNewyearMode()" by avoiding unnecessary index arithmetics. [#42084](https://github.com/ClickHouse/ClickHouse/pull/42084) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
|
* The maximum size of fetches for each table accidentally was set to 8 while the pool size could be bigger. Now the maximum size of fetches for table is equal to the pool size. [#42090](https://github.com/ClickHouse/ClickHouse/pull/42090) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* A table might be shut down and a dictionary might be detached before checking if it can be dropped without breaking dependencies between tables; it's fixed. Fixes [#41982](https://github.com/ClickHouse/ClickHouse/issues/41982). [#42106](https://github.com/ClickHouse/ClickHouse/pull/42106) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix bad inefficiency of `remote_filesystem_read_method=read` with filesystem cache. Closes [#42125](https://github.com/ClickHouse/ClickHouse/issues/42125). [#42129](https://github.com/ClickHouse/ClickHouse/pull/42129) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix possible timeout exception for distributed queries with use_hedged_requests = 0. [#42130](https://github.com/ClickHouse/ClickHouse/pull/42130) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fixed a minor bug inside function `runningDifference` in case of using it with `Date32` type. Previously `Date` was used and it may cause some logical errors like `Bad cast from type DB::ColumnVector<int> to DB::ColumnVector<unsigned short>'`. [#42143](https://github.com/ClickHouse/ClickHouse/pull/42143) ([Alfred Xu](https://github.com/sperlingxx)).
|
||||||
|
* Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* DISTINCT in order fails with LOGICAL_ERROR if first column in sorting key contains function. [#42186](https://github.com/ClickHouse/ClickHouse/pull/42186) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix read from `Buffer` tables with read in order desc. [#42236](https://github.com/ClickHouse/ClickHouse/pull/42236) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Fix a bug which prevents ClickHouse to start when the `background_pool_size` setting is set on the default profile but `background_merges_mutations_concurrency_ratio` is not. [#42315](https://github.com/ClickHouse/ClickHouse/pull/42315) ([nvartolomei](https://github.com/nvartolomei)).
|
||||||
|
* `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Setting `additional_table_filters` were not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix a data race in query finish/cancel. This closes [#42346](https://github.com/ClickHouse/ClickHouse/issues/42346). [#42362](https://github.com/ClickHouse/ClickHouse/pull/42362) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* This reverts [#40217](https://github.com/ClickHouse/ClickHouse/issues/40217) which introduced a regression in date/time functions. [#42367](https://github.com/ClickHouse/ClickHouse/pull/42367) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix assert cast in join on falsy condition, Close [#42380](https://github.com/ClickHouse/ClickHouse/issues/42380). [#42407](https://github.com/ClickHouse/ClickHouse/pull/42407) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* `AggregateFunctionQuantile` now correctly works with UInt128 columns. Previously, the quantile state interpreted `UInt128` columns as `Int128` which could have led to incorrect results. [#42473](https://github.com/ClickHouse/ClickHouse/pull/42473) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix bad_cast assert during INSERT into `Annoy` indexes over non-Float32 columns. `Annoy` indices is an experimental feature. [#42485](https://github.com/ClickHouse/ClickHouse/pull/42485) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Arithmetic operator with Date or DateTime and 128 or 256-bit integer was referencing uninitialized memory. [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix unexpected table loading error when partition key contains alias function names during server upgrade. [#36379](https://github.com/ClickHouse/ClickHouse/pull/36379) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
|
||||||
|
|
||||||
### <a id="229"></a> ClickHouse release 22.9, 2022-09-22
|
### <a id="229"></a> ClickHouse release 22.9, 2022-09-22
|
||||||
|
|
||||||
#### Backward Incompatible Change
|
#### Backward Incompatible Change
|
||||||
|
|
||||||
* Upgrade from 20.3 and older to 22.9 and newer should be done through an intermediate version if there are any `ReplicatedMergeTree` tables, otherwise server with the new version will not start. [#40641](https://github.com/ClickHouse/ClickHouse/pull/40641) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
* Upgrade from 20.3 and older to 22.9 and newer should be done through an intermediate version if there are any `ReplicatedMergeTree` tables, otherwise server with the new version will not start. [#40641](https://github.com/ClickHouse/ClickHouse/pull/40641) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
* Remove the functions `accurate_Cast` and `accurate_CastOrNull` (they are different to `accurateCast` and `accurateCastOrNull` by underscore in the name and they are not affected by the value of `cast_keep_nullable` setting). These functions were undocumented, untested, unused, and unneeded. They appeared to be alive due to code generalization. [#40682](https://github.com/ClickHouse/ClickHouse/pull/40682) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Remove the functions `accurate_Cast` and `accurate_CastOrNull` (they are different to `accurateCast` and `accurateCastOrNull` by underscore in the name and they are not affected by the value of `cast_keep_nullable` setting). These functions were undocumented, untested, unused, and unneeded. They appeared to be alive due to code generalization. [#40682](https://github.com/ClickHouse/ClickHouse/pull/40682) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
* Add a test to ensure that every new table function will be documented. See [#40649](https://github.com/ClickHouse/ClickHouse/issues/40649). Rename table function `MeiliSearch` to `meilisearch`. [#40709](https://github.com/ClickHouse/ClickHouse/pull/40709) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Add a test to ensure that every new table function will be documented. See [#40649](https://github.com/ClickHouse/ClickHouse/issues/40649). Rename table function `MeiliSearch` to `meilisearch`. [#40709](https://github.com/ClickHouse/ClickHouse/pull/40709) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
@ -21,6 +155,7 @@
|
|||||||
* Make interpretation of YAML configs to be more conventional. [#41044](https://github.com/ClickHouse/ClickHouse/pull/41044) ([Vitaly Baranov](https://github.com/vitlibar)).
|
* Make interpretation of YAML configs to be more conventional. [#41044](https://github.com/ClickHouse/ClickHouse/pull/41044) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
|
||||||
#### New Feature
|
#### New Feature
|
||||||
|
|
||||||
* Support `insert_quorum = 'auto'` to use majority number. [#39970](https://github.com/ClickHouse/ClickHouse/pull/39970) ([Sachin](https://github.com/SachinSetiya)).
|
* Support `insert_quorum = 'auto'` to use majority number. [#39970](https://github.com/ClickHouse/ClickHouse/pull/39970) ([Sachin](https://github.com/SachinSetiya)).
|
||||||
* Add embedded dashboards to ClickHouse server. This is a demo project about how to achieve 90% results with 1% effort using ClickHouse features. [#40461](https://github.com/ClickHouse/ClickHouse/pull/40461) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Add embedded dashboards to ClickHouse server. This is a demo project about how to achieve 90% results with 1% effort using ClickHouse features. [#40461](https://github.com/ClickHouse/ClickHouse/pull/40461) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
* Added new settings constraint writability kind `changeable_in_readonly`. [#40631](https://github.com/ClickHouse/ClickHouse/pull/40631) ([Sergei Trifonov](https://github.com/serxa)).
|
* Added new settings constraint writability kind `changeable_in_readonly`. [#40631](https://github.com/ClickHouse/ClickHouse/pull/40631) ([Sergei Trifonov](https://github.com/serxa)).
|
||||||
@ -38,6 +173,7 @@
|
|||||||
* Improvement for in-memory data parts: remove completely processed WAL files. [#40592](https://github.com/ClickHouse/ClickHouse/pull/40592) ([Azat Khuzhin](https://github.com/azat)).
|
* Improvement for in-memory data parts: remove completely processed WAL files. [#40592](https://github.com/ClickHouse/ClickHouse/pull/40592) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
|
||||||
#### Performance Improvement
|
#### Performance Improvement
|
||||||
|
|
||||||
* Implement compression of marks and primary key. Close [#34437](https://github.com/ClickHouse/ClickHouse/issues/34437). [#37693](https://github.com/ClickHouse/ClickHouse/pull/37693) ([zhongyuankai](https://github.com/zhongyuankai)).
|
* Implement compression of marks and primary key. Close [#34437](https://github.com/ClickHouse/ClickHouse/issues/34437). [#37693](https://github.com/ClickHouse/ClickHouse/pull/37693) ([zhongyuankai](https://github.com/zhongyuankai)).
|
||||||
* Allow to load marks with threadpool in advance. Regulated by setting `load_marks_asynchronously` (default: 0). [#40821](https://github.com/ClickHouse/ClickHouse/pull/40821) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
* Allow to load marks with threadpool in advance. Regulated by setting `load_marks_asynchronously` (default: 0). [#40821](https://github.com/ClickHouse/ClickHouse/pull/40821) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
* Virtual filesystem over s3 will use random object names split into multiple path prefixes for better performance on AWS. [#40968](https://github.com/ClickHouse/ClickHouse/pull/40968) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Virtual filesystem over s3 will use random object names split into multiple path prefixes for better performance on AWS. [#40968](https://github.com/ClickHouse/ClickHouse/pull/40968) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
@ -58,6 +194,7 @@
|
|||||||
* Parallel hash JOIN for Float data types might be suboptimal. Make it better. [#41183](https://github.com/ClickHouse/ClickHouse/pull/41183) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Parallel hash JOIN for Float data types might be suboptimal. Make it better. [#41183](https://github.com/ClickHouse/ClickHouse/pull/41183) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
#### Improvement
|
#### Improvement
|
||||||
|
|
||||||
* During startup and ATTACH call, `ReplicatedMergeTree` tables will be readonly until the ZooKeeper connection is made and the setup is finished. [#40148](https://github.com/ClickHouse/ClickHouse/pull/40148) ([Antonio Andelic](https://github.com/antonio2368)).
|
* During startup and ATTACH call, `ReplicatedMergeTree` tables will be readonly until the ZooKeeper connection is made and the setup is finished. [#40148](https://github.com/ClickHouse/ClickHouse/pull/40148) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
* Add `enable_extended_results_for_datetime_functions` option to return results of type Date32 for functions toStartOfYear, toStartOfISOYear, toStartOfQuarter, toStartOfMonth, toStartOfWeek, toMonday and toLastDayOfMonth when argument is Date32 or DateTime64, otherwise results of Date type are returned. For compatibility reasons default value is ‘0’. [#41214](https://github.com/ClickHouse/ClickHouse/pull/41214) ([Roman Vasin](https://github.com/rvasin)).
|
* Add `enable_extended_results_for_datetime_functions` option to return results of type Date32 for functions toStartOfYear, toStartOfISOYear, toStartOfQuarter, toStartOfMonth, toStartOfWeek, toMonday and toLastDayOfMonth when argument is Date32 or DateTime64, otherwise results of Date type are returned. For compatibility reasons default value is ‘0’. [#41214](https://github.com/ClickHouse/ClickHouse/pull/41214) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
* For security and stability reasons, CatBoost models are no longer evaluated within the ClickHouse server. Instead, the evaluation is now done in the clickhouse-library-bridge, a separate process that loads the catboost library and communicates with the server process via HTTP. [#40897](https://github.com/ClickHouse/ClickHouse/pull/40897) ([Robert Schulze](https://github.com/rschu1ze)). [#39629](https://github.com/ClickHouse/ClickHouse/pull/39629) ([Robert Schulze](https://github.com/rschu1ze)).
|
* For security and stability reasons, CatBoost models are no longer evaluated within the ClickHouse server. Instead, the evaluation is now done in the clickhouse-library-bridge, a separate process that loads the catboost library and communicates with the server process via HTTP. [#40897](https://github.com/ClickHouse/ClickHouse/pull/40897) ([Robert Schulze](https://github.com/rschu1ze)). [#39629](https://github.com/ClickHouse/ClickHouse/pull/39629) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
@ -108,6 +245,7 @@
|
|||||||
* Add `has_lightweight_delete` to system.parts. [#41564](https://github.com/ClickHouse/ClickHouse/pull/41564) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
* Add `has_lightweight_delete` to system.parts. [#41564](https://github.com/ClickHouse/ClickHouse/pull/41564) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
|
||||||
#### Build/Testing/Packaging Improvement
|
#### Build/Testing/Packaging Improvement
|
||||||
|
|
||||||
* Enforce documentation for every setting. [#40644](https://github.com/ClickHouse/ClickHouse/pull/40644) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Enforce documentation for every setting. [#40644](https://github.com/ClickHouse/ClickHouse/pull/40644) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
* Enforce documentation for every current metric. [#40645](https://github.com/ClickHouse/ClickHouse/pull/40645) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Enforce documentation for every current metric. [#40645](https://github.com/ClickHouse/ClickHouse/pull/40645) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
* Enforce documentation for every profile event counter. Write the documentation where it was missing. [#40646](https://github.com/ClickHouse/ClickHouse/pull/40646) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Enforce documentation for every profile event counter. Write the documentation where it was missing. [#40646](https://github.com/ClickHouse/ClickHouse/pull/40646) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
@ -217,15 +355,16 @@
|
|||||||
* Fix read bytes/rows in X-ClickHouse-Summary with materialized views. [#41586](https://github.com/ClickHouse/ClickHouse/pull/41586) ([Raúl Marín](https://github.com/Algunenano)).
|
* Fix read bytes/rows in X-ClickHouse-Summary with materialized views. [#41586](https://github.com/ClickHouse/ClickHouse/pull/41586) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
* Fix possible `pipeline stuck` exception for queries with `OFFSET`. The error was found with `enable_optimize_predicate_expression = 0` and always false condition in `WHERE`. Fixes [#41383](https://github.com/ClickHouse/ClickHouse/issues/41383). [#41588](https://github.com/ClickHouse/ClickHouse/pull/41588) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
* Fix possible `pipeline stuck` exception for queries with `OFFSET`. The error was found with `enable_optimize_predicate_expression = 0` and always false condition in `WHERE`. Fixes [#41383](https://github.com/ClickHouse/ClickHouse/issues/41383). [#41588](https://github.com/ClickHouse/ClickHouse/pull/41588) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
|
||||||
|
### <a id="228"></a> ClickHouse release 22.8-lts, 2022-08-18
|
||||||
### <a id="228"></a> ClickHouse release 22.8, 2022-08-18
|
|
||||||
|
|
||||||
#### Backward Incompatible Change
|
#### Backward Incompatible Change
|
||||||
|
|
||||||
* Extended range of `Date32` and `DateTime64` to support dates from the year 1900 to 2299. In previous versions, the supported interval was only from the year 1925 to 2283. The implementation is using the proleptic Gregorian calendar (which is conformant with [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601):2004 (clause 3.2.1 The Gregorian calendar)) instead of accounting for historical transitions from the Julian to the Gregorian calendar. This change affects implementation-specific behavior for out-of-range arguments. E.g. if in previous versions the value of `1899-01-01` was clamped to `1925-01-01`, in the new version it will be clamped to `1900-01-01`. It changes the behavior of rounding with `toStartOfInterval` if you pass `INTERVAL 3 QUARTER` up to one quarter because the intervals are counted from an implementation-specific point of time. Closes [#28216](https://github.com/ClickHouse/ClickHouse/issues/28216), improves [#38393](https://github.com/ClickHouse/ClickHouse/issues/38393). [#39425](https://github.com/ClickHouse/ClickHouse/pull/39425) ([Roman Vasin](https://github.com/rvasin)).
|
* Extended range of `Date32` and `DateTime64` to support dates from the year 1900 to 2299. In previous versions, the supported interval was only from the year 1925 to 2283. The implementation is using the proleptic Gregorian calendar (which is conformant with [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601):2004 (clause 3.2.1 The Gregorian calendar)) instead of accounting for historical transitions from the Julian to the Gregorian calendar. This change affects implementation-specific behavior for out-of-range arguments. E.g. if in previous versions the value of `1899-01-01` was clamped to `1925-01-01`, in the new version it will be clamped to `1900-01-01`. It changes the behavior of rounding with `toStartOfInterval` if you pass `INTERVAL 3 QUARTER` up to one quarter because the intervals are counted from an implementation-specific point of time. Closes [#28216](https://github.com/ClickHouse/ClickHouse/issues/28216), improves [#38393](https://github.com/ClickHouse/ClickHouse/issues/38393). [#39425](https://github.com/ClickHouse/ClickHouse/pull/39425) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
* Now, all relevant dictionary sources respect `remote_url_allow_hosts` setting. It was already done for HTTP, Cassandra, Redis. Added ClickHouse, MongoDB, MySQL, PostgreSQL. Host is checked only for dictionaries created from DDL. [#39184](https://github.com/ClickHouse/ClickHouse/pull/39184) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
* Now, all relevant dictionary sources respect `remote_url_allow_hosts` setting. It was already done for HTTP, Cassandra, Redis. Added ClickHouse, MongoDB, MySQL, PostgreSQL. Host is checked only for dictionaries created from DDL. [#39184](https://github.com/ClickHouse/ClickHouse/pull/39184) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
* Make the remote filesystem cache composable, allow not to evict certain files (regarding idx, mrk, ..), delete old cache version. Now it is possible to configure cache over Azure blob storage disk, over Local disk, over StaticWeb disk, etc. This PR is marked backward incompatible because cache configuration changes and in order for cache to work need to update the config file. Old cache will still be used with new configuration. The server will startup fine with the old cache configuration. Closes https://github.com/ClickHouse/ClickHouse/issues/36140. Closes https://github.com/ClickHouse/ClickHouse/issues/37889. ([Kseniia Sumarokova](https://github.com/kssenii)). [#36171](https://github.com/ClickHouse/ClickHouse/pull/36171))
|
* Make the remote filesystem cache composable, allow not to evict certain files (regarding idx, mrk, ..), delete old cache version. Now it is possible to configure cache over Azure blob storage disk, over Local disk, over StaticWeb disk, etc. This PR is marked backward incompatible because cache configuration changes and in order for cache to work need to update the config file. Old cache will still be used with new configuration. The server will startup fine with the old cache configuration. Closes https://github.com/ClickHouse/ClickHouse/issues/36140. Closes https://github.com/ClickHouse/ClickHouse/issues/37889. ([Kseniia Sumarokova](https://github.com/kssenii)). [#36171](https://github.com/ClickHouse/ClickHouse/pull/36171))
|
||||||
|
|
||||||
#### New Feature
|
#### New Feature
|
||||||
|
|
||||||
* Query parameters can be set in interactive mode as `SET param_abc = 'def'` and transferred via the native protocol as settings. [#39906](https://github.com/ClickHouse/ClickHouse/pull/39906) ([Nikita Taranov](https://github.com/nickitat)).
|
* Query parameters can be set in interactive mode as `SET param_abc = 'def'` and transferred via the native protocol as settings. [#39906](https://github.com/ClickHouse/ClickHouse/pull/39906) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
* Quota key can be set in the native protocol ([Yakov Olkhovsky](https://github.com/ClickHouse/ClickHouse/pull/39874)).
|
* Quota key can be set in the native protocol ([Yakov Olkhovsky](https://github.com/ClickHouse/ClickHouse/pull/39874)).
|
||||||
* Added a setting `exact_rows_before_limit` (0/1). When enabled, ClickHouse will provide exact value for `rows_before_limit_at_least` statistic, but with the cost that the data before limit will have to be read completely. This closes [#6613](https://github.com/ClickHouse/ClickHouse/issues/6613). [#25333](https://github.com/ClickHouse/ClickHouse/pull/25333) ([kevin wan](https://github.com/MaxWk)).
|
* Added a setting `exact_rows_before_limit` (0/1). When enabled, ClickHouse will provide exact value for `rows_before_limit_at_least` statistic, but with the cost that the data before limit will have to be read completely. This closes [#6613](https://github.com/ClickHouse/ClickHouse/issues/6613). [#25333](https://github.com/ClickHouse/ClickHouse/pull/25333) ([kevin wan](https://github.com/MaxWk)).
|
||||||
@ -240,12 +379,14 @@
|
|||||||
* Add new setting schema_inference_hints that allows to specify structure hints in schema inference for specific columns. Closes [#39569](https://github.com/ClickHouse/ClickHouse/issues/39569). [#40068](https://github.com/ClickHouse/ClickHouse/pull/40068) ([Kruglov Pavel](https://github.com/Avogar)).
|
* Add new setting schema_inference_hints that allows to specify structure hints in schema inference for specific columns. Closes [#39569](https://github.com/ClickHouse/ClickHouse/issues/39569). [#40068](https://github.com/ClickHouse/ClickHouse/pull/40068) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
|
||||||
#### Experimental Feature
|
#### Experimental Feature
|
||||||
|
|
||||||
* Support SQL standard DELETE FROM syntax on merge tree tables and lightweight delete implementation for merge tree families. [#37893](https://github.com/ClickHouse/ClickHouse/pull/37893) ([Jianmei Zhang](https://github.com/zhangjmruc)) ([Alexander Gololobov](https://github.com/davenger)). Note: this new feature does not make ClickHouse an HTAP DBMS.
|
* Support SQL standard DELETE FROM syntax on merge tree tables and lightweight delete implementation for merge tree families. [#37893](https://github.com/ClickHouse/ClickHouse/pull/37893) ([Jianmei Zhang](https://github.com/zhangjmruc)) ([Alexander Gololobov](https://github.com/davenger)). Note: this new feature does not make ClickHouse an HTAP DBMS.
|
||||||
|
|
||||||
#### Performance Improvement
|
#### Performance Improvement
|
||||||
|
|
||||||
* Improved memory usage during memory efficient merging of aggregation results. [#39429](https://github.com/ClickHouse/ClickHouse/pull/39429) ([Nikita Taranov](https://github.com/nickitat)).
|
* Improved memory usage during memory efficient merging of aggregation results. [#39429](https://github.com/ClickHouse/ClickHouse/pull/39429) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
* Added concurrency control logic to limit total number of concurrent threads created by queries. [#37558](https://github.com/ClickHouse/ClickHouse/pull/37558) ([Sergei Trifonov](https://github.com/serxa)). Add the `concurrent_threads_soft_limit` parameter to increase performance in case of high QPS by means of limiting total number of threads for all queries. [#37285](https://github.com/ClickHouse/ClickHouse/pull/37285) ([Roman Vasin](https://github.com/rvasin)).
|
* Added concurrency control logic to limit total number of concurrent threads created by queries. [#37558](https://github.com/ClickHouse/ClickHouse/pull/37558) ([Sergei Trifonov](https://github.com/serxa)). Add the `concurrent_threads_soft_limit` parameter to increase performance in case of high QPS by means of limiting total number of threads for all queries. [#37285](https://github.com/ClickHouse/ClickHouse/pull/37285) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
* Add `SLRU` cache policy for uncompressed cache and marks cache. ([Kseniia Sumarokova](https://github.com/kssenii)). [#34651](https://github.com/ClickHouse/ClickHouse/pull/34651) ([alexX512](https://github.com/alexX512)). Decoupling local cache function and cache algorithm [#38048](https://github.com/ClickHouse/ClickHouse/pull/38048) ([Han Shukai](https://github.com/KinderRiven)).
|
* Add `SLRU` cache policy for uncompressed cache and marks cache. ([Kseniia Sumarokova](https://github.com/kssenii)). [#34651](https://github.com/ClickHouse/ClickHouse/pull/34651) ([alexX512](https://github.com/alexX512)). Decoupling local cache function and cache algorithm [#38048](https://github.com/ClickHouse/ClickHouse/pull/38048) ([Han Shukai](https://github.com/KinderRiven)).
|
||||||
* Intel® In-Memory Analytics Accelerator (Intel® IAA) is a hardware accelerator available in the upcoming generation of Intel® Xeon® Scalable processors ("Sapphire Rapids"). Its goal is to speed up common operations in analytics like data (de)compression and filtering. ClickHouse gained the new "DeflateQpl" compression codec which utilizes the Intel® IAA offloading technology to provide a high-performance DEFLATE implementation. The codec uses the [Intel® Query Processing Library (QPL)](https://github.com/intel/qpl) which abstracts access to the hardware accelerator, respectively to a software fallback in case the hardware accelerator is not available. DEFLATE provides in general higher compression rates than ClickHouse's LZ4 default codec, and as a result, offers less disk I/O and lower main memory consumption. [#36654](https://github.com/ClickHouse/ClickHouse/pull/36654) ([jasperzhu](https://github.com/jinjunzh)). [#39494](https://github.com/ClickHouse/ClickHouse/pull/39494) ([Robert Schulze](https://github.com/rschu1ze)).
|
* Intel® In-Memory Analytics Accelerator (Intel® IAA) is a hardware accelerator available in the upcoming generation of Intel® Xeon® Scalable processors ("Sapphire Rapids"). Its goal is to speed up common operations in analytics like data (de)compression and filtering. ClickHouse gained the new "DeflateQpl" compression codec which utilizes the Intel® IAA offloading technology to provide a high-performance DEFLATE implementation. The codec uses the [Intel® Query Processing Library (QPL)](https://github.com/intel/qpl) which abstracts access to the hardware accelerator, respectively to a software fallback in case the hardware accelerator is not available. DEFLATE provides in general higher compression rates than ClickHouse's LZ4 default codec, and as a result, offers less disk I/O and lower main memory consumption. [#36654](https://github.com/ClickHouse/ClickHouse/pull/36654) ([jasperzhu](https://github.com/jinjunzh)). [#39494](https://github.com/ClickHouse/ClickHouse/pull/39494) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
* `DISTINCT` in order with `ORDER BY`: Deduce way to sort based on input stream sort description. Skip sorting if input stream is already sorted. [#38719](https://github.com/ClickHouse/ClickHouse/pull/38719) ([Igor Nikonov](https://github.com/devcrafter)). Improve memory usage (significantly) and query execution time + use `DistinctSortedChunkTransform` for final distinct when `DISTINCT` columns match `ORDER BY` columns, but rename to `DistinctSortedStreamTransform` in `EXPLAIN PIPELINE` → this improves memory usage significantly + remove unnecessary allocations in hot loop in `DistinctSortedChunkTransform`. [#39432](https://github.com/ClickHouse/ClickHouse/pull/39432) ([Igor Nikonov](https://github.com/devcrafter)). Use `DistinctSortedTransform` only when sort description is applicable to DISTINCT columns, otherwise fall back to ordinary DISTINCT implementation + it allows making less checks during `DistinctSortedTransform` execution. [#39528](https://github.com/ClickHouse/ClickHouse/pull/39528) ([Igor Nikonov](https://github.com/devcrafter)). Fix: `DistinctSortedTransform` didn't take advantage of sorting. It never cleared HashSet since clearing_columns were detected incorrectly (always empty). So, it basically worked as ordinary `DISTINCT` (`DistinctTransform`). The fix reduces memory usage significantly. [#39538](https://github.com/ClickHouse/ClickHouse/pull/39538) ([Igor Nikonov](https://github.com/devcrafter)).
|
* `DISTINCT` in order with `ORDER BY`: Deduce way to sort based on input stream sort description. Skip sorting if input stream is already sorted. [#38719](https://github.com/ClickHouse/ClickHouse/pull/38719) ([Igor Nikonov](https://github.com/devcrafter)). Improve memory usage (significantly) and query execution time + use `DistinctSortedChunkTransform` for final distinct when `DISTINCT` columns match `ORDER BY` columns, but rename to `DistinctSortedStreamTransform` in `EXPLAIN PIPELINE` → this improves memory usage significantly + remove unnecessary allocations in hot loop in `DistinctSortedChunkTransform`. [#39432](https://github.com/ClickHouse/ClickHouse/pull/39432) ([Igor Nikonov](https://github.com/devcrafter)). Use `DistinctSortedTransform` only when sort description is applicable to DISTINCT columns, otherwise fall back to ordinary DISTINCT implementation + it allows making less checks during `DistinctSortedTransform` execution. [#39528](https://github.com/ClickHouse/ClickHouse/pull/39528) ([Igor Nikonov](https://github.com/devcrafter)). Fix: `DistinctSortedTransform` didn't take advantage of sorting. It never cleared HashSet since clearing_columns were detected incorrectly (always empty). So, it basically worked as ordinary `DISTINCT` (`DistinctTransform`). The fix reduces memory usage significantly. [#39538](https://github.com/ClickHouse/ClickHouse/pull/39538) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
* Use local node as first priority to get structure of remote table when executing `cluster` and similar table functions. [#39440](https://github.com/ClickHouse/ClickHouse/pull/39440) ([Mingliang Pan](https://github.com/liangliangpan)).
|
* Use local node as first priority to get structure of remote table when executing `cluster` and similar table functions. [#39440](https://github.com/ClickHouse/ClickHouse/pull/39440) ([Mingliang Pan](https://github.com/liangliangpan)).
|
||||||
@ -256,6 +397,7 @@
|
|||||||
* Improve bytes to bits mask transform for SSE/AVX/AVX512. [#39586](https://github.com/ClickHouse/ClickHouse/pull/39586) ([Guo Wangyang](https://github.com/guowangy)).
|
* Improve bytes to bits mask transform for SSE/AVX/AVX512. [#39586](https://github.com/ClickHouse/ClickHouse/pull/39586) ([Guo Wangyang](https://github.com/guowangy)).
|
||||||
|
|
||||||
#### Improvement
|
#### Improvement
|
||||||
|
|
||||||
* Normalize `AggregateFunction` types and state representations because optimizations like [#35788](https://github.com/ClickHouse/ClickHouse/pull/35788) will treat `count(not null columns)` as `count()`, which might confuse distributed interpreters with the following error: `Conversion from AggregateFunction(count) to AggregateFunction(count, Int64) is not supported`. [#39420](https://github.com/ClickHouse/ClickHouse/pull/39420) ([Amos Bird](https://github.com/amosbird)). The functions with identical states can be used in materialized views interchangeably.
|
* Normalize `AggregateFunction` types and state representations because optimizations like [#35788](https://github.com/ClickHouse/ClickHouse/pull/35788) will treat `count(not null columns)` as `count()`, which might confuse distributed interpreters with the following error: `Conversion from AggregateFunction(count) to AggregateFunction(count, Int64) is not supported`. [#39420](https://github.com/ClickHouse/ClickHouse/pull/39420) ([Amos Bird](https://github.com/amosbird)). The functions with identical states can be used in materialized views interchangeably.
|
||||||
* Rework and simplify the `system.backups` table, remove the `internal` column, allow user to set the ID of operation, add columns `num_files`, `uncompressed_size`, `compressed_size`, `start_time`, `end_time`. [#39503](https://github.com/ClickHouse/ClickHouse/pull/39503) ([Vitaly Baranov](https://github.com/vitlibar)).
|
* Rework and simplify the `system.backups` table, remove the `internal` column, allow user to set the ID of operation, add columns `num_files`, `uncompressed_size`, `compressed_size`, `start_time`, `end_time`. [#39503](https://github.com/ClickHouse/ClickHouse/pull/39503) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
* Improved structure of DDL query result table for `Replicated` database (separate columns with shard and replica name, more clear status) - `CREATE TABLE ... ON CLUSTER` queries can be normalized on initiator first if `distributed_ddl_entry_format_version` is set to 3 (default value). It means that `ON CLUSTER` queries may not work if initiator does not belong to the cluster that specified in query. Fixes [#37318](https://github.com/ClickHouse/ClickHouse/issues/37318), [#39500](https://github.com/ClickHouse/ClickHouse/issues/39500) - Ignore `ON CLUSTER` clause if database is `Replicated` and cluster name equals to database name. Related to [#35570](https://github.com/ClickHouse/ClickHouse/issues/35570) - Miscellaneous minor fixes for `Replicated` database engine - Check metadata consistency when starting up `Replicated` database, start replica recovery in case of mismatch of local metadata and metadata in Keeper. Resolves [#24880](https://github.com/ClickHouse/ClickHouse/issues/24880). [#37198](https://github.com/ClickHouse/ClickHouse/pull/37198) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
* Improved structure of DDL query result table for `Replicated` database (separate columns with shard and replica name, more clear status) - `CREATE TABLE ... ON CLUSTER` queries can be normalized on initiator first if `distributed_ddl_entry_format_version` is set to 3 (default value). It means that `ON CLUSTER` queries may not work if initiator does not belong to the cluster that specified in query. Fixes [#37318](https://github.com/ClickHouse/ClickHouse/issues/37318), [#39500](https://github.com/ClickHouse/ClickHouse/issues/39500) - Ignore `ON CLUSTER` clause if database is `Replicated` and cluster name equals to database name. Related to [#35570](https://github.com/ClickHouse/ClickHouse/issues/35570) - Miscellaneous minor fixes for `Replicated` database engine - Check metadata consistency when starting up `Replicated` database, start replica recovery in case of mismatch of local metadata and metadata in Keeper. Resolves [#24880](https://github.com/ClickHouse/ClickHouse/issues/24880). [#37198](https://github.com/ClickHouse/ClickHouse/pull/37198) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
@ -294,6 +436,7 @@
|
|||||||
* Add support for LARGE_BINARY/LARGE_STRING with Arrow (Closes [#32401](https://github.com/ClickHouse/ClickHouse/issues/32401)). [#40293](https://github.com/ClickHouse/ClickHouse/pull/40293) ([Josh Taylor](https://github.com/joshuataylor)).
|
* Add support for LARGE_BINARY/LARGE_STRING with Arrow (Closes [#32401](https://github.com/ClickHouse/ClickHouse/issues/32401)). [#40293](https://github.com/ClickHouse/ClickHouse/pull/40293) ([Josh Taylor](https://github.com/joshuataylor)).
|
||||||
|
|
||||||
#### Build/Testing/Packaging Improvement
|
#### Build/Testing/Packaging Improvement
|
||||||
|
|
||||||
* [ClickFiddle](https://fiddle.clickhouse.com/): A new tool for testing ClickHouse versions in read/write mode (**Igor Baliuk**).
|
* [ClickFiddle](https://fiddle.clickhouse.com/): A new tool for testing ClickHouse versions in read/write mode (**Igor Baliuk**).
|
||||||
* ClickHouse binary is made self-extracting [#35775](https://github.com/ClickHouse/ClickHouse/pull/35775) ([Yakov Olkhovskiy, Arthur Filatenkov](https://github.com/yakov-olkhovskiy)).
|
* ClickHouse binary is made self-extracting [#35775](https://github.com/ClickHouse/ClickHouse/pull/35775) ([Yakov Olkhovskiy, Arthur Filatenkov](https://github.com/yakov-olkhovskiy)).
|
||||||
* Update tzdata to 2022b to support the new timezone changes. See https://github.com/google/cctz/pull/226. Chile's 2022 DST start is delayed from September 4 to September 11. Iran plans to stop observing DST permanently, after it falls back on 2022-09-21. There are corrections of the historical time zone of Asia/Tehran in the year 1977: Iran adopted standard time in 1935, not 1946. In 1977 it observed DST from 03-21 23:00 to 10-20 24:00; its 1978 transitions were on 03-24 and 08-05, not 03-20 and 10-20; and its spring 1979 transition was on 05-27, not 03-21 (https://data.iana.org/time-zones/tzdb/NEWS). ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Update tzdata to 2022b to support the new timezone changes. See https://github.com/google/cctz/pull/226. Chile's 2022 DST start is delayed from September 4 to September 11. Iran plans to stop observing DST permanently, after it falls back on 2022-09-21. There are corrections of the historical time zone of Asia/Tehran in the year 1977: Iran adopted standard time in 1935, not 1946. In 1977 it observed DST from 03-21 23:00 to 10-20 24:00; its 1978 transitions were on 03-24 and 08-05, not 03-20 and 10-20; and its spring 1979 transition was on 05-27, not 03-21 (https://data.iana.org/time-zones/tzdb/NEWS). ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
@ -308,6 +451,7 @@
|
|||||||
* Docker: Now entrypoint.sh in docker image creates and executes chown for all folders it found in config for multidisk setup [#17717](https://github.com/ClickHouse/ClickHouse/issues/17717). [#39121](https://github.com/ClickHouse/ClickHouse/pull/39121) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
* Docker: Now entrypoint.sh in docker image creates and executes chown for all folders it found in config for multidisk setup [#17717](https://github.com/ClickHouse/ClickHouse/issues/17717). [#39121](https://github.com/ClickHouse/ClickHouse/pull/39121) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
|
||||||
#### Bug Fix
|
#### Bug Fix
|
||||||
|
|
||||||
* Fix possible segfault in `CapnProto` input format. This bug was found and sent through ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)).
|
* Fix possible segfault in `CapnProto` input format. This bug was found and sent through ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
* Fix a very rare case of incorrect behavior of array subscript operator. This closes [#28720](https://github.com/ClickHouse/ClickHouse/issues/28720). [#40185](https://github.com/ClickHouse/ClickHouse/pull/40185) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Fix a very rare case of incorrect behavior of array subscript operator. This closes [#28720](https://github.com/ClickHouse/ClickHouse/issues/28720). [#40185](https://github.com/ClickHouse/ClickHouse/pull/40185) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
* Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
@ -358,16 +502,17 @@
|
|||||||
* A fix for reverse DNS resolution. [#40134](https://github.com/ClickHouse/ClickHouse/pull/40134) ([Arthur Passos](https://github.com/arthurpassos)).
|
* A fix for reverse DNS resolution. [#40134](https://github.com/ClickHouse/ClickHouse/pull/40134) ([Arthur Passos](https://github.com/arthurpassos)).
|
||||||
* Fix unexpected result of `arrayDifference` of `Array(UInt32)`. [#40211](https://github.com/ClickHouse/ClickHouse/pull/40211) ([Duc Canh Le](https://github.com/canhld94)).
|
* Fix unexpected result of `arrayDifference` of `Array(UInt32)`. [#40211](https://github.com/ClickHouse/ClickHouse/pull/40211) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
|
||||||
|
|
||||||
### <a id="227"></a> ClickHouse release 22.7, 2022-07-21
|
### <a id="227"></a> ClickHouse release 22.7, 2022-07-21
|
||||||
|
|
||||||
#### Upgrade Notes
|
#### Upgrade Notes
|
||||||
|
|
||||||
* Enable setting `enable_positional_arguments` by default. It allows queries like `SELECT ... ORDER BY 1, 2` where 1, 2 are the references to the select clause. If you need to return the old behavior, disable this setting. [#38204](https://github.com/ClickHouse/ClickHouse/pull/38204) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Enable setting `enable_positional_arguments` by default. It allows queries like `SELECT ... ORDER BY 1, 2` where 1, 2 are the references to the select clause. If you need to return the old behavior, disable this setting. [#38204](https://github.com/ClickHouse/ClickHouse/pull/38204) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
* Disable `format_csv_allow_single_quotes` by default. See [#37096](https://github.com/ClickHouse/ClickHouse/issues/37096). ([Kruglov Pavel](https://github.com/Avogar)).
|
* Disable `format_csv_allow_single_quotes` by default. See [#37096](https://github.com/ClickHouse/ClickHouse/issues/37096). ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
* `Ordinary` database engine and old storage definition syntax for `*MergeTree` tables are deprecated. By default it's not possible to create new databases with `Ordinary` engine. If `system` database has `Ordinary` engine it will be automatically converted to `Atomic` on server startup. There are settings to keep old behavior (`allow_deprecated_database_ordinary` and `allow_deprecated_syntax_for_merge_tree`), but these settings may be removed in future releases. [#38335](https://github.com/ClickHouse/ClickHouse/pull/38335) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
* `Ordinary` database engine and old storage definition syntax for `*MergeTree` tables are deprecated. By default it's not possible to create new databases with `Ordinary` engine. If `system` database has `Ordinary` engine it will be automatically converted to `Atomic` on server startup. There are settings to keep old behavior (`allow_deprecated_database_ordinary` and `allow_deprecated_syntax_for_merge_tree`), but these settings may be removed in future releases. [#38335](https://github.com/ClickHouse/ClickHouse/pull/38335) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
* Force rewriting comma join to inner by default (set default value `cross_to_inner_join_rewrite = 2`). To have old behavior set `cross_to_inner_join_rewrite = 1`. [#39326](https://github.com/ClickHouse/ClickHouse/pull/39326) ([Vladimir C](https://github.com/vdimir)). If you face any incompatibilities, you can turn this setting back.
|
* Force rewriting comma join to inner by default (set default value `cross_to_inner_join_rewrite = 2`). To have old behavior set `cross_to_inner_join_rewrite = 1`. [#39326](https://github.com/ClickHouse/ClickHouse/pull/39326) ([Vladimir C](https://github.com/vdimir)). If you face any incompatibilities, you can turn this setting back.
|
||||||
|
|
||||||
#### New Feature
|
#### New Feature
|
||||||
|
|
||||||
* Support expressions with window functions. Closes [#19857](https://github.com/ClickHouse/ClickHouse/issues/19857). [#37848](https://github.com/ClickHouse/ClickHouse/pull/37848) ([Dmitry Novik](https://github.com/novikd)).
|
* Support expressions with window functions. Closes [#19857](https://github.com/ClickHouse/ClickHouse/issues/19857). [#37848](https://github.com/ClickHouse/ClickHouse/pull/37848) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
* Add new `direct` join algorithm for `EmbeddedRocksDB` tables, see [#33582](https://github.com/ClickHouse/ClickHouse/issues/33582). [#35363](https://github.com/ClickHouse/ClickHouse/pull/35363) ([Vladimir C](https://github.com/vdimir)).
|
* Add new `direct` join algorithm for `EmbeddedRocksDB` tables, see [#33582](https://github.com/ClickHouse/ClickHouse/issues/33582). [#35363](https://github.com/ClickHouse/ClickHouse/pull/35363) ([Vladimir C](https://github.com/vdimir)).
|
||||||
* Added full sorting merge join algorithm. [#35796](https://github.com/ClickHouse/ClickHouse/pull/35796) ([Vladimir C](https://github.com/vdimir)).
|
* Added full sorting merge join algorithm. [#35796](https://github.com/ClickHouse/ClickHouse/pull/35796) ([Vladimir C](https://github.com/vdimir)).
|
||||||
@ -395,9 +540,11 @@
|
|||||||
* Add `clickhouse-diagnostics` binary to the packages. [#38647](https://github.com/ClickHouse/ClickHouse/pull/38647) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
* Add `clickhouse-diagnostics` binary to the packages. [#38647](https://github.com/ClickHouse/ClickHouse/pull/38647) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
|
||||||
#### Experimental Feature
|
#### Experimental Feature
|
||||||
|
|
||||||
* Adds new setting `implicit_transaction` to run standalone queries inside a transaction. It handles both creation and closing (via COMMIT if the query succeeded or ROLLBACK if it didn't) of the transaction automatically. [#38344](https://github.com/ClickHouse/ClickHouse/pull/38344) ([Raúl Marín](https://github.com/Algunenano)).
|
* Adds new setting `implicit_transaction` to run standalone queries inside a transaction. It handles both creation and closing (via COMMIT if the query succeeded or ROLLBACK if it didn't) of the transaction automatically. [#38344](https://github.com/ClickHouse/ClickHouse/pull/38344) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
|
||||||
#### Performance Improvement
|
#### Performance Improvement
|
||||||
|
|
||||||
* Distinct optimization for sorted columns. Use specialized distinct transformation in case input stream is sorted by column(s) in distinct. Optimization can be applied to pre-distinct, final distinct, or both. Initial implementation by @dimarub2000. [#37803](https://github.com/ClickHouse/ClickHouse/pull/37803) ([Igor Nikonov](https://github.com/devcrafter)).
|
* Distinct optimization for sorted columns. Use specialized distinct transformation in case input stream is sorted by column(s) in distinct. Optimization can be applied to pre-distinct, final distinct, or both. Initial implementation by @dimarub2000. [#37803](https://github.com/ClickHouse/ClickHouse/pull/37803) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
* Improve performance of `ORDER BY`, `MergeTree` merges, window functions using batch version of `BinaryHeap`. [#38022](https://github.com/ClickHouse/ClickHouse/pull/38022) ([Maksim Kita](https://github.com/kitaisreal)).
|
* Improve performance of `ORDER BY`, `MergeTree` merges, window functions using batch version of `BinaryHeap`. [#38022](https://github.com/ClickHouse/ClickHouse/pull/38022) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
* More parallel execution for queries with `FINAL` [#36396](https://github.com/ClickHouse/ClickHouse/pull/36396) ([Nikita Taranov](https://github.com/nickitat)).
|
* More parallel execution for queries with `FINAL` [#36396](https://github.com/ClickHouse/ClickHouse/pull/36396) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
@ -407,7 +554,7 @@
|
|||||||
* Improve performance of insertion to columns of type `JSON`. [#38320](https://github.com/ClickHouse/ClickHouse/pull/38320) ([Anton Popov](https://github.com/CurtizJ)).
|
* Improve performance of insertion to columns of type `JSON`. [#38320](https://github.com/ClickHouse/ClickHouse/pull/38320) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
* Optimized insertion and lookups in the HashTable. [#38413](https://github.com/ClickHouse/ClickHouse/pull/38413) ([Nikita Taranov](https://github.com/nickitat)).
|
* Optimized insertion and lookups in the HashTable. [#38413](https://github.com/ClickHouse/ClickHouse/pull/38413) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
* Fix performance degradation from [#32493](https://github.com/ClickHouse/ClickHouse/issues/32493). [#38417](https://github.com/ClickHouse/ClickHouse/pull/38417) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Fix performance degradation from [#32493](https://github.com/ClickHouse/ClickHouse/issues/32493). [#38417](https://github.com/ClickHouse/ClickHouse/pull/38417) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
* Improve performance of joining with numeric columns using SIMD instructions. [#37235](https://github.com/ClickHouse/ClickHouse/pull/37235) ([zzachimed](https://github.com/zzachimed)). [#38565](https://github.com/ClickHouse/ClickHouse/pull/38565) ([Maksim Kita](https://github.com/kitaisreal)).
|
* Improve performance of joining with numeric columns using SIMD instructions. [#37235](https://github.com/ClickHouse/ClickHouse/pull/37235) ([zzachimed](https://github.com/zzachimed)). [#38565](https://github.com/ClickHouse/ClickHouse/pull/38565) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
* Norm and Distance functions for arrays speed up 1.2-2 times. [#38740](https://github.com/ClickHouse/ClickHouse/pull/38740) ([Alexander Gololobov](https://github.com/davenger)).
|
* Norm and Distance functions for arrays speed up 1.2-2 times. [#38740](https://github.com/ClickHouse/ClickHouse/pull/38740) ([Alexander Gololobov](https://github.com/davenger)).
|
||||||
* Add AVX-512 VBMI optimized `copyOverlap32Shuffle` for LZ4 decompression. In other words, LZ4 decompression performance is improved. [#37891](https://github.com/ClickHouse/ClickHouse/pull/37891) ([Guo Wangyang](https://github.com/guowangy)).
|
* Add AVX-512 VBMI optimized `copyOverlap32Shuffle` for LZ4 decompression. In other words, LZ4 decompression performance is improved. [#37891](https://github.com/ClickHouse/ClickHouse/pull/37891) ([Guo Wangyang](https://github.com/guowangy)).
|
||||||
* `ORDER BY (a, b)` will use all the same benefits as `ORDER BY a, b`. [#38873](https://github.com/ClickHouse/ClickHouse/pull/38873) ([Igor Nikonov](https://github.com/devcrafter)).
|
* `ORDER BY (a, b)` will use all the same benefits as `ORDER BY a, b`. [#38873](https://github.com/ClickHouse/ClickHouse/pull/38873) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
@ -419,6 +566,7 @@
|
|||||||
* The table `system.asynchronous_metric_log` is further optimized for storage space. This closes [#38134](https://github.com/ClickHouse/ClickHouse/issues/38134). See the [YouTube video](https://www.youtube.com/watch?v=0fSp9SF8N8A). [#38428](https://github.com/ClickHouse/ClickHouse/pull/38428) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* The table `system.asynchronous_metric_log` is further optimized for storage space. This closes [#38134](https://github.com/ClickHouse/ClickHouse/issues/38134). See the [YouTube video](https://www.youtube.com/watch?v=0fSp9SF8N8A). [#38428](https://github.com/ClickHouse/ClickHouse/pull/38428) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
#### Improvement
|
#### Improvement
|
||||||
|
|
||||||
* Support SQL standard CREATE INDEX and DROP INDEX syntax. [#35166](https://github.com/ClickHouse/ClickHouse/pull/35166) ([Jianmei Zhang](https://github.com/zhangjmruc)).
|
* Support SQL standard CREATE INDEX and DROP INDEX syntax. [#35166](https://github.com/ClickHouse/ClickHouse/pull/35166) ([Jianmei Zhang](https://github.com/zhangjmruc)).
|
||||||
* Send profile events for INSERT queries (previously only SELECT was supported). [#37391](https://github.com/ClickHouse/ClickHouse/pull/37391) ([Azat Khuzhin](https://github.com/azat)).
|
* Send profile events for INSERT queries (previously only SELECT was supported). [#37391](https://github.com/ClickHouse/ClickHouse/pull/37391) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
* Implement in order aggregation (`optimize_aggregation_in_order`) for fully materialized projections. [#37469](https://github.com/ClickHouse/ClickHouse/pull/37469) ([Azat Khuzhin](https://github.com/azat)).
|
* Implement in order aggregation (`optimize_aggregation_in_order`) for fully materialized projections. [#37469](https://github.com/ClickHouse/ClickHouse/pull/37469) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
@ -464,6 +612,7 @@
|
|||||||
* Allow to declare `RabbitMQ` queue without default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)).
|
* Allow to declare `RabbitMQ` queue without default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)).
|
||||||
|
|
||||||
#### Build/Testing/Packaging Improvement
|
#### Build/Testing/Packaging Improvement
|
||||||
|
|
||||||
* Apply Clang Thread Safety Analysis (TSA) annotations to ClickHouse. [#38068](https://github.com/ClickHouse/ClickHouse/pull/38068) ([Robert Schulze](https://github.com/rschu1ze)).
|
* Apply Clang Thread Safety Analysis (TSA) annotations to ClickHouse. [#38068](https://github.com/ClickHouse/ClickHouse/pull/38068) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
* Adapt universal installation script for FreeBSD. [#39302](https://github.com/ClickHouse/ClickHouse/pull/39302) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Adapt universal installation script for FreeBSD. [#39302](https://github.com/ClickHouse/ClickHouse/pull/39302) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
* Preparation for building on `s390x` platform. [#39193](https://github.com/ClickHouse/ClickHouse/pull/39193) ([Harry Lee](https://github.com/HarryLeeIBM)).
|
* Preparation for building on `s390x` platform. [#39193](https://github.com/ClickHouse/ClickHouse/pull/39193) ([Harry Lee](https://github.com/HarryLeeIBM)).
|
||||||
@ -473,6 +622,7 @@
|
|||||||
* Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
* Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
|
||||||
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
||||||
|
|
||||||
* Fix rounding for `Decimal128/Decimal256` with more than 19-digits long scale. [#38027](https://github.com/ClickHouse/ClickHouse/pull/38027) ([Igor Nikonov](https://github.com/devcrafter)).
|
* Fix rounding for `Decimal128/Decimal256` with more than 19-digits long scale. [#38027](https://github.com/ClickHouse/ClickHouse/pull/38027) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
* Fixed crash caused by data race in storage `Hive` (integration table engine). [#38887](https://github.com/ClickHouse/ClickHouse/pull/38887) ([lgbo](https://github.com/lgbo-ustc)).
|
* Fixed crash caused by data race in storage `Hive` (integration table engine). [#38887](https://github.com/ClickHouse/ClickHouse/pull/38887) ([lgbo](https://github.com/lgbo-ustc)).
|
||||||
* Fix crash when executing GRANT ALL ON *.* with ON CLUSTER. It was broken in https://github.com/ClickHouse/ClickHouse/pull/35767. This closes [#38618](https://github.com/ClickHouse/ClickHouse/issues/38618). [#38674](https://github.com/ClickHouse/ClickHouse/pull/38674) ([Vitaly Baranov](https://github.com/vitlibar)).
|
* Fix crash when executing GRANT ALL ON *.* with ON CLUSTER. It was broken in https://github.com/ClickHouse/ClickHouse/pull/35767. This closes [#38618](https://github.com/ClickHouse/ClickHouse/issues/38618). [#38674](https://github.com/ClickHouse/ClickHouse/pull/38674) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
@ -529,6 +679,7 @@
|
|||||||
### <a id="226"></a> ClickHouse release 22.6, 2022-06-16
|
### <a id="226"></a> ClickHouse release 22.6, 2022-06-16
|
||||||
|
|
||||||
#### Backward Incompatible Change
|
#### Backward Incompatible Change
|
||||||
|
|
||||||
* Remove support for octal number literals in SQL. In previous versions they were parsed as Float64. [#37765](https://github.com/ClickHouse/ClickHouse/pull/37765) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
* Remove support for octal number literals in SQL. In previous versions they were parsed as Float64. [#37765](https://github.com/ClickHouse/ClickHouse/pull/37765) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
* Changes how settings using `seconds` as type are parsed to support floating point values (for example: `max_execution_time=0.5`). Infinity or NaN values will throw an exception. [#37187](https://github.com/ClickHouse/ClickHouse/pull/37187) ([Raúl Marín](https://github.com/Algunenano)).
|
* Changes how settings using `seconds` as type are parsed to support floating point values (for example: `max_execution_time=0.5`). Infinity or NaN values will throw an exception. [#37187](https://github.com/ClickHouse/ClickHouse/pull/37187) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
* Changed format of binary serialization of columns of experimental type `Object`. New format is more convenient to implement by third-party clients. [#37482](https://github.com/ClickHouse/ClickHouse/pull/37482) ([Anton Popov](https://github.com/CurtizJ)).
|
* Changed format of binary serialization of columns of experimental type `Object`. New format is more convenient to implement by third-party clients. [#37482](https://github.com/ClickHouse/ClickHouse/pull/37482) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
@ -537,6 +688,7 @@
|
|||||||
* If you run different ClickHouse versions on a cluster with AArch64 CPU or mix AArch64 and amd64 on a cluster, and use distributed queries with GROUP BY multiple keys of fixed-size type that fit in 256 bits but don't fit in 64 bits, and the size of the result is huge, the data will not be fully aggregated in the result of these queries during upgrade. Workaround: upgrade with downtime instead of a rolling upgrade.
|
* If you run different ClickHouse versions on a cluster with AArch64 CPU or mix AArch64 and amd64 on a cluster, and use distributed queries with GROUP BY multiple keys of fixed-size type that fit in 256 bits but don't fit in 64 bits, and the size of the result is huge, the data will not be fully aggregated in the result of these queries during upgrade. Workaround: upgrade with downtime instead of a rolling upgrade.
|
||||||
|
|
||||||
#### New Feature
|
#### New Feature
|
||||||
|
|
||||||
* Add `GROUPING` function. It allows to disambiguate the records in the queries with `ROLLUP`, `CUBE` or `GROUPING SETS`. Closes [#19426](https://github.com/ClickHouse/ClickHouse/issues/19426). [#37163](https://github.com/ClickHouse/ClickHouse/pull/37163) ([Dmitry Novik](https://github.com/novikd)).
|
* Add `GROUPING` function. It allows to disambiguate the records in the queries with `ROLLUP`, `CUBE` or `GROUPING SETS`. Closes [#19426](https://github.com/ClickHouse/ClickHouse/issues/19426). [#37163](https://github.com/ClickHouse/ClickHouse/pull/37163) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
* A new codec [FPC](https://userweb.cs.txstate.edu/~burtscher/papers/dcc07a.pdf) algorithm for floating point data compression. [#37553](https://github.com/ClickHouse/ClickHouse/pull/37553) ([Mikhail Guzov](https://github.com/koloshmet)).
|
* A new codec [FPC](https://userweb.cs.txstate.edu/~burtscher/papers/dcc07a.pdf) algorithm for floating point data compression. [#37553](https://github.com/ClickHouse/ClickHouse/pull/37553) ([Mikhail Guzov](https://github.com/koloshmet)).
|
||||||
* Add new columnar JSON formats: `JSONColumns`, `JSONCompactColumns`, `JSONColumnsWithMetadata`. Closes [#36338](https://github.com/ClickHouse/ClickHouse/issues/36338) Closes [#34509](https://github.com/ClickHouse/ClickHouse/issues/34509). [#36975](https://github.com/ClickHouse/ClickHouse/pull/36975) ([Kruglov Pavel](https://github.com/Avogar)).
|
* Add new columnar JSON formats: `JSONColumns`, `JSONCompactColumns`, `JSONColumnsWithMetadata`. Closes [#36338](https://github.com/ClickHouse/ClickHouse/issues/36338) Closes [#34509](https://github.com/ClickHouse/ClickHouse/issues/34509). [#36975](https://github.com/ClickHouse/ClickHouse/pull/36975) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
@ -557,11 +709,13 @@
|
|||||||
* Added `SYSTEM UNFREEZE` query that deletes the whole backup regardless if the corresponding table is deleted or not. [#36424](https://github.com/ClickHouse/ClickHouse/pull/36424) ([Vadim Volodin](https://github.com/PolyProgrammist)).
|
* Added `SYSTEM UNFREEZE` query that deletes the whole backup regardless if the corresponding table is deleted or not. [#36424](https://github.com/ClickHouse/ClickHouse/pull/36424) ([Vadim Volodin](https://github.com/PolyProgrammist)).
|
||||||
|
|
||||||
#### Experimental Feature
|
#### Experimental Feature
|
||||||
|
|
||||||
* Enables `POPULATE` for `WINDOW VIEW`. [#36945](https://github.com/ClickHouse/ClickHouse/pull/36945) ([vxider](https://github.com/Vxider)).
|
* Enables `POPULATE` for `WINDOW VIEW`. [#36945](https://github.com/ClickHouse/ClickHouse/pull/36945) ([vxider](https://github.com/Vxider)).
|
||||||
* `ALTER TABLE ... MODIFY QUERY` support for `WINDOW VIEW`. [#37188](https://github.com/ClickHouse/ClickHouse/pull/37188) ([vxider](https://github.com/Vxider)).
|
* `ALTER TABLE ... MODIFY QUERY` support for `WINDOW VIEW`. [#37188](https://github.com/ClickHouse/ClickHouse/pull/37188) ([vxider](https://github.com/Vxider)).
|
||||||
* This PR changes the behavior of the `ENGINE` syntax in `WINDOW VIEW`, to make it like in `MATERIALIZED VIEW`. [#37214](https://github.com/ClickHouse/ClickHouse/pull/37214) ([vxider](https://github.com/Vxider)).
|
* This PR changes the behavior of the `ENGINE` syntax in `WINDOW VIEW`, to make it like in `MATERIALIZED VIEW`. [#37214](https://github.com/ClickHouse/ClickHouse/pull/37214) ([vxider](https://github.com/Vxider)).
|
||||||
|
|
||||||
#### Performance Improvement
|
#### Performance Improvement
|
||||||
|
|
||||||
* Added numerous optimizations for ARM NEON [#38093](https://github.com/ClickHouse/ClickHouse/pull/38093)([Daniel Kutenin](https://github.com/danlark1)), ([Alexandra Pilipyuk](https://github.com/chalice19)) Note: if you run different ClickHouse versions on a cluster with ARM CPU and use distributed queries with GROUP BY multiple keys of fixed-size type that fit in 256 bits but don't fit in 64 bits, the result of the aggregation query will be wrong during upgrade. Workaround: upgrade with downtime instead of a rolling upgrade.
|
* Added numerous optimizations for ARM NEON [#38093](https://github.com/ClickHouse/ClickHouse/pull/38093)([Daniel Kutenin](https://github.com/danlark1)), ([Alexandra Pilipyuk](https://github.com/chalice19)) Note: if you run different ClickHouse versions on a cluster with ARM CPU and use distributed queries with GROUP BY multiple keys of fixed-size type that fit in 256 bits but don't fit in 64 bits, the result of the aggregation query will be wrong during upgrade. Workaround: upgrade with downtime instead of a rolling upgrade.
|
||||||
* Improve performance and memory usage for select of subset of columns for formats Native, Protobuf, CapnProto, JSONEachRow, TSKV, all formats with suffixes WithNames/WithNamesAndTypes. Previously while selecting only subset of columns from files in these formats all columns were read and stored in memory. Now only required columns are read. This PR enables setting `input_format_skip_unknown_fields` by default, because otherwise in case of select of subset of columns exception will be thrown. [#37192](https://github.com/ClickHouse/ClickHouse/pull/37192) ([Kruglov Pavel](https://github.com/Avogar)).
|
* Improve performance and memory usage for select of subset of columns for formats Native, Protobuf, CapnProto, JSONEachRow, TSKV, all formats with suffixes WithNames/WithNamesAndTypes. Previously while selecting only subset of columns from files in these formats all columns were read and stored in memory. Now only required columns are read. This PR enables setting `input_format_skip_unknown_fields` by default, because otherwise in case of select of subset of columns exception will be thrown. [#37192](https://github.com/ClickHouse/ClickHouse/pull/37192) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
* Now more filters can be pushed down for join. [#37472](https://github.com/ClickHouse/ClickHouse/pull/37472) ([Amos Bird](https://github.com/amosbird)).
|
* Now more filters can be pushed down for join. [#37472](https://github.com/ClickHouse/ClickHouse/pull/37472) ([Amos Bird](https://github.com/amosbird)).
|
||||||
@ -592,6 +746,7 @@
|
|||||||
* In function: CompressedWriteBuffer::nextImpl(), there is an unnecessary write-copy step that would happen frequently during inserting data. Below shows the differentiation with this patch: - Before: 1. Compress "working_buffer" into "compressed_buffer" 2. write-copy into "out" - After: Directly Compress "working_buffer" into "out". [#37242](https://github.com/ClickHouse/ClickHouse/pull/37242) ([jasperzhu](https://github.com/jinjunzh)).
|
* In function: CompressedWriteBuffer::nextImpl(), there is an unnecessary write-copy step that would happen frequently during inserting data. Below shows the differentiation with this patch: - Before: 1. Compress "working_buffer" into "compressed_buffer" 2. write-copy into "out" - After: Directly Compress "working_buffer" into "out". [#37242](https://github.com/ClickHouse/ClickHouse/pull/37242) ([jasperzhu](https://github.com/jinjunzh)).
|
||||||
|
|
||||||
#### Improvement
|
#### Improvement
|
||||||
|
|
||||||
* Support types with non-standard defaults in ROLLUP, CUBE, GROUPING SETS. Closes [#37360](https://github.com/ClickHouse/ClickHouse/issues/37360). [#37667](https://github.com/ClickHouse/ClickHouse/pull/37667) ([Dmitry Novik](https://github.com/novikd)).
|
* Support types with non-standard defaults in ROLLUP, CUBE, GROUPING SETS. Closes [#37360](https://github.com/ClickHouse/ClickHouse/issues/37360). [#37667](https://github.com/ClickHouse/ClickHouse/pull/37667) ([Dmitry Novik](https://github.com/novikd)).
|
||||||
* Fix stack traces collection on ARM. Closes [#37044](https://github.com/ClickHouse/ClickHouse/issues/37044). Closes [#15638](https://github.com/ClickHouse/ClickHouse/issues/15638). [#37797](https://github.com/ClickHouse/ClickHouse/pull/37797) ([Maksim Kita](https://github.com/kitaisreal)).
|
* Fix stack traces collection on ARM. Closes [#37044](https://github.com/ClickHouse/ClickHouse/issues/37044). Closes [#15638](https://github.com/ClickHouse/ClickHouse/issues/15638). [#37797](https://github.com/ClickHouse/ClickHouse/pull/37797) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
* Client will try every IP address returned by DNS resolution until successful connection. [#37273](https://github.com/ClickHouse/ClickHouse/pull/37273) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
* Client will try every IP address returned by DNS resolution until successful connection. [#37273](https://github.com/ClickHouse/ClickHouse/pull/37273) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
@ -633,6 +788,7 @@
|
|||||||
* Add implicit grants with grant option too. For example `GRANT CREATE TABLE ON test.* TO A WITH GRANT OPTION` now allows `A` to execute `GRANT CREATE VIEW ON test.* TO B`. [#38017](https://github.com/ClickHouse/ClickHouse/pull/38017) ([Vitaly Baranov](https://github.com/vitlibar)).
|
* Add implicit grants with grant option too. For example `GRANT CREATE TABLE ON test.* TO A WITH GRANT OPTION` now allows `A` to execute `GRANT CREATE VIEW ON test.* TO B`. [#38017](https://github.com/ClickHouse/ClickHouse/pull/38017) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
|
||||||
#### Build/Testing/Packaging Improvement
|
#### Build/Testing/Packaging Improvement
|
||||||
|
|
||||||
* Use `clang-14` and LLVM infrastructure version 14 for builds. This closes [#34681](https://github.com/ClickHouse/ClickHouse/issues/34681). [#34754](https://github.com/ClickHouse/ClickHouse/pull/34754) ([Alexey Milovidov](https://github.com/alexey-milovidov)). Note: `clang-14` has [a bug](https://github.com/google/sanitizers/issues/1540) in ThreadSanitizer that makes our CI work worse.
|
* Use `clang-14` and LLVM infrastructure version 14 for builds. This closes [#34681](https://github.com/ClickHouse/ClickHouse/issues/34681). [#34754](https://github.com/ClickHouse/ClickHouse/pull/34754) ([Alexey Milovidov](https://github.com/alexey-milovidov)). Note: `clang-14` has [a bug](https://github.com/google/sanitizers/issues/1540) in ThreadSanitizer that makes our CI work worse.
|
||||||
* Allow to drop privileges at startup. This simplifies Docker images. Closes [#36293](https://github.com/ClickHouse/ClickHouse/issues/36293). [#36341](https://github.com/ClickHouse/ClickHouse/pull/36341) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
* Allow to drop privileges at startup. This simplifies Docker images. Closes [#36293](https://github.com/ClickHouse/ClickHouse/issues/36293). [#36341](https://github.com/ClickHouse/ClickHouse/pull/36341) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
* Add docs spellcheck to CI. [#37790](https://github.com/ClickHouse/ClickHouse/pull/37790) ([Vladimir C](https://github.com/vdimir)).
|
* Add docs spellcheck to CI. [#37790](https://github.com/ClickHouse/ClickHouse/pull/37790) ([Vladimir C](https://github.com/vdimir)).
|
||||||
@ -690,7 +846,6 @@
|
|||||||
* Fix possible heap-use-after-free error when reading system.projection_parts and system.projection_parts_columns . This fixes [#37184](https://github.com/ClickHouse/ClickHouse/issues/37184). [#37185](https://github.com/ClickHouse/ClickHouse/pull/37185) ([Amos Bird](https://github.com/amosbird)).
|
* Fix possible heap-use-after-free error when reading system.projection_parts and system.projection_parts_columns . This fixes [#37184](https://github.com/ClickHouse/ClickHouse/issues/37184). [#37185](https://github.com/ClickHouse/ClickHouse/pull/37185) ([Amos Bird](https://github.com/amosbird)).
|
||||||
* Fixed `DateTime64` fractional seconds behavior prior to Unix epoch. [#37697](https://github.com/ClickHouse/ClickHouse/pull/37697) ([Andrey Zvonov](https://github.com/zvonand)). [#37039](https://github.com/ClickHouse/ClickHouse/pull/37039) ([李扬](https://github.com/taiyang-li)).
|
* Fixed `DateTime64` fractional seconds behavior prior to Unix epoch. [#37697](https://github.com/ClickHouse/ClickHouse/pull/37697) ([Andrey Zvonov](https://github.com/zvonand)). [#37039](https://github.com/ClickHouse/ClickHouse/pull/37039) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
|
||||||
|
|
||||||
### <a id="225"></a> ClickHouse release 22.5, 2022-05-19
|
### <a id="225"></a> ClickHouse release 22.5, 2022-05-19
|
||||||
|
|
||||||
#### Upgrade Notes
|
#### Upgrade Notes
|
||||||
@ -743,7 +898,7 @@
|
|||||||
* Implement partial GROUP BY key for optimize_aggregation_in_order. [#35111](https://github.com/ClickHouse/ClickHouse/pull/35111) ([Azat Khuzhin](https://github.com/azat)).
|
* Implement partial GROUP BY key for optimize_aggregation_in_order. [#35111](https://github.com/ClickHouse/ClickHouse/pull/35111) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
|
||||||
#### Improvement
|
#### Improvement
|
||||||
|
|
||||||
* Show names of erroneous files in case of parsing errors while executing table functions `file`, `s3` and `url`. [#36314](https://github.com/ClickHouse/ClickHouse/pull/36314) ([Anton Popov](https://github.com/CurtizJ)).
|
* Show names of erroneous files in case of parsing errors while executing table functions `file`, `s3` and `url`. [#36314](https://github.com/ClickHouse/ClickHouse/pull/36314) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
* Allowed to increase the number of threads for executing background operations (merges, mutations, moves and fetches) at runtime if they are specified at top level config. [#36425](https://github.com/ClickHouse/ClickHouse/pull/36425) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
* Allowed to increase the number of threads for executing background operations (merges, mutations, moves and fetches) at runtime if they are specified at top level config. [#36425](https://github.com/ClickHouse/ClickHouse/pull/36425) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
* Now date time conversion functions that generates time before 1970-01-01 00:00:00 with partial hours/minutes timezones will be saturated to zero instead of overflow. This is the continuation of https://github.com/ClickHouse/ClickHouse/pull/29953 which addresses https://github.com/ClickHouse/ClickHouse/pull/29953#discussion_r800550280 . Mark as improvement because it's implementation defined behavior (and very rare case) and we are allowed to break it. [#36656](https://github.com/ClickHouse/ClickHouse/pull/36656) ([Amos Bird](https://github.com/amosbird)).
|
* Now date time conversion functions that generates time before 1970-01-01 00:00:00 with partial hours/minutes timezones will be saturated to zero instead of overflow. This is the continuation of https://github.com/ClickHouse/ClickHouse/pull/29953 which addresses https://github.com/ClickHouse/ClickHouse/pull/29953#discussion_r800550280 . Mark as improvement because it's implementation defined behavior (and very rare case) and we are allowed to break it. [#36656](https://github.com/ClickHouse/ClickHouse/pull/36656) ([Amos Bird](https://github.com/amosbird)).
|
||||||
@ -852,7 +1007,6 @@
|
|||||||
* Fix ALTER DROP COLUMN of nested column with compact parts (i.e. `ALTER TABLE x DROP COLUMN n`, when there is column `n.d`). [#35797](https://github.com/ClickHouse/ClickHouse/pull/35797) ([Azat Khuzhin](https://github.com/azat)).
|
* Fix ALTER DROP COLUMN of nested column with compact parts (i.e. `ALTER TABLE x DROP COLUMN n`, when there is column `n.d`). [#35797](https://github.com/ClickHouse/ClickHouse/pull/35797) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
* Fix substring function range error length when `offset` and `length` is negative constant and `s` is not constant. [#33861](https://github.com/ClickHouse/ClickHouse/pull/33861) ([RogerYK](https://github.com/RogerYK)).
|
* Fix substring function range error length when `offset` and `length` is negative constant and `s` is not constant. [#33861](https://github.com/ClickHouse/ClickHouse/pull/33861) ([RogerYK](https://github.com/RogerYK)).
|
||||||
|
|
||||||
|
|
||||||
### <a id="224"></a> ClickHouse release 22.4, 2022-04-19
|
### <a id="224"></a> ClickHouse release 22.4, 2022-04-19
|
||||||
|
|
||||||
#### Backward Incompatible Change
|
#### Backward Incompatible Change
|
||||||
@ -1004,8 +1158,7 @@
|
|||||||
* Fix mutations in tables with enabled sparse columns. [#35284](https://github.com/ClickHouse/ClickHouse/pull/35284) ([Anton Popov](https://github.com/CurtizJ)).
|
* Fix mutations in tables with enabled sparse columns. [#35284](https://github.com/ClickHouse/ClickHouse/pull/35284) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
* Do not delay final part writing by default (fixes possible `Memory limit exceeded` during `INSERT` by adding `max_insert_delayed_streams_for_parallel_write` with default to 1000 for writes to s3 and disabled as before otherwise). [#34780](https://github.com/ClickHouse/ClickHouse/pull/34780) ([Azat Khuzhin](https://github.com/azat)).
|
* Do not delay final part writing by default (fixes possible `Memory limit exceeded` during `INSERT` by adding `max_insert_delayed_streams_for_parallel_write` with default to 1000 for writes to s3 and disabled as before otherwise). [#34780](https://github.com/ClickHouse/ClickHouse/pull/34780) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
|
||||||
|
### <a id="223"></a> ClickHouse release v22.3-lts, 2022-03-17
|
||||||
## <a id="223"></a> ClickHouse release v22.3-lts, 2022-03-17
|
|
||||||
|
|
||||||
#### Backward Incompatible Change
|
#### Backward Incompatible Change
|
||||||
|
|
||||||
@ -1132,7 +1285,6 @@
|
|||||||
* Fix incorrect result of trivial count query when part movement feature is used [#34089](https://github.com/ClickHouse/ClickHouse/issues/34089). [#34385](https://github.com/ClickHouse/ClickHouse/pull/34385) ([nvartolomei](https://github.com/nvartolomei)).
|
* Fix incorrect result of trivial count query when part movement feature is used [#34089](https://github.com/ClickHouse/ClickHouse/issues/34089). [#34385](https://github.com/ClickHouse/ClickHouse/pull/34385) ([nvartolomei](https://github.com/nvartolomei)).
|
||||||
* Fix inconsistency of `max_query_size` limitation in distributed subqueries. [#34078](https://github.com/ClickHouse/ClickHouse/pull/34078) ([Chao Ma](https://github.com/godliness)).
|
* Fix inconsistency of `max_query_size` limitation in distributed subqueries. [#34078](https://github.com/ClickHouse/ClickHouse/pull/34078) ([Chao Ma](https://github.com/godliness)).
|
||||||
|
|
||||||
|
|
||||||
### <a id="222"></a> ClickHouse release v22.2, 2022-02-17
|
### <a id="222"></a> ClickHouse release v22.2, 2022-02-17
|
||||||
|
|
||||||
#### Upgrade Notes
|
#### Upgrade Notes
|
||||||
@ -1308,7 +1460,6 @@
|
|||||||
* Fix issue [#18206](https://github.com/ClickHouse/ClickHouse/issues/18206). [#33977](https://github.com/ClickHouse/ClickHouse/pull/33977) ([Vitaly Baranov](https://github.com/vitlibar)).
|
* Fix issue [#18206](https://github.com/ClickHouse/ClickHouse/issues/18206). [#33977](https://github.com/ClickHouse/ClickHouse/pull/33977) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
* This PR allows using multiple LDAP storages in the same list of user directories. It worked earlier but was broken because LDAP tests are disabled (they are part of the testflows tests). [#33574](https://github.com/ClickHouse/ClickHouse/pull/33574) ([Vitaly Baranov](https://github.com/vitlibar)).
|
* This PR allows using multiple LDAP storages in the same list of user directories. It worked earlier but was broken because LDAP tests are disabled (they are part of the testflows tests). [#33574](https://github.com/ClickHouse/ClickHouse/pull/33574) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
|
||||||
|
|
||||||
### <a id="221"></a> ClickHouse release v22.1, 2022-01-18
|
### <a id="221"></a> ClickHouse release v22.1, 2022-01-18
|
||||||
|
|
||||||
#### Upgrade Notes
|
#### Upgrade Notes
|
||||||
@ -1335,7 +1486,6 @@
|
|||||||
* Add function `decodeURLFormComponent` slightly different to `decodeURLComponent`. Close [#10298](https://github.com/ClickHouse/ClickHouse/issues/10298). [#33451](https://github.com/ClickHouse/ClickHouse/pull/33451) ([SuperDJY](https://github.com/cmsxbc)).
|
* Add function `decodeURLFormComponent` slightly different to `decodeURLComponent`. Close [#10298](https://github.com/ClickHouse/ClickHouse/issues/10298). [#33451](https://github.com/ClickHouse/ClickHouse/pull/33451) ([SuperDJY](https://github.com/cmsxbc)).
|
||||||
* Allow to split `GraphiteMergeTree` rollup rules for plain/tagged metrics (optional rule_type field). [#33494](https://github.com/ClickHouse/ClickHouse/pull/33494) ([Michail Safronov](https://github.com/msaf1980)).
|
* Allow to split `GraphiteMergeTree` rollup rules for plain/tagged metrics (optional rule_type field). [#33494](https://github.com/ClickHouse/ClickHouse/pull/33494) ([Michail Safronov](https://github.com/msaf1980)).
|
||||||
|
|
||||||
|
|
||||||
#### Performance Improvement
|
#### Performance Improvement
|
||||||
|
|
||||||
* Support moving conditions to `PREWHERE` (setting `optimize_move_to_prewhere`) for tables of `Merge` engine if its all underlying tables supports `PREWHERE`. [#33300](https://github.com/ClickHouse/ClickHouse/pull/33300) ([Anton Popov](https://github.com/CurtizJ)).
|
* Support moving conditions to `PREWHERE` (setting `optimize_move_to_prewhere`) for tables of `Merge` engine if its all underlying tables supports `PREWHERE`. [#33300](https://github.com/ClickHouse/ClickHouse/pull/33300) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
@ -1351,7 +1501,6 @@
|
|||||||
* Optimize selecting of MergeTree parts that can be moved between volumes. [#33225](https://github.com/ClickHouse/ClickHouse/pull/33225) ([OnePiece](https://github.com/zhongyuankai)).
|
* Optimize selecting of MergeTree parts that can be moved between volumes. [#33225](https://github.com/ClickHouse/ClickHouse/pull/33225) ([OnePiece](https://github.com/zhongyuankai)).
|
||||||
* Fix `sparse_hashed` dict performance with sequential keys (wrong hash function). [#32536](https://github.com/ClickHouse/ClickHouse/pull/32536) ([Azat Khuzhin](https://github.com/azat)).
|
* Fix `sparse_hashed` dict performance with sequential keys (wrong hash function). [#32536](https://github.com/ClickHouse/ClickHouse/pull/32536) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
|
||||||
|
|
||||||
#### Experimental Feature
|
#### Experimental Feature
|
||||||
|
|
||||||
* Parallel reading from multiple replicas within a shard during distributed query without using sample key. To enable this, set `allow_experimental_parallel_reading_from_replicas = 1` and `max_parallel_replicas` to any number. This closes [#26748](https://github.com/ClickHouse/ClickHouse/issues/26748). [#29279](https://github.com/ClickHouse/ClickHouse/pull/29279) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
* Parallel reading from multiple replicas within a shard during distributed query without using sample key. To enable this, set `allow_experimental_parallel_reading_from_replicas = 1` and `max_parallel_replicas` to any number. This closes [#26748](https://github.com/ClickHouse/ClickHouse/issues/26748). [#29279](https://github.com/ClickHouse/ClickHouse/pull/29279) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
@ -1364,7 +1513,6 @@
|
|||||||
* Fix ACL with explicit digit hash in `clickhouse-keeper`: now the behavior consistent with ZooKeeper and generated digest is always accepted. [#33249](https://github.com/ClickHouse/ClickHouse/pull/33249) ([小路](https://github.com/nicelulu)). [#33246](https://github.com/ClickHouse/ClickHouse/pull/33246).
|
* Fix ACL with explicit digit hash in `clickhouse-keeper`: now the behavior consistent with ZooKeeper and generated digest is always accepted. [#33249](https://github.com/ClickHouse/ClickHouse/pull/33249) ([小路](https://github.com/nicelulu)). [#33246](https://github.com/ClickHouse/ClickHouse/pull/33246).
|
||||||
* Fix unexpected projection removal when detaching parts. [#32067](https://github.com/ClickHouse/ClickHouse/pull/32067) ([Amos Bird](https://github.com/amosbird)).
|
* Fix unexpected projection removal when detaching parts. [#32067](https://github.com/ClickHouse/ClickHouse/pull/32067) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
|
||||||
|
|
||||||
#### Improvement
|
#### Improvement
|
||||||
|
|
||||||
* Now date time conversion functions that generates time before `1970-01-01 00:00:00` will be saturated to zero instead of overflow. [#29953](https://github.com/ClickHouse/ClickHouse/pull/29953) ([Amos Bird](https://github.com/amosbird)). It also fixes a bug in index analysis if date truncation function would yield result before the Unix epoch.
|
* Now date time conversion functions that generates time before `1970-01-01 00:00:00` will be saturated to zero instead of overflow. [#29953](https://github.com/ClickHouse/ClickHouse/pull/29953) ([Amos Bird](https://github.com/amosbird)). It also fixes a bug in index analysis if date truncation function would yield result before the Unix epoch.
|
||||||
@ -1411,7 +1559,6 @@
|
|||||||
* Updating `modification_time` for data part in `system.parts` after part movement [#32964](https://github.com/ClickHouse/ClickHouse/issues/32964). [#32965](https://github.com/ClickHouse/ClickHouse/pull/32965) ([save-my-heart](https://github.com/save-my-heart)).
|
* Updating `modification_time` for data part in `system.parts` after part movement [#32964](https://github.com/ClickHouse/ClickHouse/issues/32964). [#32965](https://github.com/ClickHouse/ClickHouse/pull/32965) ([save-my-heart](https://github.com/save-my-heart)).
|
||||||
* Potential issue, cannot be exploited: integer overflow may happen in array resize. [#33024](https://github.com/ClickHouse/ClickHouse/pull/33024) ([varadarajkumar](https://github.com/varadarajkumar)).
|
* Potential issue, cannot be exploited: integer overflow may happen in array resize. [#33024](https://github.com/ClickHouse/ClickHouse/pull/33024) ([varadarajkumar](https://github.com/varadarajkumar)).
|
||||||
|
|
||||||
|
|
||||||
#### Build/Testing/Packaging Improvement
|
#### Build/Testing/Packaging Improvement
|
||||||
|
|
||||||
* Add packages, functional tests and Docker builds for AArch64 (ARM) version of ClickHouse. [#32911](https://github.com/ClickHouse/ClickHouse/pull/32911) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). [#32415](https://github.com/ClickHouse/ClickHouse/pull/32415)
|
* Add packages, functional tests and Docker builds for AArch64 (ARM) version of ClickHouse. [#32911](https://github.com/ClickHouse/ClickHouse/pull/32911) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). [#32415](https://github.com/ClickHouse/ClickHouse/pull/32415)
|
||||||
@ -1426,7 +1573,6 @@
|
|||||||
* Inject git information into clickhouse binary file. So we can get source code revision easily from clickhouse binary file. [#33124](https://github.com/ClickHouse/ClickHouse/pull/33124) ([taiyang-li](https://github.com/taiyang-li)).
|
* Inject git information into clickhouse binary file. So we can get source code revision easily from clickhouse binary file. [#33124](https://github.com/ClickHouse/ClickHouse/pull/33124) ([taiyang-li](https://github.com/taiyang-li)).
|
||||||
* Remove obsolete code from ConfigProcessor. Yandex specific code is not used anymore. The code contained one minor defect. This defect was reported by [Mallik Hassan](https://github.com/SadiHassan) in [#33032](https://github.com/ClickHouse/ClickHouse/issues/33032). This closes [#33032](https://github.com/ClickHouse/ClickHouse/issues/33032). [#33026](https://github.com/ClickHouse/ClickHouse/pull/33026) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
* Remove obsolete code from ConfigProcessor. Yandex specific code is not used anymore. The code contained one minor defect. This defect was reported by [Mallik Hassan](https://github.com/SadiHassan) in [#33032](https://github.com/ClickHouse/ClickHouse/issues/33032). This closes [#33032](https://github.com/ClickHouse/ClickHouse/issues/33032). [#33026](https://github.com/ClickHouse/ClickHouse/pull/33026) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
|
||||||
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
||||||
|
|
||||||
* Several fixes for format parsing. This is relevant if `clickhouse-server` is open for write access to adversary. Specifically crafted input data for `Native` format may lead to reading uninitialized memory or crash. This is relevant if `clickhouse-server` is open for write access to adversary. [#33050](https://github.com/ClickHouse/ClickHouse/pull/33050) ([Heena Bansal](https://github.com/HeenaBansal2009)). Fixed Apache Avro Union type index out of boundary issue in Apache Avro binary format. [#33022](https://github.com/ClickHouse/ClickHouse/pull/33022) ([Harry Lee](https://github.com/HarryLeeIBM)). Fix null pointer dereference in `LowCardinality` data when deserializing `LowCardinality` data in the Native format. [#33021](https://github.com/ClickHouse/ClickHouse/pull/33021) ([Harry Lee](https://github.com/HarryLeeIBM)).
|
* Several fixes for format parsing. This is relevant if `clickhouse-server` is open for write access to adversary. Specifically crafted input data for `Native` format may lead to reading uninitialized memory or crash. This is relevant if `clickhouse-server` is open for write access to adversary. [#33050](https://github.com/ClickHouse/ClickHouse/pull/33050) ([Heena Bansal](https://github.com/HeenaBansal2009)). Fixed Apache Avro Union type index out of boundary issue in Apache Avro binary format. [#33022](https://github.com/ClickHouse/ClickHouse/pull/33022) ([Harry Lee](https://github.com/HarryLeeIBM)). Fix null pointer dereference in `LowCardinality` data when deserializing `LowCardinality` data in the Native format. [#33021](https://github.com/ClickHouse/ClickHouse/pull/33021) ([Harry Lee](https://github.com/HarryLeeIBM)).
|
||||||
@ -1485,5 +1631,4 @@
|
|||||||
* Fix possible crash (or incorrect result) in case of `LowCardinality` arguments of window function. Fixes [#31114](https://github.com/ClickHouse/ClickHouse/issues/31114). [#31888](https://github.com/ClickHouse/ClickHouse/pull/31888) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
* Fix possible crash (or incorrect result) in case of `LowCardinality` arguments of window function. Fixes [#31114](https://github.com/ClickHouse/ClickHouse/issues/31114). [#31888](https://github.com/ClickHouse/ClickHouse/pull/31888) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
* Fix hang up with command `DROP TABLE system.query_log sync`. [#33293](https://github.com/ClickHouse/ClickHouse/pull/33293) ([zhanghuajie](https://github.com/zhanghuajieHIT)).
|
* Fix hang up with command `DROP TABLE system.query_log sync`. [#33293](https://github.com/ClickHouse/ClickHouse/pull/33293) ([zhanghuajie](https://github.com/zhanghuajieHIT)).
|
||||||
|
|
||||||
|
|
||||||
## [Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021)
|
## [Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021)
|
||||||
|
@ -10,7 +10,7 @@ ClickHouse® is an open-source column-oriented database management system that a
|
|||||||
* [Documentation](https://clickhouse.com/docs/en/) provides more in-depth information.
|
* [Documentation](https://clickhouse.com/docs/en/) provides more in-depth information.
|
||||||
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
|
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
|
||||||
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
|
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
|
||||||
* [Blog](https://clickhouse.com/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
|
* [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
|
||||||
* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation.
|
* [Code Browser (Woboq)](https://clickhouse.com/codebrowser/ClickHouse/index.html) with syntax highlight and navigation.
|
||||||
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev.
|
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlight, powered by github.dev.
|
||||||
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
|
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
|
||||||
|
@ -10,9 +10,11 @@ The following versions of ClickHouse server are currently being supported with s
|
|||||||
|
|
||||||
| Version | Supported |
|
| Version | Supported |
|
||||||
|:-|:-|
|
|:-|:-|
|
||||||
|
| 22.10 | ✔️ |
|
||||||
|
| 22.9 | ✔️ |
|
||||||
| 22.8 | ✔️ |
|
| 22.8 | ✔️ |
|
||||||
| 22.7 | ✔️ |
|
| 22.7 | ❌ |
|
||||||
| 22.6 | ✔️ |
|
| 22.6 | ❌ |
|
||||||
| 22.5 | ❌ |
|
| 22.5 | ❌ |
|
||||||
| 22.4 | ❌ |
|
| 22.4 | ❌ |
|
||||||
| 22.3 | ✔️ |
|
| 22.3 | ✔️ |
|
||||||
|
@ -151,7 +151,7 @@ public:
|
|||||||
{
|
{
|
||||||
size_t dot_pos = path.rfind('.');
|
size_t dot_pos = path.rfind('.');
|
||||||
if (dot_pos != std::string::npos)
|
if (dot_pos != std::string::npos)
|
||||||
fd = ::mkstemps(path.data(), path.size() - dot_pos);
|
fd = ::mkstemps(path.data(), static_cast<int>(path.size() - dot_pos));
|
||||||
else
|
else
|
||||||
fd = ::mkstemp(path.data());
|
fd = ::mkstemp(path.data());
|
||||||
|
|
||||||
@ -408,7 +408,7 @@ ReplxxLineReader::ReplxxLineReader(
|
|||||||
// In a simplest case use simple comment.
|
// In a simplest case use simple comment.
|
||||||
commented_line = fmt::format("-- {}", state.text());
|
commented_line = fmt::format("-- {}", state.text());
|
||||||
}
|
}
|
||||||
rx.set_state(replxx::Replxx::State(commented_line.c_str(), commented_line.size()));
|
rx.set_state(replxx::Replxx::State(commented_line.c_str(), static_cast<int>(commented_line.size())));
|
||||||
|
|
||||||
return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
|
return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
|
||||||
};
|
};
|
||||||
@ -480,7 +480,7 @@ void ReplxxLineReader::openEditor()
|
|||||||
if (executeCommand(argv) == 0)
|
if (executeCommand(argv) == 0)
|
||||||
{
|
{
|
||||||
const std::string & new_query = readFile(editor_file.getPath());
|
const std::string & new_query = readFile(editor_file.getPath());
|
||||||
rx.set_state(replxx::Replxx::State(new_query.c_str(), new_query.size()));
|
rx.set_state(replxx::Replxx::State(new_query.c_str(), static_cast<int>(new_query.size())));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
catch (const std::runtime_error & e)
|
catch (const std::runtime_error & e)
|
||||||
@ -526,7 +526,7 @@ void ReplxxLineReader::openInteractiveHistorySearch()
|
|||||||
{
|
{
|
||||||
std::string new_query = readFile(output_file.getPath());
|
std::string new_query = readFile(output_file.getPath());
|
||||||
rightTrim(new_query);
|
rightTrim(new_query);
|
||||||
rx.set_state(replxx::Replxx::State(new_query.c_str(), new_query.size()));
|
rx.set_state(replxx::Replxx::State(new_query.c_str(), static_cast<int>(new_query.size())));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
catch (const std::runtime_error & e)
|
catch (const std::runtime_error & e)
|
||||||
|
@ -265,7 +265,7 @@ inline size_t hashLessThan16(const char * data, size_t size)
|
|||||||
|
|
||||||
struct CRC32Hash
|
struct CRC32Hash
|
||||||
{
|
{
|
||||||
size_t operator() (StringRef x) const
|
unsigned operator() (StringRef x) const
|
||||||
{
|
{
|
||||||
const char * pos = x.data;
|
const char * pos = x.data;
|
||||||
size_t size = x.size;
|
size_t size = x.size;
|
||||||
@ -275,22 +275,22 @@ struct CRC32Hash
|
|||||||
|
|
||||||
if (size < 8)
|
if (size < 8)
|
||||||
{
|
{
|
||||||
return hashLessThan8(x.data, x.size);
|
return static_cast<unsigned>(hashLessThan8(x.data, x.size));
|
||||||
}
|
}
|
||||||
|
|
||||||
const char * end = pos + size;
|
const char * end = pos + size;
|
||||||
size_t res = -1ULL;
|
unsigned res = -1U;
|
||||||
|
|
||||||
do
|
do
|
||||||
{
|
{
|
||||||
UInt64 word = unalignedLoad<UInt64>(pos);
|
UInt64 word = unalignedLoad<UInt64>(pos);
|
||||||
res = CRC_INT(res, word);
|
res = static_cast<unsigned>(CRC_INT(res, word));
|
||||||
|
|
||||||
pos += 8;
|
pos += 8;
|
||||||
} while (pos + 8 < end);
|
} while (pos + 8 < end);
|
||||||
|
|
||||||
UInt64 word = unalignedLoad<UInt64>(end - 8); /// I'm not sure if this is normal.
|
UInt64 word = unalignedLoad<UInt64>(end - 8); /// I'm not sure if this is normal.
|
||||||
res = CRC_INT(res, word);
|
res = static_cast<unsigned>(CRC_INT(res, word));
|
||||||
|
|
||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
@ -302,7 +302,7 @@ struct StringRefHash : CRC32Hash {};
|
|||||||
|
|
||||||
struct CRC32Hash
|
struct CRC32Hash
|
||||||
{
|
{
|
||||||
size_t operator() (StringRef /* x */) const
|
unsigned operator() (StringRef /* x */) const
|
||||||
{
|
{
|
||||||
throw std::logic_error{"Not implemented CRC32Hash without SSE"};
|
throw std::logic_error{"Not implemented CRC32Hash without SSE"};
|
||||||
}
|
}
|
||||||
|
@ -122,7 +122,7 @@ QuotientAndRemainder<N> static inline split(UnsignedOfSize<N> value)
|
|||||||
constexpr DivisionBy10PowN<N> division;
|
constexpr DivisionBy10PowN<N> division;
|
||||||
|
|
||||||
UnsignedOfSize<N> quotient = (division.multiplier * (UnsignedOfSize<2 * N>(value) + division.add)) >> division.shift;
|
UnsignedOfSize<N> quotient = (division.multiplier * (UnsignedOfSize<2 * N>(value) + division.add)) >> division.shift;
|
||||||
UnsignedOfSize<N / 2> remainder = value - quotient * pow10<UnsignedOfSize<N / 2>>(N);
|
UnsignedOfSize<N / 2> remainder = static_cast<UnsignedOfSize<N / 2>>(value - quotient * pow10<UnsignedOfSize<N / 2>>(N));
|
||||||
|
|
||||||
return {quotient, remainder};
|
return {quotient, remainder};
|
||||||
}
|
}
|
||||||
|
@ -1,10 +1,8 @@
|
|||||||
#if defined(OS_LINUX)
|
#if defined(OS_LINUX)
|
||||||
# include <sys/syscall.h>
|
# include <sys/syscall.h>
|
||||||
#endif
|
#endif
|
||||||
#include <cstdlib>
|
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
#include <base/safeExit.h>
|
#include <base/safeExit.h>
|
||||||
#include <base/defines.h>
|
|
||||||
|
|
||||||
[[noreturn]] void safeExit(int code)
|
[[noreturn]] void safeExit(int code)
|
||||||
{
|
{
|
||||||
|
@ -227,6 +227,8 @@ struct integer<Bits, Signed>::_impl
|
|||||||
template <typename T>
|
template <typename T>
|
||||||
__attribute__((no_sanitize("undefined"))) constexpr static auto to_Integral(T f) noexcept
|
__attribute__((no_sanitize("undefined"))) constexpr static auto to_Integral(T f) noexcept
|
||||||
{
|
{
|
||||||
|
/// NOTE: this can be called with DB::Decimal, and in this case, result
|
||||||
|
/// will be wrong
|
||||||
if constexpr (std::is_signed_v<T>)
|
if constexpr (std::is_signed_v<T>)
|
||||||
return static_cast<int64_t>(f);
|
return static_cast<int64_t>(f);
|
||||||
else
|
else
|
||||||
|
@ -2,11 +2,11 @@
|
|||||||
|
|
||||||
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
|
# NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
|
||||||
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
|
# only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
|
||||||
SET(VERSION_REVISION 54467)
|
SET(VERSION_REVISION 54468)
|
||||||
SET(VERSION_MAJOR 22)
|
SET(VERSION_MAJOR 22)
|
||||||
SET(VERSION_MINOR 10)
|
SET(VERSION_MINOR 11)
|
||||||
SET(VERSION_PATCH 1)
|
SET(VERSION_PATCH 1)
|
||||||
SET(VERSION_GITHASH 3030d4c7ff09ec44ab07d0a8069ea923227288a1)
|
SET(VERSION_GITHASH 98ab5a3c189232ea2a3dddb9d2be7196ae8b3434)
|
||||||
SET(VERSION_DESCRIBE v22.10.1.1-testing)
|
SET(VERSION_DESCRIBE v22.11.1.1-testing)
|
||||||
SET(VERSION_STRING 22.10.1.1)
|
SET(VERSION_STRING 22.11.1.1)
|
||||||
# end of autochange
|
# end of autochange
|
||||||
|
@ -61,8 +61,14 @@ elseif (ARCH_AARCH64)
|
|||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
elseif (ARCH_PPC64LE)
|
elseif (ARCH_PPC64LE)
|
||||||
|
# By Default, build for power8 and up, allow building for power9 and up
|
||||||
# Note that gcc and clang have support for x86 SSE2 intrinsics when building for PowerPC
|
# Note that gcc and clang have support for x86 SSE2 intrinsics when building for PowerPC
|
||||||
set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS")
|
option (POWER9 "Build for Power 9 CPU and above" 0)
|
||||||
|
if(POWER9)
|
||||||
|
set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power9 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS")
|
||||||
|
else ()
|
||||||
|
set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS")
|
||||||
|
endif ()
|
||||||
|
|
||||||
elseif (ARCH_AMD64)
|
elseif (ARCH_AMD64)
|
||||||
option (ENABLE_SSSE3 "Use SSSE3 instructions on x86_64" 1)
|
option (ENABLE_SSSE3 "Use SSSE3 instructions on x86_64" 1)
|
||||||
@ -75,6 +81,7 @@ elseif (ARCH_AMD64)
|
|||||||
option (ENABLE_AVX512 "Use AVX512 instructions on x86_64" 0)
|
option (ENABLE_AVX512 "Use AVX512 instructions on x86_64" 0)
|
||||||
option (ENABLE_AVX512_VBMI "Use AVX512_VBMI instruction on x86_64 (depends on ENABLE_AVX512)" 0)
|
option (ENABLE_AVX512_VBMI "Use AVX512_VBMI instruction on x86_64 (depends on ENABLE_AVX512)" 0)
|
||||||
option (ENABLE_BMI "Use BMI instructions on x86_64" 0)
|
option (ENABLE_BMI "Use BMI instructions on x86_64" 0)
|
||||||
|
option (ENABLE_BMI2 "Use BMI2 instructions on x86_64 (depends on ENABLE_AVX2)" 0)
|
||||||
option (ENABLE_AVX2_FOR_SPEC_OP "Use avx2 instructions for specific operations on x86_64" 0)
|
option (ENABLE_AVX2_FOR_SPEC_OP "Use avx2 instructions for specific operations on x86_64" 0)
|
||||||
option (ENABLE_AVX512_FOR_SPEC_OP "Use avx512 instructions for specific operations on x86_64" 0)
|
option (ENABLE_AVX512_FOR_SPEC_OP "Use avx512 instructions for specific operations on x86_64" 0)
|
||||||
|
|
||||||
@ -90,6 +97,7 @@ elseif (ARCH_AMD64)
|
|||||||
SET(ENABLE_AVX512 0)
|
SET(ENABLE_AVX512 0)
|
||||||
SET(ENABLE_AVX512_VBMI 0)
|
SET(ENABLE_AVX512_VBMI 0)
|
||||||
SET(ENABLE_BMI 0)
|
SET(ENABLE_BMI 0)
|
||||||
|
SET(ENABLE_BMI2 0)
|
||||||
SET(ENABLE_AVX2_FOR_SPEC_OP 0)
|
SET(ENABLE_AVX2_FOR_SPEC_OP 0)
|
||||||
SET(ENABLE_AVX512_FOR_SPEC_OP 0)
|
SET(ENABLE_AVX512_FOR_SPEC_OP 0)
|
||||||
endif()
|
endif()
|
||||||
@ -237,6 +245,20 @@ elseif (ARCH_AMD64)
|
|||||||
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
|
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
|
set (TEST_FLAG "-mbmi2")
|
||||||
|
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
|
||||||
|
check_cxx_source_compiles("
|
||||||
|
#include <immintrin.h>
|
||||||
|
int main() {
|
||||||
|
auto a = _pdep_u64(0, 0);
|
||||||
|
(void)a;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
" HAVE_BMI2)
|
||||||
|
if (HAVE_BMI2 AND HAVE_AVX2 AND ENABLE_AVX2 AND ENABLE_BMI2)
|
||||||
|
set (COMPILER_FLAGS "${COMPILER_FLAGS} ${TEST_FLAG}")
|
||||||
|
endif ()
|
||||||
|
|
||||||
# Limit avx2/avx512 flag for specific source build
|
# Limit avx2/avx512 flag for specific source build
|
||||||
set (X86_INTRINSICS_FLAGS "")
|
set (X86_INTRINSICS_FLAGS "")
|
||||||
if (ENABLE_AVX2_FOR_SPEC_OP)
|
if (ENABLE_AVX2_FOR_SPEC_OP)
|
||||||
|
@ -3,15 +3,15 @@
|
|||||||
# This is a workaround for bug in llvm/clang,
|
# This is a workaround for bug in llvm/clang,
|
||||||
# that does not produce .debug_aranges with LTO
|
# that does not produce .debug_aranges with LTO
|
||||||
#
|
#
|
||||||
# NOTE: this is a temporary solution, that should be removed once [1] will be
|
# NOTE: this is a temporary solution, that should be removed after upgrading to
|
||||||
# resolved.
|
# clang-16/llvm-16.
|
||||||
#
|
#
|
||||||
# [1]: https://discourse.llvm.org/t/clang-does-not-produce-full-debug-aranges-section-with-thinlto/64898/8
|
# Refs: https://reviews.llvm.org/D133092
|
||||||
|
|
||||||
# NOTE: only -flto=thin is supported.
|
# NOTE: only -flto=thin is supported.
|
||||||
# NOTE: it is not possible to check was there -gdwarf-aranges initially or not.
|
# NOTE: it is not possible to check was there -gdwarf-aranges initially or not.
|
||||||
if [[ "$*" =~ -plugin-opt=thinlto ]]; then
|
if [[ "$*" =~ -plugin-opt=thinlto ]]; then
|
||||||
exec "@LLD_PATH@" -mllvm -generate-arange-section "$@"
|
exec "@LLD_PATH@" -plugin-opt=-generate-arange-section "$@"
|
||||||
else
|
else
|
||||||
exec "@LLD_PATH@" "$@"
|
exec "@LLD_PATH@" "$@"
|
||||||
fi
|
fi
|
||||||
|
@ -85,7 +85,7 @@ if (SANITIZE)
|
|||||||
# and they have a bunch of flags not halt the program if UIO happend and even to silence that warnings.
|
# and they have a bunch of flags not halt the program if UIO happend and even to silence that warnings.
|
||||||
# But for unknown reason that flags don't work with ClickHouse or we don't understand how to properly use them,
|
# But for unknown reason that flags don't work with ClickHouse or we don't understand how to properly use them,
|
||||||
# that's why we often receive reports about UIO. The simplest way to avoid this is just set this flag here.
|
# that's why we often receive reports about UIO. The simplest way to avoid this is just set this flag here.
|
||||||
set(UBSAN_FLAGS "${SAN_FLAGS} -fno-sanitize=unsigned-integer-overflow")
|
set(UBSAN_FLAGS "${UBSAN_FLAGS} -fno-sanitize=unsigned-integer-overflow")
|
||||||
endif()
|
endif()
|
||||||
if (COMPILER_CLANG)
|
if (COMPILER_CLANG)
|
||||||
set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt")
|
set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt")
|
||||||
|
@ -117,7 +117,7 @@ endif()
|
|||||||
# Archiver
|
# Archiver
|
||||||
|
|
||||||
if (COMPILER_GCC)
|
if (COMPILER_GCC)
|
||||||
find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-14" "llvm-ar-13" "llvm-ar-12")
|
find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-15" "llvm-ar-14" "llvm-ar-13" "llvm-ar-12")
|
||||||
else ()
|
else ()
|
||||||
find_program (LLVM_AR_PATH NAMES "llvm-ar-${COMPILER_VERSION_MAJOR}" "llvm-ar")
|
find_program (LLVM_AR_PATH NAMES "llvm-ar-${COMPILER_VERSION_MAJOR}" "llvm-ar")
|
||||||
endif ()
|
endif ()
|
||||||
@ -131,7 +131,7 @@ message(STATUS "Using archiver: ${CMAKE_AR}")
|
|||||||
# Ranlib
|
# Ranlib
|
||||||
|
|
||||||
if (COMPILER_GCC)
|
if (COMPILER_GCC)
|
||||||
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-14" "llvm-ranlib-13" "llvm-ranlib-12")
|
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-15" "llvm-ranlib-14" "llvm-ranlib-13" "llvm-ranlib-12")
|
||||||
else ()
|
else ()
|
||||||
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib-${COMPILER_VERSION_MAJOR}" "llvm-ranlib")
|
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib-${COMPILER_VERSION_MAJOR}" "llvm-ranlib")
|
||||||
endif ()
|
endif ()
|
||||||
@ -145,7 +145,7 @@ message(STATUS "Using ranlib: ${CMAKE_RANLIB}")
|
|||||||
# Install Name Tool
|
# Install Name Tool
|
||||||
|
|
||||||
if (COMPILER_GCC)
|
if (COMPILER_GCC)
|
||||||
find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool" "llvm-install-name-tool-14" "llvm-install-name-tool-13" "llvm-install-name-tool-12")
|
find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool" "llvm-install-name-tool-15" "llvm-install-name-tool-14" "llvm-install-name-tool-13" "llvm-install-name-tool-12")
|
||||||
else ()
|
else ()
|
||||||
find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool-${COMPILER_VERSION_MAJOR}" "llvm-install-name-tool")
|
find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool-${COMPILER_VERSION_MAJOR}" "llvm-install-name-tool")
|
||||||
endif ()
|
endif ()
|
||||||
@ -159,7 +159,7 @@ message(STATUS "Using install-name-tool: ${CMAKE_INSTALL_NAME_TOOL}")
|
|||||||
# Objcopy
|
# Objcopy
|
||||||
|
|
||||||
if (COMPILER_GCC)
|
if (COMPILER_GCC)
|
||||||
find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-14" "llvm-objcopy-13" "llvm-objcopy-12" "objcopy")
|
find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-15" "llvm-objcopy-14" "llvm-objcopy-13" "llvm-objcopy-12" "objcopy")
|
||||||
else ()
|
else ()
|
||||||
find_program (OBJCOPY_PATH NAMES "llvm-objcopy-${COMPILER_VERSION_MAJOR}" "llvm-objcopy" "objcopy")
|
find_program (OBJCOPY_PATH NAMES "llvm-objcopy-${COMPILER_VERSION_MAJOR}" "llvm-objcopy" "objcopy")
|
||||||
endif ()
|
endif ()
|
||||||
@ -173,7 +173,7 @@ endif ()
|
|||||||
# Strip
|
# Strip
|
||||||
|
|
||||||
if (COMPILER_GCC)
|
if (COMPILER_GCC)
|
||||||
find_program (STRIP_PATH NAMES "llvm-strip" "llvm-strip-14" "llvm-strip-13" "llvm-strip-12" "strip")
|
find_program (STRIP_PATH NAMES "llvm-strip" "llvm-strip-15" "llvm-strip-14" "llvm-strip-13" "llvm-strip-12" "strip")
|
||||||
else ()
|
else ()
|
||||||
find_program (STRIP_PATH NAMES "llvm-strip-${COMPILER_VERSION_MAJOR}" "llvm-strip" "strip")
|
find_program (STRIP_PATH NAMES "llvm-strip-${COMPILER_VERSION_MAJOR}" "llvm-strip" "strip")
|
||||||
endif ()
|
endif ()
|
||||||
|
@ -27,7 +27,6 @@ if (COMPILER_CLANG)
|
|||||||
no_warning(sign-conversion)
|
no_warning(sign-conversion)
|
||||||
no_warning(implicit-int-conversion)
|
no_warning(implicit-int-conversion)
|
||||||
no_warning(implicit-int-float-conversion)
|
no_warning(implicit-int-float-conversion)
|
||||||
no_warning(shorten-64-to-32)
|
|
||||||
no_warning(ctad-maybe-unsupported) # clang 9+, linux-only
|
no_warning(ctad-maybe-unsupported) # clang 9+, linux-only
|
||||||
no_warning(disabled-macro-expansion)
|
no_warning(disabled-macro-expansion)
|
||||||
no_warning(documentation-unknown-command)
|
no_warning(documentation-unknown-command)
|
||||||
|
2
contrib/CMakeLists.txt
vendored
2
contrib/CMakeLists.txt
vendored
@ -114,6 +114,7 @@ if (ENABLE_TESTS)
|
|||||||
endif()
|
endif()
|
||||||
|
|
||||||
add_contrib (llvm-project-cmake llvm-project)
|
add_contrib (llvm-project-cmake llvm-project)
|
||||||
|
add_contrib (libfuzzer-cmake llvm-project)
|
||||||
add_contrib (libxml2-cmake libxml2)
|
add_contrib (libxml2-cmake libxml2)
|
||||||
add_contrib (aws-s3-cmake
|
add_contrib (aws-s3-cmake
|
||||||
aws
|
aws
|
||||||
@ -164,6 +165,7 @@ add_contrib (sqlite-cmake sqlite-amalgamation)
|
|||||||
add_contrib (s2geometry-cmake s2geometry)
|
add_contrib (s2geometry-cmake s2geometry)
|
||||||
add_contrib (c-ares-cmake c-ares)
|
add_contrib (c-ares-cmake c-ares)
|
||||||
add_contrib (qpl-cmake qpl)
|
add_contrib (qpl-cmake qpl)
|
||||||
|
add_contrib (morton-nd-cmake morton-nd)
|
||||||
|
|
||||||
add_contrib(annoy-cmake annoy)
|
add_contrib(annoy-cmake annoy)
|
||||||
|
|
||||||
|
2
contrib/cctz
vendored
2
contrib/cctz
vendored
@ -1 +1 @@
|
|||||||
Subproject commit 05ec08ce61e4b5c44692cc2f1ce4b6d8596679bf
|
Subproject commit 7a454c25c7d16053bcd327cdd16329212a08fa4a
|
2
contrib/libcxx
vendored
2
contrib/libcxx
vendored
@ -1 +1 @@
|
|||||||
Subproject commit 172b2ae074f6755145b91c53a95c8540c1468239
|
Subproject commit 4db7f838afd3139eb3761694b04d31275df45d2d
|
@ -25,6 +25,7 @@ set(SRCS
|
|||||||
"${LIBCXX_SOURCE_DIR}/src/ios.cpp"
|
"${LIBCXX_SOURCE_DIR}/src/ios.cpp"
|
||||||
"${LIBCXX_SOURCE_DIR}/src/ios.instantiations.cpp"
|
"${LIBCXX_SOURCE_DIR}/src/ios.instantiations.cpp"
|
||||||
"${LIBCXX_SOURCE_DIR}/src/iostream.cpp"
|
"${LIBCXX_SOURCE_DIR}/src/iostream.cpp"
|
||||||
|
"${LIBCXX_SOURCE_DIR}/src/legacy_debug_handler.cpp"
|
||||||
"${LIBCXX_SOURCE_DIR}/src/legacy_pointer_safety.cpp"
|
"${LIBCXX_SOURCE_DIR}/src/legacy_pointer_safety.cpp"
|
||||||
"${LIBCXX_SOURCE_DIR}/src/locale.cpp"
|
"${LIBCXX_SOURCE_DIR}/src/locale.cpp"
|
||||||
"${LIBCXX_SOURCE_DIR}/src/memory.cpp"
|
"${LIBCXX_SOURCE_DIR}/src/memory.cpp"
|
||||||
@ -49,6 +50,7 @@ set(SRCS
|
|||||||
"${LIBCXX_SOURCE_DIR}/src/valarray.cpp"
|
"${LIBCXX_SOURCE_DIR}/src/valarray.cpp"
|
||||||
"${LIBCXX_SOURCE_DIR}/src/variant.cpp"
|
"${LIBCXX_SOURCE_DIR}/src/variant.cpp"
|
||||||
"${LIBCXX_SOURCE_DIR}/src/vector.cpp"
|
"${LIBCXX_SOURCE_DIR}/src/vector.cpp"
|
||||||
|
"${LIBCXX_SOURCE_DIR}/src/verbose_abort.cpp"
|
||||||
)
|
)
|
||||||
|
|
||||||
add_library(cxx ${SRCS})
|
add_library(cxx ${SRCS})
|
||||||
|
2
contrib/libcxxabi
vendored
2
contrib/libcxxabi
vendored
@ -1 +1 @@
|
|||||||
Subproject commit 6eb7cc7a7bdd779e6734d1b9fb451df2274462d7
|
Subproject commit a736a6b3c6a7b8aae2ebad629ca21b2c55b4820e
|
@ -9,6 +9,7 @@ set(SRCS
|
|||||||
"${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp"
|
"${LIBCXXABI_SOURCE_DIR}/src/cxa_exception_storage.cpp"
|
||||||
"${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp"
|
"${LIBCXXABI_SOURCE_DIR}/src/cxa_guard.cpp"
|
||||||
"${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp"
|
"${LIBCXXABI_SOURCE_DIR}/src/cxa_handlers.cpp"
|
||||||
|
# "${LIBCXXABI_SOURCE_DIR}/src/cxa_noexception.cpp"
|
||||||
"${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp"
|
"${LIBCXXABI_SOURCE_DIR}/src/cxa_personality.cpp"
|
||||||
"${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp"
|
"${LIBCXXABI_SOURCE_DIR}/src/cxa_thread_atexit.cpp"
|
||||||
"${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp"
|
"${LIBCXXABI_SOURCE_DIR}/src/cxa_vector.cpp"
|
||||||
|
35
contrib/libfuzzer-cmake/CMakeLists.txt
Normal file
35
contrib/libfuzzer-cmake/CMakeLists.txt
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
set(COMPILER_RT_FUZZER_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/compiler-rt/lib/fuzzer")
|
||||||
|
|
||||||
|
set(FUZZER_SRCS
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerCrossOver.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerDataFlowTrace.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerDriver.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtFunctionsDlsym.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtFunctionsWeak.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtFunctionsWindows.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtraCounters.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtraCountersDarwin.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtraCountersWindows.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerFork.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerIO.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerIOPosix.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerIOWindows.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerLoop.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerMerge.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerMutate.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerSHA1.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerTracePC.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtil.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilDarwin.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilFuchsia.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilLinux.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilPosix.cpp"
|
||||||
|
"${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilWindows.cpp"
|
||||||
|
)
|
||||||
|
|
||||||
|
add_library(_fuzzer_no_main STATIC ${FUZZER_SRCS})
|
||||||
|
add_library(ch_contrib::fuzzer_no_main ALIAS _fuzzer_no_main)
|
||||||
|
|
||||||
|
add_library(_fuzzer STATIC ${FUZZER_SRCS} "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerMain.cpp")
|
||||||
|
add_library(ch_contrib::fuzzer ALIAS _fuzzer)
|
||||||
|
|
1
contrib/morton-nd
vendored
Submodule
1
contrib/morton-nd
vendored
Submodule
@ -0,0 +1 @@
|
|||||||
|
Subproject commit 3795491a4aa3cdc916c8583094683f0d68df5bc0
|
3
contrib/morton-nd-cmake/CMakeLists.txt
Normal file
3
contrib/morton-nd-cmake/CMakeLists.txt
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
add_library(_morton_nd INTERFACE)
|
||||||
|
target_include_directories(_morton_nd SYSTEM BEFORE INTERFACE "${ClickHouse_SOURCE_DIR}/contrib/morton-nd/include/")
|
||||||
|
add_library(ch_contrib::morton_nd ALIAS _morton_nd)
|
2
contrib/rocksdb
vendored
2
contrib/rocksdb
vendored
@ -1 +1 @@
|
|||||||
Subproject commit e7c2b2f7bcf3b4b33892a1a6d25c32a93edfbdb9
|
Subproject commit 2c8998e26c6d46b27c710d7829c3a15e34959f70
|
@ -78,23 +78,13 @@ endif()
|
|||||||
|
|
||||||
include(CheckCCompilerFlag)
|
include(CheckCCompilerFlag)
|
||||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
|
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
|
||||||
CHECK_C_COMPILER_FLAG("-mcpu=power9" HAS_POWER9)
|
if(POWER9)
|
||||||
if(HAS_POWER9)
|
set(HAS_POWER9 1)
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mcpu=power9 -mtune=power9")
|
set(HAS_ALTIVEC 1)
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=power9 -mtune=power9")
|
|
||||||
else()
|
else()
|
||||||
CHECK_C_COMPILER_FLAG("-mcpu=power8" HAS_POWER8)
|
set(HAS_POWER8 1)
|
||||||
if(HAS_POWER8)
|
set(HAS_ALTIVEC 1)
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mcpu=power8 -mtune=power8")
|
endif(POWER9)
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=power8 -mtune=power8")
|
|
||||||
endif(HAS_POWER8)
|
|
||||||
endif(HAS_POWER9)
|
|
||||||
CHECK_C_COMPILER_FLAG("-maltivec" HAS_ALTIVEC)
|
|
||||||
if(HAS_ALTIVEC)
|
|
||||||
message(STATUS " HAS_ALTIVEC yes")
|
|
||||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maltivec")
|
|
||||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -maltivec")
|
|
||||||
endif(HAS_ALTIVEC)
|
|
||||||
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
|
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
|
||||||
|
|
||||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
|
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
|
||||||
|
2
contrib/zlib-ng
vendored
2
contrib/zlib-ng
vendored
@ -1 +1 @@
|
|||||||
Subproject commit bffad6f6fe74d6a2f92e2668390664a926c68733
|
Subproject commit 50f0eae1a411764cd6d1e85b3ce471438acd3c1c
|
@ -73,7 +73,7 @@ RUN apt-get install binutils-riscv64-linux-gnu
|
|||||||
|
|
||||||
# Architecture of the image when BuildKit/buildx is used
|
# Architecture of the image when BuildKit/buildx is used
|
||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
ARG NFPM_VERSION=2.18.1
|
ARG NFPM_VERSION=2.20.0
|
||||||
|
|
||||||
RUN arch=${TARGETARCH:-amd64} \
|
RUN arch=${TARGETARCH:-amd64} \
|
||||||
&& curl -Lo /tmp/nfpm.deb "https://github.com/goreleaser/nfpm/releases/download/v${NFPM_VERSION}/nfpm_${arch}.deb" \
|
&& curl -Lo /tmp/nfpm.deb "https://github.com/goreleaser/nfpm/releases/download/v${NFPM_VERSION}/nfpm_${arch}.deb" \
|
||||||
|
@ -208,6 +208,7 @@ def parse_env_variables(
|
|||||||
cxx = cc.replace("gcc", "g++").replace("clang", "clang++")
|
cxx = cc.replace("gcc", "g++").replace("clang", "clang++")
|
||||||
|
|
||||||
if package_type == "deb":
|
if package_type == "deb":
|
||||||
|
# NOTE: This are the env for packages/build script
|
||||||
result.append("MAKE_DEB=true")
|
result.append("MAKE_DEB=true")
|
||||||
cmake_flags.append("-DENABLE_TESTS=0")
|
cmake_flags.append("-DENABLE_TESTS=0")
|
||||||
cmake_flags.append("-DENABLE_UTILS=0")
|
cmake_flags.append("-DENABLE_UTILS=0")
|
||||||
@ -268,6 +269,7 @@ def parse_env_variables(
|
|||||||
result.append('DISTCC_HOSTS="localhost/`nproc`"')
|
result.append('DISTCC_HOSTS="localhost/`nproc`"')
|
||||||
|
|
||||||
if additional_pkgs:
|
if additional_pkgs:
|
||||||
|
# NOTE: This are the env for packages/build script
|
||||||
result.append("MAKE_APK=true")
|
result.append("MAKE_APK=true")
|
||||||
result.append("MAKE_RPM=true")
|
result.append("MAKE_RPM=true")
|
||||||
result.append("MAKE_TGZ=true")
|
result.append("MAKE_TGZ=true")
|
||||||
|
@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \
|
|||||||
# lts / testing / prestable / etc
|
# lts / testing / prestable / etc
|
||||||
ARG REPO_CHANNEL="stable"
|
ARG REPO_CHANNEL="stable"
|
||||||
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
||||||
ARG VERSION="22.9.3.18"
|
ARG VERSION="22.10.1.1877"
|
||||||
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
||||||
|
|
||||||
# user/group precreated explicitly with fixed uid/gid on purpose.
|
# user/group precreated explicitly with fixed uid/gid on purpose.
|
||||||
|
@ -21,7 +21,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
|
|||||||
|
|
||||||
ARG REPO_CHANNEL="stable"
|
ARG REPO_CHANNEL="stable"
|
||||||
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
|
ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
|
||||||
ARG VERSION="22.9.3.18"
|
ARG VERSION="22.10.1.1877"
|
||||||
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
||||||
|
|
||||||
# set non-empty deb_location_url url to create a docker image
|
# set non-empty deb_location_url url to create a docker image
|
||||||
|
@ -36,10 +36,7 @@ RUN arch=${TARGETARCH:-amd64} \
|
|||||||
# repo versions doesn't work correctly with C++17
|
# repo versions doesn't work correctly with C++17
|
||||||
# also we push reports to s3, so we add index.html to subfolder urls
|
# also we push reports to s3, so we add index.html to subfolder urls
|
||||||
# https://github.com/ClickHouse-Extras/woboq_codebrowser/commit/37e15eaf377b920acb0b48dbe82471be9203f76b
|
# https://github.com/ClickHouse-Extras/woboq_codebrowser/commit/37e15eaf377b920acb0b48dbe82471be9203f76b
|
||||||
# TODO: remove branch in a few weeks after merge, e.g. in May or June 2022
|
RUN git clone https://github.com/ClickHouse/woboq_codebrowser \
|
||||||
#
|
|
||||||
# FIXME: update location of a repo
|
|
||||||
RUN git clone https://github.com/azat/woboq_codebrowser --branch llvm-15 \
|
|
||||||
&& cd woboq_codebrowser \
|
&& cd woboq_codebrowser \
|
||||||
&& cmake . -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-${LLVM_VERSION} -DCMAKE_C_COMPILER=clang-${LLVM_VERSION} \
|
&& cmake . -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-${LLVM_VERSION} -DCMAKE_C_COMPILER=clang-${LLVM_VERSION} \
|
||||||
&& ninja \
|
&& ninja \
|
||||||
|
@ -136,6 +136,7 @@ function clone_submodules
|
|||||||
contrib/wyhash
|
contrib/wyhash
|
||||||
contrib/hashidsxx
|
contrib/hashidsxx
|
||||||
contrib/c-ares
|
contrib/c-ares
|
||||||
|
contrib/morton-nd
|
||||||
)
|
)
|
||||||
|
|
||||||
git submodule sync
|
git submodule sync
|
||||||
|
@ -27,9 +27,14 @@ RUN apt-get update \
|
|||||||
tar \
|
tar \
|
||||||
tzdata \
|
tzdata \
|
||||||
unixodbc \
|
unixodbc \
|
||||||
|
python3-pip \
|
||||||
|
libcurl4-openssl-dev \
|
||||||
|
libssl-dev \
|
||||||
&& apt-get clean \
|
&& apt-get clean \
|
||||||
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
|
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
|
||||||
|
|
||||||
|
RUN pip3 install pycurl
|
||||||
|
|
||||||
# Architecture of the image when BuildKit/buildx is used
|
# Architecture of the image when BuildKit/buildx is used
|
||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
|
|
||||||
|
31
docker/test/stress/run.sh
Executable file → Normal file
31
docker/test/stress/run.sh
Executable file → Normal file
@ -47,7 +47,6 @@ function install_packages()
|
|||||||
|
|
||||||
function configure()
|
function configure()
|
||||||
{
|
{
|
||||||
export ZOOKEEPER_FAULT_INJECTION=1
|
|
||||||
# install test configs
|
# install test configs
|
||||||
export USE_DATABASE_ORDINARY=1
|
export USE_DATABASE_ORDINARY=1
|
||||||
export EXPORT_S3_STORAGE_POLICIES=1
|
export EXPORT_S3_STORAGE_POLICIES=1
|
||||||
@ -203,6 +202,7 @@ quit
|
|||||||
|
|
||||||
install_packages package_folder
|
install_packages package_folder
|
||||||
|
|
||||||
|
export ZOOKEEPER_FAULT_INJECTION=1
|
||||||
configure
|
configure
|
||||||
|
|
||||||
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
|
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
|
||||||
@ -243,6 +243,7 @@ stop
|
|||||||
|
|
||||||
# Let's enable S3 storage by default
|
# Let's enable S3 storage by default
|
||||||
export USE_S3_STORAGE_FOR_MERGE_TREE=1
|
export USE_S3_STORAGE_FOR_MERGE_TREE=1
|
||||||
|
export ZOOKEEPER_FAULT_INJECTION=1
|
||||||
configure
|
configure
|
||||||
|
|
||||||
# But we still need default disk because some tables loaded only into it
|
# But we still need default disk because some tables loaded only into it
|
||||||
@ -270,10 +271,6 @@ clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_
|
|||||||
|| (echo -e 'Server failed to start (see application_errors.txt and clickhouse-server.clean.log)\tFAIL' >> /test_output/test_results.tsv \
|
|| (echo -e 'Server failed to start (see application_errors.txt and clickhouse-server.clean.log)\tFAIL' >> /test_output/test_results.tsv \
|
||||||
&& grep -a "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt)
|
&& grep -a "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt)
|
||||||
|
|
||||||
echo "Get previous release tag"
|
|
||||||
previous_release_tag=$(clickhouse-client --query="SELECT version()" | get_previous_release_tag)
|
|
||||||
echo $previous_release_tag
|
|
||||||
|
|
||||||
stop
|
stop
|
||||||
|
|
||||||
[ -f /var/log/clickhouse-server/clickhouse-server.log ] || echo -e "Server log does not exist\tFAIL"
|
[ -f /var/log/clickhouse-server/clickhouse-server.log ] || echo -e "Server log does not exist\tFAIL"
|
||||||
@ -331,6 +328,10 @@ zgrep -Fa " received signal " /test_output/gdb.log > /dev/null \
|
|||||||
|
|
||||||
echo -e "Backward compatibility check\n"
|
echo -e "Backward compatibility check\n"
|
||||||
|
|
||||||
|
echo "Get previous release tag"
|
||||||
|
previous_release_tag=$(clickhouse-client --version | grep -o "[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*" | get_previous_release_tag)
|
||||||
|
echo $previous_release_tag
|
||||||
|
|
||||||
echo "Clone previous release repository"
|
echo "Clone previous release repository"
|
||||||
git clone https://github.com/ClickHouse/ClickHouse.git --no-tags --progress --branch=$previous_release_tag --no-recurse-submodules --depth=1 previous_release_repository
|
git clone https://github.com/ClickHouse/ClickHouse.git --no-tags --progress --branch=$previous_release_tag --no-recurse-submodules --depth=1 previous_release_repository
|
||||||
|
|
||||||
@ -375,6 +376,8 @@ else
|
|||||||
install_packages previous_release_package_folder
|
install_packages previous_release_package_folder
|
||||||
|
|
||||||
# Start server from previous release
|
# Start server from previous release
|
||||||
|
# Previous version may not be ready for fault injections
|
||||||
|
export ZOOKEEPER_FAULT_INJECTION=0
|
||||||
configure
|
configure
|
||||||
|
|
||||||
# Avoid "Setting s3_check_objects_after_upload is neither a builtin setting..."
|
# Avoid "Setting s3_check_objects_after_upload is neither a builtin setting..."
|
||||||
@ -389,12 +392,23 @@ else
|
|||||||
|
|
||||||
clickhouse-client --query="SELECT 'Server version: ', version()"
|
clickhouse-client --query="SELECT 'Server version: ', version()"
|
||||||
|
|
||||||
# Install new package before running stress test because we should use new clickhouse-client and new clickhouse-test
|
# Install new package before running stress test because we should use new
|
||||||
# But we should leave old binary in /usr/bin/ for gdb (so it will print sane stacktarces)
|
# clickhouse-client and new clickhouse-test.
|
||||||
|
#
|
||||||
|
# But we should leave old binary in /usr/bin/ and debug symbols in
|
||||||
|
# /usr/lib/debug/usr/bin (if any) for gdb and internal DWARF parser, so it
|
||||||
|
# will print sane stacktraces and also to avoid possible crashes.
|
||||||
|
#
|
||||||
|
# FIXME: those files can be extracted directly from debian package, but
|
||||||
|
# actually better solution will be to use different PATH instead of playing
|
||||||
|
# games with files from packages.
|
||||||
mv /usr/bin/clickhouse previous_release_package_folder/
|
mv /usr/bin/clickhouse previous_release_package_folder/
|
||||||
|
mv /usr/lib/debug/usr/bin/clickhouse.debug previous_release_package_folder/
|
||||||
install_packages package_folder
|
install_packages package_folder
|
||||||
mv /usr/bin/clickhouse package_folder/
|
mv /usr/bin/clickhouse package_folder/
|
||||||
|
mv /usr/lib/debug/usr/bin/clickhouse.debug package_folder/
|
||||||
mv previous_release_package_folder/clickhouse /usr/bin/
|
mv previous_release_package_folder/clickhouse /usr/bin/
|
||||||
|
mv previous_release_package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
|
||||||
|
|
||||||
mkdir tmp_stress_output
|
mkdir tmp_stress_output
|
||||||
|
|
||||||
@ -410,6 +424,8 @@ else
|
|||||||
|
|
||||||
# Start new server
|
# Start new server
|
||||||
mv package_folder/clickhouse /usr/bin/
|
mv package_folder/clickhouse /usr/bin/
|
||||||
|
mv package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
|
||||||
|
export ZOOKEEPER_FAULT_INJECTION=1
|
||||||
configure
|
configure
|
||||||
start 500
|
start 500
|
||||||
clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \
|
clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \
|
||||||
@ -464,6 +480,7 @@ else
|
|||||||
-e "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part" \
|
-e "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part" \
|
||||||
-e "The set of parts restored in place of" \
|
-e "The set of parts restored in place of" \
|
||||||
-e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
|
-e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
|
||||||
|
-e "Code: 269. DB::Exception: Destination table is myself" \
|
||||||
/var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
|
/var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
|
||||||
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||||
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||||
|
@ -286,9 +286,7 @@ if __name__ == "__main__":
|
|||||||
# But right now it should work, since neither hung check, nor 00001_select_1 has GROUP BY.
|
# But right now it should work, since neither hung check, nor 00001_select_1 has GROUP BY.
|
||||||
"--client-option",
|
"--client-option",
|
||||||
"max_untracked_memory=1Gi",
|
"max_untracked_memory=1Gi",
|
||||||
"--client-option",
|
|
||||||
"max_memory_usage_for_user=0",
|
"max_memory_usage_for_user=0",
|
||||||
"--client-option",
|
|
||||||
"memory_profiler_step=1Gi",
|
"memory_profiler_step=1Gi",
|
||||||
# Use system database to avoid CREATE/DROP DATABASE queries
|
# Use system database to avoid CREATE/DROP DATABASE queries
|
||||||
"--database=system",
|
"--database=system",
|
||||||
|
@ -5,6 +5,7 @@ FROM ubuntu:20.04
|
|||||||
ARG apt_archive="http://archive.ubuntu.com"
|
ARG apt_archive="http://archive.ubuntu.com"
|
||||||
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list
|
||||||
|
|
||||||
|
# 15.0.2
|
||||||
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=15
|
ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=15
|
||||||
|
|
||||||
RUN apt-get update \
|
RUN apt-get update \
|
||||||
@ -58,6 +59,9 @@ RUN apt-get update \
|
|||||||
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
|
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
|
||||||
# for external_symbolizer_path
|
# for external_symbolizer_path
|
||||||
RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer
|
RUN ln -s /usr/bin/llvm-symbolizer-${LLVM_VERSION} /usr/bin/llvm-symbolizer
|
||||||
|
# FIXME: workaround for "The imported target "merge-fdata" references the file" error
|
||||||
|
# https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d
|
||||||
|
RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake
|
||||||
|
|
||||||
ARG CCACHE_VERSION=4.6.1
|
ARG CCACHE_VERSION=4.6.1
|
||||||
RUN mkdir /tmp/ccache \
|
RUN mkdir /tmp/ccache \
|
||||||
|
352
docs/changelogs/v22.10.1.1877-stable.md
Normal file
352
docs/changelogs/v22.10.1.1877-stable.md
Normal file
@ -0,0 +1,352 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2022
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2022 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v22.10.1.1877-stable (98ab5a3c189) FIXME as compared to v22.9.1.2603-stable (3030d4c7ff0)
|
||||||
|
|
||||||
|
#### Backward Incompatible Change
|
||||||
|
* Rename cache commands: `show caches` -> `show filesystem caches`, `describe cache` -> `describe filesystem cache`. [#41508](https://github.com/ClickHouse/ClickHouse/pull/41508) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Remove support for the `WITH TIMEOUT` section for `LIVE VIEW`. This closes [#40557](https://github.com/ClickHouse/ClickHouse/issues/40557). [#42173](https://github.com/ClickHouse/ClickHouse/pull/42173) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### New Feature
|
||||||
|
* Add Rust code support into ClickHouse with BLAKE3 hash-function library as an example. [#33435](https://github.com/ClickHouse/ClickHouse/pull/33435) ([BoloniniD](https://github.com/BoloniniD)).
|
||||||
|
* This is the initial implement of Kusto Query Language. (MVP). [#37961](https://github.com/ClickHouse/ClickHouse/pull/37961) ([Yong Wang](https://github.com/kashwy)).
|
||||||
|
* * Support limiting of temporary data stored on disk using settings `max_temporary_data_on_disk_size_for_user`/`max_temporary_data_on_disk_size_for_query` . [#40893](https://github.com/ClickHouse/ClickHouse/pull/40893) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Support Java integers hashing in `javaHash`. [#41131](https://github.com/ClickHouse/ClickHouse/pull/41131) ([JackyWoo](https://github.com/JackyWoo)).
|
||||||
|
* This PR is to support the OpenSSL in-house build like the BoringSSL submodule. Build flag i.e. ENABLE_CH_BUNDLE_BORINGSSL is used to choose between BoringSSL and OpenSSL. By default, the BoringSSL in-house build will be used. [#41142](https://github.com/ClickHouse/ClickHouse/pull/41142) ([MeenaRenganathan22](https://github.com/MeenaRenganathan22)).
|
||||||
|
* Composable protocol configuration is added. [#41198](https://github.com/ClickHouse/ClickHouse/pull/41198) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Add OpenTelemetry support to ON CLUSTER DDL(require `distributed_ddl_entry_format_version` to be set to 4). [#41484](https://github.com/ClickHouse/ClickHouse/pull/41484) ([Frank Chen](https://github.com/FrankChen021)).
|
||||||
|
* Add setting `format_json_object_each_row_column_for_object_name` to write/parse object name as column value in JSONObjectEachRow format. [#41703](https://github.com/ClickHouse/ClickHouse/pull/41703) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* adds Morton Coding (ZCurve) encode/decode functions. [#41753](https://github.com/ClickHouse/ClickHouse/pull/41753) ([Constantine Peresypkin](https://github.com/pkit)).
|
||||||
|
* Implement support for different UUID binary formats with support for the two most prevalent ones: the default big-endian and Microsoft's mixed-endian as specified in [RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.1). [#42108](https://github.com/ClickHouse/ClickHouse/pull/42108) ([ltrk2](https://github.com/ltrk2)).
|
||||||
|
* Added an aggregate function `analysisOfVariance` (`anova`) to perform a statistical test over several groups of normally distributed observations to find out whether all groups have the same mean or not. Original PR [#37872](https://github.com/ClickHouse/ClickHouse/issues/37872). [#42131](https://github.com/ClickHouse/ClickHouse/pull/42131) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Add support for `SET setting_name = DEFAULT`. [#42187](https://github.com/ClickHouse/ClickHouse/pull/42187) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
|
||||||
|
* * Add `URL` Functions which conform rfc. Functions include: `cutToFirstSignificantSubdomainCustomRFC`, `cutToFirstSignificantSubdomainCustomWithWWWRFC`, `cutToFirstSignificantSubdomainRFC`, `cutToFirstSignificantSubdomainWithWWWRFC`, `domainRFC`, `domainWithoutWWWRFC`, `firstSignificantSubdomainCustomRFC`, `firstSignificantSubdomainRFC`, `portRFC`, `topLevelDomainRFC`. [#42274](https://github.com/ClickHouse/ClickHouse/pull/42274) ([Quanfa Fu](https://github.com/dentiscalprum)).
|
||||||
|
* Added functions (`randUniform`, `randNormal`, `randLogNormal`, `randExponential`, `randChiSquared`, `randStudentT`, `randFisherF`, `randBernoulli`, `randBinomial`, `randNegativeBinomial`, `randPoisson` ) to generate random values according to the specified distributions. This closes [#21834](https://github.com/ClickHouse/ClickHouse/issues/21834). [#42411](https://github.com/ClickHouse/ClickHouse/pull/42411) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
|
||||||
|
#### Performance Improvement
|
||||||
|
* Implement operator precedence element parser to resolve stack overflow issues and make the required stack size smaller. [#34892](https://github.com/ClickHouse/ClickHouse/pull/34892) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* DISTINCT in order optimization leverage sorting properties of data streams. This improvement will enable reading in order for DISTINCT if applicable (before it was necessary to provide ORDER BY for columns in DISTINCT). [#41014](https://github.com/ClickHouse/ClickHouse/pull/41014) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* ColumnVector: optimize UInt8 index with AVX512VBMI. [#41247](https://github.com/ClickHouse/ClickHouse/pull/41247) ([Guo Wangyang](https://github.com/guowangy)).
|
||||||
|
* The performance experiments of **SSB** (Star Schema Benchmark) on the ICX device (Intel Xeon Platinum 8380 CPU, 80 cores, 160 threads) shows that this change could bring a **2.95x** improvement of the geomean of all subcases' QPS. [#41675](https://github.com/ClickHouse/ClickHouse/pull/41675) ([Zhiguo Zhou](https://github.com/ZhiguoZh)).
|
||||||
|
* Fixed slowness in JSONExtract with LowCardinality(String) tuples. [#41726](https://github.com/ClickHouse/ClickHouse/pull/41726) ([AlfVII](https://github.com/AlfVII)).
|
||||||
|
* Add ldapr capabilities to AArch64 builds. This is supported from Graviton 2+, Azure and GCP instances. Only appeared in clang-15 [not so long ago](https://github.com/llvm/llvm-project/commit/9609b5daffe9fd28d83d83da895abc5113f76c24). [#41778](https://github.com/ClickHouse/ClickHouse/pull/41778) ([Daniel Kutenin](https://github.com/danlark1)).
|
||||||
|
* Improve performance when comparing strings and one argument is empty constant string. [#41870](https://github.com/ClickHouse/ClickHouse/pull/41870) ([Jiebin Sun](https://github.com/jiebinn)).
|
||||||
|
* optimize insertFrom of ColumnAggregateFunction to share Aggregate State in some cases. [#41960](https://github.com/ClickHouse/ClickHouse/pull/41960) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Relax the "Too many parts" threshold. This closes [#6551](https://github.com/ClickHouse/ClickHouse/issues/6551). Now ClickHouse will allow more parts in a partition if the average part size is large enough (at least 10 GiB). This allows to have up to petabytes of data in a single partition of a single table on a single server, which is possible using disk shelves or object storage. [#42002](https://github.com/ClickHouse/ClickHouse/pull/42002) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Make writing to AzureBlobStorage more efficient (respect `max_single_part_upload_size` instead of writing a block per each buffer size). Inefficiency mentioned in [#41754](https://github.com/ClickHouse/ClickHouse/issues/41754). [#42041](https://github.com/ClickHouse/ClickHouse/pull/42041) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Make thread ids in the process list and query_log unique to avoid waste. [#42180](https://github.com/ClickHouse/ClickHouse/pull/42180) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* Added new infrastructure for query analysis and planning under `allow_experimental_analyzer` setting. [#31796](https://github.com/ClickHouse/ClickHouse/pull/31796) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* * Support expression `(EXPLAIN SELECT ...)` in a subquery. Queries like `SELECT * FROM (EXPLAIN PIPELINE SELECT col FROM TABLE ORDER BY col)` became valid. [#40630](https://github.com/ClickHouse/ClickHouse/pull/40630) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Currently changing `async_insert_max_data_size` or `async_insert_busy_timeout_ms` in scope of query makes no sense and this leads to bad user experience. E.g. user wants to insert data rarely and he doesn't have an access to server config to tune default settings. [#40668](https://github.com/ClickHouse/ClickHouse/pull/40668) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Embedded Keeper will always start in the background allowing ClickHouse to start without achieving quorum. [#40991](https://github.com/ClickHouse/ClickHouse/pull/40991) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Improvements for reading from remote filesystems, made threadpool size for reads/writes configurable. Closes [#41070](https://github.com/ClickHouse/ClickHouse/issues/41070). [#41011](https://github.com/ClickHouse/ClickHouse/pull/41011) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Made reestablishing a new connection more reactive in case of expiration of the previous one. Previously there was a task which spawns every minute by default and thus a table could be in readonly state for about this time. [#41092](https://github.com/ClickHouse/ClickHouse/pull/41092) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Support all combinators combination in WindowTransform/arratReduce*/initializeAggregation/aggregate functions versioning. Previously combinators like `ForEach/Resample/Map` didn't work in these places, using them led to exception like`State function ... inserts results into non-state column`. [#41107](https://github.com/ClickHouse/ClickHouse/pull/41107) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Now projections can be used with zero copy replication. [#41147](https://github.com/ClickHouse/ClickHouse/pull/41147) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* - Add function tryDecrypt that returns NULL when decrypt fail (e.g. decrypt with incorrect key) instead of throwing exception. [#41206](https://github.com/ClickHouse/ClickHouse/pull/41206) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Add the `unreserved_space` column to the `system.disks` table to check how much space is not taken by reservations per disk. [#41254](https://github.com/ClickHouse/ClickHouse/pull/41254) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Support s3 authorisation headers from ast arguments. [#41261](https://github.com/ClickHouse/ClickHouse/pull/41261) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Add setting 'allow_implicit_no_password' that forbids creating a user with no password unless 'IDENTIFIED WITH no_password' is explicitly specified. [#41341](https://github.com/ClickHouse/ClickHouse/pull/41341) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* keeper-improvement: add support for uploading snapshots to S3. S3 information can be defined inside `keeper_server.s3_snapshot`. [#41342](https://github.com/ClickHouse/ClickHouse/pull/41342) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Add support for MultiRead in Keeper and internal ZooKeeper client. [#41410](https://github.com/ClickHouse/ClickHouse/pull/41410) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* add a support for decimal type comparing with floating point literal in IN operator. [#41544](https://github.com/ClickHouse/ClickHouse/pull/41544) ([liang.huang](https://github.com/lhuang09287750)).
|
||||||
|
* Allow readable size values in cache config. [#41688](https://github.com/ClickHouse/ClickHouse/pull/41688) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Check file path for path traversal attacks in errors logger for input formats. [#41694](https://github.com/ClickHouse/ClickHouse/pull/41694) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* ClickHouse could cache stale DNS entries for some period of time (15 seconds by default) until the cache won't be updated asynchronously. During these period ClickHouse can nevertheless try to establish a connection and produce errors. This behaviour is fixed. [#41707](https://github.com/ClickHouse/ClickHouse/pull/41707) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Add interactive history search with fzf-like utility (fzf/sk) for `clickhouse-client`/`clickhouse-local` (note you can use `FZF_DEFAULT_OPTS`/`SKIM_DEFAULT_OPTIONS` to additionally configure the behavior). [#41730](https://github.com/ClickHouse/ClickHouse/pull/41730) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* For client when connecting to a secure server with invalid certificate only allow to proceed with '--accept-certificate' flag. [#41743](https://github.com/ClickHouse/ClickHouse/pull/41743) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Add function "tryBase58Decode()", similar to the existing function "tryBase64Decode()". [#41824](https://github.com/ClickHouse/ClickHouse/pull/41824) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Improve feedback when replacing partition with different primary key. Fixes [#34798](https://github.com/ClickHouse/ClickHouse/issues/34798). [#41838](https://github.com/ClickHouse/ClickHouse/pull/41838) ([Salvatore](https://github.com/tbsal)).
|
||||||
|
* Replace back `clickhouse su` command with `sudo -u` in start in order to respect limits in `/etc/security/limits.conf`. [#41847](https://github.com/ClickHouse/ClickHouse/pull/41847) ([Eugene Konkov](https://github.com/ekonkov)).
|
||||||
|
* Fix parallel parsing: segmentator now checks max_block_size. [#41852](https://github.com/ClickHouse/ClickHouse/pull/41852) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Don't report TABLE_IS_DROPPED exception in order to skip table in case is was just dropped. [#41908](https://github.com/ClickHouse/ClickHouse/pull/41908) ([AlfVII](https://github.com/AlfVII)).
|
||||||
|
* Improve option enable_extended_results_for_datetime_functions to return results of type DateTime64 for functions toStartOfDay, toStartOfHour, toStartOfFifteenMinutes, toStartOfTenMinutes, toStartOfFiveMinutes, toStartOfMinute and timeSlot. [#41910](https://github.com/ClickHouse/ClickHouse/pull/41910) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
|
* Improve DateTime type inference for text formats. Now it respect setting `date_time_input_format` and doesn't try to infer datetimes from numbers as timestamps. Closes [#41389](https://github.com/ClickHouse/ClickHouse/issues/41389) Closes [#42206](https://github.com/ClickHouse/ClickHouse/issues/42206). [#41912](https://github.com/ClickHouse/ClickHouse/pull/41912) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Remove confusing warning when inserting with `perform_ttl_move_on_insert`=false. [#41980](https://github.com/ClickHouse/ClickHouse/pull/41980) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Allow user to write `countState(*)` similar to `count(*)`. This closes [#9338](https://github.com/ClickHouse/ClickHouse/issues/9338). [#41983](https://github.com/ClickHouse/ClickHouse/pull/41983) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* - Fix rankCorr size overflow. [#42020](https://github.com/ClickHouse/ClickHouse/pull/42020) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Added an option to specify an arbitrary string as an environment name in the Sentry's config for more handy reports. [#42037](https://github.com/ClickHouse/ClickHouse/pull/42037) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Added system table `asynchronous_insert_log `. It contains information about asynchronous inserts (including results of queries in fire-and-forget mode (with `wait_for_async_insert=0`)) for better introspection. [#42040](https://github.com/ClickHouse/ClickHouse/pull/42040) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fix parsing out-of-range Date from CSV:. [#42044](https://github.com/ClickHouse/ClickHouse/pull/42044) ([Andrey Zvonov](https://github.com/zvonand)).
|
||||||
|
* parseDataTimeBestEffort support comma between date and time. Closes [#42038](https://github.com/ClickHouse/ClickHouse/issues/42038). [#42049](https://github.com/ClickHouse/ClickHouse/pull/42049) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Add support for methods lz4, bz2, snappy in 'Accept-Encoding'. [#42071](https://github.com/ClickHouse/ClickHouse/pull/42071) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Various minor fixes for BLAKE3 function. [#42073](https://github.com/ClickHouse/ClickHouse/pull/42073) ([BoloniniD](https://github.com/BoloniniD)).
|
||||||
|
* Improved stale replica recovery process for `ReplicatedMergeTree`. If lost replica have some parts which absent on a healthy replica, but these parts should appear in future according to replication queue of the healthy replica, then lost replica will keep such parts instead of detaching them. [#42134](https://github.com/ClickHouse/ClickHouse/pull/42134) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Support BACKUP to S3 with as-is path/data structure. [#42232](https://github.com/ClickHouse/ClickHouse/pull/42232) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Add a possibility to use Date32 arguments for date_diff function. Fix issue in date_diff function when using DateTime64 arguments with start date before Unix epoch and end date after Unix epoch. [#42308](https://github.com/ClickHouse/ClickHouse/pull/42308) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
|
* When uploading big parts to minio, 'Complete Multipart Upload' can take a long time. Minio sends heartbeats every 10 seconds (see https://github.com/minio/minio/pull/7198). But clickhouse times out earlier, because the default send/receive timeout is [set](https://github.com/ClickHouse/ClickHouse/blob/cc24fcd6d5dfb67f5f66f5483e986bd1010ad9cf/src/IO/S3/PocoHTTPClient.cpp#L123) to 5 seconds. [#42321](https://github.com/ClickHouse/ClickHouse/pull/42321) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Add `S3` as a new type of the destination of backups. [#42333](https://github.com/ClickHouse/ClickHouse/pull/42333) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Fix rarely invalid cast of aggregate state types with complex types such as Decimal. This fixes [#42408](https://github.com/ClickHouse/ClickHouse/issues/42408). [#42417](https://github.com/ClickHouse/ClickHouse/pull/42417) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Support skipping cache completely (both download to cache and reading cached data) in case the requested read range exceeds the threshold defined by cache setting `bypass_cache_threashold`, requires to be enabled with `enable_bypass_cache_with_threshold`). [#42418](https://github.com/ClickHouse/ClickHouse/pull/42418) ([Han Shukai](https://github.com/KinderRiven)).
|
||||||
|
* Merge parts if every part in the range is older than a certain threshold. The threshold can be set by using `min_age_to_force_merge_seconds`. This closes [#35836](https://github.com/ClickHouse/ClickHouse/issues/35836). [#42423](https://github.com/ClickHouse/ClickHouse/pull/42423) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Enabled CompiledExpressionCache in clickhouse-local. [#42477](https://github.com/ClickHouse/ClickHouse/pull/42477) ([AlfVII](https://github.com/AlfVII)).
|
||||||
|
* Remove support for the `{database}` macro from the client's prompt. It was displayed incorrectly if the database was unspecified and it was not updated on `USE` statements. This closes [#25891](https://github.com/ClickHouse/ClickHouse/issues/25891). [#42508](https://github.com/ClickHouse/ClickHouse/pull/42508) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* - Improve the time to recover lost keeper connections. [#42541](https://github.com/ClickHouse/ClickHouse/pull/42541) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Allow to use Date32 arguments for dateName function. [#42554](https://github.com/ClickHouse/ClickHouse/pull/42554) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
|
|
||||||
|
#### Bug Fix
|
||||||
|
* Now filters with NULL literals will be used during index analysis. This closes https://github.com/ClickHouse/ClickHouse/pull/41814 [#34063](https://github.com/ClickHouse/ClickHouse/issues/34063). [#41842](https://github.com/ClickHouse/ClickHouse/pull/41842) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* - Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Fix using subqueries in row policy filters. This PR fixes [#32463](https://github.com/ClickHouse/ClickHouse/issues/32463). [#42562](https://github.com/ClickHouse/ClickHouse/pull/42562) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
* Added support of WHERE clause generation to AST Fuzzer and possibility to add or remove ORDER BY and WHERE clause. [#38519](https://github.com/ClickHouse/ClickHouse/pull/38519) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Aarch64 binaries now require at least ARMv8.2, released in 2016. Most notably, this enables use of ARM LSE, i.e. native atomic operations. Also, CMake build option "NO_ARMV81_OR_HIGHER" has been added to allow compilation of binaries for older ARMv8.0 hardware, e.g. Raspberry Pi 4. [#41610](https://github.com/ClickHouse/ClickHouse/pull/41610) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* After updating runners to 22.04 cgroups stopped to work in privileged mode, here's the issue https://github.com/moby/moby/issues/42275#issuecomment-1115055846. [#41857](https://github.com/ClickHouse/ClickHouse/pull/41857) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Allow building ClickHouse with Musl (small changes after it was already supported but broken). [#41987](https://github.com/ClickHouse/ClickHouse/pull/41987) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* - Add the `$CLICKHOUSE_CRONFILE` file checking to avoid running the `sed` command to get the file not found error. [#42081](https://github.com/ClickHouse/ClickHouse/pull/42081) ([Chun-Sheng, Li](https://github.com/peter279k)).
|
||||||
|
* Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
|
||||||
|
* Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix power8 support. [#42462](https://github.com/ClickHouse/ClickHouse/pull/42462) ([Boris Kuschel](https://github.com/bkuschel)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
||||||
|
|
||||||
|
* Several fixes for DiskWeb. [#41652](https://github.com/ClickHouse/ClickHouse/pull/41652) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fixes issue when docker run will fail if "https_port" is not present in config. [#41693](https://github.com/ClickHouse/ClickHouse/pull/41693) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Mutations were not cancelled properly on server shutdown or `SYSTEM STOP MERGES` query and cancellation might take long time, it's fixed. [#41699](https://github.com/ClickHouse/ClickHouse/pull/41699) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix wrong result of queries with `ORDER BY` or `GROUP BY` by columns from prefix of sorting key, wrapped into monotonic functions, with enable "read in order" optimization (settings `optimize_read_in_order` and `optimize_aggregation_in_order`). [#41701](https://github.com/ClickHouse/ClickHouse/pull/41701) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fix possible crash in `SELECT` from `Merge` table with enabled `optimize_monotonous_functions_in_order_by` setting. Fixes [#41269](https://github.com/ClickHouse/ClickHouse/issues/41269). [#41740](https://github.com/ClickHouse/ClickHouse/pull/41740) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fixed "Part ... intersects part ..." error that might happen in extremely rare cases if replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Don't allow to create or alter merge tree tables with virtual column name _row_exists, which is reserved for lightweight delete. Fixed [#41716](https://github.com/ClickHouse/ClickHouse/issues/41716). [#41763](https://github.com/ClickHouse/ClickHouse/pull/41763) ([Jianmei Zhang](https://github.com/zhangjmruc)).
|
||||||
|
* Fix a bug that CORS headers are missing in some HTTP responses. [#41792](https://github.com/ClickHouse/ClickHouse/pull/41792) ([Frank Chen](https://github.com/FrankChen021)).
|
||||||
|
* 22.9 might fail to startup `ReplicatedMergeTree` table if that table was created by 20.3 or older version and was never altered, it's fixed. Fixes [#41742](https://github.com/ClickHouse/ClickHouse/issues/41742). [#41796](https://github.com/ClickHouse/ClickHouse/pull/41796) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* When the batch sending fails for some reason, it cannot be automatically recovered, and if it is not processed in time, it will lead to accumulation, and the printed error message will become longer and longer, which will cause the http thread to block. [#41813](https://github.com/ClickHouse/ClickHouse/pull/41813) ([zhongyuankai](https://github.com/zhongyuankai)).
|
||||||
|
* Fix compact parts with compressed marks setting. Fixes [#41783](https://github.com/ClickHouse/ClickHouse/issues/41783) and [#41746](https://github.com/ClickHouse/ClickHouse/issues/41746). [#41823](https://github.com/ClickHouse/ClickHouse/pull/41823) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Old versions of Replicated database don't have a special marker in [Zoo]Keeper. We need to check only whether the node contains some obscure data instead of a special mark. [#41875](https://github.com/ClickHouse/ClickHouse/pull/41875) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fix possible exception in fs cache. [#41884](https://github.com/ClickHouse/ClickHouse/pull/41884) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix use_environment_credentials for s3 table function. [#41970](https://github.com/ClickHouse/ClickHouse/pull/41970) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fixed "Directory already exists and is not empty" error on detaching broken part that might prevent `ReplicatedMergeTree` table from starting replication. Fixes [#40957](https://github.com/ClickHouse/ClickHouse/issues/40957). [#41981](https://github.com/ClickHouse/ClickHouse/pull/41981) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* toDateTime64() now returns the same output with negative integer and float arguments. [#42025](https://github.com/ClickHouse/ClickHouse/pull/42025) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix write into AzureBlobStorage. Partially closes [#41754](https://github.com/ClickHouse/ClickHouse/issues/41754). [#42034](https://github.com/ClickHouse/ClickHouse/pull/42034) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix the bzip2 decoding issue for specific bzip2 files. [#42046](https://github.com/ClickHouse/ClickHouse/pull/42046) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Fix SQL function "toLastDayOfMonth()" with setting "enable_extended_results_for_datetime_functions = 1" at the beginning of the extended range (January 1900). Fix SQL function "toRelativeWeekNum()" with setting "enable_extended_results_for_datetime_functions = 1" at the end of the extended range (December 2299). Improve the performance of SQL functions "toISOYear()", "toFirstDayNumOfISOYearIndex()" and "toYearWeekOfNewyearMode()" by avoiding unnecessary index arithmetics. [#42084](https://github.com/ClickHouse/ClickHouse/pull/42084) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
|
* The maximum size of fetches for each table accidentally was set to 8 while the pool size could be bigger. Now the maximum size of fetches for table is equal to the pool size. [#42090](https://github.com/ClickHouse/ClickHouse/pull/42090) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* A table might be shut down and a dictionary might be detached before checking if can be dropped without breaking dependencies between table, it's fixed. Fixes [#41982](https://github.com/ClickHouse/ClickHouse/issues/41982). [#42106](https://github.com/ClickHouse/ClickHouse/pull/42106) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix bad inefficiency of `remote_filesystem_read_method=read` with filesystem cache. Closes [#42125](https://github.com/ClickHouse/ClickHouse/issues/42125). [#42129](https://github.com/ClickHouse/ClickHouse/pull/42129) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix possible timeout exception for distributed queries with use_hedged_requests=0. [#42130](https://github.com/ClickHouse/ClickHouse/pull/42130) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fixed a minor bug inside function `runningDifference` in case of using it with `Date32` type. Previously `Date` was used and it may cause some logical errors like `Bad cast from type DB::ColumnVector<int> to DB::ColumnVector<unsigned short>'`. [#42143](https://github.com/ClickHouse/ClickHouse/pull/42143) ([Alfred Xu](https://github.com/sperlingxx)).
|
||||||
|
* Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* DISTINCT in order fails with LOGICAL_ERROR if first column in sorting key contains function. [#42186](https://github.com/ClickHouse/ClickHouse/pull/42186) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix read from buffer with read in order desc. [#42236](https://github.com/ClickHouse/ClickHouse/pull/42236) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
* Fix a bug which prevents ClickHouse to start when background_pool_size setting is set on default profile but background_merges_mutations_concurrency_ratio is not. [#42315](https://github.com/ClickHouse/ClickHouse/pull/42315) ([nvartolomei](https://github.com/nvartolomei)).
|
||||||
|
* `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Setting `additional_table_filters` were not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix a data race in query finish/cancel. This closes [#42346](https://github.com/ClickHouse/ClickHouse/issues/42346). [#42362](https://github.com/ClickHouse/ClickHouse/pull/42362) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* This reverts [#40217](https://github.com/ClickHouse/ClickHouse/issues/40217) which introduced a regression in date/time functions. [#42367](https://github.com/ClickHouse/ClickHouse/pull/42367) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix assert cast in join on falsy condition, Close [#42380](https://github.com/ClickHouse/ClickHouse/issues/42380). [#42407](https://github.com/ClickHouse/ClickHouse/pull/42407) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* `AggregateFunctionQuantile` now correctly works with UInt128 columns. Previously, the quantile state interpreted `UInt128` columns as `Int128` which could have led to incorrect results. [#42473](https://github.com/ClickHouse/ClickHouse/pull/42473) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix bad_assert during INSERT into Annoy indexes over non-Float32 columns. [#42485](https://github.com/ClickHouse/ClickHouse/pull/42485) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix function `arrayElement` with type `Map` with `Nullable` values and `Nullable` index. [#42623](https://github.com/ClickHouse/ClickHouse/pull/42623) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)
|
||||||
|
|
||||||
|
* Fix unexpected table loading error when partition key contains alias function names during server upgrade. [#36379](https://github.com/ClickHouse/ClickHouse/pull/36379) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
|
||||||
|
#### Build Improvement
|
||||||
|
|
||||||
|
* Fixed SipHash Endian issue for s390x platform. [#41372](https://github.com/ClickHouse/ClickHouse/pull/41372) ([Harry Lee](https://github.com/HarryLeeIBM)).
|
||||||
|
* Enable lib base64 for ppc64le platform. [#41974](https://github.com/ClickHouse/ClickHouse/pull/41974) ([Suzy Wang](https://github.com/SuzyWangIBMer)).
|
||||||
|
* Fixed Endian issue in T64 compression codec on s390x. [#42314](https://github.com/ClickHouse/ClickHouse/pull/42314) ([Harry Lee](https://github.com/HarryLeeIBM)).
|
||||||
|
|
||||||
|
#### NO CL ENTRY
|
||||||
|
|
||||||
|
* NO CL ENTRY: 'Revert "Disable parallel s3 multipart upload for part moves."'. [#41681](https://github.com/ClickHouse/ClickHouse/pull/41681) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* NO CL ENTRY: 'Revert "Attempt to fix abort from parallel parsing"'. [#42545](https://github.com/ClickHouse/ClickHouse/pull/42545) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* NO CL ENTRY: 'Revert "Low cardinality cases moved to the function for its corresponding type"'. [#42633](https://github.com/ClickHouse/ClickHouse/pull/42633) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Test for ignore function in PARTITION KEY [#39875](https://github.com/ClickHouse/ClickHouse/pull/39875) ([UnamedRus](https://github.com/UnamedRus)).
|
||||||
|
* Add fuzzer for table definitions [#40096](https://github.com/ClickHouse/ClickHouse/pull/40096) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Add missing tests for legacy geobase [#40684](https://github.com/ClickHouse/ClickHouse/pull/40684) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Remove obsolete comment from the config.xml [#41518](https://github.com/ClickHouse/ClickHouse/pull/41518) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Resurrect parallel distributed insert select with s3Cluster [#41535](https://github.com/ClickHouse/ClickHouse/pull/41535) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Update runners to a recent version to install on 22.04 [#41556](https://github.com/ClickHouse/ClickHouse/pull/41556) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Refactor wiping sensitive information from logs. [#41562](https://github.com/ClickHouse/ClickHouse/pull/41562) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Better S3 logs [#41587](https://github.com/ClickHouse/ClickHouse/pull/41587) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Fix typos in JSON formats after [#40910](https://github.com/ClickHouse/ClickHouse/issues/40910) [#41614](https://github.com/ClickHouse/ClickHouse/pull/41614) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix drop for KeeperMap [#41616](https://github.com/ClickHouse/ClickHouse/pull/41616) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* increase default max_suspicious_broken_parts to 100 [#41619](https://github.com/ClickHouse/ClickHouse/pull/41619) ([Denny Crane](https://github.com/den-crane)).
|
||||||
|
* Release AWS SDK log level + replace one exception [#41649](https://github.com/ClickHouse/ClickHouse/pull/41649) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix a destruction order for views ThreadStatus [#41650](https://github.com/ClickHouse/ClickHouse/pull/41650) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Add very explicit logging on disk choice for fetch [#41653](https://github.com/ClickHouse/ClickHouse/pull/41653) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix race between ~BackgroundSchedulePool and ~DNSCacheUpdater [#41654](https://github.com/ClickHouse/ClickHouse/pull/41654) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Add changelog for 22.9 [#41668](https://github.com/ClickHouse/ClickHouse/pull/41668) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Update version after release [#41670](https://github.com/ClickHouse/ClickHouse/pull/41670) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix error message [#41680](https://github.com/ClickHouse/ClickHouse/pull/41680) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Add test for setting output_format_json_validate_utf8 [#41691](https://github.com/ClickHouse/ClickHouse/pull/41691) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Resolve findings from clang-tidy [#41702](https://github.com/ClickHouse/ClickHouse/pull/41702) ([ltrk2](https://github.com/ltrk2)).
|
||||||
|
* Ignore Keeper errors from ReplicatedMergeTreeAttachThread in stress tests [#41717](https://github.com/ClickHouse/ClickHouse/pull/41717) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Collect logs in Stress test using clickhouse-local [#41721](https://github.com/ClickHouse/ClickHouse/pull/41721) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Disable flaky `test_merge_tree_azure_blob_storage` [#41722](https://github.com/ClickHouse/ClickHouse/pull/41722) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.9.2.7-stable [#41724](https://github.com/ClickHouse/ClickHouse/pull/41724) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Fix part removal retries [#41728](https://github.com/ClickHouse/ClickHouse/pull/41728) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Try fix azure tests [#41731](https://github.com/ClickHouse/ClickHouse/pull/41731) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix test build [#41732](https://github.com/ClickHouse/ClickHouse/pull/41732) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Change logging levels in cache [#41733](https://github.com/ClickHouse/ClickHouse/pull/41733) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Revert of "Revert the revert of "ColumnVector: optimize filter with AVX512 VBMI2 compress store" [#40033](https://github.com/ClickHouse/ClickHouse/issues/40033)" [#41752](https://github.com/ClickHouse/ClickHouse/pull/41752) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix SET query parameters formatting [#41755](https://github.com/ClickHouse/ClickHouse/pull/41755) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Support to run testcases on macOS [#41760](https://github.com/ClickHouse/ClickHouse/pull/41760) ([Frank Chen](https://github.com/FrankChen021)).
|
||||||
|
* Bump LLVM from 12 to 13 [#41762](https://github.com/ClickHouse/ClickHouse/pull/41762) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* ColumnVector: re-enable AVX512_VBMI/AVX512_VBMI2 optimized filter and index [#41765](https://github.com/ClickHouse/ClickHouse/pull/41765) ([Guo Wangyang](https://github.com/guowangy)).
|
||||||
|
* Update 02354_annoy.sql [#41767](https://github.com/ClickHouse/ClickHouse/pull/41767) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix the typo preventing building latest images [#41769](https://github.com/ClickHouse/ClickHouse/pull/41769) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Make automatic download script choose between ARMv8.0 or ARMv8.2 builds [#41775](https://github.com/ClickHouse/ClickHouse/pull/41775) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix tests for docker-ci [#41777](https://github.com/ClickHouse/ClickHouse/pull/41777) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Possible fix for KeeperMap drop [#41784](https://github.com/ClickHouse/ClickHouse/pull/41784) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Fix drop of completely dropped table [#41789](https://github.com/ClickHouse/ClickHouse/pull/41789) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Log git hash during startup [#41790](https://github.com/ClickHouse/ClickHouse/pull/41790) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Revert "ColumnVector: optimize UInt8 index with AVX512VBMI ([#41247](https://github.com/ClickHouse/ClickHouse/issues/41247))" [#41797](https://github.com/ClickHouse/ClickHouse/pull/41797) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Small fix in dashboard [#41798](https://github.com/ClickHouse/ClickHouse/pull/41798) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Keep the most important log in stress tests [#41821](https://github.com/ClickHouse/ClickHouse/pull/41821) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Use copy for some operations instead of hardlinks [#41832](https://github.com/ClickHouse/ClickHouse/pull/41832) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Remove unused variable in registerStorageMergeTree.cpp [#41839](https://github.com/ClickHouse/ClickHouse/pull/41839) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix Jepsen [#41845](https://github.com/ClickHouse/ClickHouse/pull/41845) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Increase `request_timeout_ms` for s3 tests in CI [#41853](https://github.com/ClickHouse/ClickHouse/pull/41853) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* tests: fix debug symbols (and possible crashes) for backward compatibility check [#41854](https://github.com/ClickHouse/ClickHouse/pull/41854) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Remove two redundant lines [#41856](https://github.com/ClickHouse/ClickHouse/pull/41856) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Infer Object type only when allow_experimental_object_type is enabled [#41858](https://github.com/ClickHouse/ClickHouse/pull/41858) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Add default UNION/EXCEPT/INTERSECT to the echo query text [#41862](https://github.com/ClickHouse/ClickHouse/pull/41862) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Consolidate CMake-generated config headers [#41873](https://github.com/ClickHouse/ClickHouse/pull/41873) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix 02267_file_globs_schema_inference.sql flakiness [#41877](https://github.com/ClickHouse/ClickHouse/pull/41877) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Docs: Remove obsolete modelEvaluate() mention [#41878](https://github.com/ClickHouse/ClickHouse/pull/41878) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Better exception message for duplicate column names in schema inference [#41885](https://github.com/ClickHouse/ClickHouse/pull/41885) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Docs: Reference external papers as DOIs [#41886](https://github.com/ClickHouse/ClickHouse/pull/41886) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Make LDAPR a prerequisite for downloading the ARMv8.2 build [#41897](https://github.com/ClickHouse/ClickHouse/pull/41897) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Another sync replicas in test_recovery_replica [#41898](https://github.com/ClickHouse/ClickHouse/pull/41898) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* remove unused code [#41921](https://github.com/ClickHouse/ClickHouse/pull/41921) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Move all queries for MV creation to the end of queue during recovering [#41932](https://github.com/ClickHouse/ClickHouse/pull/41932) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fix broken test_disks_app_func [#41933](https://github.com/ClickHouse/ClickHouse/pull/41933) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Temporarily disable ThreadFuzzer with TSan [#41943](https://github.com/ClickHouse/ClickHouse/pull/41943) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Enable some disabled S3 tests [#41945](https://github.com/ClickHouse/ClickHouse/pull/41945) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* QOL log improvements [#41947](https://github.com/ClickHouse/ClickHouse/pull/41947) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fix non-deterministic test results [#41948](https://github.com/ClickHouse/ClickHouse/pull/41948) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Earlier throw exception in PullingAsyncPipelineExecutor. [#41949](https://github.com/ClickHouse/ClickHouse/pull/41949) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fix linker error [#41950](https://github.com/ClickHouse/ClickHouse/pull/41950) ([ltrk2](https://github.com/ltrk2)).
|
||||||
|
* Bump LLVM from 13 to 14 [#41951](https://github.com/ClickHouse/ClickHouse/pull/41951) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.3.13.80-lts [#41953](https://github.com/ClickHouse/ClickHouse/pull/41953) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.7.6.74-stable [#41954](https://github.com/ClickHouse/ClickHouse/pull/41954) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.8.6.71-lts [#41955](https://github.com/ClickHouse/ClickHouse/pull/41955) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.9.3.18-stable [#41956](https://github.com/ClickHouse/ClickHouse/pull/41956) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Rename max_temp_data_on_disk -> max_temporary_data_on_disk [#41984](https://github.com/ClickHouse/ClickHouse/pull/41984) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Add more checkStackSize calls [#41991](https://github.com/ClickHouse/ClickHouse/pull/41991) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix test 02403_big_http_chunk_size [#41996](https://github.com/ClickHouse/ClickHouse/pull/41996) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* More sane behavior of part number thresholds override in query level settings [#42001](https://github.com/ClickHouse/ClickHouse/pull/42001) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Remove useless code [#42004](https://github.com/ClickHouse/ClickHouse/pull/42004) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Refactoring: Uninline some error handling methods [#42010](https://github.com/ClickHouse/ClickHouse/pull/42010) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Fix warning that ENABLE_REPLXX is unused [#42013](https://github.com/ClickHouse/ClickHouse/pull/42013) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Drop leftovers of libexecinfo [#42014](https://github.com/ClickHouse/ClickHouse/pull/42014) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* More detailed exception message [#42022](https://github.com/ClickHouse/ClickHouse/pull/42022) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Build against an LLVM version which has clang[-extra-tools], lldb and lld removed [#42023](https://github.com/ClickHouse/ClickHouse/pull/42023) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Add log message and lower the retry timeout in MergeTreeRestartingThread [#42026](https://github.com/ClickHouse/ClickHouse/pull/42026) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Update amqp-cpp [#42031](https://github.com/ClickHouse/ClickHouse/pull/42031) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix No such key during table drop [#42036](https://github.com/ClickHouse/ClickHouse/pull/42036) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Temporarily disable too aggressive tests [#42050](https://github.com/ClickHouse/ClickHouse/pull/42050) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix style check [#42055](https://github.com/ClickHouse/ClickHouse/pull/42055) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Function name normalization fix functions header [#42063](https://github.com/ClickHouse/ClickHouse/pull/42063) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* remove unused virtual keyword [#42065](https://github.com/ClickHouse/ClickHouse/pull/42065) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Fix crash in `SummingMergeTree` with `LowCardinality` [#42066](https://github.com/ClickHouse/ClickHouse/pull/42066) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fix drop of completely dropped table [#42067](https://github.com/ClickHouse/ClickHouse/pull/42067) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fix assertion in bloom filter index [#42072](https://github.com/ClickHouse/ClickHouse/pull/42072) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Ignore core.autocrlf for tests references [#42076](https://github.com/ClickHouse/ClickHouse/pull/42076) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix progress for INSERT SELECT [#42078](https://github.com/ClickHouse/ClickHouse/pull/42078) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Avoid adding extra new line after using fuzzy history search [#42080](https://github.com/ClickHouse/ClickHouse/pull/42080) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Add `at` to runner AMI, bump gh runner version [#42082](https://github.com/ClickHouse/ClickHouse/pull/42082) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Use send_metadata instead of send_object_metadata [#42085](https://github.com/ClickHouse/ClickHouse/pull/42085) ([Elena Torró](https://github.com/elenatorro)).
|
||||||
|
* Docs: Preparations to remove misc statements page [#42086](https://github.com/ClickHouse/ClickHouse/pull/42086) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Followup for TemporaryDataOnDisk [#42103](https://github.com/ClickHouse/ClickHouse/pull/42103) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Disable 02122_join_group_by_timeout for debug [#42104](https://github.com/ClickHouse/ClickHouse/pull/42104) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Update version_date.tsv and changelogs after v22.6.9.11-stable [#42114](https://github.com/ClickHouse/ClickHouse/pull/42114) ([robot-clickhouse](https://github.com/robot-clickhouse)).
|
||||||
|
* JIT compilation migration to LLVM 15 [#42123](https://github.com/ClickHouse/ClickHouse/pull/42123) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Fix build without TSA [#42128](https://github.com/ClickHouse/ClickHouse/pull/42128) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Update codespell-ignore-words.list [#42132](https://github.com/ClickHouse/ClickHouse/pull/42132) ([Dan Roscigno](https://github.com/DanRoscigno)).
|
||||||
|
* Add null pointer checks [#42135](https://github.com/ClickHouse/ClickHouse/pull/42135) ([ltrk2](https://github.com/ltrk2)).
|
||||||
|
* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Follow up for [#42129](https://github.com/ClickHouse/ClickHouse/issues/42129) [#42144](https://github.com/ClickHouse/ClickHouse/pull/42144) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Fix checking parent for old-format parts [#42147](https://github.com/ClickHouse/ClickHouse/pull/42147) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Revert "Resurrect parallel distributed insert select with s3Cluster" [#42150](https://github.com/ClickHouse/ClickHouse/pull/42150) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Docs: Add "TABLE" to CHECK/DESCRIBE statements in sidebar [#42152](https://github.com/ClickHouse/ClickHouse/pull/42152) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Add logging during merge tree startup [#42163](https://github.com/ClickHouse/ClickHouse/pull/42163) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Abort instead of `__builtin_unreachable` in debug builds [#42168](https://github.com/ClickHouse/ClickHouse/pull/42168) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* [RFC] Enable -Wshorten-64-to-32 [#42190](https://github.com/ClickHouse/ClickHouse/pull/42190) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix dialect setting description [#42196](https://github.com/ClickHouse/ClickHouse/pull/42196) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Add a test for [#658](https://github.com/ClickHouse/ClickHouse/issues/658) [#42197](https://github.com/ClickHouse/ClickHouse/pull/42197) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* use alias for MergeMutateSelectedEntry share ptr [#42211](https://github.com/ClickHouse/ClickHouse/pull/42211) ([Tian Xinhui](https://github.com/xinhuitian)).
|
||||||
|
* Fix LLVM build [#42216](https://github.com/ClickHouse/ClickHouse/pull/42216) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Exclude comments from style-check defined extern [#42217](https://github.com/ClickHouse/ClickHouse/pull/42217) ([Vladimir C](https://github.com/vdimir)).
|
||||||
|
* Update safeExit.cpp [#42220](https://github.com/ClickHouse/ClickHouse/pull/42220) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Disable concurrent parts removal [#42222](https://github.com/ClickHouse/ClickHouse/pull/42222) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Fail fast on empty URL in HDFS [#42223](https://github.com/ClickHouse/ClickHouse/pull/42223) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Add a test for [#2389](https://github.com/ClickHouse/ClickHouse/issues/2389) [#42235](https://github.com/ClickHouse/ClickHouse/pull/42235) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Use MultiRead where possible [#42243](https://github.com/ClickHouse/ClickHouse/pull/42243) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Minor cleanups of LLVM integration [#42249](https://github.com/ClickHouse/ClickHouse/pull/42249) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* remove useless code [#42253](https://github.com/ClickHouse/ClickHouse/pull/42253) ([flynn](https://github.com/ucasfl)).
|
||||||
|
* Early return of corner cases in selectPartsToMutate function [#42254](https://github.com/ClickHouse/ClickHouse/pull/42254) ([Tian Xinhui](https://github.com/xinhuitian)).
|
||||||
|
* Refactor the implementation of user-defined functions [#42263](https://github.com/ClickHouse/ClickHouse/pull/42263) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* assert unused value in test_replicated_merge_tree_compatibility [#42266](https://github.com/ClickHouse/ClickHouse/pull/42266) ([nvartolomei](https://github.com/nvartolomei)).
|
||||||
|
* Fix Date Interval add/minus over DataTypeDate32 [#42279](https://github.com/ClickHouse/ClickHouse/pull/42279) ([Alfred Xu](https://github.com/sperlingxx)).
|
||||||
|
* Fix log-level in `clickhouse-disks` [#42302](https://github.com/ClickHouse/ClickHouse/pull/42302) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Remove forgotten debug logging [#42313](https://github.com/ClickHouse/ClickHouse/pull/42313) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix another trash in zero-copy replication [#42317](https://github.com/ClickHouse/ClickHouse/pull/42317) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* go update for diagnostics tool [#42325](https://github.com/ClickHouse/ClickHouse/pull/42325) ([Dale McDiarmid](https://github.com/gingerwizard)).
|
||||||
|
* Better logging for asynchronous inserts [#42345](https://github.com/ClickHouse/ClickHouse/pull/42345) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Use nfpm packager for archlinux packages [#42349](https://github.com/ClickHouse/ClickHouse/pull/42349) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Bump llvm/clang to 15.0.2 [#42351](https://github.com/ClickHouse/ClickHouse/pull/42351) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Make getResource() independent from the order of the sections [#42353](https://github.com/ClickHouse/ClickHouse/pull/42353) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Smaller threshold for multipart upload part size increase [#42392](https://github.com/ClickHouse/ClickHouse/pull/42392) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Better error message for unsupported delimiters in custom formats [#42406](https://github.com/ClickHouse/ClickHouse/pull/42406) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Fix formatting of `ALTER FREEZE` [#42409](https://github.com/ClickHouse/ClickHouse/pull/42409) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Replace table name in ast fuzzer more often [#42413](https://github.com/ClickHouse/ClickHouse/pull/42413) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Add *-15 tools to cmake.tools for GCC build [#42430](https://github.com/ClickHouse/ClickHouse/pull/42430) ([Ilya Yatsishin](https://github.com/qoega)).
|
||||||
|
* Deactivate tasks in ReplicatedMergeTree until startup [#42441](https://github.com/ClickHouse/ClickHouse/pull/42441) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Revert "Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787)" [#42442](https://github.com/ClickHouse/ClickHouse/pull/42442) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Update woboq_codebrowser location [#42448](https://github.com/ClickHouse/ClickHouse/pull/42448) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* add mdx and jsx to list of doc files [#42454](https://github.com/ClickHouse/ClickHouse/pull/42454) ([Dan Roscigno](https://github.com/DanRoscigno)).
|
||||||
|
* Remove code browser docs [#42455](https://github.com/ClickHouse/ClickHouse/pull/42455) ([Dan Roscigno](https://github.com/DanRoscigno)).
|
||||||
|
* Better workaround for emitting .debug_aranges section [#42457](https://github.com/ClickHouse/ClickHouse/pull/42457) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix flaky test [#42459](https://github.com/ClickHouse/ClickHouse/pull/42459) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix UBSan report in Julian Day functions [#42464](https://github.com/ClickHouse/ClickHouse/pull/42464) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* rename filesystem_query_cache [#42472](https://github.com/ClickHouse/ClickHouse/pull/42472) ([Han Shukai](https://github.com/KinderRiven)).
|
||||||
|
* Add convenience typedefs for Date/Date32/DateTime/DateTime64 columns [#42476](https://github.com/ClickHouse/ClickHouse/pull/42476) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||||
|
* Add error "Destination table is myself" to exception list in BC check [#42479](https://github.com/ClickHouse/ClickHouse/pull/42479) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Get current clickhouse version without sending query in BC check [#42483](https://github.com/ClickHouse/ClickHouse/pull/42483) ([Kruglov Pavel](https://github.com/Avogar)).
|
||||||
|
* Fix logical error from welchTTest [#42487](https://github.com/ClickHouse/ClickHouse/pull/42487) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Attempt to fix abort from parallel parsing [#42496](https://github.com/ClickHouse/ClickHouse/pull/42496) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Increase threshold for using physical cores for `max_threads` [#42503](https://github.com/ClickHouse/ClickHouse/pull/42503) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Add a test for [#16827](https://github.com/ClickHouse/ClickHouse/issues/16827) [#42511](https://github.com/ClickHouse/ClickHouse/pull/42511) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Add a test for [#13653](https://github.com/ClickHouse/ClickHouse/issues/13653) [#42512](https://github.com/ClickHouse/ClickHouse/pull/42512) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix aliases [#42514](https://github.com/ClickHouse/ClickHouse/pull/42514) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* tests: fix 00705_drop_create_merge_tree flakiness [#42522](https://github.com/ClickHouse/ClickHouse/pull/42522) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix sanitizer reports in integration tests [#42529](https://github.com/ClickHouse/ClickHouse/pull/42529) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix `KeeperTCPHandler` data race [#42532](https://github.com/ClickHouse/ClickHouse/pull/42532) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Disable `test_storage_nats`, because it's permanently broken [#42535](https://github.com/ClickHouse/ClickHouse/pull/42535) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Better logs in clickhouse-disks [#42549](https://github.com/ClickHouse/ClickHouse/pull/42549) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* add lib_fuzzer and lib_fuzzer_no_main to llvm-project build [#42550](https://github.com/ClickHouse/ClickHouse/pull/42550) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Some polishing: replicated merge tree [#42560](https://github.com/ClickHouse/ClickHouse/pull/42560) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Temporarily disable flaky `test_replicated_merge_tree_hdfs_zero_copy` [#42563](https://github.com/ClickHouse/ClickHouse/pull/42563) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Adapt internal data structures to 512-bit era [#42564](https://github.com/ClickHouse/ClickHouse/pull/42564) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Fix strange code in date monotonicity [#42574](https://github.com/ClickHouse/ClickHouse/pull/42574) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Clear thread::id when ThreadFromGlobalPool exits. [#42577](https://github.com/ClickHouse/ClickHouse/pull/42577) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* ci/stress: fix memory limits overrides for hung check [#42585](https://github.com/ClickHouse/ClickHouse/pull/42585) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* tests: avoid model overlap for obfuscator [#42586](https://github.com/ClickHouse/ClickHouse/pull/42586) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fix possible segfault in expression parser [#42598](https://github.com/ClickHouse/ClickHouse/pull/42598) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
* Fix incorrect trace log line on dict reload [#42609](https://github.com/ClickHouse/ClickHouse/pull/42609) ([filimonov](https://github.com/filimonov)).
|
||||||
|
* Fix flaky 02458_datediff_date32 test [#42611](https://github.com/ClickHouse/ClickHouse/pull/42611) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
|
* Revert revert 41268 disable s3 parallel write for part moves to disk s3 [#42617](https://github.com/ClickHouse/ClickHouse/pull/42617) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Try to fix data race on zookeeper vs DDLWorker at server shutdown. [#42620](https://github.com/ClickHouse/ClickHouse/pull/42620) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Add a template for installation issues [#42626](https://github.com/ClickHouse/ClickHouse/pull/42626) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix typo in cmake code related to fuzzing [#42627](https://github.com/ClickHouse/ClickHouse/pull/42627) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fix build [#42635](https://github.com/ClickHouse/ClickHouse/pull/42635) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Add .rgignore for test data [#42639](https://github.com/ClickHouse/ClickHouse/pull/42639) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fix flaky 02457_datediff_via_unix_epoch test [#42655](https://github.com/ClickHouse/ClickHouse/pull/42655) ([Roman Vasin](https://github.com/rvasin)).
|
||||||
|
|
29
docs/changelogs/v22.7.7.24-stable.md
Normal file
29
docs/changelogs/v22.7.7.24-stable.md
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2022
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2022 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v22.7.7.24-stable (02ad1f979a8) FIXME as compared to v22.7.6.74-stable (c00ffb3c11a)
|
||||||
|
|
||||||
|
#### Bug Fix
|
||||||
|
* Backported in [#42433](https://github.com/ClickHouse/ClickHouse/issues/42433): Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
* Backported in [#42329](https://github.com/ClickHouse/ClickHouse/issues/42329): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
|
||||||
|
* Backported in [#42359](https://github.com/ClickHouse/ClickHouse/issues/42359): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
||||||
|
|
||||||
|
* Backported in [#42268](https://github.com/ClickHouse/ClickHouse/issues/42268): Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Backported in [#42299](https://github.com/ClickHouse/ClickHouse/issues/42299): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Backported in [#42386](https://github.com/ClickHouse/ClickHouse/issues/42386): `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#42498](https://github.com/ClickHouse/ClickHouse/issues/42498): Setting `additional_table_filters` were not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#42593](https://github.com/ClickHouse/ClickHouse/issues/42593): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
|
37
docs/changelogs/v22.8.7.34-lts.md
Normal file
37
docs/changelogs/v22.8.7.34-lts.md
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2022
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2022 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v22.8.7.34-lts (3c38e5e8ab9) FIXME as compared to v22.8.6.71-lts (7bf38a43e30)
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* Backported in [#42096](https://github.com/ClickHouse/ClickHouse/issues/42096): Replace back `clickhouse su` command with `sudo -u` in start in order to respect limits in `/etc/security/limits.conf`. [#41847](https://github.com/ClickHouse/ClickHouse/pull/41847) ([Eugene Konkov](https://github.com/ekonkov)).
|
||||||
|
|
||||||
|
#### Bug Fix
|
||||||
|
* Backported in [#42434](https://github.com/ClickHouse/ClickHouse/issues/42434): Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
* Backported in [#42296](https://github.com/ClickHouse/ClickHouse/issues/42296): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
|
||||||
|
* Backported in [#42360](https://github.com/ClickHouse/ClickHouse/issues/42360): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
||||||
|
|
||||||
|
* Backported in [#42489](https://github.com/ClickHouse/ClickHouse/issues/42489): Removed skipping of mutations in unaffected partitions of `MergeTree` tables, because this feature never worked correctly and might cause resurrection of finished mutations. [#40589](https://github.com/ClickHouse/ClickHouse/pull/40589) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Backported in [#42121](https://github.com/ClickHouse/ClickHouse/issues/42121): Fixed "Part ... intersects part ..." error that might happen in extremely rare cases if replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Prevent crash when passing wrong aggregation states to groupBitmap*. [#41972](https://github.com/ClickHouse/ClickHouse/pull/41972) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Fix read bytes/rows in X-ClickHouse-Summary with materialized views. [#41973](https://github.com/ClickHouse/ClickHouse/pull/41973) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#42269](https://github.com/ClickHouse/ClickHouse/issues/42269): Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Backported in [#42300](https://github.com/ClickHouse/ClickHouse/issues/42300): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Backported in [#42387](https://github.com/ClickHouse/ClickHouse/issues/42387): `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#42499](https://github.com/ClickHouse/ClickHouse/issues/42499): Setting `additional_table_filters` were not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#42571](https://github.com/ClickHouse/ClickHouse/issues/42571): Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Backported in [#42594](https://github.com/ClickHouse/ClickHouse/issues/42594): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
|
13
docs/changelogs/v22.8.8.3-lts.md
Normal file
13
docs/changelogs/v22.8.8.3-lts.md
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2022
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2022 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v22.8.8.3-lts (ac5a6cababc) FIXME as compared to v22.8.7.34-lts (3c38e5e8ab9)
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
||||||
|
|
||||||
|
* Backported in [#42677](https://github.com/ClickHouse/ClickHouse/issues/42677): keeper-fix: fix race in accessing logs while snapshot is being installed. [#40627](https://github.com/ClickHouse/ClickHouse/pull/40627) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
|
33
docs/changelogs/v22.9.4.32-stable.md
Normal file
33
docs/changelogs/v22.9.4.32-stable.md
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2022
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2022 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v22.9.4.32-stable (3db8bcf1a70) FIXME as compared to v22.9.3.18-stable (0cb4b15d2fa)
|
||||||
|
|
||||||
|
#### Bug Fix
|
||||||
|
* Backported in [#42435](https://github.com/ClickHouse/ClickHouse/issues/42435): Choose correct aggregation method for LowCardinality with BigInt. [#42342](https://github.com/ClickHouse/ClickHouse/pull/42342) ([Duc Canh Le](https://github.com/canhld94)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
* Backported in [#42297](https://github.com/ClickHouse/ClickHouse/issues/42297): Update cctz to the latest master, update tzdb to 2020e. [#42273](https://github.com/ClickHouse/ClickHouse/pull/42273) ([Dom Del Nano](https://github.com/ddelnano)).
|
||||||
|
* Backported in [#42361](https://github.com/ClickHouse/ClickHouse/issues/42361): Update tzdata to 2022e to support the new timezone changes. Palestine transitions are now Saturdays at 02:00. Simplify three Ukraine zones into one. Jordan and Syria switch from +02/+03 with DST to year-round +03. (https://data.iana.org/time-zones/tzdb/NEWS). This closes [#42252](https://github.com/ClickHouse/ClickHouse/issues/42252). [#42327](https://github.com/ClickHouse/ClickHouse/pull/42327) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in official stable or prestable release)
|
||||||
|
|
||||||
|
* Backported in [#42122](https://github.com/ClickHouse/ClickHouse/issues/42122): Fixed "Part ... intersects part ..." error that might happen in extremely rare cases if replica was restarted just after detaching some part as broken. [#41741](https://github.com/ClickHouse/ClickHouse/pull/41741) ([Alexander Tokmakov](https://github.com/tavplubix)).
|
||||||
|
* Backported in [#41938](https://github.com/ClickHouse/ClickHouse/issues/41938): Don't allow to create or alter merge tree tables with virtual column name _row_exists, which is reserved for lightweight delete. Fixed [#41716](https://github.com/ClickHouse/ClickHouse/issues/41716). [#41763](https://github.com/ClickHouse/ClickHouse/pull/41763) ([Jianmei Zhang](https://github.com/zhangjmruc)).
|
||||||
|
* Backported in [#42179](https://github.com/ClickHouse/ClickHouse/issues/42179): Fix reusing of files > 4GB from base backup. [#42146](https://github.com/ClickHouse/ClickHouse/pull/42146) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Backported in [#42301](https://github.com/ClickHouse/ClickHouse/issues/42301): Fix a bug with projections and the `aggregate_functions_null_for_empty` setting. This bug is very rare and appears only if you enable the `aggregate_functions_null_for_empty` setting in the server's config. This closes [#41647](https://github.com/ClickHouse/ClickHouse/issues/41647). [#42198](https://github.com/ClickHouse/ClickHouse/pull/42198) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Backported in [#42388](https://github.com/ClickHouse/ClickHouse/issues/42388): `ALTER UPDATE` of attached part (with columns different from table schema) could create an invalid `columns.txt` metadata on disk. Reading from such part could fail with errors or return invalid data. Fixes [#42161](https://github.com/ClickHouse/ClickHouse/issues/42161). [#42319](https://github.com/ClickHouse/ClickHouse/pull/42319) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#42500](https://github.com/ClickHouse/ClickHouse/issues/42500): Setting `additional_table_filters` were not applied to `Distributed` storage. Fixes [#41692](https://github.com/ClickHouse/ClickHouse/issues/41692). [#42322](https://github.com/ClickHouse/ClickHouse/pull/42322) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#42581](https://github.com/ClickHouse/ClickHouse/issues/42581): This reverts [#40217](https://github.com/ClickHouse/ClickHouse/issues/40217) which introduced a regression in date/time functions. [#42367](https://github.com/ClickHouse/ClickHouse/pull/42367) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Backported in [#42572](https://github.com/ClickHouse/ClickHouse/issues/42572): Fix buffer overflow in the processing of Decimal data types. This closes [#42451](https://github.com/ClickHouse/ClickHouse/issues/42451). [#42465](https://github.com/ClickHouse/ClickHouse/pull/42465) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Backported in [#42595](https://github.com/ClickHouse/ClickHouse/issues/42595): This closes [#42453](https://github.com/ClickHouse/ClickHouse/issues/42453). [#42573](https://github.com/ClickHouse/ClickHouse/pull/42573) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Add a warning message to release.py script, require release type [#41975](https://github.com/ClickHouse/ClickHouse/pull/41975) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Revert [#27787](https://github.com/ClickHouse/ClickHouse/issues/27787) [#42136](https://github.com/ClickHouse/ClickHouse/pull/42136) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
|
@ -49,27 +49,13 @@ When we calculate some function over columns in a block, we add another column w
|
|||||||
|
|
||||||
Blocks are created for every processed chunk of data. Note that for the same type of calculation, the column names and types remain the same for different blocks, and only column data changes. It is better to split block data from the block header because small block sizes have a high overhead of temporary strings for copying shared_ptrs and column names.
|
Blocks are created for every processed chunk of data. Note that for the same type of calculation, the column names and types remain the same for different blocks, and only column data changes. It is better to split block data from the block header because small block sizes have a high overhead of temporary strings for copying shared_ptrs and column names.
|
||||||
|
|
||||||
## Block Streams {#block-streams}
|
## Processors
|
||||||
|
|
||||||
Block streams are for processing data. We use streams of blocks to read data from somewhere, perform data transformations, or write data to somewhere. `IBlockInputStream` has the `read` method to fetch the next block while available. `IBlockOutputStream` has the `write` method to push the block somewhere.
|
See the description at [https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/IProcessor.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/IProcessor.h).
|
||||||
|
|
||||||
Streams are responsible for:
|
|
||||||
|
|
||||||
1. Reading or writing to a table. The table just returns a stream for reading or writing blocks.
|
|
||||||
2. Implementing data formats. For example, if you want to output data to a terminal in `Pretty` format, you create a block output stream where you push blocks, and it formats them.
|
|
||||||
3. Performing data transformations. Let’s say you have `IBlockInputStream` and want to create a filtered stream. You create `FilterBlockInputStream` and initialize it with your stream. Then when you pull a block from `FilterBlockInputStream`, it pulls a block from your stream, filters it, and returns the filtered block to you. Query execution pipelines are represented this way.
|
|
||||||
|
|
||||||
There are more sophisticated transformations. For example, when you pull from `AggregatingBlockInputStream`, it reads all data from its source, aggregates it, and then returns a stream of aggregated data for you. Another example: `UnionBlockInputStream` accepts many input sources in the constructor and also a number of threads. It launches multiple threads and reads from multiple sources in parallel.
|
|
||||||
|
|
||||||
> Block streams use the “pull” approach to control flow: when you pull a block from the first stream, it consequently pulls the required blocks from nested streams, and the entire execution pipeline will work. Neither “pull” nor “push” is the best solution, because control flow is implicit, and that limits the implementation of various features like simultaneous execution of multiple queries (merging many pipelines together). This limitation could be overcome with coroutines or just running extra threads that wait for each other. We may have more possibilities if we make control flow explicit: if we locate the logic for passing data from one calculation unit to another outside of those calculation units. Read this [article](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) for more thoughts.
|
|
||||||
|
|
||||||
We should note that the query execution pipeline creates temporary data at each step. We try to keep block size small enough so that temporary data fits in the CPU cache. With that assumption, writing and reading temporary data is almost free in comparison with other calculations. We could consider an alternative, which is to fuse many operations in the pipeline together. It could make the pipeline as short as possible and remove much of the temporary data, which could be an advantage, but it also has drawbacks. For example, a split pipeline makes it easy to implement caching intermediate data, stealing intermediate data from similar queries running at the same time, and merging pipelines for similar queries.
|
|
||||||
|
|
||||||
## Formats {#formats}
|
## Formats {#formats}
|
||||||
|
|
||||||
Data formats are implemented with block streams. There are “presentational” formats only suitable for the output of data to the client, such as `Pretty` format, which provides only `IBlockOutputStream`. And there are input/output formats, such as `TabSeparated` or `JSONEachRow`.
|
Data formats are implemented with processors.
|
||||||
|
|
||||||
There are also row streams: `IRowInputStream` and `IRowOutputStream`. They allow you to pull/push data by individual rows, not by blocks. And they are only needed to simplify the implementation of row-oriented formats. The wrappers `BlockInputStreamFromRowInputStream` and `BlockOutputStreamFromRowOutputStream` allow you to convert row-oriented streams to regular block-oriented streams.
|
|
||||||
|
|
||||||
## I/O {#io}
|
## I/O {#io}
|
||||||
|
|
||||||
|
@ -1,14 +0,0 @@
|
|||||||
---
|
|
||||||
slug: /en/development/browse-code
|
|
||||||
sidebar_label: Source Code Browser
|
|
||||||
sidebar_position: 72
|
|
||||||
description: Various ways to browse and edit the source code
|
|
||||||
---
|
|
||||||
|
|
||||||
# Browse ClickHouse Source Code
|
|
||||||
|
|
||||||
You can use the **Woboq** online code browser available [here](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). It provides code navigation and semantic highlighting, search and indexing. The code snapshot is updated daily.
|
|
||||||
|
|
||||||
Also, you can browse sources on [GitHub](https://github.com/ClickHouse/ClickHouse) as usual.
|
|
||||||
|
|
||||||
If you’re interested what IDE to use, we recommend CLion, QT Creator, VS Code and KDevelop (with caveats). You can use any favorite IDE. Vim and Emacs also count.
|
|
@ -419,6 +419,8 @@ Supported data types: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `St
|
|||||||
|
|
||||||
For `Map` data type client can specify if index should be created for keys or values using [mapKeys](../../../sql-reference/functions/tuple-map-functions.md#mapkeys) or [mapValues](../../../sql-reference/functions/tuple-map-functions.md#mapvalues) function.
|
For `Map` data type client can specify if index should be created for keys or values using [mapKeys](../../../sql-reference/functions/tuple-map-functions.md#mapkeys) or [mapValues](../../../sql-reference/functions/tuple-map-functions.md#mapvalues) function.
|
||||||
|
|
||||||
|
There are also special-purpose and experimental indexes to support approximate nearest neighbor (ANN) queries. See [here](annindexes.md) for details.
|
||||||
|
|
||||||
The following functions can use the filter: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions), [notIn](../../../sql-reference/functions/in-functions), [has](../../../sql-reference/functions/array-functions#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions#hasany), [hasAll](../../../sql-reference/functions/array-functions#hasall).
|
The following functions can use the filter: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions), [notIn](../../../sql-reference/functions/in-functions), [has](../../../sql-reference/functions/array-functions#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions#hasany), [hasAll](../../../sql-reference/functions/array-functions#hasall).
|
||||||
|
|
||||||
Example of index creation for `Map` data type
|
Example of index creation for `Map` data type
|
||||||
|
@ -101,7 +101,7 @@ SELECT count()
|
|||||||
FROM uk_price_paid
|
FROM uk_price_paid
|
||||||
```
|
```
|
||||||
|
|
||||||
At the time this query was executed, the dataset had 27,450,499 rows. Let's see what the storage size is of the table in ClickHouse:
|
At the time this query was run, the dataset had 27,450,499 rows. Let's see what the storage size is of the table in ClickHouse:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT formatReadableSize(total_bytes)
|
SELECT formatReadableSize(total_bytes)
|
||||||
@ -342,7 +342,7 @@ The result looks like:
|
|||||||
|
|
||||||
## Let's Speed Up Queries Using Projections {#speedup-with-projections}
|
## Let's Speed Up Queries Using Projections {#speedup-with-projections}
|
||||||
|
|
||||||
[Projections](../../sql-reference/statements/alter/projection.md) allow you to improve query speeds by storing pre-aggregated data in whatever format you want. In this example, we create a projection that keeps track of the average price, total price, and count of properties grouped by the year, district and town. At execution time, ClickHouse will use your projection if it thinks the projection can improve the performance of the query (you don't have to do anything special to use the projection - ClickHouse decides for you when the projection will be useful).
|
[Projections](../../sql-reference/statements/alter/projection.md) allow you to improve query speeds by storing pre-aggregated data in whatever format you want. In this example, we create a projection that keeps track of the average price, total price, and count of properties grouped by the year, district and town. At query time, ClickHouse will use your projection if it thinks the projection can improve the performance of the query (you don't have to do anything special to use the projection - ClickHouse decides for you when the projection will be useful).
|
||||||
|
|
||||||
### Build a Projection {#build-projection}
|
### Build a Projection {#build-projection}
|
||||||
|
|
||||||
|
@ -1020,6 +1020,62 @@ Example:
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
To use an object name as a column value you can use the special setting [format_json_object_each_row_column_for_object_name](../operations/settings/settings.md#format_json_object_each_row_column_for_object_name). The value of this setting is set to the name of the column that is used as the JSON key for a row in the resulting object.
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
For output:
|
||||||
|
|
||||||
|
Let's say we have table `test` with two columns:
|
||||||
|
```
|
||||||
|
┌─object_name─┬─number─┐
|
||||||
|
│ first_obj │ 1 │
|
||||||
|
│ second_obj │ 2 │
|
||||||
|
│ third_obj │ 3 │
|
||||||
|
└─────────────┴────────┘
|
||||||
|
```
|
||||||
|
Let's output it in `JSONObjectEachRow` format and use `format_json_object_each_row_column_for_object_name` setting:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
select * from test settings format_json_object_each_row_column_for_object_name='object_name'
|
||||||
|
```
|
||||||
|
|
||||||
|
The output:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"first_obj": {"number": 1},
|
||||||
|
"second_obj": {"number": 2},
|
||||||
|
"third_obj": {"number": 3}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
For input:
|
||||||
|
|
||||||
|
Let's say we stored output from previous example in a file with name `data.json`:
|
||||||
|
```sql
|
||||||
|
select * from file('data.json', JSONObjectEachRow, 'object_name String, number UInt64') settings format_json_object_each_row_column_for_object_name='object_name'
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─object_name─┬─number─┐
|
||||||
|
│ first_obj │ 1 │
|
||||||
|
│ second_obj │ 2 │
|
||||||
|
│ third_obj │ 3 │
|
||||||
|
└─────────────┴────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
It also works in schema inference:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
desc file('data.json', JSONObjectEachRow) settings format_json_object_each_row_column_for_object_name='object_name'
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─name────────┬─type────────────┐
|
||||||
|
│ object_name │ String │
|
||||||
|
│ number │ Nullable(Int64) │
|
||||||
|
└─────────────┴─────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
### Inserting Data {#json-inserting-data}
|
### Inserting Data {#json-inserting-data}
|
||||||
|
|
||||||
|
@ -41,6 +41,7 @@ ClickHouse Inc does **not** maintain the libraries listed below and hasn’t don
|
|||||||
- [node-clickhouse](https://github.com/apla/node-clickhouse)
|
- [node-clickhouse](https://github.com/apla/node-clickhouse)
|
||||||
- [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
|
- [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
|
||||||
- [clickhouse-client](https://github.com/depyronick/clickhouse-client)
|
- [clickhouse-client](https://github.com/depyronick/clickhouse-client)
|
||||||
|
- [node-clickhouse-orm](https://github.com/zimv/node-clickhouse-orm)
|
||||||
- Perl
|
- Perl
|
||||||
- [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
|
- [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
|
||||||
- [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
|
- [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
|
||||||
|
@ -1,9 +1,12 @@
|
|||||||
---
|
|
||||||
slug: /en/operations/backup
|
[//]: # (This file is included in Manage > Backups)
|
||||||
sidebar_position: 49
|
|
||||||
sidebar_label: Data backup and restore
|
- [Backup to a local disk](#backup-to-a-local-disk)
|
||||||
title: Data backup and restore
|
- [Configuring backup/restore to use an S3 endpoint](#configuring-backuprestore-to-use-an-s3-endpoint)
|
||||||
---
|
- [Backup/restore using an S3 disk](#backuprestore-using-an-s3-disk)
|
||||||
|
- [Alternatives](#alternatives)
|
||||||
|
|
||||||
|
## Background
|
||||||
|
|
||||||
While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented.
|
While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented.
|
||||||
|
|
||||||
@ -15,7 +18,9 @@ Each company has different resources available and business requirements, so the
|
|||||||
Keep in mind that if you backed something up and never tried to restore it, chances are that restore will not work properly when you actually need it (or at least it will take longer than business can tolerate). So whatever backup approach you choose, make sure to automate the restore process as well, and practice it on a spare ClickHouse cluster regularly.
|
Keep in mind that if you backed something up and never tried to restore it, chances are that restore will not work properly when you actually need it (or at least it will take longer than business can tolerate). So whatever backup approach you choose, make sure to automate the restore process as well, and practice it on a spare ClickHouse cluster regularly.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
## Configure a backup destination
|
## Backup to a local disk
|
||||||
|
|
||||||
|
### Configure a backup destination
|
||||||
|
|
||||||
In the examples below you will see the backup destination specified like `Disk('backups', '1.zip')`. To prepare the destination add a file to `/etc/clickhouse-server/config.d/backup_disk.xml` specifying the backup destination. For example, this file defines disk named `backups` and then adds that disk to the **backups > allowed_disk** list:
|
In the examples below you will see the backup destination specified like `Disk('backups', '1.zip')`. To prepare the destination add a file to `/etc/clickhouse-server/config.d/backup_disk.xml` specifying the backup destination. For example, this file defines disk named `backups` and then adds that disk to the **backups > allowed_disk** list:
|
||||||
|
|
||||||
@ -39,7 +44,7 @@ In the examples below you will see the backup destination specified like `Disk('
|
|||||||
</clickhouse>
|
</clickhouse>
|
||||||
```
|
```
|
||||||
|
|
||||||
## Parameters
|
### Parameters
|
||||||
|
|
||||||
Backups can be either full or incremental, and can include tables (including materialized views, projections, and dictionaries), and databases. Backups can be synchronous (default) or asynchronous. They can be compressed. Backups can be password protected.
|
Backups can be either full or incremental, and can include tables (including materialized views, projections, and dictionaries), and databases. Backups can be synchronous (default) or asynchronous. They can be compressed. Backups can be password protected.
|
||||||
|
|
||||||
@ -52,7 +57,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
|
|||||||
- `password` for the file on disk
|
- `password` for the file on disk
|
||||||
- `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
|
- `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
|
||||||
|
|
||||||
## Usage examples
|
### Usage examples
|
||||||
|
|
||||||
Backup and then restore a table:
|
Backup and then restore a table:
|
||||||
```
|
```
|
||||||
@ -81,7 +86,7 @@ RESTORE TABLE test.table AS test.table2 FROM Disk('backups', '1.zip')
|
|||||||
BACKUP TABLE test.table3 AS test.table4 TO Disk('backups', '2.zip')
|
BACKUP TABLE test.table3 AS test.table4 TO Disk('backups', '2.zip')
|
||||||
```
|
```
|
||||||
|
|
||||||
## Incremental backups
|
### Incremental backups
|
||||||
|
|
||||||
Incremental backups can be taken by specifying the `base_backup`.
|
Incremental backups can be taken by specifying the `base_backup`.
|
||||||
:::note
|
:::note
|
||||||
@ -100,7 +105,7 @@ RESTORE TABLE test.table AS test.table2
|
|||||||
FROM Disk('backups', 'incremental-a.zip');
|
FROM Disk('backups', 'incremental-a.zip');
|
||||||
```
|
```
|
||||||
|
|
||||||
## Assign a password to the backup
|
### Assign a password to the backup
|
||||||
|
|
||||||
Backups written to disk can have a password applied to the file:
|
Backups written to disk can have a password applied to the file:
|
||||||
```
|
```
|
||||||
@ -116,7 +121,7 @@ RESTORE TABLE test.table
|
|||||||
SETTINGS password='qwerty'
|
SETTINGS password='qwerty'
|
||||||
```
|
```
|
||||||
|
|
||||||
## Compression settings
|
### Compression settings
|
||||||
|
|
||||||
If you would like to specify the compression method or level:
|
If you would like to specify the compression method or level:
|
||||||
```
|
```
|
||||||
@ -125,14 +130,14 @@ BACKUP TABLE test.table
|
|||||||
SETTINGS compression_method='lzma', compression_level=3
|
SETTINGS compression_method='lzma', compression_level=3
|
||||||
```
|
```
|
||||||
|
|
||||||
## Restore specific partitions
|
### Restore specific partitions
|
||||||
If specific partitions associated with a table need to be restored these can be specified. To restore partitions 1 and 4 from backup:
|
If specific partitions associated with a table need to be restored these can be specified. To restore partitions 1 and 4 from backup:
|
||||||
```
|
```
|
||||||
RESTORE TABLE test.table PARTITIONS '2', '3'
|
RESTORE TABLE test.table PARTITIONS '2', '3'
|
||||||
FROM Disk('backups', 'filename.zip')
|
FROM Disk('backups', 'filename.zip')
|
||||||
```
|
```
|
||||||
|
|
||||||
## Check the status of backups
|
### Check the status of backups
|
||||||
|
|
||||||
The backup command returns an `id` and `status`, and that `id` can be used to get the status of the backup. This is very useful to check the progress of long ASYNC backups. The example below shows a failure that happened when trying to overwrite an existing backup file:
|
The backup command returns an `id` and `status`, and that `id` can be used to get the status of the backup. This is very useful to check the progress of long ASYNC backups. The example below shows a failure that happened when trying to overwrite an existing backup file:
|
||||||
```sql
|
```sql
|
||||||
@ -171,6 +176,160 @@ end_time: 2022-08-30 09:21:46
|
|||||||
1 row in set. Elapsed: 0.002 sec.
|
1 row in set. Elapsed: 0.002 sec.
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Configuring BACKUP/RESTORE to use an S3 Endpoint
|
||||||
|
|
||||||
|
To write backups to an S3 bucket you need three pieces of information:
|
||||||
|
- S3 endpoint,
|
||||||
|
for example `https://mars-doc-test.s3.amazonaws.com/backup-S3/`
|
||||||
|
- Access key ID,
|
||||||
|
for example `ABC123`
|
||||||
|
- Secret access key,
|
||||||
|
for example `Abc+123`
|
||||||
|
|
||||||
|
:::note
|
||||||
|
Creating an S3 bucket is covered in [Use S3 Object Storage as a ClickHouse disk](/docs/en/integrations/data-ingestion/s3/configuring-s3-for-clickhouse-use.md), just come back to this doc after saving the policy, there is no need to configure ClickHouse to use the S3 bucket.
|
||||||
|
:::
|
||||||
|
|
||||||
|
The destination for a backup will be specified like this:
|
||||||
|
```
|
||||||
|
S3('<S3 endpoint>/<directory>', '<Access key ID>', '<Secret access key>')
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE data
|
||||||
|
(
|
||||||
|
`key` Int,
|
||||||
|
`value` String,
|
||||||
|
`array` Array(String)
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree
|
||||||
|
ORDER BY tuple()
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
INSERT INTO data SELECT *
|
||||||
|
FROM generateRandom('key Int, value String, array Array(String)')
|
||||||
|
LIMIT 1000
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create a base (initial) backup
|
||||||
|
|
||||||
|
Incremental backups require a _base_ backup to start from; this example will be used
|
||||||
|
later as the base backup. The first parameter of the S3 destination is the S3 endpoint followed by the directory within the bucket to use for this backup. In this example the directory is named `my_backup`.
|
||||||
|
|
||||||
|
```sql
|
||||||
|
BACKUP TABLE data TO S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_backup', 'ABC123', 'Abc+123')
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─id───────────────────────────────────┬─status─────────┐
|
||||||
|
│ de442b75-a66c-4a3c-a193-f76f278c70f3 │ BACKUP_CREATED │
|
||||||
|
└──────────────────────────────────────┴────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Add more data
|
||||||
|
|
||||||
|
Incremental backups are populated with the difference between the base backup and the current content of the table being backed up. Add more data before taking the incremental backup:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
INSERT INTO data SELECT *
|
||||||
|
FROM generateRandom('key Int, value String, array Array(String)')
|
||||||
|
LIMIT 100
|
||||||
|
```
|
||||||
|
### Take an incremental backup
|
||||||
|
|
||||||
|
This backup command is similar to the base backup, but adds `SETTINGS base_backup` and the location of the base backup. Note that the destination for the incremental backup is not the same directory as the base, it is the same endpoint with a different target directory within the bucket. The base backup is in `my_backup`, and the incremental will be written to `my_incremental`:
|
||||||
|
```sql
|
||||||
|
BACKUP TABLE data TO S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_incremental', 'ABC123', 'Abc+123') SETTINGS base_backup = S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_backup', 'ABC123', 'Abc+123')
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─id───────────────────────────────────┬─status─────────┐
|
||||||
|
│ f6cd3900-850f-41c9-94f1-0c4df33ea528 │ BACKUP_CREATED │
|
||||||
|
└──────────────────────────────────────┴────────────────┘
|
||||||
|
```
|
||||||
|
### Restore from the incremental backup
|
||||||
|
|
||||||
|
This command restores the incremental backup into a new table, `data3`. Note that when an incremental backup is restored, the base backup is also included. Specify only the incremental backup when restoring:
|
||||||
|
```sql
|
||||||
|
RESTORE TABLE data AS data3 FROM S3('https://mars-doc-test.s3.amazonaws.com/backup-S3/my_incremental', 'ABC123', 'Abc+123')
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─id───────────────────────────────────┬─status───┐
|
||||||
|
│ ff0c8c39-7dff-4324-a241-000796de11ca │ RESTORED │
|
||||||
|
└──────────────────────────────────────┴──────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Verify the count
|
||||||
|
|
||||||
|
There were two inserts into the original table `data`, one with 1,000 rows and one with 100 rows, for a total of 1,100. Verify that the restored table has 1,100 rows:
|
||||||
|
```sql
|
||||||
|
SELECT count()
|
||||||
|
FROM data3
|
||||||
|
```
|
||||||
|
```response
|
||||||
|
┌─count()─┐
|
||||||
|
│ 1100 │
|
||||||
|
└─────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Verify the content
|
||||||
|
This compares the content of the original table, `data`, with the restored table `data3`:
|
||||||
|
```sql
|
||||||
|
SELECT throwIf((
|
||||||
|
SELECT groupArray(tuple(*))
|
||||||
|
FROM data
|
||||||
|
) != (
|
||||||
|
SELECT groupArray(tuple(*))
|
||||||
|
FROM data3
|
||||||
|
), 'Data does not match after BACKUP/RESTORE')
|
||||||
|
```
|
||||||
|
## BACKUP/RESTORE Using an S3 Disk
|
||||||
|
|
||||||
|
It is also possible to `BACKUP`/`RESTORE` to S3 by configuring an S3 disk in the ClickHouse storage configuration. Configure the disk like this by adding a file to `/etc/clickhouse-server/config.d`:
|
||||||
|
|
||||||
|
```xml
|
||||||
|
<clickhouse>
|
||||||
|
<storage_configuration>
|
||||||
|
<disks>
|
||||||
|
<s3_plain>
|
||||||
|
<type>s3_plain</type>
|
||||||
|
<endpoint></endpoint>
|
||||||
|
<access_key_id></access_key_id>
|
||||||
|
<secret_access_key></secret_access_key>
|
||||||
|
</s3_plain>
|
||||||
|
</disks>
|
||||||
|
<policies>
|
||||||
|
<s3>
|
||||||
|
<volumes>
|
||||||
|
<main>
|
||||||
|
<disk>s3</disk>
|
||||||
|
</main>
|
||||||
|
</volumes>
|
||||||
|
</s3>
|
||||||
|
</policies>
|
||||||
|
</storage_configuration>
|
||||||
|
|
||||||
|
<backups>
|
||||||
|
<allowed_disk>s3_plain</allowed_disk>
|
||||||
|
</backups>
|
||||||
|
</clickhouse>
|
||||||
|
```
|
||||||
|
|
||||||
|
And then `BACKUP`/`RESTORE` as usual:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
BACKUP TABLE data TO Disk('s3_plain', 'cloud_backup');
|
||||||
|
RESTORE TABLE data AS data_restored FROM Disk('s3_plain', 'cloud_backup');
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
But keep in mind that:
|
||||||
|
- This disk should not be used for `MergeTree` itself, only for `BACKUP`/`RESTORE`
|
||||||
|
- It has excessive API calls
|
||||||
|
:::
|
||||||
|
|
||||||
## Alternatives
|
## Alternatives
|
||||||
|
|
||||||
ClickHouse stores data on disk, and there are many ways to backup disks. These are some alternatives that have been used in the past, and that may fit in well in your environment.
|
ClickHouse stores data on disk, and there are many ways to backup disks. These are some alternatives that have been used in the past, and that may fit in well in your environment.
|
@ -3902,6 +3902,13 @@ Controls validation of UTF-8 sequences in JSON output formats, doesn't impact fo
|
|||||||
|
|
||||||
Disabled by default.
|
Disabled by default.
|
||||||
|
|
||||||
|
### format_json_object_each_row_column_for_object_name {#format_json_object_each_row_column_for_object_name}
|
||||||
|
|
||||||
|
The name of column that will be used for storing/writing object names in [JSONObjectEachRow](../../interfaces/formats.md#jsonobjecteachrow) format.
|
||||||
|
The column type should be String. If the value is empty, default names `row_{i}` will be used for object names.
|
||||||
|
|
||||||
|
Default value: ''.
|
||||||
|
|
||||||
## TSV format settings {#tsv-format-settings}
|
## TSV format settings {#tsv-format-settings}
|
||||||
|
|
||||||
### input_format_tsv_empty_as_default {#input_format_tsv_empty_as_default}
|
### input_format_tsv_empty_as_default {#input_format_tsv_empty_as_default}
|
||||||
|
@ -17,6 +17,33 @@ title: Troubleshooting
|
|||||||
- Check firewall settings.
|
- Check firewall settings.
|
||||||
- If you cannot access the repository for any reason, download packages as described in the [install guide](../getting-started/install.md) article and install them manually using the `sudo dpkg -i <packages>` command. You will also need the `tzdata` package.
|
- If you cannot access the repository for any reason, download packages as described in the [install guide](../getting-started/install.md) article and install them manually using the `sudo dpkg -i <packages>` command. You will also need the `tzdata` package.
|
||||||
|
|
||||||
|
### You Cannot Update Deb Packages from ClickHouse Repository with Apt-get {#you-cannot-update-deb-packages-from-clickhouse-repository-with-apt-get}
|
||||||
|
|
||||||
|
- The issue may happen when the GPG key is changed.
|
||||||
|
|
||||||
|
Please use the following scripts to resolve the issue:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754
|
||||||
|
sudo apt-get update
|
||||||
|
```
|
||||||
|
|
||||||
|
### You Get the Unsupported Architecture Warning with Apt-get {#you-get-the-unsupported-architecture-warning-with-apt-get}
|
||||||
|
|
||||||
|
- The complete warning message is as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
N: Skipping acquire of configured file 'main/binary-i386/Packages' as repository 'https://packages.clickhouse.com/deb stable InRelease' doesn't support architecture 'i386'
|
||||||
|
```
|
||||||
|
|
||||||
|
To resolve the above issue, please use the following script:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo rm /var/lib/apt/lists/packages.clickhouse.com_* /var/lib/dpkg/arch
|
||||||
|
sudo apt-get clean
|
||||||
|
sudo apt-get autoclean
|
||||||
|
```
|
||||||
|
|
||||||
## Connecting to the Server {#troubleshooting-accepts-no-connections}
|
## Connecting to the Server {#troubleshooting-accepts-no-connections}
|
||||||
|
|
||||||
Possible issues:
|
Possible issues:
|
||||||
|
@ -14,8 +14,10 @@ Example of a polygon dictionary configuration:
|
|||||||
<dictionary>
|
<dictionary>
|
||||||
<structure>
|
<structure>
|
||||||
<key>
|
<key>
|
||||||
<name>key</name>
|
<attribute>
|
||||||
<type>Array(Array(Array(Array(Float64))))</type>
|
<name>key</name>
|
||||||
|
<type>Array(Array(Array(Array(Float64))))</type>
|
||||||
|
</attribute>
|
||||||
</key>
|
</key>
|
||||||
|
|
||||||
<attribute>
|
<attribute>
|
||||||
|
@ -271,11 +271,7 @@ Result:
|
|||||||
The return type of `toStartOf*`, `toLastDayOfMonth`, `toMonday`, `timeSlot` functions described below is determined by the configuration parameter [enable_extended_results_for_datetime_functions](../../operations/settings/settings#enable-extended-results-for-datetime-functions) which is `0` by default.
|
The return type of `toStartOf*`, `toLastDayOfMonth`, `toMonday`, `timeSlot` functions described below is determined by the configuration parameter [enable_extended_results_for_datetime_functions](../../operations/settings/settings#enable-extended-results-for-datetime-functions) which is `0` by default.
|
||||||
|
|
||||||
Behavior for
|
Behavior for
|
||||||
* `enable_extended_results_for_datetime_functions = 0`: Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime`. Functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` return `DateTime`. Though these functions can take values of the extended types `Date32` and `DateTime64` as an argument, passing them a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results. In case argument is out of normal range:
|
* `enable_extended_results_for_datetime_functions = 0`: Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime`. Functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` return `DateTime`. Though these functions can take values of the extended types `Date32` and `DateTime64` as an argument, passing them a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results.
|
||||||
* If the argument is smaller than 1970, the result will be calculated from the argument `1970-01-01 (00:00:00)` instead.
|
|
||||||
* If the return type is `DateTime` and the argument is larger than `2106-02-07 08:28:15`, the result will be calculated from the argument `2106-02-07 08:28:15` instead.
|
|
||||||
* If the return type is `Date` and the argument is larger than `2149-06-06`, the result will be calculated from the argument `2149-06-06` instead.
|
|
||||||
* If `toLastDayOfMonth` is called with an argument greater than `2149-05-31`, the result will be calculated from the argument `2149-05-31` instead.
|
|
||||||
* `enable_extended_results_for_datetime_functions = 1`:
|
* `enable_extended_results_for_datetime_functions = 1`:
|
||||||
* Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime` if their argument is a `Date` or `DateTime`, and they return `Date32` or `DateTime64` if their argument is a `Date32` or `DateTime64`.
|
* Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime` if their argument is a `Date` or `DateTime`, and they return `Date32` or `DateTime64` if their argument is a `Date32` or `DateTime64`.
|
||||||
* Functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` return `DateTime` if their argument is a `Date` or `DateTime`, and they return `DateTime64` if their argument is a `Date32` or `DateTime64`.
|
* Functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` return `DateTime` if their argument is a `Date` or `DateTime`, and they return `DateTime64` if their argument is a `Date32` or `DateTime64`.
|
||||||
@ -302,25 +298,22 @@ Returns the date.
|
|||||||
Rounds down a date or date with time to the first day of the month.
|
Rounds down a date or date with time to the first day of the month.
|
||||||
Returns the date.
|
Returns the date.
|
||||||
|
|
||||||
## toLastDayOfMonth
|
:::note
|
||||||
|
The behavior of parsing incorrect dates is implementation specific. ClickHouse may return zero date, throw an exception or do “natural” overflow.
|
||||||
Rounds up a date or date with time to the last day of the month.
|
:::
|
||||||
Returns the date.
|
|
||||||
|
|
||||||
If `toLastDayOfMonth` is called with an argument of type `Date` greater than 2149-05-31, the result will be calculated from the argument 2149-05-31 instead.
|
If `toLastDayOfMonth` is called with an argument of type `Date` greater than 2149-05-31, the result will be calculated from the argument 2149-05-31 instead.
|
||||||
|
|
||||||
## toMonday
|
## toMonday
|
||||||
|
|
||||||
Rounds down a date or date with time to the nearest Monday.
|
Rounds down a date or date with time to the nearest Monday.
|
||||||
As a special case, date arguments `1970-01-01`, `1970-01-02`, `1970-01-03` and `1970-01-04` return date `1970-01-01`.
|
|
||||||
Returns the date.
|
Returns the date.
|
||||||
|
|
||||||
## toStartOfWeek(t\[,mode\])
|
## toStartOfWeek(t\[,mode\])
|
||||||
|
|
||||||
Rounds down a date or date with time to the nearest Sunday or Monday by mode.
|
Rounds down a date or date with time to the nearest Sunday or Monday by mode.
|
||||||
Returns the date.
|
Returns the date.
|
||||||
As a special case, date arguments `1970-01-01`, `1970-01-02`, `1970-01-03` and `1970-01-04` (and `1970-01-05` if `mode` is `1`) return date `1970-01-01`.
|
The mode argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used.
|
||||||
The `mode` argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used.
|
|
||||||
|
|
||||||
## toStartOfDay
|
## toStartOfDay
|
||||||
|
|
||||||
@ -671,9 +664,9 @@ Aliases: `dateDiff`, `DATE_DIFF`.
|
|||||||
- `quarter`
|
- `quarter`
|
||||||
- `year`
|
- `year`
|
||||||
|
|
||||||
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
|
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||||
|
|
||||||
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
|
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||||
|
|
||||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
|
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
|
||||||
|
|
||||||
@ -1075,7 +1068,7 @@ Example:
|
|||||||
SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
|
SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
|
||||||
SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299);
|
SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299);
|
||||||
SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0));
|
SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0));
|
||||||
```
|
```
|
||||||
``` text
|
``` text
|
||||||
┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐
|
┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐
|
||||||
│ ['2012-01-01 12:00:00','2012-01-01 12:30:00'] │
|
│ ['2012-01-01 12:00:00','2012-01-01 12:30:00'] │
|
||||||
@ -1163,7 +1156,7 @@ dateName(date_part, date)
|
|||||||
**Arguments**
|
**Arguments**
|
||||||
|
|
||||||
- `date_part` — Date part. Possible values: 'year', 'quarter', 'month', 'week', 'dayofyear', 'day', 'weekday', 'hour', 'minute', 'second'. [String](../../sql-reference/data-types/string.md).
|
- `date_part` — Date part. Possible values: 'year', 'quarter', 'month', 'week', 'dayofyear', 'day', 'weekday', 'hour', 'minute', 'second'. [String](../../sql-reference/data-types/string.md).
|
||||||
- `date` — Date. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
- `date` — Date. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||||
- `timezone` — Timezone. Optional. [String](../../sql-reference/data-types/string.md).
|
- `timezone` — Timezone. Optional. [String](../../sql-reference/data-types/string.md).
|
||||||
|
|
||||||
**Returned value**
|
**Returned value**
|
||||||
|
@ -376,14 +376,6 @@ Result:
|
|||||||
└─────┘
|
└─────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
## UUIDStringToNum(str)
|
|
||||||
|
|
||||||
Accepts a string containing 36 characters in the format `123e4567-e89b-12d3-a456-426655440000`, and returns it as a set of bytes in a FixedString(16).
|
|
||||||
|
|
||||||
## UUIDNumToString(str)
|
|
||||||
|
|
||||||
Accepts a FixedString(16) value. Returns a string containing 36 characters in text format.
|
|
||||||
|
|
||||||
## bitmaskToList(num)
|
## bitmaskToList(num)
|
||||||
|
|
||||||
Accepts an integer. Returns a string containing the list of powers of two that total the source number when summed. They are comma-separated without spaces in text format, in ascending order.
|
Accepts an integer. Returns a string containing the list of powers of two that total the source number when summed. They are comma-separated without spaces in text format, in ascending order.
|
||||||
|
@ -571,7 +571,7 @@ Example:
|
|||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SELECT
|
SELECT
|
||||||
transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s,
|
transform(domain(Referer), ['yandex.ru', 'google.ru', 'vkontakte.ru'], ['www.yandex', 'example.com', 'vk.com']) AS s,
|
||||||
count() AS c
|
count() AS c
|
||||||
FROM test.hits
|
FROM test.hits
|
||||||
GROUP BY domain(Referer)
|
GROUP BY domain(Referer)
|
||||||
|
@ -211,12 +211,19 @@ SELECT toUUIDOrZero('61f0c404-5cb3-11e7-907b-a6006ad3dba0T') AS uuid
|
|||||||
|
|
||||||
## UUIDStringToNum
|
## UUIDStringToNum
|
||||||
|
|
||||||
Accepts a string containing 36 characters in the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, and returns it as a set of bytes in a [FixedString(16)](../../sql-reference/data-types/fixedstring.md).
|
Accepts `string` containing 36 characters in the format `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`, and returns a [FixedString(16)](../../sql-reference/data-types/fixedstring.md) as its binary representation, with its format optionally specified by `variant` (`Big-endian` by default).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
UUIDStringToNum(String)
|
UUIDStringToNum(string[, variant = 1])
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `string` — String of 36 characters or FixedString(36). [String](../../sql-reference/syntax.md#syntax-string-literal).
|
||||||
|
- `variant` — Integer, representing a variant as specified by [RFC4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.1). 1 = `Big-endian` (default), 2 = `Microsoft`.
|
||||||
|
|
||||||
**Returned value**
|
**Returned value**
|
||||||
|
|
||||||
FixedString(16)
|
FixedString(16)
|
||||||
@ -235,14 +242,33 @@ SELECT
|
|||||||
└──────────────────────────────────────┴──────────────────┘
|
└──────────────────────────────────────┴──────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT
|
||||||
|
'612f3c40-5d3b-217e-707b-6a546a3d7b29' AS uuid,
|
||||||
|
UUIDStringToNum(uuid, 2) AS bytes
|
||||||
|
```
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─uuid─────────────────────────────────┬─bytes────────────┐
|
||||||
|
│ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ @</a;]~!p{jTj={) │
|
||||||
|
└──────────────────────────────────────┴──────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
## UUIDNumToString
|
## UUIDNumToString
|
||||||
|
|
||||||
Accepts a [FixedString(16)](../../sql-reference/data-types/fixedstring.md) value, and returns a string containing 36 characters in text format.
|
Accepts `binary` containing a binary representation of a UUID, with its format optionally specified by `variant` (`Big-endian` by default), and returns a string containing 36 characters in text format.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
UUIDNumToString(FixedString(16))
|
UUIDNumToString(binary[, variant = 1])
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `binary` — [FixedString(16)](../../sql-reference/data-types/fixedstring.md) as a binary representation of a UUID.
|
||||||
|
- `variant` — Integer, representing a variant as specified by [RFC4122](https://datatracker.ietf.org/doc/html/rfc4122#section-4.1.1). 1 = `Big-endian` (default), 2 = `Microsoft`.
|
||||||
|
|
||||||
**Returned value**
|
**Returned value**
|
||||||
|
|
||||||
String.
|
String.
|
||||||
@ -261,6 +287,18 @@ SELECT
|
|||||||
└──────────────────┴──────────────────────────────────────┘
|
└──────────────────┴──────────────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT
|
||||||
|
'@</a;]~!p{jTj={)' AS bytes,
|
||||||
|
UUIDNumToString(toFixedString(bytes, 16), 2) AS uuid
|
||||||
|
```
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─bytes────────────┬─uuid─────────────────────────────────┐
|
||||||
|
│ @</a;]~!p{jTj={) │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │
|
||||||
|
└──────────────────┴──────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
## serverUUID()
|
## serverUUID()
|
||||||
|
|
||||||
Returns the random and unique UUID, which is generated when the server is first started and stored forever. The result writes to the file `uuid` created in the ClickHouse server directory `/var/lib/clickhouse/`.
|
Returns the random and unique UUID, which is generated when the server is first started and stored forever. The result writes to the file `uuid` created in the ClickHouse server directory `/var/lib/clickhouse/`.
|
||||||
|
@ -194,7 +194,7 @@ To restore data from a backup, do the following:
|
|||||||
|
|
||||||
Restoring from a backup does not require stopping the server.
|
Restoring from a backup does not require stopping the server.
|
||||||
|
|
||||||
For more information about backups and restoring data, see the [Data Backup](../../../operations/backup.md) section.
|
For more information about backups and restoring data, see the [Data Backup](/docs/en/manage/backups.mdx) section.
|
||||||
|
|
||||||
## UNFREEZE PARTITION
|
## UNFREEZE PARTITION
|
||||||
|
|
||||||
|
@ -31,7 +31,7 @@ By default, ClickHouse uses its own [Atomic](../../../engines/database-engines/a
|
|||||||
|
|
||||||
### COMMENT
|
### COMMENT
|
||||||
|
|
||||||
You can add a comment to the database when you creating it.
|
You can add a comment to the database when you are creating it.
|
||||||
|
|
||||||
The comment is supported for all database engines.
|
The comment is supported for all database engines.
|
||||||
|
|
||||||
|
@ -1,21 +0,0 @@
|
|||||||
---
|
|
||||||
slug: /en/sql-reference/statements/misc
|
|
||||||
toc_hidden: true
|
|
||||||
sidebar_position: 70
|
|
||||||
---
|
|
||||||
|
|
||||||
# Miscellaneous Statements
|
|
||||||
|
|
||||||
- [ATTACH](../../sql-reference/statements/attach.md)
|
|
||||||
- [CHECK TABLE](../../sql-reference/statements/check-table.md)
|
|
||||||
- [DESCRIBE TABLE](../../sql-reference/statements/describe-table.md)
|
|
||||||
- [DETACH](../../sql-reference/statements/detach.md)
|
|
||||||
- [DROP](../../sql-reference/statements/drop.md)
|
|
||||||
- [EXISTS](../../sql-reference/statements/exists.md)
|
|
||||||
- [KILL](../../sql-reference/statements/kill.md)
|
|
||||||
- [OPTIMIZE](../../sql-reference/statements/optimize.md)
|
|
||||||
- [RENAME](../../sql-reference/statements/rename.md)
|
|
||||||
- [SET](../../sql-reference/statements/set.md)
|
|
||||||
- [SET ROLE](../../sql-reference/statements/set-role.md)
|
|
||||||
- [TRUNCATE](../../sql-reference/statements/truncate.md)
|
|
||||||
- [USE](../../sql-reference/statements/use.md)
|
|
@ -81,6 +81,7 @@ Multiple path components can have globs. For being processed file must exist and
|
|||||||
- `?` — Substitutes any single character.
|
- `?` — Substitutes any single character.
|
||||||
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
|
- `{some_string,another_string,yet_another_one}` — Substitutes any of strings `'some_string', 'another_string', 'yet_another_one'`.
|
||||||
- `{N..M}` — Substitutes any number in range from N to M including both borders.
|
- `{N..M}` — Substitutes any number in range from N to M including both borders.
|
||||||
|
- `**` - Fetches all files inside the folder recursively.
|
||||||
|
|
||||||
Constructions with `{}` are similar to the [remote](remote.md) table function.
|
Constructions with `{}` are similar to the [remote](remote.md) table function.
|
||||||
|
|
||||||
@ -119,6 +120,22 @@ Query the data from files named `file000`, `file001`, … , `file999`:
|
|||||||
SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32');
|
SELECT count(*) FROM file('big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name String, value UInt32');
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query the data from all files inside `big_dir` directory recursively:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT count(*) FROM file('big_dir/**', 'CSV', 'name String, value UInt32');
|
||||||
|
```
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query the data from all `file002` files from any folder inside `big_dir` directory recursively:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt32');
|
||||||
|
```
|
||||||
|
|
||||||
## Virtual Columns
|
## Virtual Columns
|
||||||
|
|
||||||
- `_path` — Path to the file.
|
- `_path` — Path to the file.
|
||||||
|
@ -127,6 +127,18 @@ INSERT INTO FUNCTION s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-
|
|||||||
SELECT name, value FROM existing_table;
|
SELECT name, value FROM existing_table;
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Glob `**` can be used for recursive directory traversal. Consider the example below; it will fetch all files from the `my-test-bucket-768` directory recursively:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/**', 'CSV', 'name String, value UInt32', 'gzip');
|
||||||
|
```
|
||||||
|
|
||||||
|
The query below gets data from all `test-data.csv.gz` files from any folder inside the `my-test-bucket` directory recursively:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT * FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/my-test-bucket-768/**/test-data.csv.gz', 'CSV', 'name String, value UInt32', 'gzip');
|
||||||
|
```
|
||||||
|
|
||||||
## Partitioned Write
|
## Partitioned Write
|
||||||
|
|
||||||
If you specify `PARTITION BY` expression when inserting data into `S3` table, a separate file is created for each partition value. Splitting the data into separate files helps to improve reading operations efficiency.
|
If you specify `PARTITION BY` expression when inserting data into `S3` table, a separate file is created for each partition value. Splitting the data into separate files helps to improve reading operations efficiency.
|
||||||
|
@ -1,14 +0,0 @@
|
|||||||
---
|
|
||||||
slug: /ru/development/browse-code
|
|
||||||
sidebar_position: 72
|
|
||||||
sidebar_label: "Навигация по коду ClickHouse"
|
|
||||||
---
|
|
||||||
|
|
||||||
|
|
||||||
# Навигация по коду ClickHouse {#navigatsiia-po-kodu-clickhouse}
|
|
||||||
|
|
||||||
Для навигации по коду онлайн доступен **Woboq**, он расположен [здесь](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). В нём реализовано удобное перемещение между исходными файлами, семантическая подсветка, подсказки, индексация и поиск. Слепок кода обновляется ежедневно.
|
|
||||||
|
|
||||||
Также вы можете просматривать исходники на [GitHub](https://github.com/ClickHouse/ClickHouse).
|
|
||||||
|
|
||||||
Если вы интересуетесь, какую среду разработки выбрать для работы с ClickHouse, мы рекомендуем CLion, QT Creator, VSCode или KDevelop (с некоторыми предостережениями). Вы можете использовать свою любимую среду разработки, Vim и Emacs тоже считаются.
|
|
@ -87,14 +87,15 @@ SETTINGS
|
|||||||
|
|
||||||
<summary>Устаревший способ создания таблицы</summary>
|
<summary>Устаревший способ создания таблицы</summary>
|
||||||
|
|
||||||
:::note "Attention"
|
:::note "Attention"
|
||||||
Не используйте этот метод в новых проектах. По возможности переключите старые проекты на метод, описанный выше.
|
Не используйте этот метод в новых проектах. По возможности переключите старые проекты на метод, описанный выше.
|
||||||
|
:::
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format
|
Kafka(kafka_broker_list, kafka_topic_list, kafka_group_name, kafka_format
|
||||||
[, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_skip_broken_messages])
|
[, kafka_row_delimiter, kafka_schema, kafka_num_consumers, kafka_skip_broken_messages])
|
||||||
```
|
```
|
||||||
:::
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
## Описание {#opisanie}
|
## Описание {#opisanie}
|
||||||
|
@ -39,9 +39,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
|||||||
|
|
||||||
<summary>Устаревший способ создания таблицы</summary>
|
<summary>Устаревший способ создания таблицы</summary>
|
||||||
|
|
||||||
:::note "Attention"
|
:::note "Attention"
|
||||||
Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше.
|
Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||||
(
|
(
|
||||||
|
@ -43,9 +43,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
|||||||
|
|
||||||
<summary>Устаревший способ создания таблицы</summary>
|
<summary>Устаревший способ создания таблицы</summary>
|
||||||
|
|
||||||
:::note "Attention"
|
:::note "Attention"
|
||||||
Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше.
|
Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||||
(
|
(
|
||||||
@ -59,7 +60,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
|||||||
|
|
||||||
- `sign` — Имя столбца с типом строки: `1` — строка состояния, `-1` — строка отмены состояния.
|
- `sign` — Имя столбца с типом строки: `1` — строка состояния, `-1` — строка отмены состояния.
|
||||||
|
|
||||||
Тип данных столбца — `Int8`.
|
Тип данных столбца — `Int8`.
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
|
@ -55,9 +55,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
|||||||
|
|
||||||
<summary>Устаревший способ создания таблицы</summary>
|
<summary>Устаревший способ создания таблицы</summary>
|
||||||
|
|
||||||
:::note "Attention"
|
:::note "Attention"
|
||||||
Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше.
|
Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||||
(
|
(
|
||||||
|
@ -115,9 +115,10 @@ ENGINE MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDa
|
|||||||
|
|
||||||
<summary>Устаревший способ создания таблицы</summary>
|
<summary>Устаревший способ создания таблицы</summary>
|
||||||
|
|
||||||
:::note "Attention"
|
:::note "Attention"
|
||||||
Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ, описанный выше.
|
Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ, описанный выше.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||||
(
|
(
|
||||||
|
@ -42,9 +42,10 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
|||||||
|
|
||||||
<summary>Устаревший способ создания таблицы</summary>
|
<summary>Устаревший способ создания таблицы</summary>
|
||||||
|
|
||||||
:::note "Attention"
|
:::note "Attention"
|
||||||
Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше.
|
Не используйте этот способ в новых проектах и по возможности переведите старые проекты на способ описанный выше.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
|
||||||
(
|
(
|
||||||
|
@ -34,6 +34,7 @@ sidebar_label: "Клиентские библиотеки от сторонни
|
|||||||
- [node-clickhouse](https://github.com/apla/node-clickhouse)
|
- [node-clickhouse](https://github.com/apla/node-clickhouse)
|
||||||
- [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
|
- [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
|
||||||
- [clickhouse-client](https://github.com/depyronick/clickhouse-client)
|
- [clickhouse-client](https://github.com/depyronick/clickhouse-client)
|
||||||
|
- [node-clickhouse-orm](https://github.com/zimv/node-clickhouse-orm)
|
||||||
- Perl
|
- Perl
|
||||||
- [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
|
- [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
|
||||||
- [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
|
- [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
|
||||||
|
@ -6,7 +6,7 @@ sidebar_label: Date
|
|||||||
|
|
||||||
# Date {#data-type-date}
|
# Date {#data-type-date}
|
||||||
|
|
||||||
Дата. Хранится в двух байтах в виде (беззнакового) числа дней, прошедших от 1970-01-01. Позволяет хранить значения от чуть больше, чем начала unix-эпохи до верхнего порога, определяющегося константой на этапе компиляции (сейчас - до 2149 года, последний полностью поддерживаемый год - 2148).
|
Дата. Хранится в двух байтах в виде (беззнакового) числа дней, прошедших от 1970-01-01. Позволяет хранить значения от чуть больше, чем начала unix-эпохи до верхнего порога, определяющегося константой на этапе компиляции (сейчас - до 2106 года, последний полностью поддерживаемый год - 2105).
|
||||||
|
|
||||||
Диапазон значений: \[1970-01-01, 2149-06-06\].
|
Диапазон значений: \[1970-01-01, 2149-06-06\].
|
||||||
|
|
||||||
|
@ -272,15 +272,9 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp;
|
|||||||
|
|
||||||
Поведение для
|
Поведение для
|
||||||
* `enable_extended_results_for_datetime_functions = 0`: Функции `toStartOf*`, `toLastDayOfMonth`, `toMonday` возвращают `Date` или `DateTime`. Функции `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` возвращают `DateTime`. Хотя эти функции могут принимать значения типа `Date32` или `DateTime64` в качестве аргумента, при обработке аргумента вне нормального диапазона значений (`1970` - `2148` для `Date` и `1970-01-01 00:00:00`-`2106-02-07 08:28:15` для `DateTime`) будет получен некорректный результат.
|
* `enable_extended_results_for_datetime_functions = 0`: Функции `toStartOf*`, `toLastDayOfMonth`, `toMonday` возвращают `Date` или `DateTime`. Функции `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` возвращают `DateTime`. Хотя эти функции могут принимать значения типа `Date32` или `DateTime64` в качестве аргумента, при обработке аргумента вне нормального диапазона значений (`1970` - `2148` для `Date` и `1970-01-01 00:00:00`-`2106-02-07 08:28:15` для `DateTime`) будет получен некорректный результат.
|
||||||
В случае если значение аргумента вне нормального диапазона:
|
|
||||||
* `1970-01-01 (00:00:00)` будет возвращён для моментов времени до 1970 года,
|
|
||||||
* `2106-02-07 08:28:15` будет взят в качестве аргумента, если полученный аргумент превосходит данное значение и возвращаемый тип - `DateTime`,
|
|
||||||
* `2149-06-06` будет взят в качестве аргумента, если полученный аргумент превосходит данное значение и возвращаемый тип - `Date`,
|
|
||||||
* `2149-05-31` будет результатом функции `toLastDayOfMonth` при обработке аргумента больше `2149-05-31`.
|
|
||||||
* `enable_extended_results_for_datetime_functions = 1`:
|
* `enable_extended_results_for_datetime_functions = 1`:
|
||||||
* Функции `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` возвращают `Date` или `DateTime` если их аргумент `Date` или `DateTime` и они возвращают `Date32` или `DateTime64` если их аргумент `Date32` или `DateTime64`.
|
* Функции `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` возвращают `Date` или `DateTime` если их аргумент `Date` или `DateTime` и они возвращают `Date32` или `DateTime64` если их аргумент `Date32` или `DateTime64`.
|
||||||
* Функции `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` возвращают `DateTime` если их аргумент `Date` или `DateTime` и они возвращают `DateTime64` если их аргумент `Date32` или `DateTime64`.
|
* Функции `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` возвращают `DateTime` если их аргумент `Date` или `DateTime` и они возвращают `DateTime64` если их аргумент `Date32` или `DateTime64`.
|
||||||
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
## toStartOfYear {#tostartofyear}
|
## toStartOfYear {#tostartofyear}
|
||||||
@ -321,20 +315,20 @@ SELECT toStartOfISOYear(toDate('2017-01-01')) AS ISOYear20170101;
|
|||||||
Округляет дату или дату-с-временем до последнего числа месяца.
|
Округляет дату или дату-с-временем до последнего числа месяца.
|
||||||
Возвращается дата.
|
Возвращается дата.
|
||||||
|
|
||||||
Если `toLastDayOfMonth` вызывается с аргументом типа `Date` большим чем 2149-05-31, то результат будет вычислен от аргумента 2149-05-31.
|
:::note "Attention"
|
||||||
|
Возвращаемое значение для некорректных дат зависит от реализации. ClickHouse может вернуть нулевую дату, выбросить исключение, или выполнить «естественное» перетекание дат между месяцами.
|
||||||
|
:::
|
||||||
|
|
||||||
## toMonday {#tomonday}
|
## toMonday {#tomonday}
|
||||||
|
|
||||||
Округляет дату или дату-с-временем вниз до ближайшего понедельника.
|
Округляет дату или дату-с-временем вниз до ближайшего понедельника.
|
||||||
Частный случай: для дат `1970-01-01`, `1970-01-02`, `1970-01-03` и `1970-01-04` результатом будет `1970-01-01`.
|
|
||||||
Возвращается дата.
|
Возвращается дата.
|
||||||
|
|
||||||
## toStartOfWeek(t[,mode]) {#tostartofweek}
|
## toStartOfWeek(t[,mode]) {#tostartofweek}
|
||||||
|
|
||||||
Округляет дату или дату со временем до ближайшего воскресенья или понедельника в соответствии с mode.
|
Округляет дату или дату со временем до ближайшего воскресенья или понедельника в соответствии с mode.
|
||||||
Возвращается дата.
|
Возвращается дата.
|
||||||
Частный случай: для дат `1970-01-01`, `1970-01-02`, `1970-01-03` и `1970-01-04` (и `1970-01-05`, если `mode` равен `1`) результатом будет `1970-01-01`.
|
Аргумент mode работает точно так же, как аргумент mode [toWeek()](#toweek). Если аргумент mode опущен, то используется режим 0.
|
||||||
Аргумент `mode` работает точно так же, как аргумент mode [toWeek()](#toweek). Если аргумент mode опущен, то используется режим 0.
|
|
||||||
|
|
||||||
## toStartOfDay {#tostartofday}
|
## toStartOfDay {#tostartofday}
|
||||||
|
|
||||||
@ -721,9 +715,9 @@ date_diff('unit', startdate, enddate, [timezone])
|
|||||||
- `quarter`
|
- `quarter`
|
||||||
- `year`
|
- `year`
|
||||||
|
|
||||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md) или [DateTime](../../sql-reference/data-types/datetime.md).
|
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||||
|
|
||||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md) или [DateTime](../../sql-reference/data-types/datetime.md).
|
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||||
|
|
||||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||||
|
|
||||||
@ -975,8 +969,7 @@ SELECT now('Europe/Moscow');
|
|||||||
|
|
||||||
## timeSlots(StartTime, Duration,\[, Size\]) {#timeslotsstarttime-duration-size}
|
## timeSlots(StartTime, Duration,\[, Size\]) {#timeslotsstarttime-duration-size}
|
||||||
Для интервала, начинающегося в `StartTime` и длящегося `Duration` секунд, возвращает массив моментов времени, кратных `Size`. Параметр `Size` указывать необязательно, по умолчанию он равен 1800 секундам (30 минутам) - необязательный параметр.
|
Для интервала, начинающегося в `StartTime` и длящегося `Duration` секунд, возвращает массив моментов времени, кратных `Size`. Параметр `Size` указывать необязательно, по умолчанию он равен 1800 секундам (30 минутам) - необязательный параметр.
|
||||||
Данная функция может использоваться, например, для анализа количества просмотров страницы за соответствующую сессию.
|
|
||||||
Аргумент `StartTime` может иметь тип `DateTime` или `DateTime64`. В случае, если используется `DateTime`, аргументы `Duration` и `Size` должны иметь тип `UInt32`; Для DateTime64 они должны быть типа `Decimal64`.
|
|
||||||
Возвращает массив DateTime/DateTime64 (тип будет совпадать с типом параметра ’StartTime’). Для DateTime64 масштаб (scale) возвращаемой величины может отличаться от масштаба аргумента ’StartTime’ --- результат будет иметь наибольший масштаб среди всех данных аргументов.
|
Возвращает массив DateTime/DateTime64 (тип будет совпадать с типом параметра ’StartTime’). Для DateTime64 масштаб (scale) возвращаемой величины может отличаться от масштаба аргумента ’StartTime’ --- результат будет иметь наибольший масштаб среди всех данных аргументов.
|
||||||
|
|
||||||
Пример использования:
|
Пример использования:
|
||||||
@ -1085,7 +1078,7 @@ dateName(date_part, date)
|
|||||||
**Аргументы**
|
**Аргументы**
|
||||||
|
|
||||||
- `date_part` — часть даты. Возможные значения: 'year', 'quarter', 'month', 'week', 'dayofyear', 'day', 'weekday', 'hour', 'minute', 'second'. [String](../../sql-reference/data-types/string.md).
|
- `date_part` — часть даты. Возможные значения: 'year', 'quarter', 'month', 'week', 'dayofyear', 'day', 'weekday', 'hour', 'minute', 'second'. [String](../../sql-reference/data-types/string.md).
|
||||||
- `date` — дата. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
- `date` — дата. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||||
- `timezone` — часовой пояс. Необязательный аргумент. [String](../../sql-reference/data-types/string.md).
|
- `timezone` — часовой пояс. Необязательный аргумент. [String](../../sql-reference/data-types/string.md).
|
||||||
|
|
||||||
**Возвращаемое значение**
|
**Возвращаемое значение**
|
||||||
|
@ -568,7 +568,7 @@ ORDER BY c DESC
|
|||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SELECT
|
SELECT
|
||||||
transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s,
|
transform(domain(Referer), ['yandex.ru', 'google.ru', 'vkontakte.ru'], ['www.yandex', 'example.com', 'vk.com']) AS s,
|
||||||
count() AS c
|
count() AS c
|
||||||
FROM test.hits
|
FROM test.hits
|
||||||
GROUP BY domain(Referer)
|
GROUP BY domain(Referer)
|
||||||
|
@ -122,9 +122,9 @@ FROM t_null
|
|||||||
|
|
||||||
Существует два варианта IN-ов с подзапросами (аналогично для JOIN-ов): обычный `IN` / `JOIN` и `GLOBAL IN` / `GLOBAL JOIN`. Они отличаются способом выполнения при распределённой обработке запроса.
|
Существует два варианта IN-ов с подзапросами (аналогично для JOIN-ов): обычный `IN` / `JOIN` и `GLOBAL IN` / `GLOBAL JOIN`. Они отличаются способом выполнения при распределённой обработке запроса.
|
||||||
|
|
||||||
:::note "Attention"
|
:::note "Attention"
|
||||||
Помните, что алгоритмы, описанные ниже, могут работать иначе в зависимости от [настройки](../../operations/settings/settings.md) `distributed_product_mode`.
|
Помните, что алгоритмы, описанные ниже, могут работать иначе в зависимости от [настройки](../../operations/settings/settings.md) `distributed_product_mode`.
|
||||||
:::
|
:::
|
||||||
При использовании обычного IN-а, запрос отправляется на удалённые серверы, и на каждом из них выполняются подзапросы в секциях `IN` / `JOIN`.
|
При использовании обычного IN-а, запрос отправляется на удалённые серверы, и на каждом из них выполняются подзапросы в секциях `IN` / `JOIN`.
|
||||||
|
|
||||||
При использовании `GLOBAL IN` / `GLOBAL JOIN-а`, сначала выполняются все подзапросы для `GLOBAL IN` / `GLOBAL JOIN-ов`, и результаты складываются во временные таблицы. Затем эти временные таблицы передаются на каждый удалённый сервер, и на них выполняются запросы, с использованием этих переданных временных данных.
|
При использовании `GLOBAL IN` / `GLOBAL JOIN-а`, сначала выполняются все подзапросы для `GLOBAL IN` / `GLOBAL JOIN-ов`, и результаты складываются во временные таблицы. Затем эти временные таблицы передаются на каждый удалённый сервер, и на них выполняются запросы, с использованием этих переданных временных данных.
|
||||||
|
@ -1,21 +0,0 @@
|
|||||||
---
|
|
||||||
slug: /ru/sql-reference/statements/misc
|
|
||||||
sidebar_position: 41
|
|
||||||
---
|
|
||||||
|
|
||||||
# Прочие виды запросов {#prochie-vidy-zaprosov}
|
|
||||||
|
|
||||||
- [ATTACH](../../sql-reference/statements/attach.md)
|
|
||||||
- [CHECK TABLE](../../sql-reference/statements/check-table.md)
|
|
||||||
- [DESCRIBE TABLE](../../sql-reference/statements/describe-table.md)
|
|
||||||
- [DETACH](../../sql-reference/statements/detach.md)
|
|
||||||
- [DROP](../../sql-reference/statements/drop.md)
|
|
||||||
- [EXISTS](../../sql-reference/statements/exists.md)
|
|
||||||
- [KILL](../../sql-reference/statements/kill.md)
|
|
||||||
- [OPTIMIZE](../../sql-reference/statements/optimize.md)
|
|
||||||
- [RENAME](../../sql-reference/statements/rename.md)
|
|
||||||
- [SET](../../sql-reference/statements/set.md)
|
|
||||||
- [SET ROLE](../../sql-reference/statements/set-role.md)
|
|
||||||
- [TRUNCATE](../../sql-reference/statements/truncate.md)
|
|
||||||
- [USE](../../sql-reference/statements/use.md)
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
|||||||
---
|
|
||||||
slug: /zh/development/browse-code
|
|
||||||
sidebar_position: 63
|
|
||||||
sidebar_label: "\u6D4F\u89C8\u6E90\u4EE3\u7801"
|
|
||||||
---
|
|
||||||
|
|
||||||
# 浏览ClickHouse源代码 {#browse-clickhouse-source-code}
|
|
||||||
|
|
||||||
您可以使用 **Woboq** 在线代码浏览器 [点击这里](https://clickhouse.com/codebrowser/ClickHouse/src/index.html). 它提供了代码导航和语义突出显示、搜索和索引。 代码快照每天更新。
|
|
||||||
|
|
||||||
此外,您还可以像往常一样浏览源代码 [GitHub](https://github.com/ClickHouse/ClickHouse)
|
|
||||||
|
|
||||||
如果你希望了解哪种IDE较好,我们推荐使用CLion,QT Creator,VS Code和KDevelop(有注意事项)。 您可以使用任何您喜欢的IDE。 Vim和Emacs也可以。
|
|
@ -1,10 +1,460 @@
|
|||||||
---
|
---
|
||||||
slug: /zh/getting-started/example-datasets/brown-benchmark
|
slug: /zh/getting-started/example-datasets/brown-benchmark
|
||||||
sidebar_label: Brown University Benchmark
|
sidebar_label: 布朗大学基准
|
||||||
description: A new analytical benchmark for machine-generated log data
|
description: 机器生成日志数据的新分析基准
|
||||||
title: "Brown University Benchmark"
|
title: "布朗大学基准"
|
||||||
---
|
---
|
||||||
|
|
||||||
import Content from '@site/docs/en/getting-started/example-datasets/brown-benchmark.md';
|
`MgBench` 是机器生成的日志数据的新分析基准,[Andrew Crotty](http://cs.brown.edu/people/acrotty/)。
|
||||||
|
|
||||||
<Content />
|
下载数据:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
wget https://datasets.clickhouse.com/mgbench{1..3}.csv.xz
|
||||||
|
```
|
||||||
|
|
||||||
|
解压数据:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
xz -v -d mgbench{1..3}.csv.xz
|
||||||
|
```
|
||||||
|
|
||||||
|
创建数据库和表:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE DATABASE mgbench;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
USE mgbench;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE mgbench.logs1 (
|
||||||
|
log_time DateTime,
|
||||||
|
machine_name LowCardinality(String),
|
||||||
|
machine_group LowCardinality(String),
|
||||||
|
cpu_idle Nullable(Float32),
|
||||||
|
cpu_nice Nullable(Float32),
|
||||||
|
cpu_system Nullable(Float32),
|
||||||
|
cpu_user Nullable(Float32),
|
||||||
|
cpu_wio Nullable(Float32),
|
||||||
|
disk_free Nullable(Float32),
|
||||||
|
disk_total Nullable(Float32),
|
||||||
|
part_max_used Nullable(Float32),
|
||||||
|
load_fifteen Nullable(Float32),
|
||||||
|
load_five Nullable(Float32),
|
||||||
|
load_one Nullable(Float32),
|
||||||
|
mem_buffers Nullable(Float32),
|
||||||
|
mem_cached Nullable(Float32),
|
||||||
|
mem_free Nullable(Float32),
|
||||||
|
mem_shared Nullable(Float32),
|
||||||
|
swap_free Nullable(Float32),
|
||||||
|
bytes_in Nullable(Float32),
|
||||||
|
bytes_out Nullable(Float32)
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree()
|
||||||
|
ORDER BY (machine_group, machine_name, log_time);
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE mgbench.logs2 (
|
||||||
|
log_time DateTime,
|
||||||
|
client_ip IPv4,
|
||||||
|
request String,
|
||||||
|
status_code UInt16,
|
||||||
|
object_size UInt64
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree()
|
||||||
|
ORDER BY log_time;
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE mgbench.logs3 (
|
||||||
|
log_time DateTime64,
|
||||||
|
device_id FixedString(15),
|
||||||
|
device_name LowCardinality(String),
|
||||||
|
device_type LowCardinality(String),
|
||||||
|
device_floor UInt8,
|
||||||
|
event_type LowCardinality(String),
|
||||||
|
event_unit FixedString(1),
|
||||||
|
event_value Nullable(Float32)
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree()
|
||||||
|
ORDER BY (event_type, log_time);
|
||||||
|
```
|
||||||
|
|
||||||
|
插入数据:
|
||||||
|
|
||||||
|
```
|
||||||
|
clickhouse-client --query "INSERT INTO mgbench.logs1 FORMAT CSVWithNames" < mgbench1.csv
|
||||||
|
clickhouse-client --query "INSERT INTO mgbench.logs2 FORMAT CSVWithNames" < mgbench2.csv
|
||||||
|
clickhouse-client --query "INSERT INTO mgbench.logs3 FORMAT CSVWithNames" < mgbench3.csv
|
||||||
|
```
|
||||||
|
|
||||||
|
## 运行基准查询:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
USE mgbench;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q1.1: 自午夜以来每个 Web 服务器的 CPU/网络利用率是多少?
|
||||||
|
|
||||||
|
SELECT machine_name,
|
||||||
|
MIN(cpu) AS cpu_min,
|
||||||
|
MAX(cpu) AS cpu_max,
|
||||||
|
AVG(cpu) AS cpu_avg,
|
||||||
|
MIN(net_in) AS net_in_min,
|
||||||
|
MAX(net_in) AS net_in_max,
|
||||||
|
AVG(net_in) AS net_in_avg,
|
||||||
|
MIN(net_out) AS net_out_min,
|
||||||
|
MAX(net_out) AS net_out_max,
|
||||||
|
AVG(net_out) AS net_out_avg
|
||||||
|
FROM (
|
||||||
|
SELECT machine_name,
|
||||||
|
COALESCE(cpu_user, 0.0) AS cpu,
|
||||||
|
COALESCE(bytes_in, 0.0) AS net_in,
|
||||||
|
COALESCE(bytes_out, 0.0) AS net_out
|
||||||
|
FROM logs1
|
||||||
|
WHERE machine_name IN ('anansi','aragog','urd')
|
||||||
|
AND log_time >= TIMESTAMP '2017-01-11 00:00:00'
|
||||||
|
) AS r
|
||||||
|
GROUP BY machine_name;
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q1.2:最近一天有哪些机房的机器离线?
|
||||||
|
|
||||||
|
SELECT machine_name,
|
||||||
|
log_time
|
||||||
|
FROM logs1
|
||||||
|
WHERE (machine_name LIKE 'cslab%' OR
|
||||||
|
machine_name LIKE 'mslab%')
|
||||||
|
AND load_one IS NULL
|
||||||
|
AND log_time >= TIMESTAMP '2017-01-10 00:00:00'
|
||||||
|
ORDER BY machine_name,
|
||||||
|
log_time;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q1.3:特定工作站过去 10 天的每小时的平均指标是多少?
|
||||||
|
|
||||||
|
SELECT dt,
|
||||||
|
hr,
|
||||||
|
AVG(load_fifteen) AS load_fifteen_avg,
|
||||||
|
AVG(load_five) AS load_five_avg,
|
||||||
|
AVG(load_one) AS load_one_avg,
|
||||||
|
AVG(mem_free) AS mem_free_avg,
|
||||||
|
AVG(swap_free) AS swap_free_avg
|
||||||
|
FROM (
|
||||||
|
SELECT CAST(log_time AS DATE) AS dt,
|
||||||
|
EXTRACT(HOUR FROM log_time) AS hr,
|
||||||
|
load_fifteen,
|
||||||
|
load_five,
|
||||||
|
load_one,
|
||||||
|
mem_free,
|
||||||
|
swap_free
|
||||||
|
FROM logs1
|
||||||
|
WHERE machine_name = 'babbage'
|
||||||
|
AND load_fifteen IS NOT NULL
|
||||||
|
AND load_five IS NOT NULL
|
||||||
|
AND load_one IS NOT NULL
|
||||||
|
AND mem_free IS NOT NULL
|
||||||
|
AND swap_free IS NOT NULL
|
||||||
|
AND log_time >= TIMESTAMP '2017-01-01 00:00:00'
|
||||||
|
) AS r
|
||||||
|
GROUP BY dt,
|
||||||
|
hr
|
||||||
|
ORDER BY dt,
|
||||||
|
hr;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q1.4: 1 个月内,每台服务器的磁盘 I/O 阻塞的频率是多少?
|
||||||
|
|
||||||
|
SELECT machine_name,
|
||||||
|
COUNT(*) AS spikes
|
||||||
|
FROM logs1
|
||||||
|
WHERE machine_group = 'Servers'
|
||||||
|
AND cpu_wio > 0.99
|
||||||
|
AND log_time >= TIMESTAMP '2016-12-01 00:00:00'
|
||||||
|
AND log_time < TIMESTAMP '2017-01-01 00:00:00'
|
||||||
|
GROUP BY machine_name
|
||||||
|
ORDER BY spikes DESC
|
||||||
|
LIMIT 10;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q1.5:哪些外部可访问的虚拟机的运行内存不足?
|
||||||
|
|
||||||
|
SELECT machine_name,
|
||||||
|
dt,
|
||||||
|
MIN(mem_free) AS mem_free_min
|
||||||
|
FROM (
|
||||||
|
SELECT machine_name,
|
||||||
|
CAST(log_time AS DATE) AS dt,
|
||||||
|
mem_free
|
||||||
|
FROM logs1
|
||||||
|
WHERE machine_group = 'DMZ'
|
||||||
|
AND mem_free IS NOT NULL
|
||||||
|
) AS r
|
||||||
|
GROUP BY machine_name,
|
||||||
|
dt
|
||||||
|
HAVING MIN(mem_free) < 10000
|
||||||
|
ORDER BY machine_name,
|
||||||
|
dt;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q1.6: 每小时所有文件服务器的总网络流量是多少?
|
||||||
|
|
||||||
|
SELECT dt,
|
||||||
|
hr,
|
||||||
|
SUM(net_in) AS net_in_sum,
|
||||||
|
SUM(net_out) AS net_out_sum,
|
||||||
|
SUM(net_in) + SUM(net_out) AS both_sum
|
||||||
|
FROM (
|
||||||
|
SELECT CAST(log_time AS DATE) AS dt,
|
||||||
|
EXTRACT(HOUR FROM log_time) AS hr,
|
||||||
|
COALESCE(bytes_in, 0.0) / 1000000000.0 AS net_in,
|
||||||
|
COALESCE(bytes_out, 0.0) / 1000000000.0 AS net_out
|
||||||
|
FROM logs1
|
||||||
|
WHERE machine_name IN ('allsorts','andes','bigred','blackjack','bonbon',
|
||||||
|
'cadbury','chiclets','cotton','crows','dove','fireball','hearts','huey',
|
||||||
|
'lindt','milkduds','milkyway','mnm','necco','nerds','orbit','peeps',
|
||||||
|
'poprocks','razzles','runts','smarties','smuggler','spree','stride',
|
||||||
|
'tootsie','trident','wrigley','york')
|
||||||
|
) AS r
|
||||||
|
GROUP BY dt,
|
||||||
|
hr
|
||||||
|
ORDER BY both_sum DESC
|
||||||
|
LIMIT 10;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q2.1:过去 2 周内哪些请求导致了服务器错误?
|
||||||
|
|
||||||
|
SELECT *
|
||||||
|
FROM logs2
|
||||||
|
WHERE status_code >= 500
|
||||||
|
AND log_time >= TIMESTAMP '2012-12-18 00:00:00'
|
||||||
|
ORDER BY log_time;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q2.2:在特定的某 2 周内,用户密码文件是否被泄露了?
|
||||||
|
|
||||||
|
SELECT *
|
||||||
|
FROM logs2
|
||||||
|
WHERE status_code >= 200
|
||||||
|
AND status_code < 300
|
||||||
|
AND request LIKE '%/etc/passwd%'
|
||||||
|
AND log_time >= TIMESTAMP '2012-05-06 00:00:00'
|
||||||
|
AND log_time < TIMESTAMP '2012-05-20 00:00:00';
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q2.3:过去一个月顶级请求的平均路径深度是多少?
|
||||||
|
|
||||||
|
SELECT top_level,
|
||||||
|
AVG(LENGTH(request) - LENGTH(REPLACE(request, '/', ''))) AS depth_avg
|
||||||
|
FROM (
|
||||||
|
SELECT SUBSTRING(request FROM 1 FOR len) AS top_level,
|
||||||
|
request
|
||||||
|
FROM (
|
||||||
|
SELECT POSITION(SUBSTRING(request FROM 2), '/') AS len,
|
||||||
|
request
|
||||||
|
FROM logs2
|
||||||
|
WHERE status_code >= 200
|
||||||
|
AND status_code < 300
|
||||||
|
AND log_time >= TIMESTAMP '2012-12-01 00:00:00'
|
||||||
|
) AS r
|
||||||
|
WHERE len > 0
|
||||||
|
) AS s
|
||||||
|
WHERE top_level IN ('/about','/courses','/degrees','/events',
|
||||||
|
'/grad','/industry','/news','/people',
|
||||||
|
'/publications','/research','/teaching','/ugrad')
|
||||||
|
GROUP BY top_level
|
||||||
|
ORDER BY top_level;
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q2.4:在过去的 3 个月里,哪些客户端发出了过多的请求?
|
||||||
|
|
||||||
|
SELECT client_ip,
|
||||||
|
COUNT(*) AS num_requests
|
||||||
|
FROM logs2
|
||||||
|
WHERE log_time >= TIMESTAMP '2012-10-01 00:00:00'
|
||||||
|
GROUP BY client_ip
|
||||||
|
HAVING COUNT(*) >= 100000
|
||||||
|
ORDER BY num_requests DESC;
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q2.5:每天的独立访问者数量是多少?
|
||||||
|
|
||||||
|
SELECT dt,
|
||||||
|
COUNT(DISTINCT client_ip)
|
||||||
|
FROM (
|
||||||
|
SELECT CAST(log_time AS DATE) AS dt,
|
||||||
|
client_ip
|
||||||
|
FROM logs2
|
||||||
|
) AS r
|
||||||
|
GROUP BY dt
|
||||||
|
ORDER BY dt;
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q2.6:平均和最大数据传输速率(Gbps)是多少?
|
||||||
|
|
||||||
|
SELECT AVG(transfer) / 125000000.0 AS transfer_avg,
|
||||||
|
MAX(transfer) / 125000000.0 AS transfer_max
|
||||||
|
FROM (
|
||||||
|
SELECT log_time,
|
||||||
|
SUM(object_size) AS transfer
|
||||||
|
FROM logs2
|
||||||
|
GROUP BY log_time
|
||||||
|
) AS r;
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q3.1:自 2019/11/29 17:00 以来,室温是否达到过冰点?
|
||||||
|
|
||||||
|
SELECT *
|
||||||
|
FROM logs3
|
||||||
|
WHERE event_type = 'temperature'
|
||||||
|
AND event_value <= 32.0
|
||||||
|
AND log_time >= '2019-11-29 17:00:00.000';
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q3.4:在过去的 6 个月里,每扇门打开的频率是多少?
|
||||||
|
|
||||||
|
SELECT device_name,
|
||||||
|
device_floor,
|
||||||
|
COUNT(*) AS ct
|
||||||
|
FROM logs3
|
||||||
|
WHERE event_type = 'door_open'
|
||||||
|
AND log_time >= '2019-06-01 00:00:00.000'
|
||||||
|
GROUP BY device_name,
|
||||||
|
device_floor
|
||||||
|
ORDER BY ct DESC;
|
||||||
|
```
|
||||||
|
|
||||||
|
下面的查询 3.5 使用了 UNION 关键词。设置该模式以便组合 SELECT 的查询结果。该设置仅在未明确指定 UNION ALL 或 UNION DISTINCT 但使用了 UNION 进行共享时使用。
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SET union_default_mode = 'DISTINCT'
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q3.5: 在冬季和夏季,建筑物内哪些地方会出现较大的温度变化?
|
||||||
|
|
||||||
|
WITH temperature AS (
|
||||||
|
SELECT dt,
|
||||||
|
device_name,
|
||||||
|
device_type,
|
||||||
|
device_floor
|
||||||
|
FROM (
|
||||||
|
SELECT dt,
|
||||||
|
hr,
|
||||||
|
device_name,
|
||||||
|
device_type,
|
||||||
|
device_floor,
|
||||||
|
AVG(event_value) AS temperature_hourly_avg
|
||||||
|
FROM (
|
||||||
|
SELECT CAST(log_time AS DATE) AS dt,
|
||||||
|
EXTRACT(HOUR FROM log_time) AS hr,
|
||||||
|
device_name,
|
||||||
|
device_type,
|
||||||
|
device_floor,
|
||||||
|
event_value
|
||||||
|
FROM logs3
|
||||||
|
WHERE event_type = 'temperature'
|
||||||
|
) AS r
|
||||||
|
GROUP BY dt,
|
||||||
|
hr,
|
||||||
|
device_name,
|
||||||
|
device_type,
|
||||||
|
device_floor
|
||||||
|
) AS s
|
||||||
|
GROUP BY dt,
|
||||||
|
device_name,
|
||||||
|
device_type,
|
||||||
|
device_floor
|
||||||
|
HAVING MAX(temperature_hourly_avg) - MIN(temperature_hourly_avg) >= 25.0
|
||||||
|
)
|
||||||
|
SELECT DISTINCT device_name,
|
||||||
|
device_type,
|
||||||
|
device_floor,
|
||||||
|
'WINTER'
|
||||||
|
FROM temperature
|
||||||
|
WHERE dt >= DATE '2018-12-01'
|
||||||
|
AND dt < DATE '2019-03-01'
|
||||||
|
UNION
|
||||||
|
SELECT DISTINCT device_name,
|
||||||
|
device_type,
|
||||||
|
device_floor,
|
||||||
|
'SUMMER'
|
||||||
|
FROM temperature
|
||||||
|
WHERE dt >= DATE '2019-06-01'
|
||||||
|
AND dt < DATE '2019-09-01';
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```sql
|
||||||
|
-- Q3.6:对于每种类别的设备,每月的功耗指标是什么?
|
||||||
|
|
||||||
|
SELECT yr,
|
||||||
|
mo,
|
||||||
|
SUM(coffee_hourly_avg) AS coffee_monthly_sum,
|
||||||
|
AVG(coffee_hourly_avg) AS coffee_monthly_avg,
|
||||||
|
SUM(printer_hourly_avg) AS printer_monthly_sum,
|
||||||
|
AVG(printer_hourly_avg) AS printer_monthly_avg,
|
||||||
|
SUM(projector_hourly_avg) AS projector_monthly_sum,
|
||||||
|
AVG(projector_hourly_avg) AS projector_monthly_avg,
|
||||||
|
SUM(vending_hourly_avg) AS vending_monthly_sum,
|
||||||
|
AVG(vending_hourly_avg) AS vending_monthly_avg
|
||||||
|
FROM (
|
||||||
|
SELECT dt,
|
||||||
|
yr,
|
||||||
|
mo,
|
||||||
|
hr,
|
||||||
|
AVG(coffee) AS coffee_hourly_avg,
|
||||||
|
AVG(printer) AS printer_hourly_avg,
|
||||||
|
AVG(projector) AS projector_hourly_avg,
|
||||||
|
AVG(vending) AS vending_hourly_avg
|
||||||
|
FROM (
|
||||||
|
SELECT CAST(log_time AS DATE) AS dt,
|
||||||
|
EXTRACT(YEAR FROM log_time) AS yr,
|
||||||
|
EXTRACT(MONTH FROM log_time) AS mo,
|
||||||
|
EXTRACT(HOUR FROM log_time) AS hr,
|
||||||
|
CASE WHEN device_name LIKE 'coffee%' THEN event_value END AS coffee,
|
||||||
|
CASE WHEN device_name LIKE 'printer%' THEN event_value END AS printer,
|
||||||
|
CASE WHEN device_name LIKE 'projector%' THEN event_value END AS projector,
|
||||||
|
CASE WHEN device_name LIKE 'vending%' THEN event_value END AS vending
|
||||||
|
FROM logs3
|
||||||
|
WHERE device_type = 'meter'
|
||||||
|
) AS r
|
||||||
|
GROUP BY dt,
|
||||||
|
yr,
|
||||||
|
mo,
|
||||||
|
hr
|
||||||
|
) AS s
|
||||||
|
GROUP BY yr,
|
||||||
|
mo
|
||||||
|
ORDER BY yr,
|
||||||
|
mo;
|
||||||
|
```
|
||||||
|
|
||||||
|
此数据集可在 [Playground](https://play.clickhouse.com/play?user=play) 中进行交互式的请求, [example](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1hY2hpbmVfbmFtZSwKICAgICAgIE1JTihjcHUpIEFTIGNwdV9taW4sCiAgICAgICBNQVgoY3B1KSBBUyBjcHVfbWF4LAogICAgICAgQVZHKGNwdSkgQVMgY3B1X2F2ZywKICAgICAgIE1JTihuZXRfaW4pIEFTIG5ldF9pbl9taW4sCiAgICAgICBNQVgobmV0X2luKSBBUyBuZXRfaW5fbWF4LAogICAgICAgQVZHKG5ldF9pbikgQVMgbmV0X2luX2F2ZywKICAgICAgIE1JTihuZXRfb3V0KSBBUyBuZXRfb3V0X21pbiwKICAgICAgIE1BWChuZXRfb3V0KSBBUyBuZXRfb3V0X21heCwKICAgICAgIEFWRyhuZXRfb3V0KSBBUyBuZXRfb3V0X2F2ZwpGUk9NICgKICBTRUxFQ1QgbWFjaGluZV9uYW1lLAogICAgICAgICBDT0FMRVNDRShjcHVfdXNlciwgMC4wKSBBUyBjcHUsCiAgICAgICAgIENPQUxFU0NFKGJ5dGVzX2luLCAwLjApIEFTIG5ldF9pbiwKICAgICAgICAgQ09BTEVTQ0UoYnl0ZXNfb3V0LCAwLjApIEFTIG5ldF9vdXQKICBGUk9NIG1nYmVuY2gubG9nczEKICBXSEVSRSBtYWNoaW5lX25hbWUgSU4gKCdhbmFuc2knLCdhcmFnb2cnLCd1cmQnKQogICAgQU5EIGxvZ190aW1lID49IFRJTUVTVEFNUCAnMjAxNy0wMS0xMSAwMDowMDowMCcKKSBBUyByCkdST1VQIEJZIG1hY2hpbmVfbmFtZQ==).
|
||||||
|
@ -1,9 +1,232 @@
|
|||||||
---
|
---
|
||||||
slug: /zh/getting-started/example-datasets/cell-towers
|
slug: /zh/getting-started/example-datasets/cell-towers
|
||||||
sidebar_label: Cell Towers
|
sidebar_label: 蜂窝信号塔
|
||||||
title: "Cell Towers"
|
sidebar_position: 3
|
||||||
|
title: "蜂窝信号塔"
|
||||||
---
|
---
|
||||||
|
|
||||||
import Content from '@site/docs/en/getting-started/example-datasets/cell-towers.md';
|
import Tabs from '@theme/Tabs';
|
||||||
|
import TabItem from '@theme/TabItem';
|
||||||
|
import CodeBlock from '@theme/CodeBlock';
|
||||||
|
import ActionsMenu from '@site/docs/en/_snippets/_service_actions_menu.md';
|
||||||
|
import SQLConsoleDetail from '@site/docs/en/_snippets/_launch_sql_console.md';
|
||||||
|
|
||||||
|
该数据集来自 [OpenCellid](https://www.opencellid.org/) - 世界上最大的蜂窝信号塔的开放数据库。
|
||||||
|
|
||||||
|
截至 2021 年,它拥有超过 4000 万条关于全球蜂窝信号塔(GSM、LTE、UMTS 等)的记录及其地理坐标和元数据(国家代码、网络等)。
|
||||||
|
|
||||||
|
OpenCelliD 项目在 `Creative Commons Attribution-ShareAlike 4.0 International License` 协议下许可使用,我们根据相同许可条款重新分发此数据集的快照。登录后即可下载最新版本的数据集。
|
||||||
|
|
||||||
|
|
||||||
|
## 获取数据集 {#get-the-dataset}
|
||||||
|
|
||||||
|
<Tabs groupId="deployMethod">
|
||||||
|
<TabItem value="serverless" label="ClickHouse Cloud" default>
|
||||||
|
|
||||||
|
在 ClickHouse Cloud 上可以通过一个按钮实现通过 S3 上传此数据集。登录你的 ClickHouse Cloud 组织,或通过 [ClickHouse.cloud](https://clickhouse.cloud) 创建免费试用版。<ActionsMenu menu="Load Data" />
|
||||||
|
|
||||||
|
从 **Sample data** 选项卡中选择 **Cell Towers** 数据集,然后选择 **Load data**:
|
||||||
|
|
||||||
|
![加载数据集](@site/docs/en/_snippets/images/cloud-load-data-sample.png)
|
||||||
|
|
||||||
|
检查 cell_towers 的表结构:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DESCRIBE TABLE cell_towers
|
||||||
|
```
|
||||||
|
|
||||||
|
<SQLConsoleDetail />
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="selfmanaged" label="Self-managed">
|
||||||
|
|
||||||
|
1. 下载 2021 年 2 月以来的数据集快照:[cell_towers.csv.xz](https://datasets.clickhouse.com/cell_towers.csv.xz) (729 MB)。
|
||||||
|
|
||||||
|
2. 验证完整性(可选步骤):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
md5sum cell_towers.csv.xz
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
8cf986f4a0d9f12c6f384a0e9192c908 cell_towers.csv.xz
|
||||||
|
```
|
||||||
|
|
||||||
|
3. 使用以下命令解压:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
xz -d cell_towers.csv.xz
|
||||||
|
```
|
||||||
|
|
||||||
|
4. 创建表:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE cell_towers
|
||||||
|
(
|
||||||
|
radio Enum8('' = 0, 'CDMA' = 1, 'GSM' = 2, 'LTE' = 3, 'NR' = 4, 'UMTS' = 5),
|
||||||
|
mcc UInt16,
|
||||||
|
net UInt16,
|
||||||
|
area UInt16,
|
||||||
|
cell UInt64,
|
||||||
|
unit Int16,
|
||||||
|
lon Float64,
|
||||||
|
lat Float64,
|
||||||
|
range UInt32,
|
||||||
|
samples UInt32,
|
||||||
|
changeable UInt8,
|
||||||
|
created DateTime,
|
||||||
|
updated DateTime,
|
||||||
|
averageSignal UInt8
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree ORDER BY (radio, mcc, net, created);
|
||||||
|
```
|
||||||
|
|
||||||
|
5. 插入数据集:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
clickhouse-client --query "INSERT INTO cell_towers FORMAT CSVWithNames" < cell_towers.csv
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
## 查询示例 {#examples}
|
||||||
|
|
||||||
|
1. 按类型划分的基站数量:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT radio, count() AS c FROM cell_towers GROUP BY radio ORDER BY c DESC
|
||||||
|
```
|
||||||
|
```response
|
||||||
|
┌─radio─┬────────c─┐
|
||||||
|
│ UMTS │ 20686487 │
|
||||||
|
│ LTE │ 12101148 │
|
||||||
|
│ GSM │ 9931312 │
|
||||||
|
│ CDMA │ 556344 │
|
||||||
|
│ NR │ 867 │
|
||||||
|
└───────┴──────────┘
|
||||||
|
|
||||||
|
5 rows in set. Elapsed: 0.011 sec. Processed 43.28 million rows, 43.28 MB (3.83 billion rows/s., 3.83 GB/s.)
|
||||||
|
```
|
||||||
|
|
||||||
|
2. 各个[移动国家代码(MCC)](https://en.wikipedia.org/wiki/Mobile_country_code)对应的蜂窝信号塔数量:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT mcc, count() FROM cell_towers GROUP BY mcc ORDER BY count() DESC LIMIT 10
|
||||||
|
```
|
||||||
|
```response
|
||||||
|
┌─mcc─┬─count()─┐
|
||||||
|
│ 310 │ 5024650 │
|
||||||
|
│ 262 │ 2622423 │
|
||||||
|
│ 250 │ 1953176 │
|
||||||
|
│ 208 │ 1891187 │
|
||||||
|
│ 724 │ 1836150 │
|
||||||
|
│ 404 │ 1729151 │
|
||||||
|
│ 234 │ 1618924 │
|
||||||
|
│ 510 │ 1353998 │
|
||||||
|
│ 440 │ 1343355 │
|
||||||
|
│ 311 │ 1332798 │
|
||||||
|
└─────┴─────────┘
|
||||||
|
|
||||||
|
10 rows in set. Elapsed: 0.019 sec. Processed 43.28 million rows, 86.55 MB (2.33 billion rows/s., 4.65 GB/s.)
|
||||||
|
```
|
||||||
|
|
||||||
|
排名靠前的国家是:美国、德国和俄罗斯。
|
||||||
|
|
||||||
|
你可以通过在 ClickHouse 中创建一个 [External Dictionary](../../sql-reference/dictionaries/external-dictionaries/external-dicts.md) 来解码这些值。
|
||||||
|
|
||||||
|
## 用例:合并地理数据 {#use-case}
|
||||||
|
|
||||||
|
使用 `pointInPolygon` 函数。
|
||||||
|
|
||||||
|
1. 创建一个用于存储多边形的表:
|
||||||
|
|
||||||
|
<Tabs groupId="deployMethod">
|
||||||
|
<TabItem value="serverless" label="ClickHouse Cloud" default>
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE moscow (polygon Array(Tuple(Float64, Float64)))
|
||||||
|
ORDER BY polygon;
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="selfmanaged" label="Self-managed">
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TEMPORARY TABLE
|
||||||
|
moscow (polygon Array(Tuple(Float64, Float64)));
|
||||||
|
```
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
||||||
|
|
||||||
|
2. 以下点大致上构造了莫斯科的地理围栏(除“新莫斯科”外):
|
||||||
|
|
||||||
|
```sql
|
||||||
|
INSERT INTO moscow VALUES ([(37.84172564285271, 55.78000432402266),
|
||||||
|
(37.8381207618713, 55.775874525970494), (37.83979446823122, 55.775626746008065), (37.84243326983639, 55.77446586811748), (37.84262672750849, 55.771974101091104), (37.84153238623039, 55.77114545193181), (37.841124690460184, 55.76722010265554),
|
||||||
|
(37.84239076983644, 55.76654891107098), (37.842283558197025, 55.76258709833121), (37.8421759312134, 55.758073999993734), (37.84198330422974, 55.75381499999371), (37.8416827275085, 55.749277102484484), (37.84157576190186, 55.74794544108413),
|
||||||
|
(37.83897929098507, 55.74525257875241), (37.83739676451868, 55.74404373042019), (37.838732481460525, 55.74298009816793), (37.841183997352545, 55.743060321833575), (37.84097476190185, 55.73938799999373), (37.84048155819702, 55.73570799999372),
|
||||||
|
(37.840095812164286, 55.73228210777237), (37.83983814285274, 55.73080491981639), (37.83846476321406, 55.729799917464675), (37.83835745269769, 55.72919751082619), (37.838636380279524, 55.72859509486539), (37.8395161005249, 55.727705075632784),
|
||||||
|
(37.83897964285276, 55.722727886185154), (37.83862557539366, 55.72034817326636), (37.83559735744853, 55.71944437307499), (37.835370708803126, 55.71831419154461), (37.83738169402022, 55.71765218986692), (37.83823396494291, 55.71691750159089),
|
||||||
|
(37.838056931213345, 55.71547311301385), (37.836812846557606, 55.71221445615604), (37.83522525396725, 55.709331054395555), (37.83269301586908, 55.70953687463627), (37.829667367706236, 55.70903403789297), (37.83311126588435, 55.70552351822608),
|
||||||
|
(37.83058993121339, 55.70041317726053), (37.82983872750851, 55.69883771404813), (37.82934501586913, 55.69718947487017), (37.828926414016685, 55.69504441658371), (37.82876530422971, 55.69287499999378), (37.82894754100031, 55.690759754047335),
|
||||||
|
(37.827697554878185, 55.68951421135665), (37.82447346292115, 55.68965045405069), (37.83136543914793, 55.68322046195302), (37.833554015869154, 55.67814012759211), (37.83544184655761, 55.67295011628339), (37.837480388885474, 55.6672498719639),
|
||||||
|
(37.838960677246064, 55.66316274139358), (37.83926093121332, 55.66046999999383), (37.839025050262435, 55.65869897264431), (37.83670784390257, 55.65794084879904), (37.835656529083245, 55.65694309303843), (37.83704060449217, 55.65689306460552),
|
||||||
|
(37.83696819873806, 55.65550363526252), (37.83760389616388, 55.65487847246661), (37.83687972750851, 55.65356745541324), (37.83515216004943, 55.65155951234079), (37.83312418518067, 55.64979413590619), (37.82801726983639, 55.64640836412121),
|
||||||
|
(37.820614174591, 55.64164525405531), (37.818908190475426, 55.6421883258084), (37.81717543386075, 55.64112490388471), (37.81690987037274, 55.63916106913107), (37.815099354492155, 55.637925371757085), (37.808769150787356, 55.633798276884455),
|
||||||
|
(37.80100123544311, 55.62873670012244), (37.79598013491824, 55.62554336109055), (37.78634567724606, 55.62033499605651), (37.78334147619623, 55.618768681480326), (37.77746201055901, 55.619855533402706), (37.77527329626457, 55.61909966711279),
|
||||||
|
(37.77801986242668, 55.618770300976294), (37.778212973541216, 55.617257701952106), (37.77784818518065, 55.61574504433011), (37.77016867724609, 55.61148576294007), (37.760191219573976, 55.60599579539028), (37.75338926983641, 55.60227892751446),
|
||||||
|
(37.746329965606634, 55.59920577639331), (37.73939925396728, 55.59631430313617), (37.73273665739439, 55.5935318803559), (37.7299954450912, 55.59350760316188), (37.7268679946899, 55.59469840523759), (37.72626726983634, 55.59229549697373),
|
||||||
|
(37.7262673598022, 55.59081598950582), (37.71897193121335, 55.5877595845419), (37.70871550793456, 55.58393177431724), (37.700497489410374, 55.580917323756644), (37.69204305026244, 55.57778089778455), (37.68544477378839, 55.57815154690915),
|
||||||
|
(37.68391050793454, 55.57472945079756), (37.678803592590306, 55.57328235936491), (37.6743402539673, 55.57255251445782), (37.66813862698363, 55.57216388774464), (37.617927457672096, 55.57505691895805), (37.60443099999999, 55.5757737568051),
|
||||||
|
(37.599683515869145, 55.57749105910326), (37.59754177842709, 55.57796291823627), (37.59625834786988, 55.57906686095235), (37.59501783265684, 55.57746616444403), (37.593090671936025, 55.57671634534502), (37.587018007904, 55.577944600233785),
|
||||||
|
(37.578692203704804, 55.57982895000019), (37.57327546607398, 55.58116294118248), (37.57385012109279, 55.581550362779), (37.57399562266922, 55.5820107079112), (37.5735356072979, 55.58226289171689), (37.57290393054962, 55.582393529795155),
|
||||||
|
(37.57037722355653, 55.581919415056234), (37.5592298306885, 55.584471614867844), (37.54189249206543, 55.58867650795186), (37.5297256269836, 55.59158133551745), (37.517837865081766, 55.59443656218868), (37.51200186508174, 55.59635625174229),
|
||||||
|
(37.506808949737554, 55.59907823904434), (37.49820432275389, 55.6062944994944), (37.494406071441674, 55.60967103463367), (37.494760001358024, 55.61066689753365), (37.49397137107085, 55.61220931698269), (37.49016528606031, 55.613417718449064),
|
||||||
|
(37.48773249206542, 55.61530616333343), (37.47921386508177, 55.622640129112334), (37.470652153442394, 55.62993723476164), (37.46273446298218, 55.6368075123157), (37.46350692265317, 55.64068225239439), (37.46050283203121, 55.640794546982576),
|
||||||
|
(37.457627470916734, 55.64118904154646), (37.450718034393326, 55.64690488145138), (37.44239252645875, 55.65397824729769), (37.434587576721185, 55.66053543155961), (37.43582144975277, 55.661693766520735), (37.43576786245721, 55.662755031737014),
|
||||||
|
(37.430982915344174, 55.664610641628116), (37.428547447097685, 55.66778515273695), (37.42945134592044, 55.668633314343566), (37.42859571562949, 55.66948145750025), (37.4262836402282, 55.670813882451405), (37.418709037048295, 55.6811141674414),
|
||||||
|
(37.41922139651101, 55.68235377885389), (37.419218771842885, 55.68359335082235), (37.417196501327446, 55.684375235224735), (37.41607020370478, 55.68540557585352), (37.415640857147146, 55.68686637150793), (37.414632153442334, 55.68903015131686),
|
||||||
|
(37.413344899475064, 55.690896881757396), (37.41171432275391, 55.69264232162232), (37.40948282275393, 55.69455101638112), (37.40703674603271, 55.69638690385348), (37.39607169577025, 55.70451821283731), (37.38952706878662, 55.70942491932811),
|
||||||
|
(37.387778313491815, 55.71149057784176), (37.39049275399779, 55.71419814298992), (37.385557272491454, 55.7155489617061), (37.38388335714726, 55.71849856042102), (37.378368238098155, 55.7292763261685), (37.37763597123337, 55.730845879211614),
|
||||||
|
(37.37890062088197, 55.73167906388319), (37.37750451918789, 55.734703664681774), (37.375610832015965, 55.734851959522246), (37.3723813571472, 55.74105626086403), (37.37014935714723, 55.746115620904355), (37.36944173016362, 55.750883999993725),
|
||||||
|
(37.36975304365541, 55.76335905525834), (37.37244070571134, 55.76432079697595), (37.3724259757175, 55.76636979670426), (37.369922155757884, 55.76735417953104), (37.369892695770275, 55.76823419316575), (37.370214730163575, 55.782312184391266),
|
||||||
|
(37.370493611114505, 55.78436801120489), (37.37120164550783, 55.78596427165359), (37.37284851456452, 55.7874378183096), (37.37608325135799, 55.7886695054807), (37.3764587460632, 55.78947647305964), (37.37530000265506, 55.79146512926804),
|
||||||
|
(37.38235915344241, 55.79899647809345), (37.384344043655396, 55.80113596939471), (37.38594269577028, 55.80322699999366), (37.38711208598329, 55.804919036911976), (37.3880239841309, 55.806610999993666), (37.38928977249147, 55.81001864976979),
|
||||||
|
(37.39038389947512, 55.81348641242801), (37.39235781481933, 55.81983538336746), (37.393709457672124, 55.82417822811877), (37.394685720901464, 55.82792275755836), (37.39557615344238, 55.830447148154136), (37.39844478226658, 55.83167107969975),
|
||||||
|
(37.40019761214057, 55.83151823557964), (37.400398790382326, 55.83264967594742), (37.39659544313046, 55.83322180909622), (37.39667059524539, 55.83402792148566), (37.39682089947515, 55.83638877400216), (37.39643489154053, 55.83861656112751),
|
||||||
|
(37.3955338994751, 55.84072348043264), (37.392680272491454, 55.84502158126453), (37.39241188227847, 55.84659117913199), (37.392529730163616, 55.84816071336481), (37.39486835714723, 55.85288092980303), (37.39873052645878, 55.859893456073635),
|
||||||
|
(37.40272161111449, 55.86441833633205), (37.40697072750854, 55.867579567544375), (37.410007082016016, 55.868369880337), (37.4120992989502, 55.86920843741314), (37.412668021163924, 55.87055369615854), (37.41482461111453, 55.87170587948249),
|
||||||
|
(37.41862266137694, 55.873183961039565), (37.42413732540892, 55.874879126654704), (37.4312182698669, 55.875614937236705), (37.43111093783558, 55.8762723478417), (37.43332105622856, 55.87706546369396), (37.43385747619623, 55.87790681284802),
|
||||||
|
(37.441303050262405, 55.88027084462084), (37.44747234260555, 55.87942070143253), (37.44716141796871, 55.88072960917233), (37.44769797085568, 55.88121221323979), (37.45204320500181, 55.882080694420715), (37.45673176190186, 55.882346110794586),
|
||||||
|
(37.463383999999984, 55.88252729504517), (37.46682797486874, 55.88294937719063), (37.470014457672086, 55.88361266759345), (37.47751410450743, 55.88546991372396), (37.47860317658232, 55.88534929207307), (37.48165826025772, 55.882563306475106),
|
||||||
|
(37.48316434442331, 55.8815803226785), (37.483831555817645, 55.882427612793315), (37.483182967125686, 55.88372791409729), (37.483092277908824, 55.88495581062434), (37.4855716508179, 55.8875561994203), (37.486440636245746, 55.887827444039566),
|
||||||
|
(37.49014203439328, 55.88897899871799), (37.493210285705544, 55.890208937135604), (37.497512451065035, 55.891342397444696), (37.49780744510645, 55.89174030252967), (37.49940333499519, 55.89239745507079), (37.50018383334346, 55.89339220941865),
|
||||||
|
(37.52421672750851, 55.903869074155224), (37.52977457672118, 55.90564076517974), (37.53503220370484, 55.90661661218259), (37.54042858064267, 55.90714113744566), (37.54320461007303, 55.905645048442985), (37.545686966066306, 55.906608607018505),
|
||||||
|
(37.54743976120755, 55.90788552162358), (37.55796999999999, 55.90901557907218), (37.572711542327866, 55.91059395704873), (37.57942799999998, 55.91073854155573), (37.58502865872187, 55.91009969268444), (37.58739968913264, 55.90794809960554),
|
||||||
|
(37.59131567193598, 55.908713267595054), (37.612687423278814, 55.902866854295375), (37.62348079629517, 55.90041967242986), (37.635797880950896, 55.898141151686396), (37.649487626983664, 55.89639275532968), (37.65619302513125, 55.89572360207488),
|
||||||
|
(37.66294133862307, 55.895295577183965), (37.66874564418033, 55.89505457604897), (37.67375601586915, 55.89254677027454), (37.67744661901856, 55.8947775867987), (37.688347, 55.89450045676125), (37.69480554232789, 55.89422926332761),
|
||||||
|
(37.70107096560668, 55.89322256101114), (37.705962965606716, 55.891763491662616), (37.711885134918205, 55.889110234998974), (37.71682005026245, 55.886577568759876), (37.7199315476074, 55.88458159806678), (37.72234560316464, 55.882281005794134),
|
||||||
|
(37.72364385977171, 55.8809452036196), (37.725371142837474, 55.8809722706006), (37.727870902099546, 55.88037213862385), (37.73394330422971, 55.877941504088696), (37.745339592590376, 55.87208120378722), (37.75525267724611, 55.86703807949492),
|
||||||
|
(37.76919976190188, 55.859821640197474), (37.827835219574, 55.82962968399116), (37.83341438888553, 55.82575289922351), (37.83652584655761, 55.82188784027888), (37.83809213491821, 55.81612575504693), (37.83605359521481, 55.81460347077685),
|
||||||
|
(37.83632178569025, 55.81276696067908), (37.838623105812026, 55.811486181656385), (37.83912198147584, 55.807329380532785), (37.839079078033414, 55.80510270463816), (37.83965844708251, 55.79940712529036), (37.840581150787344, 55.79131399999368),
|
||||||
|
(37.84172564285271, 55.78000432402266)]);
|
||||||
|
```
|
||||||
|
|
||||||
|
3. 检查莫斯科有多少个蜂窝信号塔:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT count() FROM cell_towers
|
||||||
|
WHERE pointInPolygon((lon, lat), (SELECT * FROM moscow))
|
||||||
|
```
|
||||||
|
```response
|
||||||
|
┌─count()─┐
|
||||||
|
│ 310463 │
|
||||||
|
└─────────┘
|
||||||
|
|
||||||
|
1 rows in set. Elapsed: 0.067 sec. Processed 43.28 million rows, 692.42 MB (645.83 million rows/s., 10.33 GB/s.)
|
||||||
|
```
|
||||||
|
|
||||||
|
虽然不能创建临时表,但此数据集仍可在 [Playground](https://play.clickhouse.com/play?user=play) 中进行交互式的请求, [example](https://play.clickhouse.com/play?user=play#U0VMRUNUIG1jYywgY291bnQoKSBGUk9NIGNlbGxfdG93ZXJzIEdST1VQIEJZIG1jYyBPUkRFUiBCWSBjb3VudCgpIERFU0M=).
|
||||||
|
|
||||||
<Content />
|
|
||||||
|
@ -1,9 +1,352 @@
|
|||||||
---
|
---
|
||||||
slug: /zh/getting-started/example-datasets/menus
|
slug: /zh/getting-started/example-datasets/menus
|
||||||
sidebar_label: New York Public Library "What's on the Menu?" Dataset
|
sidebar_label: '纽约公共图书馆“菜单上有什么?”数据集'
|
||||||
title: "New York Public Library \"What's on the Menu?\" Dataset"
|
title: '纽约公共图书馆“菜单上有什么?”数据集'
|
||||||
---
|
---
|
||||||
|
|
||||||
import Content from '@site/docs/en/getting-started/example-datasets/menus.md';
|
该数据集由纽约公共图书馆创建。其中含有有关酒店、餐馆和咖啡馆的菜单上的菜肴及其价格的历史数据。
|
||||||
|
|
||||||
<Content />
|
来源:http://menus.nypl.org/data
|
||||||
|
数据为开放数据。
|
||||||
|
|
||||||
|
数据来自于图书馆中的档案,因此可能不完整,以至于难以进行统计分析。尽管如此,该数据集也是非常有意思的。数据集中只有 130 万条关于菜单中的菜肴的记录 - 这对于 ClickHouse 来说是一个非常小的数据量,但这仍是一个很好的例子。
|
||||||
|
|
||||||
|
## 下载数据集 {#download-dataset}
|
||||||
|
|
||||||
|
运行命令:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
wget https://s3.amazonaws.com/menusdata.nypl.org/gzips/2021_08_01_07_01_17_data.tgz
|
||||||
|
```
|
||||||
|
|
||||||
|
如果有需要可以使用 http://menus.nypl.org/data 中的最新链接。下载的大小约为 35 MB。
|
||||||
|
|
||||||
|
## 解压数据集 {#unpack-dataset}
|
||||||
|
|
||||||
|
```bash
|
||||||
|
tar xvf 2021_08_01_07_01_17_data.tgz
|
||||||
|
```
|
||||||
|
|
||||||
|
解压后的大小约为 150 MB。
|
||||||
|
|
||||||
|
数据集由四个表组成:
|
||||||
|
|
||||||
|
- `Menu` - 有关菜单的信息,其中包含:餐厅名称,看到菜单的日期等
|
||||||
|
- `Dish` - 有关菜肴的信息,其中包含:菜肴名称以及一些特征。
|
||||||
|
- `MenuPage` - 有关菜单中页面的信息,每个页面都属于某个 `Menu`。
|
||||||
|
- `MenuItem` - 菜单项。某个菜单页面上的菜肴及其价格:指向 `Dish` 和 `MenuPage`的链接。
|
||||||
|
|
||||||
|
## 创建表 {#create-tables}
|
||||||
|
|
||||||
|
使用 [Decimal](/docs/zh/sql-reference/data-types/decimal.md) 数据类型来存储价格。
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE dish
|
||||||
|
(
|
||||||
|
id UInt32,
|
||||||
|
name String,
|
||||||
|
description String,
|
||||||
|
menus_appeared UInt32,
|
||||||
|
times_appeared Int32,
|
||||||
|
first_appeared UInt16,
|
||||||
|
last_appeared UInt16,
|
||||||
|
lowest_price Decimal64(3),
|
||||||
|
highest_price Decimal64(3)
|
||||||
|
) ENGINE = MergeTree ORDER BY id;
|
||||||
|
|
||||||
|
CREATE TABLE menu
|
||||||
|
(
|
||||||
|
id UInt32,
|
||||||
|
name String,
|
||||||
|
sponsor String,
|
||||||
|
event String,
|
||||||
|
venue String,
|
||||||
|
place String,
|
||||||
|
physical_description String,
|
||||||
|
occasion String,
|
||||||
|
notes String,
|
||||||
|
call_number String,
|
||||||
|
keywords String,
|
||||||
|
language String,
|
||||||
|
date String,
|
||||||
|
location String,
|
||||||
|
location_type String,
|
||||||
|
currency String,
|
||||||
|
currency_symbol String,
|
||||||
|
status String,
|
||||||
|
page_count UInt16,
|
||||||
|
dish_count UInt16
|
||||||
|
) ENGINE = MergeTree ORDER BY id;
|
||||||
|
|
||||||
|
CREATE TABLE menu_page
|
||||||
|
(
|
||||||
|
id UInt32,
|
||||||
|
menu_id UInt32,
|
||||||
|
page_number UInt16,
|
||||||
|
image_id String,
|
||||||
|
full_height UInt16,
|
||||||
|
full_width UInt16,
|
||||||
|
uuid UUID
|
||||||
|
) ENGINE = MergeTree ORDER BY id;
|
||||||
|
|
||||||
|
CREATE TABLE menu_item
|
||||||
|
(
|
||||||
|
id UInt32,
|
||||||
|
menu_page_id UInt32,
|
||||||
|
price Decimal64(3),
|
||||||
|
high_price Decimal64(3),
|
||||||
|
dish_id UInt32,
|
||||||
|
created_at DateTime,
|
||||||
|
updated_at DateTime,
|
||||||
|
xpos Float64,
|
||||||
|
ypos Float64
|
||||||
|
) ENGINE = MergeTree ORDER BY id;
|
||||||
|
```
|
||||||
|
|
||||||
|
## 导入数据 {#import-data}
|
||||||
|
|
||||||
|
执行以下命令将数据导入 ClickHouse:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --query "INSERT INTO dish FORMAT CSVWithNames" < Dish.csv
|
||||||
|
clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --query "INSERT INTO menu FORMAT CSVWithNames" < Menu.csv
|
||||||
|
clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --query "INSERT INTO menu_page FORMAT CSVWithNames" < MenuPage.csv
|
||||||
|
clickhouse-client --format_csv_allow_single_quotes 0 --input_format_null_as_default 0 --date_time_input_format best_effort --query "INSERT INTO menu_item FORMAT CSVWithNames" < MenuItem.csv
|
||||||
|
```
|
||||||
|
|
||||||
|
因为数据由带有标题的 CSV 表示,所以使用 [CSVWithNames](/docs/zh/interfaces/formats.md#csvwithnames) 格式。
|
||||||
|
|
||||||
|
因为只有双引号用于数据字段,单引号可以在值内,所以禁用了 `format_csv_allow_single_quotes` 以避免混淆 CSV 解析器。
|
||||||
|
|
||||||
|
因为数据中没有 [NULL](/docs/zh/sql-reference/syntax.md#null-literal) 值,所以禁用 [input_format_null_as_default](/docs/zh/operations/settings/settings.md#settings-input-format-null-as-default)。不然 ClickHouse 将会尝试解析 `\N` 序列,并可能与数据中的 `\` 混淆。
|
||||||
|
|
||||||
|
设置 [date_time_input_format best_effort](/docs/zh/operations/settings/settings.md#settings-date_time_input_format) 以便解析各种格式的 [DateTime](/docs/zh/sql-reference/data-types/datetime.md)字段。例如,识别像“2000-01-01 01:02”这样没有秒数的 ISO-8601 时间字符串。如果没有此设置,则仅允许使用固定的 DateTime 格式。
|
||||||
|
|
||||||
|
## 非规范化数据 {#denormalize-data}
|
||||||
|
|
||||||
|
数据以 [规范化形式](https://en.wikipedia.org/wiki/Database_normalization#Normal_forms) 在多个表格中呈现。这意味着如果你想进行如查询菜单项中的菜名这类的查询,则必须执行 [JOIN](/docs/zh/sql-reference/statements/select/join.md#select-join)。在典型的分析任务中,预先处理联接的数据以避免每次都执行“联接”会更有效率。这种操作被称为“非规范化”数据。
|
||||||
|
|
||||||
|
我们将创建一个表“menu_item_denorm”,其中将包含所有联接在一起的数据:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE menu_item_denorm
|
||||||
|
ENGINE = MergeTree ORDER BY (dish_name, created_at)
|
||||||
|
AS SELECT
|
||||||
|
price,
|
||||||
|
high_price,
|
||||||
|
created_at,
|
||||||
|
updated_at,
|
||||||
|
xpos,
|
||||||
|
ypos,
|
||||||
|
dish.id AS dish_id,
|
||||||
|
dish.name AS dish_name,
|
||||||
|
dish.description AS dish_description,
|
||||||
|
dish.menus_appeared AS dish_menus_appeared,
|
||||||
|
dish.times_appeared AS dish_times_appeared,
|
||||||
|
dish.first_appeared AS dish_first_appeared,
|
||||||
|
dish.last_appeared AS dish_last_appeared,
|
||||||
|
dish.lowest_price AS dish_lowest_price,
|
||||||
|
dish.highest_price AS dish_highest_price,
|
||||||
|
menu.id AS menu_id,
|
||||||
|
menu.name AS menu_name,
|
||||||
|
menu.sponsor AS menu_sponsor,
|
||||||
|
menu.event AS menu_event,
|
||||||
|
menu.venue AS menu_venue,
|
||||||
|
menu.place AS menu_place,
|
||||||
|
menu.physical_description AS menu_physical_description,
|
||||||
|
menu.occasion AS menu_occasion,
|
||||||
|
menu.notes AS menu_notes,
|
||||||
|
menu.call_number AS menu_call_number,
|
||||||
|
menu.keywords AS menu_keywords,
|
||||||
|
menu.language AS menu_language,
|
||||||
|
menu.date AS menu_date,
|
||||||
|
menu.location AS menu_location,
|
||||||
|
menu.location_type AS menu_location_type,
|
||||||
|
menu.currency AS menu_currency,
|
||||||
|
menu.currency_symbol AS menu_currency_symbol,
|
||||||
|
menu.status AS menu_status,
|
||||||
|
menu.page_count AS menu_page_count,
|
||||||
|
menu.dish_count AS menu_dish_count
|
||||||
|
FROM menu_item
|
||||||
|
JOIN dish ON menu_item.dish_id = dish.id
|
||||||
|
JOIN menu_page ON menu_item.menu_page_id = menu_page.id
|
||||||
|
JOIN menu ON menu_page.menu_id = menu.id;
|
||||||
|
```
|
||||||
|
|
||||||
|
## 验证数据 {#validate-data}
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT count() FROM menu_item_denorm;
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─count()─┐
|
||||||
|
│ 1329175 │
|
||||||
|
└─────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## 运行一些查询 {#run-queries}
|
||||||
|
|
||||||
|
### 菜品的平均历史价格 {#query-averaged-historical-prices}
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d,
|
||||||
|
count(),
|
||||||
|
round(avg(price), 2),
|
||||||
|
bar(avg(price), 0, 100, 100)
|
||||||
|
FROM menu_item_denorm
|
||||||
|
WHERE (menu_currency = 'Dollars') AND (d > 0) AND (d < 2022)
|
||||||
|
GROUP BY d
|
||||||
|
ORDER BY d ASC;
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌────d─┬─count()─┬─round(avg(price), 2)─┬─bar(avg(price), 0, 100, 100)─┐
|
||||||
|
│ 1850 │ 618 │ 1.5 │ █▍ │
|
||||||
|
│ 1860 │ 1634 │ 1.29 │ █▎ │
|
||||||
|
│ 1870 │ 2215 │ 1.36 │ █▎ │
|
||||||
|
│ 1880 │ 3909 │ 1.01 │ █ │
|
||||||
|
│ 1890 │ 8837 │ 1.4 │ █▍ │
|
||||||
|
│ 1900 │ 176292 │ 0.68 │ ▋ │
|
||||||
|
│ 1910 │ 212196 │ 0.88 │ ▊ │
|
||||||
|
│ 1920 │ 179590 │ 0.74 │ ▋ │
|
||||||
|
│ 1930 │ 73707 │ 0.6 │ ▌ │
|
||||||
|
│ 1940 │ 58795 │ 0.57 │ ▌ │
|
||||||
|
│ 1950 │ 41407 │ 0.95 │ ▊ │
|
||||||
|
│ 1960 │ 51179 │ 1.32 │ █▎ │
|
||||||
|
│ 1970 │ 12914 │ 1.86 │ █▋ │
|
||||||
|
│ 1980 │ 7268 │ 4.35 │ ████▎ │
|
||||||
|
│ 1990 │ 11055 │ 6.03 │ ██████ │
|
||||||
|
│ 2000 │ 2467 │ 11.85 │ ███████████▋ │
|
||||||
|
│ 2010 │ 597 │ 25.66 │ █████████████████████████▋ │
|
||||||
|
└──────┴─────────┴──────────────────────┴──────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
请对这些结果持保留态度(数据可能不完整,仅供参考)。
|
||||||
|
|
||||||
|
### 汉堡价格 {#query-burger-prices}
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d,
|
||||||
|
count(),
|
||||||
|
round(avg(price), 2),
|
||||||
|
bar(avg(price), 0, 50, 100)
|
||||||
|
FROM menu_item_denorm
|
||||||
|
WHERE (menu_currency = 'Dollars') AND (d > 0) AND (d < 2022) AND (dish_name ILIKE '%burger%')
|
||||||
|
GROUP BY d
|
||||||
|
ORDER BY d ASC;
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌────d─┬─count()─┬─round(avg(price), 2)─┬─bar(avg(price), 0, 50, 100)───────────┐
|
||||||
|
│ 1880 │ 2 │ 0.42 │ ▋ │
|
||||||
|
│ 1890 │ 7 │ 0.85 │ █▋ │
|
||||||
|
│ 1900 │ 399 │ 0.49 │ ▊ │
|
||||||
|
│ 1910 │ 589 │ 0.68 │ █▎ │
|
||||||
|
│ 1920 │ 280 │ 0.56 │ █ │
|
||||||
|
│ 1930 │ 74 │ 0.42 │ ▋ │
|
||||||
|
│ 1940 │ 119 │ 0.59 │ █▏ │
|
||||||
|
│ 1950 │ 134 │ 1.09 │ ██▏ │
|
||||||
|
│ 1960 │ 272 │ 0.92 │ █▋ │
|
||||||
|
│ 1970 │ 108 │ 1.18 │ ██▎ │
|
||||||
|
│ 1980 │ 88 │ 2.82 │ █████▋ │
|
||||||
|
│ 1990 │ 184 │ 3.68 │ ███████▎ │
|
||||||
|
│ 2000 │ 21 │ 7.14 │ ██████████████▎ │
|
||||||
|
│ 2010 │ 6 │ 18.42 │ ████████████████████████████████████▋ │
|
||||||
|
└──────┴─────────┴──────────────────────┴───────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### 伏特加 {#query-vodka}
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d,
|
||||||
|
count(),
|
||||||
|
round(avg(price), 2),
|
||||||
|
bar(avg(price), 0, 50, 100)
|
||||||
|
FROM menu_item_denorm
|
||||||
|
WHERE (menu_currency IN ('Dollars', '')) AND (d > 0) AND (d < 2022) AND (dish_name ILIKE '%vodka%')
|
||||||
|
GROUP BY d
|
||||||
|
ORDER BY d ASC;
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌────d─┬─count()─┬─round(avg(price), 2)─┬─bar(avg(price), 0, 50, 100)─┐
|
||||||
|
│ 1910 │ 2 │ 0 │ │
|
||||||
|
│ 1920 │ 1 │ 0.3 │ ▌ │
|
||||||
|
│ 1940 │ 21 │ 0.42 │ ▋ │
|
||||||
|
│ 1950 │ 14 │ 0.59 │ █▏ │
|
||||||
|
│ 1960 │ 113 │ 2.17 │ ████▎ │
|
||||||
|
│ 1970 │ 37 │ 0.68 │ █▎ │
|
||||||
|
│ 1980 │ 19 │ 2.55 │ █████ │
|
||||||
|
│ 1990 │ 86 │ 3.6 │ ███████▏ │
|
||||||
|
│ 2000 │ 2 │ 3.98 │ ███████▊ │
|
||||||
|
└──────┴─────────┴──────────────────────┴─────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
要查询 `Vodka`,必须使用 `ILIKE '%vodka%'` 进行模糊匹配查询。
|
||||||
|
|
||||||
|
### 鱼子酱 {#query-caviar}
|
||||||
|
|
||||||
|
列出鱼子酱的价格。另外,列出任何带有鱼子酱的菜肴的名称。
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
round(toUInt32OrZero(extract(menu_date, '^\\d{4}')), -1) AS d,
|
||||||
|
count(),
|
||||||
|
round(avg(price), 2),
|
||||||
|
bar(avg(price), 0, 50, 100),
|
||||||
|
any(dish_name)
|
||||||
|
FROM menu_item_denorm
|
||||||
|
WHERE (menu_currency IN ('Dollars', '')) AND (d > 0) AND (d < 2022) AND (dish_name ILIKE '%caviar%')
|
||||||
|
GROUP BY d
|
||||||
|
ORDER BY d ASC;
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌────d─┬─count()─┬─round(avg(price), 2)─┬─bar(avg(price), 0, 50, 100)──────┬─any(dish_name)──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ 1090 │ 1 │ 0 │ │ Caviar │
|
||||||
|
│ 1880 │ 3 │ 0 │ │ Caviar │
|
||||||
|
│ 1890 │ 39 │ 0.59 │ █▏ │ Butter and caviar │
|
||||||
|
│ 1900 │ 1014 │ 0.34 │ ▋ │ Anchovy Caviar on Toast │
|
||||||
|
│ 1910 │ 1588 │ 1.35 │ ██▋ │ 1/1 Brötchen Caviar │
|
||||||
|
│ 1920 │ 927 │ 1.37 │ ██▋ │ ASTRAKAN CAVIAR │
|
||||||
|
│ 1930 │ 289 │ 1.91 │ ███▋ │ Astrachan caviar │
|
||||||
|
│ 1940 │ 201 │ 0.83 │ █▋ │ (SPECIAL) Domestic Caviar Sandwich │
|
||||||
|
│ 1950 │ 81 │ 2.27 │ ████▌ │ Beluga Caviar │
|
||||||
|
│ 1960 │ 126 │ 2.21 │ ████▍ │ Beluga Caviar │
|
||||||
|
│ 1970 │ 105 │ 0.95 │ █▊ │ BELUGA MALOSSOL CAVIAR AMERICAN DRESSING │
|
||||||
|
│ 1980 │ 12 │ 7.22 │ ██████████████▍ │ Authentic Iranian Beluga Caviar the world's finest black caviar presented in ice garni and a sampling of chilled 100° Russian vodka │
|
||||||
|
│ 1990 │ 74 │ 14.42 │ ████████████████████████████▋ │ Avocado Salad, Fresh cut avocado with caviare │
|
||||||
|
│ 2000 │ 3 │ 7.82 │ ███████████████▋ │ Aufgeschlagenes Kartoffelsueppchen mit Forellencaviar │
|
||||||
|
│ 2010 │ 6 │ 15.58 │ ███████████████████████████████▏ │ "OYSTERS AND PEARLS" "Sabayon" of Pearl Tapioca with Island Creek Oysters and Russian Sevruga Caviar │
|
||||||
|
└──────┴─────────┴──────────────────────┴──────────────────────────────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
至少他们有伏特加配鱼子酱。真棒。
|
||||||
|
|
||||||
|
## 在线 Playground {#playground}
|
||||||
|
|
||||||
|
此数据集已经上传到了 ClickHouse Playground 中,[example](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICByb3VuZCh0b1VJbnQzMk9yWmVybyhleHRyYWN0KG1lbnVfZGF0ZSwgJ15cXGR7NH0nKSksIC0xKSBBUyBkLAogICAgY291bnQoKSwKICAgIHJvdW5kKGF2ZyhwcmljZSksIDIpLAogICAgYmFyKGF2ZyhwcmljZSksIDAsIDUwLCAxMDApLAogICAgYW55KGRpc2hfbmFtZSkKRlJPTSBtZW51X2l0ZW1fZGVub3JtCldIRVJFIChtZW51X2N1cnJlbmN5IElOICgnRG9sbGFycycsICcnKSkgQU5EIChkID4gMCkgQU5EIChkIDwgMjAyMikgQU5EIChkaXNoX25hbWUgSUxJS0UgJyVjYXZpYXIlJykKR1JPVVAgQlkgZApPUkRFUiBCWSBkIEFTQw==)。
|
||||||
|
@ -1,9 +1,416 @@
|
|||||||
---
|
---
|
||||||
slug: /zh/getting-started/example-datasets/opensky
|
slug: /zh/getting-started/example-datasets/opensky
|
||||||
sidebar_label: Air Traffic Data
|
sidebar_label: 空中交通数据
|
||||||
title: "Crowdsourced air traffic data from The OpenSky Network 2020"
|
description: 该数据集中的数据是从完整的 OpenSky 数据集中衍生而来的,对其中的数据进行了必要的清理,用以展示在 COVID-19 期间空中交通的发展。
|
||||||
|
title: "来自 The OpenSky Network 2020 的众包空中交通数据"
|
||||||
---
|
---
|
||||||
|
|
||||||
import Content from '@site/docs/en/getting-started/example-datasets/opensky.md';
|
该数据集中的数据是从完整的 OpenSky 数据集中派生和清理的,以说明 COVID-19 大流行期间空中交通的发展。它涵盖了自 2019 年 1 月 1 日以来该网络中 2500 多名成员观测到的所有航班。直到 COVID-19 大流行结束,更多数据将定期的更新到数据集中。
|
||||||
|
|
||||||
<Content />
|
来源:https://zenodo.org/record/5092942#.YRBCyTpRXYd
|
||||||
|
|
||||||
|
Martin Strohmeier、Xavier Olive、Jannis Lübbe、Matthias Schäfer 和 Vincent Lenders “来自 OpenSky 网络 2019-2020 的众包空中交通数据”地球系统科学数据 13(2),2021 https://doi.org/10.5194/essd-13-357-2021
|
||||||
|
|
||||||
|
## 下载数据集 {#download-dataset}
|
||||||
|
|
||||||
|
运行命令:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
wget -O- https://zenodo.org/record/5092942 | grep -oP 'https://zenodo.org/record/5092942/files/flightlist_\d+_\d+\.csv\.gz' | xargs wget
|
||||||
|
```
|
||||||
|
|
||||||
|
在网络连接良好的情况下,下载大约需要 2 分钟。共有 30 个文件,总大小为 4.3 GB。
|
||||||
|
|
||||||
|
## 创建表 {#create-table}
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE opensky
|
||||||
|
(
|
||||||
|
callsign String,
|
||||||
|
number String,
|
||||||
|
icao24 String,
|
||||||
|
registration String,
|
||||||
|
typecode String,
|
||||||
|
origin String,
|
||||||
|
destination String,
|
||||||
|
firstseen DateTime,
|
||||||
|
lastseen DateTime,
|
||||||
|
day DateTime,
|
||||||
|
latitude_1 Float64,
|
||||||
|
longitude_1 Float64,
|
||||||
|
altitude_1 Float64,
|
||||||
|
latitude_2 Float64,
|
||||||
|
longitude_2 Float64,
|
||||||
|
altitude_2 Float64
|
||||||
|
) ENGINE = MergeTree ORDER BY (origin, destination, callsign);
|
||||||
|
```
|
||||||
|
|
||||||
|
## 导入数据 {#import-data}
|
||||||
|
|
||||||
|
将数据并行导入到 ClickHouse:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
ls -1 flightlist_*.csv.gz | xargs -P100 -I{} bash -c 'gzip -c -d "{}" | clickhouse-client --date_time_input_format best_effort --query "INSERT INTO opensky FORMAT CSVWithNames"'
|
||||||
|
```
|
||||||
|
|
||||||
|
- 这里我们将文件列表(`ls -1 flightlist_*.csv.gz`)传递给`xargs`以进行并行处理。 `xargs -P100` 指定最多使用 100 个并行工作程序,但由于我们只有 30 个文件,工作程序的数量将只有 30 个。
|
||||||
|
- 对于每个文件,`xargs` 将通过 `bash -c` 为每个文件运行一个脚本文件。该脚本通过使用 `{}` 表示文件名占位符,然后 `xargs` 由命令进行填充(使用 `-I{}`)。
|
||||||
|
- 该脚本会将文件 (`gzip -c -d "{}"`) 解压缩到标准输出(`-c` 参数),并将输出重定向到 `clickhouse-client`。
|
||||||
|
- 我们还要求使用扩展解析器解析 [DateTime](../../sql-reference/data-types/datetime.md) 字段 ([--date_time_input_format best_effort](../../operations/settings/settings.md#settings-date_time_input_format)) 以识别具有时区偏移的 ISO-8601 格式。
|
||||||
|
|
||||||
|
最后,`clickhouse-client` 会以 [CSVWithNames](../../interfaces/formats.md#csvwithnames) 格式读取输入数据然后执行插入。
|
||||||
|
|
||||||
|
并行导入需要 24 秒。
|
||||||
|
|
||||||
|
如果您不想使用并行导入,以下是顺序导入的方式:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
for file in flightlist_*.csv.gz; do gzip -c -d "$file" | clickhouse-client --date_time_input_format best_effort --query "INSERT INTO opensky FORMAT CSVWithNames"; done
|
||||||
|
```
|
||||||
|
|
||||||
|
## 验证数据 {#validate-data}
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT count() FROM opensky;
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌──count()─┐
|
||||||
|
│ 66010819 │
|
||||||
|
└──────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
ClickHouse 中的数据集大小只有 2.66 GiB,检查一下。
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT formatReadableSize(total_bytes) FROM system.tables WHERE name = 'opensky';
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─formatReadableSize(total_bytes)─┐
|
||||||
|
│ 2.66 GiB │
|
||||||
|
└─────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## 运行一些查询 {#run-queries}
|
||||||
|
|
||||||
|
总行驶距离为 680 亿公里。
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT formatReadableQuantity(sum(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2)) / 1000) FROM opensky;
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─formatReadableQuantity(divide(sum(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2)), 1000))─┐
|
||||||
|
│ 68.72 billion │
|
||||||
|
└──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
平均飞行距离约为 1000 公里。
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT avg(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2)) FROM opensky;
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─avg(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2))─┐
|
||||||
|
│ 1041090.6465708319 │
|
||||||
|
└────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### 最繁忙的始发机场和观测到的平均距离 {#busy-airports-average-distance}
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
origin,
|
||||||
|
count(),
|
||||||
|
round(avg(geoDistance(longitude_1, latitude_1, longitude_2, latitude_2))) AS distance,
|
||||||
|
bar(distance, 0, 10000000, 100) AS bar
|
||||||
|
FROM opensky
|
||||||
|
WHERE origin != ''
|
||||||
|
GROUP BY origin
|
||||||
|
ORDER BY count() DESC
|
||||||
|
LIMIT 100;
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─origin─┬─count()─┬─distance─┬─bar────────────────────────────────────┐
|
||||||
|
1. │ KORD │ 745007 │ 1546108 │ ███████████████▍ │
|
||||||
|
2. │ KDFW │ 696702 │ 1358721 │ █████████████▌ │
|
||||||
|
3. │ KATL │ 667286 │ 1169661 │ ███████████▋ │
|
||||||
|
4. │ KDEN │ 582709 │ 1287742 │ ████████████▊ │
|
||||||
|
5. │ KLAX │ 581952 │ 2628393 │ ██████████████████████████▎ │
|
||||||
|
6. │ KLAS │ 447789 │ 1336967 │ █████████████▎ │
|
||||||
|
7. │ KPHX │ 428558 │ 1345635 │ █████████████▍ │
|
||||||
|
8. │ KSEA │ 412592 │ 1757317 │ █████████████████▌ │
|
||||||
|
9. │ KCLT │ 404612 │ 880355 │ ████████▋ │
|
||||||
|
10. │ VIDP │ 363074 │ 1445052 │ ██████████████▍ │
|
||||||
|
11. │ EDDF │ 362643 │ 2263960 │ ██████████████████████▋ │
|
||||||
|
12. │ KSFO │ 361869 │ 2445732 │ ████████████████████████▍ │
|
||||||
|
13. │ KJFK │ 349232 │ 2996550 │ █████████████████████████████▊ │
|
||||||
|
14. │ KMSP │ 346010 │ 1287328 │ ████████████▋ │
|
||||||
|
15. │ LFPG │ 344748 │ 2206203 │ ██████████████████████ │
|
||||||
|
16. │ EGLL │ 341370 │ 3216593 │ ████████████████████████████████▏ │
|
||||||
|
17. │ EHAM │ 340272 │ 2116425 │ █████████████████████▏ │
|
||||||
|
18. │ KEWR │ 337696 │ 1826545 │ ██████████████████▎ │
|
||||||
|
19. │ KPHL │ 320762 │ 1291761 │ ████████████▊ │
|
||||||
|
20. │ OMDB │ 308855 │ 2855706 │ ████████████████████████████▌ │
|
||||||
|
21. │ UUEE │ 307098 │ 1555122 │ ███████████████▌ │
|
||||||
|
22. │ KBOS │ 304416 │ 1621675 │ ████████████████▏ │
|
||||||
|
23. │ LEMD │ 291787 │ 1695097 │ ████████████████▊ │
|
||||||
|
24. │ YSSY │ 272979 │ 1875298 │ ██████████████████▋ │
|
||||||
|
25. │ KMIA │ 265121 │ 1923542 │ ███████████████████▏ │
|
||||||
|
26. │ ZGSZ │ 263497 │ 745086 │ ███████▍ │
|
||||||
|
27. │ EDDM │ 256691 │ 1361453 │ █████████████▌ │
|
||||||
|
28. │ WMKK │ 254264 │ 1626688 │ ████████████████▎ │
|
||||||
|
29. │ CYYZ │ 251192 │ 2175026 │ █████████████████████▋ │
|
||||||
|
30. │ KLGA │ 248699 │ 1106935 │ ███████████ │
|
||||||
|
31. │ VHHH │ 248473 │ 3457658 │ ██████████████████████████████████▌ │
|
||||||
|
32. │ RJTT │ 243477 │ 1272744 │ ████████████▋ │
|
||||||
|
33. │ KBWI │ 241440 │ 1187060 │ ███████████▋ │
|
||||||
|
34. │ KIAD │ 239558 │ 1683485 │ ████████████████▋ │
|
||||||
|
35. │ KIAH │ 234202 │ 1538335 │ ███████████████▍ │
|
||||||
|
36. │ KFLL │ 223447 │ 1464410 │ ██████████████▋ │
|
||||||
|
37. │ KDAL │ 212055 │ 1082339 │ ██████████▋ │
|
||||||
|
38. │ KDCA │ 207883 │ 1013359 │ ██████████▏ │
|
||||||
|
39. │ LIRF │ 207047 │ 1427965 │ ██████████████▎ │
|
||||||
|
40. │ PANC │ 206007 │ 2525359 │ █████████████████████████▎ │
|
||||||
|
41. │ LTFJ │ 205415 │ 860470 │ ████████▌ │
|
||||||
|
42. │ KDTW │ 204020 │ 1106716 │ ███████████ │
|
||||||
|
43. │ VABB │ 201679 │ 1300865 │ █████████████ │
|
||||||
|
44. │ OTHH │ 200797 │ 3759544 │ █████████████████████████████████████▌ │
|
||||||
|
45. │ KMDW │ 200796 │ 1232551 │ ████████████▎ │
|
||||||
|
46. │ KSAN │ 198003 │ 1495195 │ ██████████████▊ │
|
||||||
|
47. │ KPDX │ 197760 │ 1269230 │ ████████████▋ │
|
||||||
|
48. │ SBGR │ 197624 │ 2041697 │ ████████████████████▍ │
|
||||||
|
49. │ VOBL │ 189011 │ 1040180 │ ██████████▍ │
|
||||||
|
50. │ LEBL │ 188956 │ 1283190 │ ████████████▋ │
|
||||||
|
51. │ YBBN │ 188011 │ 1253405 │ ████████████▌ │
|
||||||
|
52. │ LSZH │ 187934 │ 1572029 │ ███████████████▋ │
|
||||||
|
53. │ YMML │ 187643 │ 1870076 │ ██████████████████▋ │
|
||||||
|
54. │ RCTP │ 184466 │ 2773976 │ ███████████████████████████▋ │
|
||||||
|
55. │ KSNA │ 180045 │ 778484 │ ███████▋ │
|
||||||
|
56. │ EGKK │ 176420 │ 1694770 │ ████████████████▊ │
|
||||||
|
57. │ LOWW │ 176191 │ 1274833 │ ████████████▋ │
|
||||||
|
58. │ UUDD │ 176099 │ 1368226 │ █████████████▋ │
|
||||||
|
59. │ RKSI │ 173466 │ 3079026 │ ██████████████████████████████▋ │
|
||||||
|
60. │ EKCH │ 172128 │ 1229895 │ ████████████▎ │
|
||||||
|
61. │ KOAK │ 171119 │ 1114447 │ ███████████▏ │
|
||||||
|
62. │ RPLL │ 170122 │ 1440735 │ ██████████████▍ │
|
||||||
|
63. │ KRDU │ 167001 │ 830521 │ ████████▎ │
|
||||||
|
64. │ KAUS │ 164524 │ 1256198 │ ████████████▌ │
|
||||||
|
65. │ KBNA │ 163242 │ 1022726 │ ██████████▏ │
|
||||||
|
66. │ KSDF │ 162655 │ 1380867 │ █████████████▋ │
|
||||||
|
67. │ ENGM │ 160732 │ 910108 │ █████████ │
|
||||||
|
68. │ LIMC │ 160696 │ 1564620 │ ███████████████▋ │
|
||||||
|
69. │ KSJC │ 159278 │ 1081125 │ ██████████▋ │
|
||||||
|
70. │ KSTL │ 157984 │ 1026699 │ ██████████▎ │
|
||||||
|
71. │ UUWW │ 156811 │ 1261155 │ ████████████▌ │
|
||||||
|
72. │ KIND │ 153929 │ 987944 │ █████████▊ │
|
||||||
|
73. │ ESSA │ 153390 │ 1203439 │ ████████████ │
|
||||||
|
74. │ KMCO │ 153351 │ 1508657 │ ███████████████ │
|
||||||
|
75. │ KDVT │ 152895 │ 74048 │ ▋ │
|
||||||
|
76. │ VTBS │ 152645 │ 2255591 │ ██████████████████████▌ │
|
||||||
|
77. │ CYVR │ 149574 │ 2027413 │ ████████████████████▎ │
|
||||||
|
78. │ EIDW │ 148723 │ 1503985 │ ███████████████ │
|
||||||
|
79. │ LFPO │ 143277 │ 1152964 │ ███████████▌ │
|
||||||
|
80. │ EGSS │ 140830 │ 1348183 │ █████████████▍ │
|
||||||
|
81. │ KAPA │ 140776 │ 420441 │ ████▏ │
|
||||||
|
82. │ KHOU │ 138985 │ 1068806 │ ██████████▋ │
|
||||||
|
83. │ KTPA │ 138033 │ 1338223 │ █████████████▍ │
|
||||||
|
84. │ KFFZ │ 137333 │ 55397 │ ▌ │
|
||||||
|
85. │ NZAA │ 136092 │ 1581264 │ ███████████████▋ │
|
||||||
|
86. │ YPPH │ 133916 │ 1271550 │ ████████████▋ │
|
||||||
|
87. │ RJBB │ 133522 │ 1805623 │ ██████████████████ │
|
||||||
|
88. │ EDDL │ 133018 │ 1265919 │ ████████████▋ │
|
||||||
|
89. │ ULLI │ 130501 │ 1197108 │ ███████████▊ │
|
||||||
|
90. │ KIWA │ 127195 │ 250876 │ ██▌ │
|
||||||
|
91. │ KTEB │ 126969 │ 1189414 │ ███████████▊ │
|
||||||
|
92. │ VOMM │ 125616 │ 1127757 │ ███████████▎ │
|
||||||
|
93. │ LSGG │ 123998 │ 1049101 │ ██████████▍ │
|
||||||
|
94. │ LPPT │ 122733 │ 1779187 │ █████████████████▋ │
|
||||||
|
95. │ WSSS │ 120493 │ 3264122 │ ████████████████████████████████▋ │
|
||||||
|
96. │ EBBR │ 118539 │ 1579939 │ ███████████████▋ │
|
||||||
|
97. │ VTBD │ 118107 │ 661627 │ ██████▌ │
|
||||||
|
98. │ KVNY │ 116326 │ 692960 │ ██████▊ │
|
||||||
|
99. │ EDDT │ 115122 │ 941740 │ █████████▍ │
|
||||||
|
100. │ EFHK │ 114860 │ 1629143 │ ████████████████▎ │
|
||||||
|
└────────┴─────────┴──────────┴────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### 每周来自莫斯科三个主要机场的航班数量 {#flights-from-moscow}
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
toMonday(day) AS k,
|
||||||
|
count() AS c,
|
||||||
|
bar(c, 0, 10000, 100) AS bar
|
||||||
|
FROM opensky
|
||||||
|
WHERE origin IN ('UUEE', 'UUDD', 'UUWW')
|
||||||
|
GROUP BY k
|
||||||
|
ORDER BY k ASC;
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌──────────k─┬────c─┬─bar──────────────────────────────────────────────────────────────────────────┐
|
||||||
|
1. │ 2018-12-31 │ 5248 │ ████████████████████████████████████████████████████▍ │
|
||||||
|
2. │ 2019-01-07 │ 6302 │ ███████████████████████████████████████████████████████████████ │
|
||||||
|
3. │ 2019-01-14 │ 5701 │ █████████████████████████████████████████████████████████ │
|
||||||
|
4. │ 2019-01-21 │ 5638 │ ████████████████████████████████████████████████████████▍ │
|
||||||
|
5. │ 2019-01-28 │ 5731 │ █████████████████████████████████████████████████████████▎ │
|
||||||
|
6. │ 2019-02-04 │ 5683 │ ████████████████████████████████████████████████████████▋ │
|
||||||
|
7. │ 2019-02-11 │ 5759 │ █████████████████████████████████████████████████████████▌ │
|
||||||
|
8. │ 2019-02-18 │ 5736 │ █████████████████████████████████████████████████████████▎ │
|
||||||
|
9. │ 2019-02-25 │ 5873 │ ██████████████████████████████████████████████████████████▋ │
|
||||||
|
10. │ 2019-03-04 │ 5965 │ ███████████████████████████████████████████████████████████▋ │
|
||||||
|
11. │ 2019-03-11 │ 5900 │ ███████████████████████████████████████████████████████████ │
|
||||||
|
12. │ 2019-03-18 │ 5823 │ ██████████████████████████████████████████████████████████▏ │
|
||||||
|
13. │ 2019-03-25 │ 5899 │ ██████████████████████████████████████████████████████████▊ │
|
||||||
|
14. │ 2019-04-01 │ 6043 │ ████████████████████████████████████████████████████████████▍ │
|
||||||
|
15. │ 2019-04-08 │ 6098 │ ████████████████████████████████████████████████████████████▊ │
|
||||||
|
16. │ 2019-04-15 │ 6196 │ █████████████████████████████████████████████████████████████▊ │
|
||||||
|
17. │ 2019-04-22 │ 6486 │ ████████████████████████████████████████████████████████████████▋ │
|
||||||
|
18. │ 2019-04-29 │ 6682 │ ██████████████████████████████████████████████████████████████████▋ │
|
||||||
|
19. │ 2019-05-06 │ 6739 │ ███████████████████████████████████████████████████████████████████▍ │
|
||||||
|
20. │ 2019-05-13 │ 6600 │ ██████████████████████████████████████████████████████████████████ │
|
||||||
|
21. │ 2019-05-20 │ 6575 │ █████████████████████████████████████████████████████████████████▋ │
|
||||||
|
22. │ 2019-05-27 │ 6786 │ ███████████████████████████████████████████████████████████████████▋ │
|
||||||
|
23. │ 2019-06-03 │ 6872 │ ████████████████████████████████████████████████████████████████████▋ │
|
||||||
|
24. │ 2019-06-10 │ 7045 │ ██████████████████████████████████████████████████████████████████████▍ │
|
||||||
|
25. │ 2019-06-17 │ 7045 │ ██████████████████████████████████████████████████████████████████████▍ │
|
||||||
|
26. │ 2019-06-24 │ 6852 │ ████████████████████████████████████████████████████████████████████▌ │
|
||||||
|
27. │ 2019-07-01 │ 7248 │ ████████████████████████████████████████████████████████████████████████▍ │
|
||||||
|
28. │ 2019-07-08 │ 7284 │ ████████████████████████████████████████████████████████████████████████▋ │
|
||||||
|
29. │ 2019-07-15 │ 7142 │ ███████████████████████████████████████████████████████████████████████▍ │
|
||||||
|
30. │ 2019-07-22 │ 7108 │ ███████████████████████████████████████████████████████████████████████ │
|
||||||
|
31. │ 2019-07-29 │ 7251 │ ████████████████████████████████████████████████████████████████████████▌ │
|
||||||
|
32. │ 2019-08-05 │ 7403 │ ██████████████████████████████████████████████████████████████████████████ │
|
||||||
|
33. │ 2019-08-12 │ 7457 │ ██████████████████████████████████████████████████████████████████████████▌ │
|
||||||
|
34. │ 2019-08-19 │ 7502 │ ███████████████████████████████████████████████████████████████████████████ │
|
||||||
|
35. │ 2019-08-26 │ 7540 │ ███████████████████████████████████████████████████████████████████████████▍ │
|
||||||
|
36. │ 2019-09-02 │ 7237 │ ████████████████████████████████████████████████████████████████████████▎ │
|
||||||
|
37. │ 2019-09-09 │ 7328 │ █████████████████████████████████████████████████████████████████████████▎ │
|
||||||
|
38. │ 2019-09-16 │ 5566 │ ███████████████████████████████████████████████████████▋ │
|
||||||
|
39. │ 2019-09-23 │ 7049 │ ██████████████████████████████████████████████████████████████████████▍ │
|
||||||
|
40. │ 2019-09-30 │ 6880 │ ████████████████████████████████████████████████████████████████████▋ │
|
||||||
|
41. │ 2019-10-07 │ 6518 │ █████████████████████████████████████████████████████████████████▏ │
|
||||||
|
42. │ 2019-10-14 │ 6688 │ ██████████████████████████████████████████████████████████████████▊ │
|
||||||
|
43. │ 2019-10-21 │ 6667 │ ██████████████████████████████████████████████████████████████████▋ │
|
||||||
|
44. │ 2019-10-28 │ 6303 │ ███████████████████████████████████████████████████████████████ │
|
||||||
|
45. │ 2019-11-04 │ 6298 │ ██████████████████████████████████████████████████████████████▊ │
|
||||||
|
46. │ 2019-11-11 │ 6137 │ █████████████████████████████████████████████████████████████▎ │
|
||||||
|
47. │ 2019-11-18 │ 6051 │ ████████████████████████████████████████████████████████████▌ │
|
||||||
|
48. │ 2019-11-25 │ 5820 │ ██████████████████████████████████████████████████████████▏ │
|
||||||
|
49. │ 2019-12-02 │ 5942 │ ███████████████████████████████████████████████████████████▍ │
|
||||||
|
50. │ 2019-12-09 │ 4891 │ ████████████████████████████████████████████████▊ │
|
||||||
|
51. │ 2019-12-16 │ 5682 │ ████████████████████████████████████████████████████████▋ │
|
||||||
|
52. │ 2019-12-23 │ 6111 │ █████████████████████████████████████████████████████████████ │
|
||||||
|
53. │ 2019-12-30 │ 5870 │ ██████████████████████████████████████████████████████████▋ │
|
||||||
|
54. │ 2020-01-06 │ 5953 │ ███████████████████████████████████████████████████████████▌ │
|
||||||
|
55. │ 2020-01-13 │ 5698 │ ████████████████████████████████████████████████████████▊ │
|
||||||
|
56. │ 2020-01-20 │ 5339 │ █████████████████████████████████████████████████████▍ │
|
||||||
|
57. │ 2020-01-27 │ 5566 │ ███████████████████████████████████████████████████████▋ │
|
||||||
|
58. │ 2020-02-03 │ 5801 │ ██████████████████████████████████████████████████████████ │
|
||||||
|
59. │ 2020-02-10 │ 5692 │ ████████████████████████████████████████████████████████▊ │
|
||||||
|
60. │ 2020-02-17 │ 5912 │ ███████████████████████████████████████████████████████████ │
|
||||||
|
61. │ 2020-02-24 │ 6031 │ ████████████████████████████████████████████████████████████▎ │
|
||||||
|
62. │ 2020-03-02 │ 6105 │ █████████████████████████████████████████████████████████████ │
|
||||||
|
63. │ 2020-03-09 │ 5823 │ ██████████████████████████████████████████████████████████▏ │
|
||||||
|
64. │ 2020-03-16 │ 4659 │ ██████████████████████████████████████████████▌ │
|
||||||
|
65. │ 2020-03-23 │ 3720 │ █████████████████████████████████████▏ │
|
||||||
|
66. │ 2020-03-30 │ 1720 │ █████████████████▏ │
|
||||||
|
67. │ 2020-04-06 │ 849 │ ████████▍ │
|
||||||
|
68. │ 2020-04-13 │ 710 │ ███████ │
|
||||||
|
69. │ 2020-04-20 │ 725 │ ███████▏ │
|
||||||
|
70. │ 2020-04-27 │ 920 │ █████████▏ │
|
||||||
|
71. │ 2020-05-04 │ 859 │ ████████▌ │
|
||||||
|
72. │ 2020-05-11 │ 1047 │ ██████████▍ │
|
||||||
|
73. │ 2020-05-18 │ 1135 │ ███████████▎ │
|
||||||
|
74. │ 2020-05-25 │ 1266 │ ████████████▋ │
|
||||||
|
75. │ 2020-06-01 │ 1793 │ █████████████████▊ │
|
||||||
|
76. │ 2020-06-08 │ 1979 │ ███████████████████▋ │
|
||||||
|
77. │ 2020-06-15 │ 2297 │ ██████████████████████▊ │
|
||||||
|
78. │ 2020-06-22 │ 2788 │ ███████████████████████████▊ │
|
||||||
|
79. │ 2020-06-29 │ 3389 │ █████████████████████████████████▊ │
|
||||||
|
80. │ 2020-07-06 │ 3545 │ ███████████████████████████████████▍ │
|
||||||
|
81. │ 2020-07-13 │ 3569 │ ███████████████████████████████████▋ │
|
||||||
|
82. │ 2020-07-20 │ 3784 │ █████████████████████████████████████▋ │
|
||||||
|
83. │ 2020-07-27 │ 3960 │ ███████████████████████████████████████▌ │
|
||||||
|
84. │ 2020-08-03 │ 4323 │ ███████████████████████████████████████████▏ │
|
||||||
|
85. │ 2020-08-10 │ 4581 │ █████████████████████████████████████████████▋ │
|
||||||
|
86. │ 2020-08-17 │ 4791 │ ███████████████████████████████████████████████▊ │
|
||||||
|
87. │ 2020-08-24 │ 4928 │ █████████████████████████████████████████████████▎ │
|
||||||
|
88. │ 2020-08-31 │ 4687 │ ██████████████████████████████████████████████▋ │
|
||||||
|
89. │ 2020-09-07 │ 4643 │ ██████████████████████████████████████████████▍ │
|
||||||
|
90. │ 2020-09-14 │ 4594 │ █████████████████████████████████████████████▊ │
|
||||||
|
91. │ 2020-09-21 │ 4478 │ ████████████████████████████████████████████▋ │
|
||||||
|
92. │ 2020-09-28 │ 4382 │ ███████████████████████████████████████████▋ │
|
||||||
|
93. │ 2020-10-05 │ 4261 │ ██████████████████████████████████████████▌ │
|
||||||
|
94. │ 2020-10-12 │ 4243 │ ██████████████████████████████████████████▍ │
|
||||||
|
95. │ 2020-10-19 │ 3941 │ ███████████████████████████████████████▍ │
|
||||||
|
96. │ 2020-10-26 │ 3616 │ ████████████████████████████████████▏ │
|
||||||
|
97. │ 2020-11-02 │ 3586 │ ███████████████████████████████████▋ │
|
||||||
|
98. │ 2020-11-09 │ 3403 │ ██████████████████████████████████ │
|
||||||
|
99. │ 2020-11-16 │ 3336 │ █████████████████████████████████▎ │
|
||||||
|
100. │ 2020-11-23 │ 3230 │ ████████████████████████████████▎ │
|
||||||
|
101. │ 2020-11-30 │ 3183 │ ███████████████████████████████▋ │
|
||||||
|
102. │ 2020-12-07 │ 3285 │ ████████████████████████████████▋ │
|
||||||
|
103. │ 2020-12-14 │ 3367 │ █████████████████████████████████▋ │
|
||||||
|
104. │ 2020-12-21 │ 3748 │ █████████████████████████████████████▍ │
|
||||||
|
105. │ 2020-12-28 │ 3986 │ ███████████████████████████████████████▋ │
|
||||||
|
106. │ 2021-01-04 │ 3906 │ ███████████████████████████████████████ │
|
||||||
|
107. │ 2021-01-11 │ 3425 │ ██████████████████████████████████▎ │
|
||||||
|
108. │ 2021-01-18 │ 3144 │ ███████████████████████████████▍ │
|
||||||
|
109. │ 2021-01-25 │ 3115 │ ███████████████████████████████▏ │
|
||||||
|
110. │ 2021-02-01 │ 3285 │ ████████████████████████████████▋ │
|
||||||
|
111. │ 2021-02-08 │ 3321 │ █████████████████████████████████▏ │
|
||||||
|
112. │ 2021-02-15 │ 3475 │ ██████████████████████████████████▋ │
|
||||||
|
113. │ 2021-02-22 │ 3549 │ ███████████████████████████████████▍ │
|
||||||
|
114. │ 2021-03-01 │ 3755 │ █████████████████████████████████████▌ │
|
||||||
|
115. │ 2021-03-08 │ 3080 │ ██████████████████████████████▋ │
|
||||||
|
116. │ 2021-03-15 │ 3789 │ █████████████████████████████████████▊ │
|
||||||
|
117. │ 2021-03-22 │ 3804 │ ██████████████████████████████████████ │
|
||||||
|
118. │ 2021-03-29 │ 4238 │ ██████████████████████████████████████████▍ │
|
||||||
|
119. │ 2021-04-05 │ 4307 │ ███████████████████████████████████████████ │
|
||||||
|
120. │ 2021-04-12 │ 4225 │ ██████████████████████████████████████████▎ │
|
||||||
|
121. │ 2021-04-19 │ 4391 │ ███████████████████████████████████████████▊ │
|
||||||
|
122. │ 2021-04-26 │ 4868 │ ████████████████████████████████████████████████▋ │
|
||||||
|
123. │ 2021-05-03 │ 4977 │ █████████████████████████████████████████████████▋ │
|
||||||
|
124. │ 2021-05-10 │ 5164 │ ███████████████████████████████████████████████████▋ │
|
||||||
|
125. │ 2021-05-17 │ 4986 │ █████████████████████████████████████████████████▋ │
|
||||||
|
126. │ 2021-05-24 │ 5024 │ ██████████████████████████████████████████████████▏ │
|
||||||
|
127. │ 2021-05-31 │ 4824 │ ████████████████████████████████████████████████▏ │
|
||||||
|
128. │ 2021-06-07 │ 5652 │ ████████████████████████████████████████████████████████▌ │
|
||||||
|
129. │ 2021-06-14 │ 5613 │ ████████████████████████████████████████████████████████▏ │
|
||||||
|
130. │ 2021-06-21 │ 6061 │ ████████████████████████████████████████████████████████████▌ │
|
||||||
|
131. │ 2021-06-28 │ 2554 │ █████████████████████████▌ │
|
||||||
|
└────────────┴──────┴──────────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### 在线 Playground {#playground}
|
||||||
|
|
||||||
|
你可以使用交互式资源 [Online Playground](https://play.clickhouse.com/play?user=play) 来尝试对此数据集的其他查询。 例如, [执行这个查询](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBvcmlnaW4sCiAgICBjb3VudCgpLAogICAgcm91bmQoYXZnKGdlb0Rpc3RhbmNlKGxvbmdpdHVkZV8xLCBsYXRpdHVkZV8xLCBsb25naXR1ZGVfMiwgbGF0aXR1ZGVfMikpKSBBUyBkaXN0YW5jZSwKICAgIGJhcihkaXN0YW5jZSwgMCwgMTAwMDAwMDAsIDEwMCkgQVMgYmFyCkZST00gb3BlbnNreQpXSEVSRSBvcmlnaW4gIT0gJycKR1JPVVAgQlkgb3JpZ2luCk9SREVSIEJZIGNvdW50KCkgREVTQwpMSU1JVCAxMDA=). 但是,请注意无法在 Playground 中创建临时表。
|
||||||
|
@ -1,9 +1,339 @@
|
|||||||
---
|
---
|
||||||
slug: /zh/getting-started/example-datasets/recipes
|
slug: /zh/getting-started/example-datasets/recipes
|
||||||
sidebar_label: Recipes Dataset
|
sidebar_label: 食谱数据集
|
||||||
title: "Recipes Dataset"
|
title: "食谱数据集"
|
||||||
---
|
---
|
||||||
|
|
||||||
import Content from '@site/docs/en/getting-started/example-datasets/recipes.md';
|
RecipeNLG 数据集可在 [此处](https://recipenlg.cs.put.poznan.pl/dataset) 下载。其中包含 220 万份食谱。大小略小于 1 GB。
|
||||||
|
|
||||||
<Content />
|
## 下载并解压数据集
|
||||||
|
|
||||||
|
1. 进入下载页面[https://recipenlg.cs.put.poznan.pl/dataset](https://recipenlg.cs.put.poznan.pl/dataset)。
|
||||||
|
2. 接受条款和条件并下载 zip 文件。
|
||||||
|
3. 使用 `unzip` 解压 zip 文件,得到 `full_dataset.csv` 文件。
|
||||||
|
|
||||||
|
## 创建表
|
||||||
|
|
||||||
|
运行 clickhouse-client 并执行以下 CREATE 请求:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
CREATE TABLE recipes
|
||||||
|
(
|
||||||
|
title String,
|
||||||
|
ingredients Array(String),
|
||||||
|
directions Array(String),
|
||||||
|
link String,
|
||||||
|
source LowCardinality(String),
|
||||||
|
NER Array(String)
|
||||||
|
) ENGINE = MergeTree ORDER BY title;
|
||||||
|
```
|
||||||
|
|
||||||
|
## 插入数据
|
||||||
|
|
||||||
|
运行以下命令:
|
||||||
|
|
||||||
|
``` bash
|
||||||
|
clickhouse-client --query "
|
||||||
|
INSERT INTO recipes
|
||||||
|
SELECT
|
||||||
|
title,
|
||||||
|
JSONExtract(ingredients, 'Array(String)'),
|
||||||
|
JSONExtract(directions, 'Array(String)'),
|
||||||
|
link,
|
||||||
|
source,
|
||||||
|
JSONExtract(NER, 'Array(String)')
|
||||||
|
FROM input('num UInt32, title String, ingredients String, directions String, link String, source LowCardinality(String), NER String')
|
||||||
|
FORMAT CSVWithNames
|
||||||
|
" --input_format_with_names_use_header 0 --format_csv_allow_single_quote 0 --input_format_allow_errors_num 10 < full_dataset.csv
|
||||||
|
```
|
||||||
|
|
||||||
|
这是一个展示如何解析自定义 CSV 的示例,其中涉及许多调整。
|
||||||
|
|
||||||
|
说明:
|
||||||
|
- 数据集为 CSV 格式,但在插入时需要一些预处理;使用表函数 [input](../../sql-reference/table-functions/input.md) 进行预处理;
|
||||||
|
- CSV 文件的结构在表函数 `input` 的参数中指定;
|
||||||
|
- 字段 `num`(行号)是不需要的——可以从文件中解析出来后直接忽略;
|
||||||
|
- 使用 `FORMAT CSVWithNames`,因为标题不包含第一个字段的名称,因此 CSV 中的标题将被忽略(通过命令行参数 `--input_format_with_names_use_header 0`);
|
||||||
|
- 文件仅使用双引号将 CSV 字符串括起来;一些字符串没有用双引号括起来,单引号也不能被解析为括起来的字符串 - 所以添加`--format_csv_allow_single_quote 0`参数接受文件中的单引号;
|
||||||
|
- 由于某些 CSV 的字符串的开头包含 `\M/` 因此无法被解析; CSV 中唯一可能以反斜杠开头的值是 `\N`,这个值被解析为 SQL NULL。通过添加`--input_format_allow_errors_num 10`参数,允许在导入过程中跳过 10 个格式错误;
|
||||||
|
- 在数据集中的 ingredients、directions 和 NER 字段为数组;但这些数组并没有以一般形式表示:这些字段作为 JSON 序列化为字符串,然后放入 CSV 中——在导入时将它们解析为字符串,然后使用 [JSONExtract](../../sql-reference/functions/json-functions.md) 函数将其转换为数组。
|
||||||
|
|
||||||
|
## 验证插入的数据
|
||||||
|
|
||||||
|
通过检查行数:
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT count() FROM recipes;
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─count()─┐
|
||||||
|
│ 2231141 │
|
||||||
|
└─────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## 示例查询
|
||||||
|
|
||||||
|
### 按食谱数量排列的最常用食材:
|
||||||
|
|
||||||
|
在此示例中,我们学习如何使用 [arrayJoin](../../sql-reference/functions/array-join/) 函数将数组扩展为行的集合。
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT
|
||||||
|
arrayJoin(NER) AS k,
|
||||||
|
count() AS c
|
||||||
|
FROM recipes
|
||||||
|
GROUP BY k
|
||||||
|
ORDER BY c DESC
|
||||||
|
LIMIT 50
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─k────────────────────┬──────c─┐
|
||||||
|
│ salt │ 890741 │
|
||||||
|
│ sugar │ 620027 │
|
||||||
|
│ butter │ 493823 │
|
||||||
|
│ flour │ 466110 │
|
||||||
|
│ eggs │ 401276 │
|
||||||
|
│ onion │ 372469 │
|
||||||
|
│ garlic │ 358364 │
|
||||||
|
│ milk │ 346769 │
|
||||||
|
│ water │ 326092 │
|
||||||
|
│ vanilla │ 270381 │
|
||||||
|
│ olive oil │ 197877 │
|
||||||
|
│ pepper │ 179305 │
|
||||||
|
│ brown sugar │ 174447 │
|
||||||
|
│ tomatoes │ 163933 │
|
||||||
|
│ egg │ 160507 │
|
||||||
|
│ baking powder │ 148277 │
|
||||||
|
│ lemon juice │ 146414 │
|
||||||
|
│ Salt │ 122557 │
|
||||||
|
│ cinnamon │ 117927 │
|
||||||
|
│ sour cream │ 116682 │
|
||||||
|
│ cream cheese │ 114423 │
|
||||||
|
│ margarine │ 112742 │
|
||||||
|
│ celery │ 112676 │
|
||||||
|
│ baking soda │ 110690 │
|
||||||
|
│ parsley │ 102151 │
|
||||||
|
│ chicken │ 101505 │
|
||||||
|
│ onions │ 98903 │
|
||||||
|
│ vegetable oil │ 91395 │
|
||||||
|
│ oil │ 85600 │
|
||||||
|
│ mayonnaise │ 84822 │
|
||||||
|
│ pecans │ 79741 │
|
||||||
|
│ nuts │ 78471 │
|
||||||
|
│ potatoes │ 75820 │
|
||||||
|
│ carrots │ 75458 │
|
||||||
|
│ pineapple │ 74345 │
|
||||||
|
│ soy sauce │ 70355 │
|
||||||
|
│ black pepper │ 69064 │
|
||||||
|
│ thyme │ 68429 │
|
||||||
|
│ mustard │ 65948 │
|
||||||
|
│ chicken broth │ 65112 │
|
||||||
|
│ bacon │ 64956 │
|
||||||
|
│ honey │ 64626 │
|
||||||
|
│ oregano │ 64077 │
|
||||||
|
│ ground beef │ 64068 │
|
||||||
|
│ unsalted butter │ 63848 │
|
||||||
|
│ mushrooms │ 61465 │
|
||||||
|
│ Worcestershire sauce │ 59328 │
|
||||||
|
│ cornstarch │ 58476 │
|
||||||
|
│ green pepper │ 58388 │
|
||||||
|
│ Cheddar cheese │ 58354 │
|
||||||
|
└──────────────────────┴────────┘
|
||||||
|
|
||||||
|
50 rows in set. Elapsed: 0.112 sec. Processed 2.23 million rows, 361.57 MB (19.99 million rows/s., 3.24 GB/s.)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 最复杂的草莓食谱
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT
|
||||||
|
title,
|
||||||
|
length(NER),
|
||||||
|
length(directions)
|
||||||
|
FROM recipes
|
||||||
|
WHERE has(NER, 'strawberry')
|
||||||
|
ORDER BY length(directions) DESC
|
||||||
|
LIMIT 10
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─title────────────────────────────────────────────────────────────┬─length(NER)─┬─length(directions)─┐
|
||||||
|
│ Chocolate-Strawberry-Orange Wedding Cake │ 24 │ 126 │
|
||||||
|
│ Strawberry Cream Cheese Crumble Tart │ 19 │ 47 │
|
||||||
|
│ Charlotte-Style Ice Cream │ 11 │ 45 │
|
||||||
|
│ Sinfully Good a Million Layers Chocolate Layer Cake, With Strawb │ 31 │ 45 │
|
||||||
|
│ Sweetened Berries With Elderflower Sherbet │ 24 │ 44 │
|
||||||
|
│ Chocolate-Strawberry Mousse Cake │ 15 │ 42 │
|
||||||
|
│ Rhubarb Charlotte with Strawberries and Rum │ 20 │ 42 │
|
||||||
|
│ Chef Joey's Strawberry Vanilla Tart │ 7 │ 37 │
|
||||||
|
│ Old-Fashioned Ice Cream Sundae Cake │ 17 │ 37 │
|
||||||
|
│ Watermelon Cake │ 16 │ 36 │
|
||||||
|
└──────────────────────────────────────────────────────────────────┴─────────────┴────────────────────┘
|
||||||
|
|
||||||
|
10 rows in set. Elapsed: 0.215 sec. Processed 2.23 million rows, 1.48 GB (10.35 million rows/s., 6.86 GB/s.)
|
||||||
|
```
|
||||||
|
|
||||||
|
在此示例中,我们使用 [has](../../sql-reference/functions/array-functions/#hasarr-elem) 函数来过滤数组类型的元素,并按 directions 的数量进行排序。
|
||||||
|
|
||||||
|
有一个婚礼蛋糕需要整整 126 个步骤来制作!显示 directions:
|
||||||
|
|
||||||
|
请求:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT arrayJoin(directions)
|
||||||
|
FROM recipes
|
||||||
|
WHERE title = 'Chocolate-Strawberry-Orange Wedding Cake'
|
||||||
|
```
|
||||||
|
|
||||||
|
结果:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─arrayJoin(directions)───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
|
||||||
|
│ Position 1 rack in center and 1 rack in bottom third of oven and preheat to 350F. │
|
||||||
|
│ Butter one 5-inch-diameter cake pan with 2-inch-high sides, one 8-inch-diameter cake pan with 2-inch-high sides and one 12-inch-diameter cake pan with 2-inch-high sides. │
|
||||||
|
│ Dust pans with flour; line bottoms with parchment. │
|
||||||
|
│ Combine 1/3 cup orange juice and 2 ounces unsweetened chocolate in heavy small saucepan. │
|
||||||
|
│ Stir mixture over medium-low heat until chocolate melts. │
|
||||||
|
│ Remove from heat. │
|
||||||
|
│ Gradually mix in 1 2/3 cups orange juice. │
|
||||||
|
│ Sift 3 cups flour, 2/3 cup cocoa, 2 teaspoons baking soda, 1 teaspoon salt and 1/2 teaspoon baking powder into medium bowl. │
|
||||||
|
│ using electric mixer, beat 1 cup (2 sticks) butter and 3 cups sugar in large bowl until blended (mixture will look grainy). │
|
||||||
|
│ Add 4 eggs, 1 at a time, beating to blend after each. │
|
||||||
|
│ Beat in 1 tablespoon orange peel and 1 tablespoon vanilla extract. │
|
||||||
|
│ Add dry ingredients alternately with orange juice mixture in 3 additions each, beating well after each addition. │
|
||||||
|
│ Mix in 1 cup chocolate chips. │
|
||||||
|
│ Transfer 1 cup plus 2 tablespoons batter to prepared 5-inch pan, 3 cups batter to prepared 8-inch pan and remaining batter (about 6 cups) to 12-inch pan. │
|
||||||
|
│ Place 5-inch and 8-inch pans on center rack of oven. │
|
||||||
|
│ Place 12-inch pan on lower rack of oven. │
|
||||||
|
│ Bake cakes until tester inserted into center comes out clean, about 35 minutes. │
|
||||||
|
│ Transfer cakes in pans to racks and cool completely. │
|
||||||
|
│ Mark 4-inch diameter circle on one 6-inch-diameter cardboard cake round. │
|
||||||
|
│ Cut out marked circle. │
|
||||||
|
│ Mark 7-inch-diameter circle on one 8-inch-diameter cardboard cake round. │
|
||||||
|
│ Cut out marked circle. │
|
||||||
|
│ Mark 11-inch-diameter circle on one 12-inch-diameter cardboard cake round. │
|
||||||
|
│ Cut out marked circle. │
|
||||||
|
│ Cut around sides of 5-inch-cake to loosen. │
|
||||||
|
│ Place 4-inch cardboard over pan. │
|
||||||
|
│ Hold cardboard and pan together; turn cake out onto cardboard. │
|
||||||
|
│ Peel off parchment.Wrap cakes on its cardboard in foil. │
|
||||||
|
│ Repeat turning out, peeling off parchment and wrapping cakes in foil, using 7-inch cardboard for 8-inch cake and 11-inch cardboard for 12-inch cake. │
|
||||||
|
│ Using remaining ingredients, make 1 more batch of cake batter and bake 3 more cake layers as described above. │
|
||||||
|
│ Cool cakes in pans. │
|
||||||
|
│ Cover cakes in pans tightly with foil. │
|
||||||
|
│ (Can be prepared ahead. │
|
||||||
|
│ Let stand at room temperature up to 1 day or double-wrap all cake layers and freeze up to 1 week. │
|
||||||
|
│ Bring cake layers to room temperature before using.) │
|
||||||
|
│ Place first 12-inch cake on its cardboard on work surface. │
|
||||||
|
│ Spread 2 3/4 cups ganache over top of cake and all the way to edge. │
|
||||||
|
│ Spread 2/3 cup jam over ganache, leaving 1/2-inch chocolate border at edge. │
|
||||||
|
│ Drop 1 3/4 cups white chocolate frosting by spoonfuls over jam. │
|
||||||
|
│ Gently spread frosting over jam, leaving 1/2-inch chocolate border at edge. │
|
||||||
|
│ Rub some cocoa powder over second 12-inch cardboard. │
|
||||||
|
│ Cut around sides of second 12-inch cake to loosen. │
|
||||||
|
│ Place cardboard, cocoa side down, over pan. │
|
||||||
|
│ Turn cake out onto cardboard. │
|
||||||
|
│ Peel off parchment. │
|
||||||
|
│ Carefully slide cake off cardboard and onto filling on first 12-inch cake. │
|
||||||
|
│ Refrigerate. │
|
||||||
|
│ Place first 8-inch cake on its cardboard on work surface. │
|
||||||
|
│ Spread 1 cup ganache over top all the way to edge. │
|
||||||
|
│ Spread 1/4 cup jam over, leaving 1/2-inch chocolate border at edge. │
|
||||||
|
│ Drop 1 cup white chocolate frosting by spoonfuls over jam. │
|
||||||
|
│ Gently spread frosting over jam, leaving 1/2-inch chocolate border at edge. │
|
||||||
|
│ Rub some cocoa over second 8-inch cardboard. │
|
||||||
|
│ Cut around sides of second 8-inch cake to loosen. │
|
||||||
|
│ Place cardboard, cocoa side down, over pan. │
|
||||||
|
│ Turn cake out onto cardboard. │
|
||||||
|
│ Peel off parchment. │
|
||||||
|
│ Slide cake off cardboard and onto filling on first 8-inch cake. │
|
||||||
|
│ Refrigerate. │
|
||||||
|
│ Place first 5-inch cake on its cardboard on work surface. │
|
||||||
|
│ Spread 1/2 cup ganache over top of cake and all the way to edge. │
|
||||||
|
│ Spread 2 tablespoons jam over, leaving 1/2-inch chocolate border at edge. │
|
||||||
|
│ Drop 1/3 cup white chocolate frosting by spoonfuls over jam. │
|
||||||
|
│ Gently spread frosting over jam, leaving 1/2-inch chocolate border at edge. │
|
||||||
|
│ Rub cocoa over second 6-inch cardboard. │
|
||||||
|
│ Cut around sides of second 5-inch cake to loosen. │
|
||||||
|
│ Place cardboard, cocoa side down, over pan. │
|
||||||
|
│ Turn cake out onto cardboard. │
|
||||||
|
│ Peel off parchment. │
|
||||||
|
│ Slide cake off cardboard and onto filling on first 5-inch cake. │
|
||||||
|
│ Chill all cakes 1 hour to set filling. │
|
||||||
|
│ Place 12-inch tiered cake on its cardboard on revolving cake stand. │
|
||||||
|
│ Spread 2 2/3 cups frosting over top and sides of cake as a first coat. │
|
||||||
|
│ Refrigerate cake. │
|
||||||
|
│ Place 8-inch tiered cake on its cardboard on cake stand. │
|
||||||
|
│ Spread 1 1/4 cups frosting over top and sides of cake as a first coat. │
|
||||||
|
│ Refrigerate cake. │
|
||||||
|
│ Place 5-inch tiered cake on its cardboard on cake stand. │
|
||||||
|
│ Spread 3/4 cup frosting over top and sides of cake as a first coat. │
|
||||||
|
│ Refrigerate all cakes until first coats of frosting set, about 1 hour. │
|
||||||
|
│ (Cakes can be made to this point up to 1 day ahead; cover and keep refrigerate.) │
|
||||||
|
│ Prepare second batch of frosting, using remaining frosting ingredients and following directions for first batch. │
|
||||||
|
│ Spoon 2 cups frosting into pastry bag fitted with small star tip. │
|
||||||
|
│ Place 12-inch cake on its cardboard on large flat platter. │
|
||||||
|
│ Place platter on cake stand. │
|
||||||
|
│ Using icing spatula, spread 2 1/2 cups frosting over top and sides of cake; smooth top. │
|
||||||
|
│ Using filled pastry bag, pipe decorative border around top edge of cake. │
|
||||||
|
│ Refrigerate cake on platter. │
|
||||||
|
│ Place 8-inch cake on its cardboard on cake stand. │
|
||||||
|
│ Using icing spatula, spread 1 1/2 cups frosting over top and sides of cake; smooth top. │
|
||||||
|
│ Using pastry bag, pipe decorative border around top edge of cake. │
|
||||||
|
│ Refrigerate cake on its cardboard. │
|
||||||
|
│ Place 5-inch cake on its cardboard on cake stand. │
|
||||||
|
│ Using icing spatula, spread 3/4 cup frosting over top and sides of cake; smooth top. │
|
||||||
|
│ Using pastry bag, pipe decorative border around top edge of cake, spooning more frosting into bag if necessary. │
|
||||||
|
│ Refrigerate cake on its cardboard. │
|
||||||
|
│ Keep all cakes refrigerated until frosting sets, about 2 hours. │
|
||||||
|
│ (Can be prepared 2 days ahead. │
|
||||||
|
│ Cover loosely; keep refrigerated.) │
|
||||||
|
│ Place 12-inch cake on platter on work surface. │
|
||||||
|
│ Press 1 wooden dowel straight down into and completely through center of cake. │
|
||||||
|
│ Mark dowel 1/4 inch above top of frosting. │
|
||||||
|
│ Remove dowel and cut with serrated knife at marked point. │
|
||||||
|
│ Cut 4 more dowels to same length. │
|
||||||
|
│ Press 1 cut dowel back into center of cake. │
|
||||||
|
│ Press remaining 4 cut dowels into cake, positioning 3 1/2 inches inward from cake edges and spacing evenly. │
|
||||||
|
│ Place 8-inch cake on its cardboard on work surface. │
|
||||||
|
│ Press 1 dowel straight down into and completely through center of cake. │
|
||||||
|
│ Mark dowel 1/4 inch above top of frosting. │
|
||||||
|
│ Remove dowel and cut with serrated knife at marked point. │
|
||||||
|
│ Cut 3 more dowels to same length. │
|
||||||
|
│ Press 1 cut dowel back into center of cake. │
|
||||||
|
│ Press remaining 3 cut dowels into cake, positioning 2 1/2 inches inward from edges and spacing evenly. │
|
||||||
|
│ Using large metal spatula as aid, place 8-inch cake on its cardboard atop dowels in 12-inch cake, centering carefully. │
|
||||||
|
│ Gently place 5-inch cake on its cardboard atop dowels in 8-inch cake, centering carefully. │
|
||||||
|
│ Using citrus stripper, cut long strips of orange peel from oranges. │
|
||||||
|
│ Cut strips into long segments. │
|
||||||
|
│ To make orange peel coils, wrap peel segment around handle of wooden spoon; gently slide peel off handle so that peel keeps coiled shape. │
|
||||||
|
│ Garnish cake with orange peel coils, ivy or mint sprigs, and some berries. │
|
||||||
|
│ (Assembled cake can be made up to 8 hours ahead. │
|
||||||
|
│ Let stand at cool room temperature.) │
|
||||||
|
│ Remove top and middle cake tiers. │
|
||||||
|
│ Remove dowels from cakes. │
|
||||||
|
│ Cut top and middle cakes into slices. │
|
||||||
|
│ To cut 12-inch cake: Starting 3 inches inward from edge and inserting knife straight down, cut through from top to bottom to make 6-inch-diameter circle in center of cake. │
|
||||||
|
│ Cut outer portion of cake into slices; cut inner portion into slices and serve with strawberries. │
|
||||||
|
└─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||||
|
|
||||||
|
126 rows in set. Elapsed: 0.011 sec. Processed 8.19 thousand rows, 5.34 MB (737.75 thousand rows/s., 480.59 MB/s.)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 在线 Playground
|
||||||
|
|
||||||
|
此数据集也可在 [在线 Playground](https://play.clickhouse.com/play?user=play#U0VMRUNUCiAgICBhcnJheUpvaW4oTkVSKSBBUyBrLAogICAgY291bnQoKSBBUyBjCkZST00gcmVjaXBlcwpHUk9VUCBCWSBrCk9SREVSIEJZIGMgREVTQwpMSU1JVCA1MA==) 中体验。
|
||||||
|
|
||||||
|
[原文链接](https://clickhouse.com/docs/en/getting-started/example-datasets/recipes/)
|
||||||
|
@ -1,10 +1,450 @@
|
|||||||
---
|
---
|
||||||
slug: /zh/getting-started/example-datasets/uk-price-paid
|
slug: /zh/getting-started/example-datasets/uk-price-paid
|
||||||
sidebar_label: UK Property Price Paid
|
sidebar_label: 英国房地产支付价格
|
||||||
sidebar_position: 1
|
sidebar_position: 1
|
||||||
title: "UK Property Price Paid"
|
title: "英国房地产支付价格"
|
||||||
---
|
---
|
||||||
|
|
||||||
import Content from '@site/docs/en/getting-started/example-datasets/uk-price-paid.md';
|
该数据集包含自 1995 年以来有关英格兰和威尔士房地产价格的数据。未压缩的大小约为 4 GiB,在 ClickHouse 中大约需要 278 MiB。
|
||||||
|
|
||||||
<Content />
|
来源:https://www.gov.uk/government/statistical-data-sets/price-paid-data-downloads
|
||||||
|
字段说明:https://www.gov.uk/guidance/about-the-price-data
|
||||||
|
|
||||||
|
包含 HM Land Registry data © Crown copyright and database right 2021.。此数据集需在 Open Government License v3.0 的许可下使用。
|
||||||
|
|
||||||
|
## 创建表 {#create-table}
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE uk_price_paid
|
||||||
|
(
|
||||||
|
price UInt32,
|
||||||
|
date Date,
|
||||||
|
postcode1 LowCardinality(String),
|
||||||
|
postcode2 LowCardinality(String),
|
||||||
|
type Enum8('terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4, 'other' = 0),
|
||||||
|
is_new UInt8,
|
||||||
|
duration Enum8('freehold' = 1, 'leasehold' = 2, 'unknown' = 0),
|
||||||
|
addr1 String,
|
||||||
|
addr2 String,
|
||||||
|
street LowCardinality(String),
|
||||||
|
locality LowCardinality(String),
|
||||||
|
town LowCardinality(String),
|
||||||
|
district LowCardinality(String),
|
||||||
|
county LowCardinality(String)
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree
|
||||||
|
ORDER BY (postcode1, postcode2, addr1, addr2);
|
||||||
|
```
|
||||||
|
|
||||||
|
## 预处理和插入数据 {#preprocess-import-data}
|
||||||
|
|
||||||
|
我们将使用 `url` 函数将数据流式传输到 ClickHouse。我们需要首先预处理一些传入的数据,其中包括:
|
||||||
|
|
||||||
|
- 将`postcode` 拆分为两个不同的列 - `postcode1` 和 `postcode2`,因为这更适合存储和查询
|
||||||
|
- 将`time` 字段转换为日期为它只包含 00:00 时间
|
||||||
|
- 忽略 [UUid](/docs/zh/sql-reference/data-types/uuid.md) 字段,因为我们不需要它进行分析
|
||||||
|
- 使用 [transform](/docs/zh/sql-reference/functions/other-functions.md#transform) 函数将 `Enum` 字段 `type` 和 `duration` 转换为更易读的 `Enum` 字段
|
||||||
|
- 将 `is_new` 字段从单字符串(` Y`/`N`) 到 [UInt8](/docs/zh/sql-reference/data-types/int-uint.md#uint8-uint16-uint32-uint64-uint256-int8-int16-int32-int64 -int128-int256) 字段为 0 或 1
|
||||||
|
- 删除最后两列,因为它们都具有相同的值(即 0)
|
||||||
|
|
||||||
|
`url` 函数将来自网络服务器的数据流式传输到 ClickHouse 表中。以下命令将 500 万行插入到 `uk_price_paid` 表中:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
INSERT INTO uk_price_paid
|
||||||
|
WITH
|
||||||
|
splitByChar(' ', postcode) AS p
|
||||||
|
SELECT
|
||||||
|
toUInt32(price_string) AS price,
|
||||||
|
parseDateTimeBestEffortUS(time) AS date,
|
||||||
|
p[1] AS postcode1,
|
||||||
|
p[2] AS postcode2,
|
||||||
|
transform(a, ['T', 'S', 'D', 'F', 'O'], ['terraced', 'semi-detached', 'detached', 'flat', 'other']) AS type,
|
||||||
|
b = 'Y' AS is_new,
|
||||||
|
transform(c, ['F', 'L', 'U'], ['freehold', 'leasehold', 'unknown']) AS duration,
|
||||||
|
addr1,
|
||||||
|
addr2,
|
||||||
|
street,
|
||||||
|
locality,
|
||||||
|
town,
|
||||||
|
district,
|
||||||
|
county
|
||||||
|
FROM url(
|
||||||
|
'http://prod.publicdata.landregistry.gov.uk.s3-website-eu-west-1.amazonaws.com/pp-complete.csv',
|
||||||
|
'CSV',
|
||||||
|
'uuid_string String,
|
||||||
|
price_string String,
|
||||||
|
time String,
|
||||||
|
postcode String,
|
||||||
|
a String,
|
||||||
|
b String,
|
||||||
|
c String,
|
||||||
|
addr1 String,
|
||||||
|
addr2 String,
|
||||||
|
street String,
|
||||||
|
locality String,
|
||||||
|
town String,
|
||||||
|
district String,
|
||||||
|
county String,
|
||||||
|
d String,
|
||||||
|
e String'
|
||||||
|
) SETTINGS max_http_get_redirects=10;
|
||||||
|
```
|
||||||
|
|
||||||
|
需要等待一两分钟以便数据插入,具体时间取决于网络速度。
|
||||||
|
|
||||||
|
## 验证数据 {#validate-data}
|
||||||
|
|
||||||
|
让我们通过查看插入了多少行来验证它是否有效:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT count()
|
||||||
|
FROM uk_price_paid
|
||||||
|
```
|
||||||
|
|
||||||
|
在执行此查询时,数据集有 27,450,499 行。让我们看看 ClickHouse 中表的大小是多少:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT formatReadableSize(total_bytes)
|
||||||
|
FROM system.tables
|
||||||
|
WHERE name = 'uk_price_paid'
|
||||||
|
```
|
||||||
|
|
||||||
|
请注意,表的大小仅为 221.43 MiB!
|
||||||
|
|
||||||
|
## 运行一些查询 {#run-queries}
|
||||||
|
|
||||||
|
让我们运行一些查询来分析数据:
|
||||||
|
|
||||||
|
### 查询 1. 每年平均价格 {#average-price}
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
toYear(date) AS year,
|
||||||
|
round(avg(price)) AS price,
|
||||||
|
bar(price, 0, 1000000, 80
|
||||||
|
)
|
||||||
|
FROM uk_price_paid
|
||||||
|
GROUP BY year
|
||||||
|
ORDER BY year
|
||||||
|
```
|
||||||
|
|
||||||
|
结果如下所示:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─year─┬──price─┬─bar(round(avg(price)), 0, 1000000, 80)─┐
|
||||||
|
│ 1995 │ 67934 │ █████▍ │
|
||||||
|
│ 1996 │ 71508 │ █████▋ │
|
||||||
|
│ 1997 │ 78536 │ ██████▎ │
|
||||||
|
│ 1998 │ 85441 │ ██████▋ │
|
||||||
|
│ 1999 │ 96038 │ ███████▋ │
|
||||||
|
│ 2000 │ 107487 │ ████████▌ │
|
||||||
|
│ 2001 │ 118888 │ █████████▌ │
|
||||||
|
│ 2002 │ 137948 │ ███████████ │
|
||||||
|
│ 2003 │ 155893 │ ████████████▍ │
|
||||||
|
│ 2004 │ 178888 │ ██████████████▎ │
|
||||||
|
│ 2005 │ 189359 │ ███████████████▏ │
|
||||||
|
│ 2006 │ 203532 │ ████████████████▎ │
|
||||||
|
│ 2007 │ 219375 │ █████████████████▌ │
|
||||||
|
│ 2008 │ 217056 │ █████████████████▎ │
|
||||||
|
│ 2009 │ 213419 │ █████████████████ │
|
||||||
|
│ 2010 │ 236110 │ ██████████████████▊ │
|
||||||
|
│ 2011 │ 232805 │ ██████████████████▌ │
|
||||||
|
│ 2012 │ 238381 │ ███████████████████ │
|
||||||
|
│ 2013 │ 256927 │ ████████████████████▌ │
|
||||||
|
│ 2014 │ 280008 │ ██████████████████████▍ │
|
||||||
|
│ 2015 │ 297263 │ ███████████████████████▋ │
|
||||||
|
│ 2016 │ 313518 │ █████████████████████████ │
|
||||||
|
│ 2017 │ 346371 │ ███████████████████████████▋ │
|
||||||
|
│ 2018 │ 350556 │ ████████████████████████████ │
|
||||||
|
│ 2019 │ 352184 │ ████████████████████████████▏ │
|
||||||
|
│ 2020 │ 375808 │ ██████████████████████████████ │
|
||||||
|
│ 2021 │ 381105 │ ██████████████████████████████▍ │
|
||||||
|
│ 2022 │ 362572 │ █████████████████████████████ │
|
||||||
|
└──────┴────────┴────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### 查询 2. 伦敦每年的平均价格 {#average-price-london}
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
toYear(date) AS year,
|
||||||
|
round(avg(price)) AS price,
|
||||||
|
bar(price, 0, 2000000, 100
|
||||||
|
)
|
||||||
|
FROM uk_price_paid
|
||||||
|
WHERE town = 'LONDON'
|
||||||
|
GROUP BY year
|
||||||
|
ORDER BY year
|
||||||
|
```
|
||||||
|
|
||||||
|
结果如下所示:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─year─┬───price─┬─bar(round(avg(price)), 0, 2000000, 100)───────────────┐
|
||||||
|
│ 1995 │ 109110 │ █████▍ │
|
||||||
|
│ 1996 │ 118659 │ █████▊ │
|
||||||
|
│ 1997 │ 136526 │ ██████▋ │
|
||||||
|
│ 1998 │ 153002 │ ███████▋ │
|
||||||
|
│ 1999 │ 180633 │ █████████ │
|
||||||
|
│ 2000 │ 215849 │ ██████████▋ │
|
||||||
|
│ 2001 │ 232987 │ ███████████▋ │
|
||||||
|
│ 2002 │ 263668 │ █████████████▏ │
|
||||||
|
│ 2003 │ 278424 │ █████████████▊ │
|
||||||
|
│ 2004 │ 304664 │ ███████████████▏ │
|
||||||
|
│ 2005 │ 322887 │ ████████████████▏ │
|
||||||
|
│ 2006 │ 356195 │ █████████████████▋ │
|
||||||
|
│ 2007 │ 404062 │ ████████████████████▏ │
|
||||||
|
│ 2008 │ 420741 │ █████████████████████ │
|
||||||
|
│ 2009 │ 427754 │ █████████████████████▍ │
|
||||||
|
│ 2010 │ 480322 │ ████████████████████████ │
|
||||||
|
│ 2011 │ 496278 │ ████████████████████████▋ │
|
||||||
|
│ 2012 │ 519482 │ █████████████████████████▊ │
|
||||||
|
│ 2013 │ 616195 │ ██████████████████████████████▋ │
|
||||||
|
│ 2014 │ 724121 │ ████████████████████████████████████▏ │
|
||||||
|
│ 2015 │ 792101 │ ███████████████████████████████████████▌ │
|
||||||
|
│ 2016 │ 843589 │ ██████████████████████████████████████████▏ │
|
||||||
|
│ 2017 │ 983523 │ █████████████████████████████████████████████████▏ │
|
||||||
|
│ 2018 │ 1016753 │ ██████████████████████████████████████████████████▋ │
|
||||||
|
│ 2019 │ 1041673 │ ████████████████████████████████████████████████████ │
|
||||||
|
│ 2020 │ 1060027 │ █████████████████████████████████████████████████████ │
|
||||||
|
│ 2021 │ 958249 │ ███████████████████████████████████████████████▊ │
|
||||||
|
│ 2022 │ 902596 │ █████████████████████████████████████████████▏ │
|
||||||
|
└──────┴─────────┴───────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
2020 年房价出事了!但这并不令人意外……
|
||||||
|
|
||||||
|
### 查询 3. 最昂贵的社区 {#most-expensive-neighborhoods}
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
town,
|
||||||
|
district,
|
||||||
|
count() AS c,
|
||||||
|
round(avg(price)) AS price,
|
||||||
|
bar(price, 0, 5000000, 100)
|
||||||
|
FROM uk_price_paid
|
||||||
|
WHERE date >= '2020-01-01'
|
||||||
|
GROUP BY
|
||||||
|
town,
|
||||||
|
district
|
||||||
|
HAVING c >= 100
|
||||||
|
ORDER BY price DESC
|
||||||
|
LIMIT 100
|
||||||
|
```
|
||||||
|
|
||||||
|
结果如下所示:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─town─────────────────┬─district───────────────┬─────c─┬───price─┬─bar(round(avg(price)), 0, 5000000, 100)─────────────────────────┐
|
||||||
|
│ LONDON │ CITY OF LONDON │ 578 │ 3149590 │ ██████████████████████████████████████████████████████████████▊ │
|
||||||
|
│ LONDON │ CITY OF WESTMINSTER │ 7083 │ 2903794 │ ██████████████████████████████████████████████████████████ │
|
||||||
|
│ LONDON │ KENSINGTON AND CHELSEA │ 4986 │ 2333782 │ ██████████████████████████████████████████████▋ │
|
||||||
|
│ LEATHERHEAD │ ELMBRIDGE │ 203 │ 2071595 │ █████████████████████████████████████████▍ │
|
||||||
|
│ VIRGINIA WATER │ RUNNYMEDE │ 308 │ 1939465 │ ██████████████████████████████████████▋ │
|
||||||
|
│ LONDON │ CAMDEN │ 5750 │ 1673687 │ █████████████████████████████████▍ │
|
||||||
|
│ WINDLESHAM │ SURREY HEATH │ 182 │ 1428358 │ ████████████████████████████▌ │
|
||||||
|
│ NORTHWOOD │ THREE RIVERS │ 112 │ 1404170 │ ████████████████████████████ │
|
||||||
|
│ BARNET │ ENFIELD │ 259 │ 1338299 │ ██████████████████████████▋ │
|
||||||
|
│ LONDON │ ISLINGTON │ 5504 │ 1275520 │ █████████████████████████▌ │
|
||||||
|
│ LONDON │ RICHMOND UPON THAMES │ 1345 │ 1261935 │ █████████████████████████▏ │
|
||||||
|
│ COBHAM │ ELMBRIDGE │ 727 │ 1251403 │ █████████████████████████ │
|
||||||
|
│ BEACONSFIELD │ BUCKINGHAMSHIRE │ 680 │ 1199970 │ ███████████████████████▊ │
|
||||||
|
│ LONDON │ TOWER HAMLETS │ 10012 │ 1157827 │ ███████████████████████▏ │
|
||||||
|
│ LONDON │ HOUNSLOW │ 1278 │ 1144389 │ ██████████████████████▊ │
|
||||||
|
│ BURFORD │ WEST OXFORDSHIRE │ 182 │ 1139393 │ ██████████████████████▋ │
|
||||||
|
│ RICHMOND │ RICHMOND UPON THAMES │ 1649 │ 1130076 │ ██████████████████████▌ │
|
||||||
|
│ KINGSTON UPON THAMES │ RICHMOND UPON THAMES │ 147 │ 1126111 │ ██████████████████████▌ │
|
||||||
|
│ ASCOT │ WINDSOR AND MAIDENHEAD │ 773 │ 1106109 │ ██████████████████████ │
|
||||||
|
│ LONDON │ HAMMERSMITH AND FULHAM │ 6162 │ 1056198 │ █████████████████████ │
|
||||||
|
│ RADLETT │ HERTSMERE │ 513 │ 1045758 │ ████████████████████▊ │
|
||||||
|
│ LEATHERHEAD │ GUILDFORD │ 354 │ 1045175 │ ████████████████████▊ │
|
||||||
|
│ WEYBRIDGE │ ELMBRIDGE │ 1275 │ 1036702 │ ████████████████████▋ │
|
||||||
|
│ FARNHAM │ EAST HAMPSHIRE │ 107 │ 1033682 │ ████████████████████▋ │
|
||||||
|
│ ESHER │ ELMBRIDGE │ 915 │ 1032753 │ ████████████████████▋ │
|
||||||
|
│ FARNHAM │ HART │ 102 │ 1002692 │ ████████████████████ │
|
||||||
|
│ GERRARDS CROSS │ BUCKINGHAMSHIRE │ 845 │ 983639 │ ███████████████████▋ │
|
||||||
|
│ CHALFONT ST GILES │ BUCKINGHAMSHIRE │ 286 │ 973993 │ ███████████████████▍ │
|
||||||
|
│ SALCOMBE │ SOUTH HAMS │ 215 │ 965724 │ ███████████████████▎ │
|
||||||
|
│ SURBITON │ ELMBRIDGE │ 181 │ 960346 │ ███████████████████▏ │
|
||||||
|
│ BROCKENHURST │ NEW FOREST │ 226 │ 951278 │ ███████████████████ │
|
||||||
|
│ SUTTON COLDFIELD │ LICHFIELD │ 110 │ 930757 │ ██████████████████▌ │
|
||||||
|
│ EAST MOLESEY │ ELMBRIDGE │ 372 │ 927026 │ ██████████████████▌ │
|
||||||
|
│ LLANGOLLEN │ WREXHAM │ 127 │ 925681 │ ██████████████████▌ │
|
||||||
|
│ OXFORD │ SOUTH OXFORDSHIRE │ 638 │ 923830 │ ██████████████████▍ │
|
||||||
|
│ LONDON │ MERTON │ 4383 │ 923194 │ ██████████████████▍ │
|
||||||
|
│ GUILDFORD │ WAVERLEY │ 261 │ 905733 │ ██████████████████ │
|
||||||
|
│ TEDDINGTON │ RICHMOND UPON THAMES │ 1147 │ 894856 │ █████████████████▊ │
|
||||||
|
│ HARPENDEN │ ST ALBANS │ 1271 │ 893079 │ █████████████████▋ │
|
||||||
|
│ HENLEY-ON-THAMES │ SOUTH OXFORDSHIRE │ 1042 │ 887557 │ █████████████████▋ │
|
||||||
|
│ POTTERS BAR │ WELWYN HATFIELD │ 314 │ 863037 │ █████████████████▎ │
|
||||||
|
│ LONDON │ WANDSWORTH │ 13210 │ 857318 │ █████████████████▏ │
|
||||||
|
│ BILLINGSHURST │ CHICHESTER │ 255 │ 856508 │ █████████████████▏ │
|
||||||
|
│ LONDON │ SOUTHWARK │ 7742 │ 843145 │ ████████████████▋ │
|
||||||
|
│ LONDON │ HACKNEY │ 6656 │ 839716 │ ████████████████▋ │
|
||||||
|
│ LUTTERWORTH │ HARBOROUGH │ 1096 │ 836546 │ ████████████████▋ │
|
||||||
|
│ KINGSTON UPON THAMES │ KINGSTON UPON THAMES │ 1846 │ 828990 │ ████████████████▌ │
|
||||||
|
│ LONDON │ EALING │ 5583 │ 820135 │ ████████████████▍ │
|
||||||
|
│ INGATESTONE │ CHELMSFORD │ 120 │ 815379 │ ████████████████▎ │
|
||||||
|
│ MARLOW │ BUCKINGHAMSHIRE │ 718 │ 809943 │ ████████████████▏ │
|
||||||
|
│ EAST GRINSTEAD │ TANDRIDGE │ 105 │ 809461 │ ████████████████▏ │
|
||||||
|
│ CHIGWELL │ EPPING FOREST │ 484 │ 809338 │ ████████████████▏ │
|
||||||
|
│ EGHAM │ RUNNYMEDE │ 989 │ 807858 │ ████████████████▏ │
|
||||||
|
│ HASLEMERE │ CHICHESTER │ 223 │ 804173 │ ████████████████ │
|
||||||
|
│ PETWORTH │ CHICHESTER │ 288 │ 803206 │ ████████████████ │
|
||||||
|
│ TWICKENHAM │ RICHMOND UPON THAMES │ 2194 │ 802616 │ ████████████████ │
|
||||||
|
│ WEMBLEY │ BRENT │ 1698 │ 801733 │ ████████████████ │
|
||||||
|
│ HINDHEAD │ WAVERLEY │ 233 │ 801482 │ ████████████████ │
|
||||||
|
│ LONDON │ BARNET │ 8083 │ 792066 │ ███████████████▋ │
|
||||||
|
│ WOKING │ GUILDFORD │ 343 │ 789360 │ ███████████████▋ │
|
||||||
|
│ STOCKBRIDGE │ TEST VALLEY │ 318 │ 777909 │ ███████████████▌ │
|
||||||
|
│ BERKHAMSTED │ DACORUM │ 1049 │ 776138 │ ███████████████▌ │
|
||||||
|
│ MAIDENHEAD │ BUCKINGHAMSHIRE │ 236 │ 775572 │ ███████████████▌ │
|
||||||
|
│ SOLIHULL │ STRATFORD-ON-AVON │ 142 │ 770727 │ ███████████████▍ │
|
||||||
|
│ GREAT MISSENDEN │ BUCKINGHAMSHIRE │ 431 │ 764493 │ ███████████████▎ │
|
||||||
|
│ TADWORTH │ REIGATE AND BANSTEAD │ 920 │ 757511 │ ███████████████▏ │
|
||||||
|
│ LONDON │ BRENT │ 4124 │ 757194 │ ███████████████▏ │
|
||||||
|
│ THAMES DITTON │ ELMBRIDGE │ 470 │ 750828 │ ███████████████ │
|
||||||
|
│ LONDON │ LAMBETH │ 10431 │ 750532 │ ███████████████ │
|
||||||
|
│ RICKMANSWORTH │ THREE RIVERS │ 1500 │ 747029 │ ██████████████▊ │
|
||||||
|
│ KINGS LANGLEY │ DACORUM │ 281 │ 746536 │ ██████████████▊ │
|
||||||
|
│ HARLOW │ EPPING FOREST │ 172 │ 739423 │ ██████████████▋ │
|
||||||
|
│ TONBRIDGE │ SEVENOAKS │ 103 │ 738740 │ ██████████████▋ │
|
||||||
|
│ BELVEDERE │ BEXLEY │ 686 │ 736385 │ ██████████████▋ │
|
||||||
|
│ CRANBROOK │ TUNBRIDGE WELLS │ 769 │ 734328 │ ██████████████▋ │
|
||||||
|
│ SOLIHULL │ WARWICK │ 116 │ 733286 │ ██████████████▋ │
|
||||||
|
│ ALDERLEY EDGE │ CHESHIRE EAST │ 357 │ 732882 │ ██████████████▋ │
|
||||||
|
│ WELWYN │ WELWYN HATFIELD │ 404 │ 730281 │ ██████████████▌ │
|
||||||
|
│ CHISLEHURST │ BROMLEY │ 870 │ 730279 │ ██████████████▌ │
|
||||||
|
│ LONDON │ HARINGEY │ 6488 │ 726715 │ ██████████████▌ │
|
||||||
|
│ AMERSHAM │ BUCKINGHAMSHIRE │ 965 │ 725426 │ ██████████████▌ │
|
||||||
|
│ SEVENOAKS │ SEVENOAKS │ 2183 │ 725102 │ ██████████████▌ │
|
||||||
|
│ BOURNE END │ BUCKINGHAMSHIRE │ 269 │ 724595 │ ██████████████▍ │
|
||||||
|
│ NORTHWOOD │ HILLINGDON │ 568 │ 722436 │ ██████████████▍ │
|
||||||
|
│ PURFLEET │ THURROCK │ 143 │ 722205 │ ██████████████▍ │
|
||||||
|
│ SLOUGH │ BUCKINGHAMSHIRE │ 832 │ 721529 │ ██████████████▍ │
|
||||||
|
│ INGATESTONE │ BRENTWOOD │ 301 │ 718292 │ ██████████████▎ │
|
||||||
|
│ EPSOM │ REIGATE AND BANSTEAD │ 315 │ 709264 │ ██████████████▏ │
|
||||||
|
│ ASHTEAD │ MOLE VALLEY │ 524 │ 708646 │ ██████████████▏ │
|
||||||
|
│ BETCHWORTH │ MOLE VALLEY │ 155 │ 708525 │ ██████████████▏ │
|
||||||
|
│ OXTED │ TANDRIDGE │ 645 │ 706946 │ ██████████████▏ │
|
||||||
|
│ READING │ SOUTH OXFORDSHIRE │ 593 │ 705466 │ ██████████████ │
|
||||||
|
│ FELTHAM │ HOUNSLOW │ 1536 │ 703815 │ ██████████████ │
|
||||||
|
│ TUNBRIDGE WELLS │ WEALDEN │ 207 │ 703296 │ ██████████████ │
|
||||||
|
│ LEWES │ WEALDEN │ 116 │ 701349 │ ██████████████ │
|
||||||
|
│ OXFORD │ OXFORD │ 3656 │ 700813 │ ██████████████ │
|
||||||
|
│ MAYFIELD │ WEALDEN │ 177 │ 698158 │ █████████████▊ │
|
||||||
|
│ PINNER │ HARROW │ 997 │ 697876 │ █████████████▊ │
|
||||||
|
│ LECHLADE │ COTSWOLD │ 155 │ 696262 │ █████████████▊ │
|
||||||
|
│ WALTON-ON-THAMES │ ELMBRIDGE │ 1850 │ 690102 │ █████████████▋ │
|
||||||
|
└──────────────────────┴────────────────────────┴───────┴─────────┴─────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## 使用 Projection 加速查询 {#speedup-with-projections}
|
||||||
|
|
||||||
|
[Projections](/docs/zh/sql-reference/statements/alter/projection.mdx) 允许我们通过存储任意格式的预先聚合的数据来提高查询速度。在此示例中,我们创建了一个按年份、地区和城镇分组的房产的平均价格、总价格和数量的 Projection。在执行时,如果 ClickHouse 认为 Projection 可以提高查询的性能,它将使用 Projection(何时使用由 ClickHouse 决定)。
|
||||||
|
|
||||||
|
### 构建投影{#build-projection}
|
||||||
|
|
||||||
|
让我们通过维度 `toYear(date)`、`district` 和 `town` 创建一个聚合 Projection:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ALTER TABLE uk_price_paid
|
||||||
|
ADD PROJECTION projection_by_year_district_town
|
||||||
|
(
|
||||||
|
SELECT
|
||||||
|
toYear(date),
|
||||||
|
district,
|
||||||
|
town,
|
||||||
|
avg(price),
|
||||||
|
sum(price),
|
||||||
|
count()
|
||||||
|
GROUP BY
|
||||||
|
toYear(date),
|
||||||
|
district,
|
||||||
|
town
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
填充现有数据的 Projection。 (如果不进行 materialize 操作,则 ClickHouse 只会为新插入的数据创建 Projection):
|
||||||
|
|
||||||
|
```sql
|
||||||
|
ALTER TABLE uk_price_paid
|
||||||
|
MATERIALIZE PROJECTION projection_by_year_district_town
|
||||||
|
SETTINGS mutations_sync = 1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test Performance {#test-performance}
|
||||||
|
|
||||||
|
让我们再次运行相同的 3 个查询:
|
||||||
|
|
||||||
|
### 查询 1. 每年平均价格 {#average-price-projections}
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
toYear(date) AS year,
|
||||||
|
round(avg(price)) AS price,
|
||||||
|
bar(price, 0, 1000000, 80)
|
||||||
|
FROM uk_price_paid
|
||||||
|
GROUP BY year
|
||||||
|
ORDER BY year ASC
|
||||||
|
```
|
||||||
|
|
||||||
|
结果是一样的,但是性能更好!
|
||||||
|
```response
|
||||||
|
No projection: 28 rows in set. Elapsed: 1.775 sec. Processed 27.45 million rows, 164.70 MB (15.47 million rows/s., 92.79 MB/s.)
|
||||||
|
With projection: 28 rows in set. Elapsed: 0.665 sec. Processed 87.51 thousand rows, 3.21 MB (131.51 thousand rows/s., 4.82 MB/s.)
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### 查询 2. 伦敦每年的平均价格 {#average-price-london-projections}
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
toYear(date) AS year,
|
||||||
|
round(avg(price)) AS price,
|
||||||
|
bar(price, 0, 2000000, 100)
|
||||||
|
FROM uk_price_paid
|
||||||
|
WHERE town = 'LONDON'
|
||||||
|
GROUP BY year
|
||||||
|
ORDER BY year ASC
|
||||||
|
```
|
||||||
|
|
||||||
|
Same result, but notice the improvement in query performance:
|
||||||
|
|
||||||
|
```response
|
||||||
|
No projection: 28 rows in set. Elapsed: 0.720 sec. Processed 27.45 million rows, 46.61 MB (38.13 million rows/s., 64.74 MB/s.)
|
||||||
|
With projection: 28 rows in set. Elapsed: 0.015 sec. Processed 87.51 thousand rows, 3.51 MB (5.74 million rows/s., 230.24 MB/s.)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 查询 3. 最昂贵的社区 {#most-expensive-neighborhoods-projections}
|
||||||
|
|
||||||
|
注意:需要修改 (date >= '2020-01-01') 以使其与 Projection 定义的维度 (`toYear(date) >= 2020)` 匹配:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
town,
|
||||||
|
district,
|
||||||
|
count() AS c,
|
||||||
|
round(avg(price)) AS price,
|
||||||
|
bar(price, 0, 5000000, 100)
|
||||||
|
FROM uk_price_paid
|
||||||
|
WHERE toYear(date) >= 2020
|
||||||
|
GROUP BY
|
||||||
|
town,
|
||||||
|
district
|
||||||
|
HAVING c >= 100
|
||||||
|
ORDER BY price DESC
|
||||||
|
LIMIT 100
|
||||||
|
```
|
||||||
|
|
||||||
|
同样,结果是相同的,但请注意查询性能的改进:
|
||||||
|
|
||||||
|
```response
|
||||||
|
No projection: 100 rows in set. Elapsed: 0.928 sec. Processed 27.45 million rows, 103.80 MB (29.56 million rows/s., 111.80 MB/s.)
|
||||||
|
With projection: 100 rows in set. Elapsed: 0.336 sec. Processed 17.32 thousand rows, 1.23 MB (51.61 thousand rows/s., 3.65 MB/s.)
|
||||||
|
```
|
||||||
|
|
||||||
|
### 在 Playground 上测试{#playground}
|
||||||
|
|
||||||
|
也可以在 [Online Playground](https://play.clickhouse.com/play?user=play#U0VMRUNUIHRvd24sIGRpc3RyaWN0LCBjb3VudCgpIEFTIGMsIHJvdW5kKGF2ZyhwcmljZSkpIEFTIHByaWNlLCBiYXIocHJpY2UsIDAsIDUwMDAwMDAsIDEwMCkgRlJPTSB1a19wcmljZV9wYWlkIFdIRVJFIGRhdGUgPj0gJzIwMjAtMDEtMDEnIEdST1VQIEJZIHRvd24sIGRpc3RyaWN0IEhBVklORyBjID49IDEwMCBPUkRFUiBCWSBwcmljZSBERVNDIExJTUlUIDEwMA==) 上找到此数据集。
|
||||||
|
@ -35,6 +35,9 @@ Yandex**没有**维护下面列出的库,也没有做过任何广泛的测试
|
|||||||
- NodeJs
|
- NodeJs
|
||||||
- [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
|
- [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
|
||||||
- [node-clickhouse](https://github.com/apla/node-clickhouse)
|
- [node-clickhouse](https://github.com/apla/node-clickhouse)
|
||||||
|
- [nestjs-clickhouse](https://github.com/depyronick/nestjs-clickhouse)
|
||||||
|
- [clickhouse-client](https://github.com/depyronick/clickhouse-client)
|
||||||
|
- [node-clickhouse-orm](https://github.com/zimv/node-clickhouse-orm)
|
||||||
- Perl
|
- Perl
|
||||||
- [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
|
- [perl-DBD-ClickHouse](https://github.com/elcamlost/perl-DBD-ClickHouse)
|
||||||
- [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
|
- [HTTP-ClickHouse](https://metacpan.org/release/HTTP-ClickHouse)
|
||||||
|
@ -3,7 +3,7 @@ slug: /zh/sql-reference/data-types/date
|
|||||||
---
|
---
|
||||||
# 日期 {#date}
|
# 日期 {#date}
|
||||||
|
|
||||||
日期类型,用两个字节存储,表示从 1970-01-01 (无符号) 到当前的日期值。允许存储从 Unix 纪元开始到编译阶段定义的上限阈值常量(目前上限是2149年,但最终完全支持的年份为2148)。最小值输出为1970-01-01。
|
日期类型,用两个字节存储,表示从 1970-01-01 (无符号) 到当前的日期值。允许存储从 Unix 纪元开始到编译阶段定义的上限阈值常量(目前上限是2106年,但最终完全支持的年份为2105)。最小值输出为1970-01-01。
|
||||||
|
|
||||||
值的范围: \[1970-01-01, 2149-06-06\]。
|
值的范围: \[1970-01-01, 2149-06-06\]。
|
||||||
|
|
||||||
|
@ -237,7 +237,7 @@ ORDER BY c DESC
|
|||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SELECT
|
SELECT
|
||||||
transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s,
|
transform(domain(Referer), ['yandex.ru', 'google.ru', 'vkontakte.ru'], ['www.yandex', 'example.com', 'vk.com']) AS s,
|
||||||
count() AS c
|
count() AS c
|
||||||
FROM test.hits
|
FROM test.hits
|
||||||
GROUP BY domain(Referer)
|
GROUP BY domain(Referer)
|
||||||
|
@ -26,8 +26,10 @@ SOURCE=${SOURCE:-$PKG_ROOT}
|
|||||||
HELP="${0} [--test] [--rpm] [-h|--help]
|
HELP="${0} [--test] [--rpm] [-h|--help]
|
||||||
--test - adds '+test' prefix to version
|
--test - adds '+test' prefix to version
|
||||||
--apk - build APK packages
|
--apk - build APK packages
|
||||||
|
--archlinux - build archlinux packages
|
||||||
--rpm - build RPM packages
|
--rpm - build RPM packages
|
||||||
--tgz - build tarball package
|
--tgz - build tarball package
|
||||||
|
--deb - build deb package
|
||||||
--help - show this help and exit
|
--help - show this help and exit
|
||||||
|
|
||||||
Used envs:
|
Used envs:
|
||||||
@ -47,16 +49,21 @@ fi
|
|||||||
export CLICKHOUSE_VERSION_STRING
|
export CLICKHOUSE_VERSION_STRING
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
while [[ $1 == --* ]]
|
while [[ $1 == --* ]]
|
||||||
do
|
do
|
||||||
case "$1" in
|
case "$1" in
|
||||||
--test )
|
--test )
|
||||||
VERSION_POSTFIX+='+test'
|
VERSION_POSTFIX+='+test'
|
||||||
shift ;;
|
shift ;;
|
||||||
|
--deb )
|
||||||
|
MAKE_DEB=1
|
||||||
|
shift ;;
|
||||||
--apk )
|
--apk )
|
||||||
MAKE_APK=1
|
MAKE_APK=1
|
||||||
shift ;;
|
shift ;;
|
||||||
|
--archlinux )
|
||||||
|
MAKE_ARCHLINUX=1
|
||||||
|
shift ;;
|
||||||
--rpm )
|
--rpm )
|
||||||
MAKE_RPM=1
|
MAKE_RPM=1
|
||||||
shift ;;
|
shift ;;
|
||||||
@ -131,18 +138,24 @@ CLICKHOUSE_VERSION_STRING+=$VERSION_POSTFIX
|
|||||||
echo -e "\nCurrent version is $CLICKHOUSE_VERSION_STRING"
|
echo -e "\nCurrent version is $CLICKHOUSE_VERSION_STRING"
|
||||||
|
|
||||||
for config in clickhouse*.yaml; do
|
for config in clickhouse*.yaml; do
|
||||||
echo "Building deb package for $config"
|
if [ -n "$MAKE_DEB" ] || [ -n "$MAKE_TGZ" ]; then
|
||||||
|
echo "Building deb package for $config"
|
||||||
|
|
||||||
# Preserve package path
|
# Preserve package path
|
||||||
exec 9>&1
|
exec 9>&1
|
||||||
PKG_PATH=$(nfpm package --target "$OUTPUT_DIR" --config "$config" --packager deb | tee /dev/fd/9)
|
PKG_PATH=$(nfpm package --target "$OUTPUT_DIR" --config "$config" --packager deb | tee /dev/fd/9)
|
||||||
PKG_PATH=${PKG_PATH##*created package: }
|
PKG_PATH=${PKG_PATH##*created package: }
|
||||||
exec 9>&-
|
exec 9>&-
|
||||||
|
fi
|
||||||
|
|
||||||
if [ -n "$MAKE_APK" ]; then
|
if [ -n "$MAKE_APK" ]; then
|
||||||
echo "Building apk package for $config"
|
echo "Building apk package for $config"
|
||||||
nfpm package --target "$OUTPUT_DIR" --config "$config" --packager apk
|
nfpm package --target "$OUTPUT_DIR" --config "$config" --packager apk
|
||||||
fi
|
fi
|
||||||
|
if [ -n "$MAKE_ARCHLINUX" ]; then
|
||||||
|
echo "Building archlinux package for $config"
|
||||||
|
nfpm package --target "$OUTPUT_DIR" --config "$config" --packager archlinux
|
||||||
|
fi
|
||||||
if [ -n "$MAKE_RPM" ]; then
|
if [ -n "$MAKE_RPM" ]; then
|
||||||
echo "Building rpm package for $config"
|
echo "Building rpm package for $config"
|
||||||
nfpm package --target "$OUTPUT_DIR" --config "$config" --packager rpm
|
nfpm package --target "$OUTPUT_DIR" --config "$config" --packager rpm
|
||||||
|
@ -27,8 +27,8 @@ deb:
|
|||||||
Source: clickhouse
|
Source: clickhouse
|
||||||
|
|
||||||
contents:
|
contents:
|
||||||
- src: root/etc/clickhouse-keeper
|
- src: root/etc/clickhouse-keeper/keeper_config.xml
|
||||||
dst: /etc/clickhouse-keeper
|
dst: /etc/clickhouse-keeper/keeper_config.xml
|
||||||
type: config
|
type: config
|
||||||
- src: root/usr/bin/clickhouse-keeper
|
- src: root/usr/bin/clickhouse-keeper
|
||||||
dst: /usr/bin/clickhouse-keeper
|
dst: /usr/bin/clickhouse-keeper
|
||||||
|
@ -42,8 +42,11 @@ deb:
|
|||||||
Source: clickhouse
|
Source: clickhouse
|
||||||
|
|
||||||
contents:
|
contents:
|
||||||
- src: root/etc/clickhouse-server
|
- src: root/etc/clickhouse-server/config.xml
|
||||||
dst: /etc/clickhouse-server
|
dst: /etc/clickhouse-server/config.xml
|
||||||
|
type: config
|
||||||
|
- src: root/etc/clickhouse-server/users.xml
|
||||||
|
dst: /etc/clickhouse-server/users.xml
|
||||||
type: config
|
type: config
|
||||||
- src: clickhouse-server.init
|
- src: clickhouse-server.init
|
||||||
dst: /etc/init.d/clickhouse-server
|
dst: /etc/init.d/clickhouse-server
|
||||||
|
@ -1108,15 +1108,21 @@ void Client::processConfig()
|
|||||||
else
|
else
|
||||||
format = config().getString("format", is_interactive ? "PrettyCompact" : "TabSeparated");
|
format = config().getString("format", is_interactive ? "PrettyCompact" : "TabSeparated");
|
||||||
|
|
||||||
format_max_block_size = config().getInt("format_max_block_size", global_context->getSettingsRef().max_block_size);
|
format_max_block_size = config().getUInt64("format_max_block_size",
|
||||||
|
global_context->getSettingsRef().max_block_size);
|
||||||
|
|
||||||
insert_format = "Values";
|
insert_format = "Values";
|
||||||
|
|
||||||
/// Setting value from cmd arg overrides one from config
|
/// Setting value from cmd arg overrides one from config
|
||||||
if (global_context->getSettingsRef().max_insert_block_size.changed)
|
if (global_context->getSettingsRef().max_insert_block_size.changed)
|
||||||
|
{
|
||||||
insert_format_max_block_size = global_context->getSettingsRef().max_insert_block_size;
|
insert_format_max_block_size = global_context->getSettingsRef().max_insert_block_size;
|
||||||
|
}
|
||||||
else
|
else
|
||||||
insert_format_max_block_size = config().getInt("insert_format_max_block_size", global_context->getSettingsRef().max_insert_block_size);
|
{
|
||||||
|
insert_format_max_block_size = config().getUInt64("insert_format_max_block_size",
|
||||||
|
global_context->getSettingsRef().max_insert_block_size);
|
||||||
|
}
|
||||||
|
|
||||||
ClientInfo & client_info = global_context->getClientInfo();
|
ClientInfo & client_info = global_context->getClientInfo();
|
||||||
client_info.setInitialQuery();
|
client_info.setInitialQuery();
|
||||||
|
@ -19,7 +19,6 @@
|
|||||||
{host}
|
{host}
|
||||||
{port}
|
{port}
|
||||||
{user}
|
{user}
|
||||||
{database}
|
|
||||||
{display_name}
|
{display_name}
|
||||||
Terminal colors: https://misc.flogisoft.com/bash/tip_colors_and_formatting
|
Terminal colors: https://misc.flogisoft.com/bash/tip_colors_and_formatting
|
||||||
See also: https://wiki.hackzine.org/development/misc/readline-color-prompt.html
|
See also: https://wiki.hackzine.org/development/misc/readline-color-prompt.html
|
||||||
|
@ -1,6 +1,10 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <Interpreters/Cluster.h>
|
#include <base/types.h>
|
||||||
|
|
||||||
|
#include <Poco/Util/AbstractConfiguration.h>
|
||||||
|
|
||||||
|
#include <utility>
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
@ -8,21 +12,4 @@ namespace DB
|
|||||||
|
|
||||||
using DatabaseAndTableName = std::pair<String, String>;
|
using DatabaseAndTableName = std::pair<String, String>;
|
||||||
using ListOfDatabasesAndTableNames = std::vector<DatabaseAndTableName>;
|
using ListOfDatabasesAndTableNames = std::vector<DatabaseAndTableName>;
|
||||||
|
|
||||||
/// Hierarchical description of the tasks
|
|
||||||
struct ShardPartitionPiece;
|
|
||||||
struct ShardPartition;
|
|
||||||
struct TaskShard;
|
|
||||||
struct TaskTable;
|
|
||||||
struct TaskCluster;
|
|
||||||
struct ClusterPartition;
|
|
||||||
|
|
||||||
using PartitionPieces = std::vector<ShardPartitionPiece>;
|
|
||||||
using TasksPartition = std::map<String, ShardPartition, std::greater<>>;
|
|
||||||
using ShardInfo = Cluster::ShardInfo;
|
|
||||||
using TaskShardPtr = std::shared_ptr<TaskShard>;
|
|
||||||
using TasksShard = std::vector<TaskShardPtr>;
|
|
||||||
using TasksTable = std::list<TaskTable>;
|
|
||||||
using ClusterPartitions = std::map<String, ClusterPartition, std::greater<>>;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,7 +1,13 @@
|
|||||||
set(CLICKHOUSE_COPIER_SOURCES
|
set(CLICKHOUSE_COPIER_SOURCES
|
||||||
"${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopierApp.cpp"
|
"${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopierApp.cpp"
|
||||||
"${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopier.cpp"
|
"${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopier.cpp"
|
||||||
"${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp")
|
"${CMAKE_CURRENT_SOURCE_DIR}/Internals.cpp"
|
||||||
|
"${CMAKE_CURRENT_SOURCE_DIR}/ShardPartition.cpp"
|
||||||
|
"${CMAKE_CURRENT_SOURCE_DIR}/ShardPartitionPiece.cpp"
|
||||||
|
"${CMAKE_CURRENT_SOURCE_DIR}/StatusAccumulator.cpp"
|
||||||
|
"${CMAKE_CURRENT_SOURCE_DIR}/TaskCluster.cpp"
|
||||||
|
"${CMAKE_CURRENT_SOURCE_DIR}/TaskShard.cpp"
|
||||||
|
"${CMAKE_CURRENT_SOURCE_DIR}/TaskTable.cpp")
|
||||||
|
|
||||||
set (CLICKHOUSE_COPIER_LINK
|
set (CLICKHOUSE_COPIER_LINK
|
||||||
PRIVATE
|
PRIVATE
|
||||||
|
@ -3,7 +3,8 @@
|
|||||||
#include "Aliases.h"
|
#include "Aliases.h"
|
||||||
#include "Internals.h"
|
#include "Internals.h"
|
||||||
#include "TaskCluster.h"
|
#include "TaskCluster.h"
|
||||||
#include "TaskTableAndShard.h"
|
#include "TaskShard.h"
|
||||||
|
#include "TaskTable.h"
|
||||||
#include "ShardPartition.h"
|
#include "ShardPartition.h"
|
||||||
#include "ShardPartitionPiece.h"
|
#include "ShardPartitionPiece.h"
|
||||||
#include "ZooKeeperStaff.h"
|
#include "ZooKeeperStaff.h"
|
||||||
|
@ -1,17 +1,22 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include "Aliases.h"
|
#include <base/types.h>
|
||||||
|
#include <map>
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
/// Contains info about all shards that contain a partition
|
|
||||||
struct ClusterPartition
|
|
||||||
{
|
|
||||||
double elapsed_time_seconds = 0;
|
|
||||||
UInt64 bytes_copied = 0;
|
|
||||||
UInt64 rows_copied = 0;
|
|
||||||
UInt64 blocks_copied = 0;
|
|
||||||
|
|
||||||
UInt64 total_tries = 0;
|
/// Contains info about all shards that contain a partition
|
||||||
};
|
struct ClusterPartition
|
||||||
|
{
|
||||||
|
double elapsed_time_seconds = 0;
|
||||||
|
UInt64 bytes_copied = 0;
|
||||||
|
UInt64 rows_copied = 0;
|
||||||
|
UInt64 blocks_copied = 0;
|
||||||
|
|
||||||
|
UInt64 total_tries = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
using ClusterPartitions = std::map<String, ClusterPartition, std::greater<>>;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
70
programs/copier/ShardPartition.cpp
Normal file
70
programs/copier/ShardPartition.cpp
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
#include "ShardPartition.h"
|
||||||
|
|
||||||
|
#include "TaskShard.h"
|
||||||
|
#include "TaskTable.h"
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
ShardPartition::ShardPartition(TaskShard & parent, String name_quoted_, size_t number_of_splits)
|
||||||
|
: task_shard(parent)
|
||||||
|
, name(std::move(name_quoted_))
|
||||||
|
{
|
||||||
|
pieces.reserve(number_of_splits);
|
||||||
|
}
|
||||||
|
|
||||||
|
String ShardPartition::getPartitionCleanStartPath() const
|
||||||
|
{
|
||||||
|
return getPartitionPath() + "/clean_start";
|
||||||
|
}
|
||||||
|
|
||||||
|
String ShardPartition::getPartitionPieceCleanStartPath(size_t current_piece_number) const
|
||||||
|
{
|
||||||
|
assert(current_piece_number < task_shard.task_table.number_of_splits);
|
||||||
|
return getPartitionPiecePath(current_piece_number) + "/clean_start";
|
||||||
|
}
|
||||||
|
|
||||||
|
String ShardPartition::getPartitionPath() const
|
||||||
|
{
|
||||||
|
return task_shard.task_table.getPartitionPath(name);
|
||||||
|
}
|
||||||
|
|
||||||
|
String ShardPartition::getPartitionPiecePath(size_t current_piece_number) const
|
||||||
|
{
|
||||||
|
assert(current_piece_number < task_shard.task_table.number_of_splits);
|
||||||
|
return task_shard.task_table.getPartitionPiecePath(name, current_piece_number);
|
||||||
|
}
|
||||||
|
|
||||||
|
String ShardPartition::getShardStatusPath() const
|
||||||
|
{
|
||||||
|
// schema: /<root...>/tables/<table>/<partition>/shards/<shard>
|
||||||
|
// e.g. /root/table_test.hits/201701/shards/1
|
||||||
|
return getPartitionShardsPath() + "/" + toString(task_shard.numberInCluster());
|
||||||
|
}
|
||||||
|
|
||||||
|
String ShardPartition::getPartitionShardsPath() const
|
||||||
|
{
|
||||||
|
return getPartitionPath() + "/shards";
|
||||||
|
}
|
||||||
|
|
||||||
|
String ShardPartition::getPartitionActiveWorkersPath() const
|
||||||
|
{
|
||||||
|
return getPartitionPath() + "/partition_active_workers";
|
||||||
|
}
|
||||||
|
|
||||||
|
String ShardPartition::getActiveWorkerPath() const
|
||||||
|
{
|
||||||
|
return getPartitionActiveWorkersPath() + "/" + toString(task_shard.numberInCluster());
|
||||||
|
}
|
||||||
|
|
||||||
|
String ShardPartition::getCommonPartitionIsDirtyPath() const
|
||||||
|
{
|
||||||
|
return getPartitionPath() + "/is_dirty";
|
||||||
|
}
|
||||||
|
|
||||||
|
String ShardPartition::getCommonPartitionIsCleanedPath() const
|
||||||
|
{
|
||||||
|
return getCommonPartitionIsDirtyPath() + "/cleaned";
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user