Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-27 18:12:02 +00:00

Commit efdb49a222: Merge branch 'master' into arrow-strings

.github/workflows/tags_stable.yml (vendored, 2 lines changed)
@@ -32,7 +32,7 @@ jobs:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv
-         GID=$(id -d "${UID}")
+         GID=$(id -g "${UID}")
          docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 \
            --volume="${GITHUB_WORKSPACE}:/ClickHouse" clickhouse/style-test \
            /ClickHouse/utils/changelog/changelog.py -vv --gh-user-or-token="$GITHUB_TOKEN" \
@@ -2,11 +2,11 @@

 # NOTE: has nothing common with DBMS_TCP_PROTOCOL_VERSION,
 # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes.
-SET(VERSION_REVISION 54462)
+SET(VERSION_REVISION 54463)
 SET(VERSION_MAJOR 22)
-SET(VERSION_MINOR 5)
+SET(VERSION_MINOR 6)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH 77a82cc090dd5dba2d995946e82a12a2cadaaff3)
-SET(VERSION_DESCRIBE v22.5.1.1-testing)
-SET(VERSION_STRING 22.5.1.1)
+SET(VERSION_GITHASH df0cb0620985eb5ec59760cc76f7736e5b6209bb)
+SET(VERSION_DESCRIBE v22.6.1.1-testing)
+SET(VERSION_STRING 22.6.1.1)
 # end of autochange
@@ -178,6 +178,19 @@
* Fix segfault in Avro that appears after the second insert into file. [#33566](https://github.com/ClickHouse/ClickHouse/pull/33566) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix wrong database for JOIN w/o explicit database in distributed queries (fixes [#10471](https://github.com/ClickHouse/ClickHouse/issues/10471)). [#33611](https://github.com/ClickHouse/ClickHouse/pull/33611) ([Azat Khuzhin](https://github.com/azat)).

#### Bug Fix (user-visible misbehaviour in official stable or prestable release):

* Fix possible crash (or incorrect result) in case of `LowCardinality` arguments of window function. Fixes [#31114](https://github.com/ClickHouse/ClickHouse/issues/31114). [#31888](https://github.com/ClickHouse/ClickHouse/pull/31888) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

#### Bug Fix (v21.9.4.35-stable)

* Fix [#32964](https://github.com/ClickHouse/ClickHouse/issues/32964). [#32965](https://github.com/ClickHouse/ClickHouse/pull/32965) ([save-my-heart](https://github.com/save-my-heart)).

#### NO CL CATEGORY

* Fix regular expression handling in key path search. [#33023](https://github.com/ClickHouse/ClickHouse/pull/33023) ([mreddy017](https://github.com/mreddy017)).
* Allow to split GraphiteMergeTree rollup rules for plain/tagged metrics (optional rule_type field). [#33494](https://github.com/ClickHouse/ClickHouse/pull/33494) ([Michail Safronov](https://github.com/msaf1980)).

#### NO CL ENTRY

* NO CL ENTRY: 'Update CHANGELOG.md'. [#32472](https://github.com/ClickHouse/ClickHouse/pull/32472) ([Rich Raposa](https://github.com/rfraposa)).
@@ -198,19 +211,6 @@
* NO CL ENTRY: 'Added Superwall to adopters list'. [#33573](https://github.com/ClickHouse/ClickHouse/pull/33573) ([Justin Hilliard](https://github.com/jahilliard)).
* NO CL ENTRY: 'Revert "Ignore parse failure of opentelemetry header"'. [#33594](https://github.com/ClickHouse/ClickHouse/pull/33594) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).

#### Bug Fix (user-visible misbehaviour in official stable or prestable release):

* Fix possible crash (or incorrect result) in case of `LowCardinality` arguments of window function. Fixes [#31114](https://github.com/ClickHouse/ClickHouse/issues/31114). [#31888](https://github.com/ClickHouse/ClickHouse/pull/31888) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

#### NO CL CATEGORY

* Fix regular expression handling in key path search. [#33023](https://github.com/ClickHouse/ClickHouse/pull/33023) ([mreddy017](https://github.com/mreddy017)).
* Allow to split GraphiteMergeTree rollup rules for plain/tagged metrics (optional rule_type field). [#33494](https://github.com/ClickHouse/ClickHouse/pull/33494) ([Michail Safronov](https://github.com/msaf1980)).

#### Bug Fix (v21.9.4.35-stable)

* Fix [#32964](https://github.com/ClickHouse/ClickHouse/issues/32964). [#32965](https://github.com/ClickHouse/ClickHouse/pull/32965) ([save-my-heart](https://github.com/save-my-heart)).

#### New Feature / New Tool

* Tool for collecting diagnostics data. [#33175](https://github.com/ClickHouse/ClickHouse/pull/33175) ([Alexander Burmak](https://github.com/Alex-Burmak)).
@@ -38,6 +38,7 @@

#### Improvement

* Now ReplicatedMergeTree can recover data when some of its disks are broken. [#13544](https://github.com/ClickHouse/ClickHouse/pull/13544) ([Amos Bird](https://github.com/amosbird)).
* Dynamic reload of server TLS certificates on config reload. Closes [#15764](https://github.com/ClickHouse/ClickHouse/issues/15764). [#15765](https://github.com/ClickHouse/ClickHouse/pull/15765) ([johnskopis](https://github.com/johnskopis)).
* Merge [#15765](https://github.com/ClickHouse/ClickHouse/issues/15765) (Dynamic reload of server TLS certificates on config reload) cc @johnskopis. [#31257](https://github.com/ClickHouse/ClickHouse/pull/31257) ([Filatenkov Artur](https://github.com/FArthur-cmd)).
* Added `UUID` data type support for functions `hex` and `bin`. [#32170](https://github.com/ClickHouse/ClickHouse/pull/32170) ([Frank Chen](https://github.com/FrankChen021)).
* Support `optimize_read_in_order` if a prefix of the sorting key is already sorted, e.g. if a table has sorting key `ORDER BY (a, b)` and the query has `WHERE a = const ORDER BY b`, reading in the order of the sorting key is now applied instead of a full sort (see the sketch below). [#32748](https://github.com/ClickHouse/ClickHouse/pull/32748) ([Anton Popov](https://github.com/CurtizJ)).
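For illustration, a minimal sketch of the query shape this read-in-order optimization targets (table and column names are hypothetical):

```sql
-- Sorting key (a, b): once the prefix `a` is fixed by the filter,
-- matching rows are already stored in order of `b`.
CREATE TABLE t (a UInt32, b UInt32, v String)
ENGINE = MergeTree ORDER BY (a, b);

-- With optimize_read_in_order = 1, this reads in sorting-key order
-- instead of performing a full sort.
SELECT v FROM t WHERE a = 42 ORDER BY b;
```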
@@ -194,15 +195,16 @@
* Fixed the assertion in case of using `allow_experimental_parallel_reading_from_replicas` with `max_parallel_replicas` equal to 1. This fixes [#34525](https://github.com/ClickHouse/ClickHouse/issues/34525). [#34613](https://github.com/ClickHouse/ClickHouse/pull/34613) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add Debug workflow to get variables for all actions on demand; fix lack of pr_info.number for some edge cases. [#34644](https://github.com/ClickHouse/ClickHouse/pull/34644) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### NO CL CATEGORY

* Reverting to previous docker images, will take a closer look at failing tests from [#34373](https://github.com/ClickHouse/ClickHouse/issues/34373). [#34413](https://github.com/ClickHouse/ClickHouse/pull/34413) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### NO CL ENTRY

* NO CL ENTRY: 'Switch gosu to su-exec'. [#33563](https://github.com/ClickHouse/ClickHouse/pull/33563) ([Anselmo D. Adams](https://github.com/anselmodadams)).
* NO CL ENTRY: 'Revert "Additionally check remote_fs_execute_merges_on_single_replica_time_threshold inside ReplicatedMergeTreeQueue"'. [#34201](https://github.com/ClickHouse/ClickHouse/pull/34201) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Add func tests run with s3"'. [#34211](https://github.com/ClickHouse/ClickHouse/pull/34211) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Add pool to WriteBufferFromS3"'. [#34212](https://github.com/ClickHouse/ClickHouse/pull/34212) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Add support agreement page and snippets.'. [#34512](https://github.com/ClickHouse/ClickHouse/pull/34512) ([Tom Risse](https://github.com/flickerbox-tom)).
* NO CL ENTRY: 'Add Gigasheet to adopters'. [#34589](https://github.com/ClickHouse/ClickHouse/pull/34589) ([Brian Hunter](https://github.com/bjhunter)).

#### NO CL CATEGORY

* Reverting to previous docker images, will take a closer look at failing tests from [#34373](https://github.com/ClickHouse/ClickHouse/issues/34373). [#34413](https://github.com/ClickHouse/ClickHouse/pull/34413) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@@ -1,6 +1,2 @@
### ClickHouse release v22.3.2.2-lts FIXME as compared to v22.3.1.1262-prestable

#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

* Fix bug in S3 zero-copy replication which can lead to errors like `Found parts with the same min block and with the same max block as the missing part` after concurrent fetch/drop table. [#35348](https://github.com/ClickHouse/ClickHouse/pull/35348) ([alesapin](https://github.com/alesapin)).
@@ -8,7 +8,6 @@
* Backported in [#36244](https://github.com/ClickHouse/ClickHouse/issues/36244): Fix usage of quota with asynchronous inserts. [#35645](https://github.com/ClickHouse/ClickHouse/pull/35645) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#36240](https://github.com/ClickHouse/ClickHouse/issues/36240): Fix possible loss of subcolumns in type `Object`. [#35682](https://github.com/ClickHouse/ClickHouse/pull/35682) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#36242](https://github.com/ClickHouse/ClickHouse/issues/36242): Fix possible `Can't adjust last granule` exception while reading subcolumns of type `Object`. [#35687](https://github.com/ClickHouse/ClickHouse/pull/35687) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#35938](https://github.com/ClickHouse/ClickHouse/issues/35938): Avoid processing per-column TTL multiple times. [#35820](https://github.com/ClickHouse/ClickHouse/pull/35820) ([Azat Khuzhin](https://github.com/azat)).
* Backported in [#36147](https://github.com/ClickHouse/ClickHouse/issues/36147): Fix reading from `Kafka` tables when `kafka_num_consumers > 1` and `kafka_thread_per_consumer = 0`. Returns parallel & multithreaded reading, accidentally broken in 21.11. Closes [#35153](https://github.com/ClickHouse/ClickHouse/issues/35153). [#35973](https://github.com/ClickHouse/ClickHouse/pull/35973) ([filimonov](https://github.com/filimonov)).
* Backported in [#36276](https://github.com/ClickHouse/ClickHouse/issues/36276): Fix reading of empty arrays in reverse order (in queries with descending sorting by prefix of primary key). [#36215](https://github.com/ClickHouse/ClickHouse/pull/36215) ([Anton Popov](https://github.com/CurtizJ)).
@@ -2,6 +2,5 @@

#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

* Backported in [#36525](https://github.com/ClickHouse/ClickHouse/issues/36525): Queries with aliases inside special operators returned a parsing error (was broken in 22.1). Example: `SELECT substring('test' AS t, 1, 1)` (see the sketch below). [#36167](https://github.com/ClickHouse/ClickHouse/pull/36167) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#36795](https://github.com/ClickHouse/ClickHouse/issues/36795): Fix vertical merges in wide parts. Previously an exception `There is no column` could be thrown during merge. [#36707](https://github.com/ClickHouse/ClickHouse/pull/36707) ([Anton Popov](https://github.com/CurtizJ)).
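For illustration, a minimal sketch of the alias-in-special-operator case fixed above; the second query reusing the alias is an assumption about typical usage:

```sql
-- Broken in 22.1, works again with this fix: an alias defined
-- inside a special operator's argument list.
SELECT substring('test' AS t, 1, 1);

-- The alias can also be referenced elsewhere in the SELECT list.
SELECT substring('test' AS t, 1, 1), upper(t);
```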
@@ -150,6 +150,10 @@
* Check a number of required reports in BuilderSpecialReport. [#36413](https://github.com/ClickHouse/ClickHouse/pull/36413) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Add labeling for `Revert` PRs. [#36422](https://github.com/ClickHouse/ClickHouse/pull/36422) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (prestable release)

* Call RemoteQueryExecutor with original_query instead of a rewritten query, eliminating the AMBIGUOUS_COLUMN_NAME exception. [#35748](https://github.com/ClickHouse/ClickHouse/pull/35748) ([lgbo](https://github.com/lgbo-ustc)).

#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

* Disallow ALTER TTL for engines that do not support it, to avoid breaking ATTACH TABLE (closes [#33344](https://github.com/ClickHouse/ClickHouse/issues/33344)). [#33391](https://github.com/ClickHouse/ClickHouse/pull/33391) ([zhongyuankai](https://github.com/zhongyuankai)).
@@ -158,7 +162,6 @@
* Fix mutations in tables with enabled sparse columns. [#35284](https://github.com/ClickHouse/ClickHouse/pull/35284) ([Anton Popov](https://github.com/CurtizJ)).
* Fix schema inference for TSKV format while using small max_read_buffer_size. [#35332](https://github.com/ClickHouse/ClickHouse/pull/35332) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix partition pruning in case of comparison with constant in `WHERE`. If column and constant had different types, overflow was possible. Query could return an incorrect empty result. This fixes [#35304](https://github.com/ClickHouse/ClickHouse/issues/35304). [#35334](https://github.com/ClickHouse/ClickHouse/pull/35334) ([Amos Bird](https://github.com/amosbird)).
* Fix bug in S3 zero-copy replication which can lead to errors like `Found parts with the same min block and with the same max block as the missing part` after concurrent fetch/drop table. [#35348](https://github.com/ClickHouse/ClickHouse/pull/35348) ([alesapin](https://github.com/alesapin)).
* Fix issue with non-existing directory https://github.com/ClickHouse/ClickHouse/runs/5588046879?check_suite_focus=true. [#35376](https://github.com/ClickHouse/ClickHouse/pull/35376) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix possible deadlock in cache. [#35378](https://github.com/ClickHouse/ClickHouse/pull/35378) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix wrong assets path in release workflow. [#35379](https://github.com/ClickHouse/ClickHouse/pull/35379) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
@@ -233,7 +236,3 @@
* NO CL ENTRY: 'Revert "clang-tidy report issues with Medium priority"'. [#35941](https://github.com/ClickHouse/ClickHouse/pull/35941) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Fix crash in ParallelReadBuffer"'. [#36210](https://github.com/ClickHouse/ClickHouse/pull/36210) ([Alexander Tokmakov](https://github.com/tavplubix)).

#### Bug Fix (prestable release)

* Call RemoteQueryExecutor with original_query instead of a rewritten query, eliminating the AMBIGUOUS_COLUMN_NAME exception. [#35748](https://github.com/ClickHouse/ClickHouse/pull/35748) ([lgbo](https://github.com/lgbo-ustc)).
@@ -1,6 +1,2 @@
### ClickHouse release v22.4.2.1-stable FIXME as compared to v22.4.1.2305-prestable

#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

* Fix projection analysis which might lead to wrong query results when an IN subquery is used. This fixes [#35336](https://github.com/ClickHouse/ClickHouse/issues/35336). [#35631](https://github.com/ClickHouse/ClickHouse/pull/35631) ([Amos Bird](https://github.com/amosbird)).
@@ -3,6 +3,5 @@

#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

* Backported in [#36524](https://github.com/ClickHouse/ClickHouse/issues/36524): Queries with aliases inside special operators returned a parsing error (was broken in 22.1). Example: `SELECT substring('test' AS t, 1, 1)`. [#36167](https://github.com/ClickHouse/ClickHouse/pull/36167) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#36582](https://github.com/ClickHouse/ClickHouse/issues/36582): Fix nullptr dereference in JOIN and COLUMNS matcher. This fixes [#36416](https://github.com/ClickHouse/ClickHouse/issues/36416). This is for https://github.com/ClickHouse/ClickHouse/pull/36417. [#36430](https://github.com/ClickHouse/ClickHouse/pull/36430) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#36673](https://github.com/ClickHouse/ClickHouse/issues/36673): Fix merges of wide parts with type `Object`. [#36637](https://github.com/ClickHouse/ClickHouse/pull/36637) ([Anton Popov](https://github.com/CurtizJ)).
@@ -2,7 +2,6 @@

#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

* Backported in [#36524](https://github.com/ClickHouse/ClickHouse/issues/36524): Queries with aliases inside special operators returned a parsing error (was broken in 22.1). Example: `SELECT substring('test' AS t, 1, 1)`. [#36167](https://github.com/ClickHouse/ClickHouse/pull/36167) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#36635](https://github.com/ClickHouse/ClickHouse/issues/36635): Fix `Missing column` exception which could happen while using `INTERPOLATE` with `ENGINE = MergeTree` table (see the sketch below). [#36549](https://github.com/ClickHouse/ClickHouse/pull/36549) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#36794](https://github.com/ClickHouse/ClickHouse/issues/36794): Fix vertical merges in wide parts. Previously an exception `There is no column` could be thrown during merge. [#36707](https://github.com/ClickHouse/ClickHouse/pull/36707) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#36926](https://github.com/ClickHouse/ClickHouse/issues/36926): Fix bug in clickhouse-keeper which can lead to corrupted compressed log files in case of small load and restarts. [#36910](https://github.com/ClickHouse/ClickHouse/pull/36910) ([alesapin](https://github.com/alesapin)).
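For illustration, a minimal sketch of an `INTERPOLATE` query of the kind addressed by the backported fix above (table and column names are hypothetical):

```sql
-- WITH FILL inserts the missing values of n; INTERPOLATE derives x
-- for the filled rows. On a MergeTree table this formerly could fail
-- with a `Missing column` exception.
SELECT n, x
FROM events
ORDER BY n WITH FILL FROM 1 TO 10
INTERPOLATE (x AS x + 1);
```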
docs/changelogs/v22.5.1.2079-stable.md (new file, 182 lines)

@@ -0,0 +1,182 @@
### ClickHouse release v22.5.1.2079-stable FIXME as compared to v22.4.1.2305-prestable

#### Backward Incompatible Change

* Updated the BoringSSL module to the official FIPS compliant version. This makes ClickHouse FIPS compliant. [#35914](https://github.com/ClickHouse/ClickHouse/pull/35914) ([Meena-Renganathan](https://github.com/Meena-Renganathan)).
* Now, background merges, mutations and `OPTIMIZE` will not increment `SelectedRows` and `SelectedBytes` metrics. They (still) will increment `MergedRows` and `MergedUncompressedBytes` as before. [#37040](https://github.com/ClickHouse/ClickHouse/pull/37040) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

#### New Feature

* Add implementation of MeiliSearch storage and table function. [#33332](https://github.com/ClickHouse/ClickHouse/pull/33332) ([Mikhail Artemenko](https://github.com/Michicosun)).
* Add support for GROUPING SETS in the GROUP BY clause. Follow-up after [#33186](https://github.com/ClickHouse/ClickHouse/issues/33186). This implementation supports parallel processing of grouping sets (see the sketch after this list). [#33631](https://github.com/ClickHouse/ClickHouse/pull/33631) ([Dmitry Novik](https://github.com/novikd)).
* According to the design mentioned at [#19627](https://github.com/ClickHouse/ClickHouse/issues/19627)#issuecomment-1068772646. [#35318](https://github.com/ClickHouse/ClickHouse/pull/35318) ([徐炘](https://github.com/weeds085490)).
* Added `SYSTEM SYNC DATABASE REPLICA` query, which allows syncing table metadata inside a Replicated database, because synchronisation is currently asynchronous. [#35944](https://github.com/ClickHouse/ClickHouse/pull/35944) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Add output format Prometheus ([#36051](https://github.com/ClickHouse/ClickHouse/issues/36051)). [#36206](https://github.com/ClickHouse/ClickHouse/pull/36206) ([Vladimir C](https://github.com/vdimir)).
* Parse collations in CREATE TABLE; throw an exception or ignore them. Closes [#35892](https://github.com/ClickHouse/ClickHouse/issues/35892). [#36271](https://github.com/ClickHouse/ClickHouse/pull/36271) ([yuuch](https://github.com/yuuch)).
* Add aliases JSONLines and NDJSON for JSONEachRow. Closes [#36303](https://github.com/ClickHouse/ClickHouse/issues/36303). [#36327](https://github.com/ClickHouse/ClickHouse/pull/36327) ([flynn](https://github.com/ucasfl)).
* Set parts_to_delay_insert and parts_to_throw_insert as query-level settings. If they are defined, they can override table-level settings. [#36371](https://github.com/ClickHouse/ClickHouse/pull/36371) ([Memo](https://github.com/Joeywzr)).
* Temporary tables can show total rows and total bytes. [#36401](https://github.com/ClickHouse/ClickHouse/issues/36401). [#36439](https://github.com/ClickHouse/ClickHouse/pull/36439) ([xiedeyantu](https://github.com/xiedeyantu)).
* Added new hash function - wyHash64. [#36467](https://github.com/ClickHouse/ClickHouse/pull/36467) ([olevino](https://github.com/olevino)).
* Window function nth_value was added. [#36601](https://github.com/ClickHouse/ClickHouse/pull/36601) ([Nikolay](https://github.com/ndchikin)).
* Add MySQLDump input format. It reads all data from INSERT queries belonging to one table in the dump. If there is more than one table, by default it reads data from the first one. [#36667](https://github.com/ClickHouse/ClickHouse/pull/36667) ([Kruglov Pavel](https://github.com/Avogar)).
* New single-binary diagnostics tool. [#36705](https://github.com/ClickHouse/ClickHouse/pull/36705) ([Dale McDiarmid](https://github.com/gingerwizard)).
* Add a system table for counting requests for remote file access, which can help users analyze the causes of performance fluctuations in deployments that separate storage and compute. When a query reads a segment of a remote file, a record is generated. The read type (`READ_FROM_FS_AND_DOWNLOADED_TO_CACHE`, `READ_FROM_CACHE`, `READ_FROM_FS_BYPASSING_CACHE`) indicates whether the query accessed the segment from the cache or from the remote file. [#36802](https://github.com/ClickHouse/ClickHouse/pull/36802) ([Han Shukai](https://github.com/KinderRiven)).
* Adds `h3Line`, `h3Distance` and `h3HexRing` functions. [#37030](https://github.com/ClickHouse/ClickHouse/pull/37030) ([Bharat Nallan](https://github.com/bharatnc)).
* Related issue - [#35101](https://github.com/ClickHouse/ClickHouse/issues/35101). [#37033](https://github.com/ClickHouse/ClickHouse/pull/37033) ([qieqieplus](https://github.com/qieqieplus)).
* Added system.certificates table. [#37142](https://github.com/ClickHouse/ClickHouse/pull/37142) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
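For illustration, a minimal sketch of the new GROUPING SETS support (table and column names are hypothetical):

```sql
-- One pass over the data produces two aggregation grains:
-- per-region totals and per-product totals.
SELECT region, product, sum(amount) AS total
FROM sales
GROUP BY GROUPING SETS ((region), (product));
```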
#### Performance Improvement

* Improve performance of ORDER BY, MergeJoin, and insertion into MergeTree using JIT compilation of sort column comparators. [#34469](https://github.com/ClickHouse/ClickHouse/pull/34469) ([Maksim Kita](https://github.com/kitaisreal)).
* The first commit increases the inline threshold. Next commits will improve queries by inlining where it has shown better performance. This way we do not increase compile time or binary size while optimizing the program. [#34544](https://github.com/ClickHouse/ClickHouse/pull/34544) ([Daniel Kutenin](https://github.com/danlark1)).
* Transform OR LIKE chain to multiMatchAny. Will enable once we have more confidence it works. [#34932](https://github.com/ClickHouse/ClickHouse/pull/34932) ([Daniel Kutenin](https://github.com/danlark1)).
* Rewrite `SELECT countDistinct(a) FROM t` to `SELECT count(1) FROM (SELECT a FROM t GROUP BY a)` (see the sketch after this list). [#35993](https://github.com/ClickHouse/ClickHouse/pull/35993) ([zhanglistar](https://github.com/zhanglistar)).
* Change structure of `system.asynchronous_metric_log`. It will take about 10 times less space. This closes [#36357](https://github.com/ClickHouse/ClickHouse/issues/36357). The field `event_time_microseconds` was removed, because it is useless. [#36360](https://github.com/ClickHouse/ClickHouse/pull/36360) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The default `HashJoin` is not thread-safe for inserting the right table's rows, so it runs in a single thread. When the right table is large, the join process is too slow, with low CPU utilization. [#36415](https://github.com/ClickHouse/ClickHouse/pull/36415) ([lgbo](https://github.com/lgbo-ustc)).
* Improve performance of reading from storage `File` and table function `file` in case when the path has globs and the matched directory contains a large number of files. [#36647](https://github.com/ClickHouse/ClickHouse/pull/36647) ([Anton Popov](https://github.com/CurtizJ)).
* Apply parallel parsing for input format `HiveText`, which can speed up HiveText parsing by 2x when reading a local file. [#36650](https://github.com/ClickHouse/ClickHouse/pull/36650) ([李扬](https://github.com/taiyang-li)).
* Improves performance of file descriptor cache by narrowing mutex scopes. [#36682](https://github.com/ClickHouse/ClickHouse/pull/36682) ([Anton Kozlov](https://github.com/tonickkozlov)).
* This PR improves the `WATCH` query in WindowView: 1. Reduce the latency of providing query results by calling the `fire_condition` signal. 2. Make the cancel query operation (Ctrl+C) faster by checking `isCancelled()` more frequently. [#37226](https://github.com/ClickHouse/ClickHouse/pull/37226) ([vxider](https://github.com/Vxider)).
* Improve performance of `avg` and `sum` aggregate functions if used without a GROUP BY expression. [#37257](https://github.com/ClickHouse/ClickHouse/pull/37257) ([Maksim Kita](https://github.com/kitaisreal)).
* Improve performance of unary arithmetic functions (`bitCount`, `bitNot`, `abs`, `intExp2`, `intExp10`, `negate`, `roundAge`, `roundDuration`, `roundToExp2`, `sign`) using dynamic dispatch. [#37289](https://github.com/ClickHouse/ClickHouse/pull/37289) ([Maksim Kita](https://github.com/kitaisreal)).
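For illustration, a sketch of the countDistinct rewrite; the table name is hypothetical and the opt-in setting name `count_distinct_optimization` is an assumption:

```sql
-- Assumed opt-in setting enabling the rewrite.
SET count_distinct_optimization = 1;

SELECT countDistinct(a) FROM t;
-- is executed as:
SELECT count(1) FROM (SELECT a FROM t GROUP BY a);
```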
#### Improvement

* Remind properly if clickhouse-client --file is used without a preceding --external. Closes [#34747](https://github.com/ClickHouse/ClickHouse/issues/34747). [#34765](https://github.com/ClickHouse/ClickHouse/pull/34765) ([李扬](https://github.com/taiyang-li)).
* Added support for specifying `content_type` in predefined and static HTTP handler config. [#34916](https://github.com/ClickHouse/ClickHouse/pull/34916) ([Roman Nikonov](https://github.com/nic11)).
* Implement partial GROUP BY key for optimize_aggregation_in_order. [#35111](https://github.com/ClickHouse/ClickHouse/pull/35111) ([Azat Khuzhin](https://github.com/azat)).
* Nullable detection in Protobuf using Google wrappers. [#35149](https://github.com/ClickHouse/ClickHouse/pull/35149) ([Jakub Kuklis](https://github.com/jkuklis)).
* If the required amount of memory becomes available before the selected query is stopped, all waiting queries continue execution. Now we don't stop any query if memory is freed before the moment when the selected query learns about the cancellation. [#35637](https://github.com/ClickHouse/ClickHouse/pull/35637) ([Dmitry Novik](https://github.com/novikd)).
* Enable memory overcommit by default. [#35921](https://github.com/ClickHouse/ClickHouse/pull/35921) ([Dmitry Novik](https://github.com/novikd)).
* Add a branch to avoid unnecessary memcpy in readBig. [#36095](https://github.com/ClickHouse/ClickHouse/pull/36095) ([jasperzhu](https://github.com/jinjunzh)).
* Refactor code around schema inference with globs. Try the next file from a glob only if it makes sense (previously we tried the next file in case of any error). Also fixes [#36317](https://github.com/ClickHouse/ClickHouse/issues/36317). [#36205](https://github.com/ClickHouse/ClickHouse/pull/36205) ([Kruglov Pavel](https://github.com/Avogar)).
* Improve schema inference for JSON objects. [#36207](https://github.com/ClickHouse/ClickHouse/pull/36207) ([Kruglov Pavel](https://github.com/Avogar)).
* Add support for force recovery, which allows you to reconfigure a cluster without quorum. [#36258](https://github.com/ClickHouse/ClickHouse/pull/36258) ([Antonio Andelic](https://github.com/antonio2368)).
* We create a local interpreter if we want to execute a query on the localhost replica, but when executing a query on multiple replicas we rely on the fact that a connection exists so replicas can talk to the coordinator. This is now improved: the localhost replica can talk to the coordinator directly in the same process. [#36281](https://github.com/ClickHouse/ClickHouse/pull/36281) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Show names of erroneous files in case of parsing errors while executing table functions `file`, `s3` and `url`. [#36314](https://github.com/ClickHouse/ClickHouse/pull/36314) ([Anton Popov](https://github.com/CurtizJ)).
* Allowed to increase the number of threads for executing background operations (merges, mutations, moves and fetches) at runtime if they are specified in the top-level config. [#36425](https://github.com/ClickHouse/ClickHouse/pull/36425) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* clickhouse-benchmark can read auth from environment variables. [#36497](https://github.com/ClickHouse/ClickHouse/pull/36497) ([Anton Kozlov](https://github.com/tonickkozlov)).
* Allow names of tuple elements that start from digits. [#36544](https://github.com/ClickHouse/ClickHouse/pull/36544) ([Anton Popov](https://github.com/CurtizJ)).
* Allow file descriptors in table function file if it is run in clickhouse-local. [#36562](https://github.com/ClickHouse/ClickHouse/pull/36562) ([wuxiaobai24](https://github.com/wuxiaobai24)).
* Allow to cast columns of type `Object(...)` to `Object(Nullable(...))`. [#36564](https://github.com/ClickHouse/ClickHouse/pull/36564) ([awakeljw](https://github.com/awakeljw)).
* Cleanup CSS in Play UI. The pixels are more evenly placed. Better usability for long content in table cells. [#36569](https://github.com/ClickHouse/ClickHouse/pull/36569) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* The metrics about time spent reading from s3 are now calculated correctly. Closes [#35483](https://github.com/ClickHouse/ClickHouse/issues/35483). [#36572](https://github.com/ClickHouse/ClickHouse/pull/36572) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve `SYSTEM DROP FILESYSTEM CACHE` query: add a `<path>` option and a `FORCE` option. [#36639](https://github.com/ClickHouse/ClickHouse/pull/36639) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add `is_all_data_sent` column into `system.processes`, and improve internal testing hardening check based on it. [#36649](https://github.com/ClickHouse/ClickHouse/pull/36649) ([Azat Khuzhin](https://github.com/azat)).
* Date-time conversion functions that generate a time before 1970-01-01 00:00:00 in timezones with partial-hour/minute offsets now saturate to zero instead of overflowing. This is the continuation of https://github.com/ClickHouse/ClickHouse/pull/29953 which addresses https://github.com/ClickHouse/ClickHouse/pull/29953#discussion_r800550280. Marked as an improvement because it is implementation-defined behaviour (and a very rare case) and we are allowed to break it. [#36656](https://github.com/ClickHouse/ClickHouse/pull/36656) ([Amos Bird](https://github.com/amosbird)).
* Allow cancelling a query while still keeping a decent query id in MySQLHandler. [#36699](https://github.com/ClickHouse/ClickHouse/pull/36699) ([Amos Bird](https://github.com/amosbird)).
* Properly cancel INSERT queries in `clickhouse-client`/`clickhouse-local`. [#36710](https://github.com/ClickHouse/ClickHouse/pull/36710) ([Azat Khuzhin](https://github.com/azat)).
* Allow cluster macro in s3Cluster table function. [#36726](https://github.com/ClickHouse/ClickHouse/pull/36726) ([Vadim Volodin](https://github.com/PolyProgrammist)).
* Added `user_defined_path` config setting. [#36753](https://github.com/ClickHouse/ClickHouse/pull/36753) ([Maksim Kita](https://github.com/kitaisreal)).
* Allow to execute hash functions with arguments of type `Array(Tuple(..))`. [#36812](https://github.com/ClickHouse/ClickHouse/pull/36812) ([Anton Popov](https://github.com/CurtizJ)).
* Add a warning if clickhouse-server is run with log level "test". The log level "test" was added recently and cannot be used in production due to inevitable, unavoidable, fatal and life-threatening performance degradation. [#36824](https://github.com/ClickHouse/ClickHouse/pull/36824) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Play UI: If there is one row in the result and more than a few columns, display the result vertically. Continuation of [#36811](https://github.com/ClickHouse/ClickHouse/issues/36811). [#36842](https://github.com/ClickHouse/ClickHouse/pull/36842) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add extra diagnostic info (if applicable) when sending an exception to another server. [#36872](https://github.com/ClickHouse/ClickHouse/pull/36872) ([Alexander Tokmakov](https://github.com/tavplubix)).
* After [#36425](https://github.com/ClickHouse/ClickHouse/issues/36425), settings like `background_fetches_pool_size` became obsolete and may appear in the top-level config, but ClickHouse threw an exception like `Error updating configuration from '/etc/clickhouse-server/config.xml' config.: Code: 137. DB::Exception: A setting 'background_fetches_pool_size' appeared at top level in config /etc/clickhouse-server/config.xml.` This is fixed. [#36917](https://github.com/ClickHouse/ClickHouse/pull/36917) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Finalize write buffers in case of exception, to avoid doing it in destructors. Hopefully fixes [#36907](https://github.com/ClickHouse/ClickHouse/issues/36907). [#36979](https://github.com/ClickHouse/ClickHouse/pull/36979) ([Kruglov Pavel](https://github.com/Avogar)).
* Play UI: Nullable numbers will be aligned to the right in table cells. This closes [#36982](https://github.com/ClickHouse/ClickHouse/issues/36982). [#36988](https://github.com/ClickHouse/ClickHouse/pull/36988) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Implemented a new mode of handling row policies which can be enabled in the main configuration and lets users without permissive row policies read rows. [#36997](https://github.com/ClickHouse/ClickHouse/pull/36997) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix a bug which could lead to forgotten outdated parts in the MergeTree table engines family in case of filesystem failures during parts removal. Before the fix they would be removed only after the first server restart. [#37014](https://github.com/ClickHouse/ClickHouse/pull/37014) ([alesapin](https://github.com/alesapin)).
* Modify the query div in play.html to be extendable beyond 200px height. In case of very long queries it is helpful to extend the textarea element; currently, since the div has a fixed height, the extended textarea hides the data div underneath. With this fix, extending the textarea element pushes the data div down/up so the extended textarea won't hide it. [#37051](https://github.com/ClickHouse/ClickHouse/pull/37051) ([guyco87](https://github.com/guyco87)).
* Better read from cache. [#37054](https://github.com/ClickHouse/ClickHouse/pull/37054) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix progress indication for `INSERT SELECT` in clickhouse-local for any query, and for file progress in the client; more correct file progress. [#37075](https://github.com/ClickHouse/ClickHouse/pull/37075) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Disable `log_query_threads` setting by default. It controls the logging of statistics about every thread participating in query execution. After supporting asynchronous reads, the total number of distinct thread ids became too large, and logging into the `query_thread_log` has become too heavy. [#37077](https://github.com/ClickHouse/ClickHouse/pull/37077) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Option `compatibility_ignore_auto_increment_in_create_table` allows ignoring the `AUTO_INCREMENT` keyword in a column declaration to simplify migration from MySQL (see the sketch after this list). [#37178](https://github.com/ClickHouse/ClickHouse/pull/37178) ([Igor Nikonov](https://github.com/devcrafter)).
* Added implicit cast for the `h3kRing` function's second argument to improve usability. Closes [#35432](https://github.com/ClickHouse/ClickHouse/issues/35432). [#37189](https://github.com/ClickHouse/ClickHouse/pull/37189) ([Maksim Kita](https://github.com/kitaisreal)).
* Limit the maximum number of partitions that can be queried for each Hive table, to avoid resource overruns. [#37281](https://github.com/ClickHouse/ClickHouse/pull/37281) ([lgbo](https://github.com/lgbo-ustc)).
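For illustration, a minimal sketch of the `compatibility_ignore_auto_increment_in_create_table` option (the table definition is hypothetical):

```sql
-- With the option enabled, the MySQL-style AUTO_INCREMENT keyword
-- in a column declaration is ignored instead of raising an error.
SET compatibility_ignore_auto_increment_in_create_table = 1;

CREATE TABLE migrated_users
(
    id UInt64 AUTO_INCREMENT,
    name String
)
ENGINE = MergeTree
ORDER BY id;
```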
#### Bug Fix

* Extract `Version ID` from the URI if present and reassemble the URI without it; configure the `AWS HTTP URI` object with the request. Closes [#31221](https://github.com/ClickHouse/ClickHouse/issues/31221). Unit tests: [`gtest_s3_uri`](https://github.com/ClickHouse/ClickHouse/blob/2340a6c6849ebc05a8efbf97ba8de3ff9dc0eff4/src/IO/tests/gtest_s3_uri.cpp). [#34571](https://github.com/ClickHouse/ClickHouse/pull/34571) ([Saad Ur Rahman](https://github.com/surahman)).
#### Build/Testing/Packaging Improvement

* Now `clickhouse-keeper` for the `x86_64` architecture is statically linked with [musl](https://musl.libc.org/) and doesn't depend on any system libraries. [#31833](https://github.com/ClickHouse/ClickHouse/pull/31833) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fail performance comparison on errors in the report. [#34797](https://github.com/ClickHouse/ClickHouse/pull/34797) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Check out most of the build jobs with depth=1. [#36091](https://github.com/ClickHouse/ClickHouse/pull/36091) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Bump minizip-ng to a sane version, or else old git won't be able to address dangling remote ref. [#35656](https://github.com/ClickHouse/ClickHouse/issues/35656). [#36295](https://github.com/ClickHouse/ClickHouse/pull/36295) ([Amos Bird](https://github.com/amosbird)).
* Use consistent `force tests` label in CI. [#36496](https://github.com/ClickHouse/ClickHouse/pull/36496) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Limit PowerPC code generation to Power8 for better compatibility. This closes [#36025](https://github.com/ClickHouse/ClickHouse/issues/36025). [#36529](https://github.com/ClickHouse/ClickHouse/pull/36529) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* More robust handling of unknown architectures in CMake. [#36614](https://github.com/ClickHouse/ClickHouse/pull/36614) ([Robert Schulze](https://github.com/rschu1ze)).
* Simplify performance test. This will give a chance for us to use it. [#36769](https://github.com/ClickHouse/ClickHouse/pull/36769) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix checking for rabbitmq liveness in tests. Fixed incorrect import. [#36938](https://github.com/ClickHouse/ClickHouse/pull/36938) ([tchepavel](https://github.com/tchepavel)).
* ClickHouse builds for the `PowerPC64LE` architecture are now available in the universal installation script `curl https://clickhouse.com/ | sh` and by direct link `https://builds.clickhouse.com/master/powerpc64le/clickhouse`. [#37095](https://github.com/ClickHouse/ClickHouse/pull/37095) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Make cmake build scripts a bit more robust. [#37169](https://github.com/ClickHouse/ClickHouse/pull/37169) ([Robert Schulze](https://github.com/rschu1ze)).
#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* The ilike() function on FixedString columns could have returned wrong results (i.e. matched less than it should). [#37117](https://github.com/ClickHouse/ClickHouse/pull/37117) ([Robert Schulze](https://github.com/rschu1ze)).
* Fix implicit cast for optimize_skip_unused_shards_rewrite_in. [#37153](https://github.com/ClickHouse/ClickHouse/pull/37153) ([Azat Khuzhin](https://github.com/azat)).
* Enable `enable_global_with_statement` for subqueries, close [#37141](https://github.com/ClickHouse/ClickHouse/issues/37141). [#37166](https://github.com/ClickHouse/ClickHouse/pull/37166) ([Vladimir C](https://github.com/vdimir)).
* Now the WindowView `WATCH EVENTS` query will not be terminated due to the nonempty Chunk created in `WindowViewSource.h:58`. [#37182](https://github.com/ClickHouse/ClickHouse/pull/37182) ([vxider](https://github.com/Vxider)).
* Fix "Cannot create column of type Set" for distributed queries with LIMIT BY. [#37193](https://github.com/ClickHouse/ClickHouse/pull/37193) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible overflow during `OvercommitRatio` comparison. cc @tavplubix. [#37197](https://github.com/ClickHouse/ClickHouse/pull/37197) ([Dmitry Novik](https://github.com/novikd)).
* Update `max_fired_watermark` after blocks have actually fired, to avoid deleting data that has not been fired yet. [#37225](https://github.com/ClickHouse/ClickHouse/pull/37225) ([vxider](https://github.com/Vxider)).
* Kafka does not need `group.id` at the producer stage. In the console log you can find a warning that describes this issue: ``` 2022.05.15 17:59:13.270227 [ 137 ] {} <Warning> StorageKafka (topic-name): [rdk:CONFWARN] [thrd:app]: Configuration property group.id is a consumer property and will be ignored by this producer instance ```. [#37228](https://github.com/ClickHouse/ClickHouse/pull/37228) ([Mark Andreev](https://github.com/mrk-andreev)).
* Fix MySQL database engine to be compatible with the binary(0) data type. [#37232](https://github.com/ClickHouse/ClickHouse/pull/37232) ([zzsmdfj](https://github.com/zzsmdfj)).
* Fix execution of mutations in tables in which there are columns of type `Object`. Using subcolumns of type `Object` in the `WHERE` expression of `UPDATE` or `DELETE` queries is not allowed yet, nor is manipulating (`DROP`, `MODIFY`) separate subcolumns. Fixes [#37205](https://github.com/ClickHouse/ClickHouse/issues/37205). [#37266](https://github.com/ClickHouse/ClickHouse/pull/37266) ([Anton Popov](https://github.com/CurtizJ)).
* Fix Nullable(String) to Nullable(Bool/IPv4/IPv6) conversion. Closes [#37221](https://github.com/ClickHouse/ClickHouse/issues/37221). [#37270](https://github.com/ClickHouse/ClickHouse/pull/37270) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix system.opentelemetry_span_log attribute.values alias to values instead of keys. [#37275](https://github.com/ClickHouse/ClickHouse/pull/37275) ([Aleksandr Razumov](https://github.com/ernado)).
* Fix possible deadlock in OvercommitTracker during logging. cc @alesapin @tavplubix Fixes [#37272](https://github.com/ClickHouse/ClickHouse/issues/37272). [#37299](https://github.com/ClickHouse/ClickHouse/pull/37299) ([Dmitry Novik](https://github.com/novikd)).
#### Bug Fix (user-visible misbehaviour in official stable or prestable release)

* Fix wrong result length of the substring function when `offset` and `length` are negative constants and `s` is not constant. [#33861](https://github.com/ClickHouse/ClickHouse/pull/33861) ([RogerYK](https://github.com/RogerYK)).
* ZSTD support for Arrow was accidentally not being built. This fixes [#35283](https://github.com/ClickHouse/ClickHouse/issues/35283). [#35486](https://github.com/ClickHouse/ClickHouse/pull/35486) ([Sean Lafferty](https://github.com/seanlaff)).
* Fix ALTER DROP COLUMN of a nested column with compact parts (i.e. `ALTER TABLE x DROP COLUMN n`, when there is a column `n.d`). [#35797](https://github.com/ClickHouse/ClickHouse/pull/35797) ([Azat Khuzhin](https://github.com/azat)).
* Fix insertion of complex JSONs with nested arrays to columns of type `Object`. [#36077](https://github.com/ClickHouse/ClickHouse/pull/36077) ([Anton Popov](https://github.com/CurtizJ)).
* Queries with aliases inside special operators returned a parsing error (was broken in 22.1). Example: `SELECT substring('test' AS t, 1, 1)`. [#36167](https://github.com/ClickHouse/ClickHouse/pull/36167) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix assertion in JOIN, close [#36199](https://github.com/ClickHouse/ClickHouse/issues/36199). [#36201](https://github.com/ClickHouse/ClickHouse/pull/36201) ([Vladimir C](https://github.com/vdimir)).
* Fix dictionary reload for `ClickHouseDictionarySource` if it contains scalar subqueries. [#36390](https://github.com/ClickHouse/ClickHouse/pull/36390) ([lthaooo](https://github.com/lthaooo)).
* Fix nullptr dereference in JOIN and COLUMNS matcher. This fixes [#36416](https://github.com/ClickHouse/ClickHouse/issues/36416). This is for https://github.com/ClickHouse/ClickHouse/pull/36417. [#36430](https://github.com/ClickHouse/ClickHouse/pull/36430) ([Amos Bird](https://github.com/amosbird)).
* Fix a bug in s3Cluster schema inference that led to not all data being read in SELECT from s3Cluster. The bug appeared in https://github.com/ClickHouse/ClickHouse/pull/35544. [#36434](https://github.com/ClickHouse/ClickHouse/pull/36434) ([Kruglov Pavel](https://github.com/Avogar)).
* The server might fail to start if it cannot resolve the hostname of an external ClickHouse dictionary. It's fixed. Fixes [#36451](https://github.com/ClickHouse/ClickHouse/issues/36451). [#36463](https://github.com/ClickHouse/ClickHouse/pull/36463) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix a bug in `RangeGenerator` demonstrated by the following code segment: `int main() { RangeGenerator g{1230, 100}; std::cout << g.totalRanges() << std::endl; int count = 0; while (g.nextRange()) ++count; std::cout << "count:" << count << std::endl; return 0; }`. [#36469](https://github.com/ClickHouse/ClickHouse/pull/36469) ([李扬](https://github.com/taiyang-li)).
* Fix clickhouse-benchmark json report results. [#36473](https://github.com/ClickHouse/ClickHouse/pull/36473) ([Tian Xinhui](https://github.com/xinhuitian)).
* Add missing enum values in system.session_log table. Closes [#36474](https://github.com/ClickHouse/ClickHouse/issues/36474). [#36480](https://github.com/ClickHouse/ClickHouse/pull/36480) ([Memo](https://github.com/Joeywzr)).
* Fix possible exception with an unknown packet from server in client. [#36481](https://github.com/ClickHouse/ClickHouse/pull/36481) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix usage of executable user-defined functions in GROUP BY. Previously, executable user-defined functions could not be used as expressions in GROUP BY. Closes [#36448](https://github.com/ClickHouse/ClickHouse/issues/36448). [#36486](https://github.com/ClickHouse/ClickHouse/pull/36486) ([Maksim Kita](https://github.com/kitaisreal)).
* Close [#33906](https://github.com/ClickHouse/ClickHouse/issues/33906). [#36489](https://github.com/ClickHouse/ClickHouse/pull/36489) ([awakeljw](https://github.com/awakeljw)).
* Fix hostname sanity checks for Keeper cluster configuration. Add `keeper_server.host_checks_enabled` config to enable/disable those checks. [#36492](https://github.com/ClickHouse/ClickHouse/pull/36492) ([Antonio Andelic](https://github.com/antonio2368)).
* Fix offset update in ReadBufferFromEncryptedFile, which could cause undefined behaviour. [#36493](https://github.com/ClickHouse/ClickHouse/pull/36493) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix potential error with literals in `WHERE` for join queries. Close [#36279](https://github.com/ClickHouse/ClickHouse/issues/36279). [#36542](https://github.com/ClickHouse/ClickHouse/pull/36542) ([Vladimir C](https://github.com/vdimir)).
* Fix `Missing column` exception which could happen while using `INTERPOLATE` with `ENGINE = MergeTree` table. [#36549](https://github.com/ClickHouse/ClickHouse/pull/36549) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Fix format crash when a default expression follows EPHEMERAL and is not a literal. Closes [#36618](https://github.com/ClickHouse/ClickHouse/issues/36618). [#36633](https://github.com/ClickHouse/ClickHouse/pull/36633) ([flynn](https://github.com/ucasfl)).
* Fix merges of wide parts with type `Object`. [#36637](https://github.com/ClickHouse/ClickHouse/pull/36637) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed parsing of query settings in `CREATE` query when engine is not specified. Fixes https://github.com/ClickHouse/ClickHouse/pull/34187#issuecomment-1103812419. [#36642](https://github.com/ClickHouse/ClickHouse/pull/36642) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix possible heap-use-after-free in schema inference. Closes [#36661](https://github.com/ClickHouse/ClickHouse/issues/36661). [#36679](https://github.com/ClickHouse/ClickHouse/pull/36679) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix server restart if cache configuration changed. [#36685](https://github.com/ClickHouse/ClickHouse/pull/36685) ([Kseniia Sumarokova](https://github.com/kssenii)).
* In the previous [PR](https://github.com/ClickHouse/ClickHouse/pull/36376), testing (stateless tests, flaky check (address, actions)) timed out; moreover, local testing could also trigger unstable system deadlocks. The problem still existed with the latest master source code. [#36697](https://github.com/ClickHouse/ClickHouse/pull/36697) ([Han Shukai](https://github.com/KinderRiven)).
* Fix server reload on port change (do not wait for current connections from query context). [#36700](https://github.com/ClickHouse/ClickHouse/pull/36700) ([Azat Khuzhin](https://github.com/azat)).
* Fix vertical merges in wide parts. Previously an exception `There is no column` could be thrown during merge. [#36707](https://github.com/ClickHouse/ClickHouse/pull/36707) ([Anton Popov](https://github.com/CurtizJ)).
* During the [test](https://s3.amazonaws.com/clickhouse-test-reports/36376/1cb1c7275cb53769ab826772db9b71361bb3e413/stress_test__thread__actions_/clickhouse-server.clean.log) in [PR](https://github.com/ClickHouse/ClickHouse/pull/36376), one cache class was initialized twice, which throws an exception. Although the cause of this problem is not clear, ClickHouse apparently has code paths that load a disk repeatedly, so this situation needs special handling. [#36737](https://github.com/ClickHouse/ClickHouse/pull/36737) ([Han Shukai](https://github.com/KinderRiven)).
* Fix a bug of `groupBitmapAndState`/`groupBitmapOrState`/`groupBitmapXorState` on distributed tables. [#36739](https://github.com/ClickHouse/ClickHouse/pull/36739) ([Zhang Yifan](https://github.com/zhangyifan27)).
* Fix timeouts in Hedged requests. A connection hang right after sending a remote query could lead to eternal waiting. [#36749](https://github.com/ClickHouse/ClickHouse/pull/36749) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix insertion to columns of type `Object` from multiple files, e.g. via table function `file` with globs. [#36762](https://github.com/ClickHouse/ClickHouse/pull/36762) ([Anton Popov](https://github.com/CurtizJ)).
* Fix some issues with async reads from remote filesystem which happened when reading low cardinality. [#36763](https://github.com/ClickHouse/ClickHouse/pull/36763) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix creation of tables with `flatten_nested = 0`. Previously unflattened `Nested` columns could be flattened after server restart. [#36803](https://github.com/ClickHouse/ClickHouse/pull/36803) ([Anton Popov](https://github.com/CurtizJ)).
* Fix incorrect cast in cached buffer from remote fs. [#36809](https://github.com/ClickHouse/ClickHouse/pull/36809) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Remove function `groupArraySorted` which has a bug. [#36822](https://github.com/ClickHouse/ClickHouse/pull/36822) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix firing in window view with hop window [#34044](https://github.com/ClickHouse/ClickHouse/issues/34044). [#36861](https://github.com/ClickHouse/ClickHouse/pull/36861) ([vxider](https://github.com/Vxider)).
* Fix `current_size` count in cache. [#36887](https://github.com/ClickHouse/ClickHouse/pull/36887) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix incorrect query result when doing constant aggregation. This fixes [#36728](https://github.com/ClickHouse/ClickHouse/issues/36728). [#36888](https://github.com/ClickHouse/ClickHouse/pull/36888) ([Amos Bird](https://github.com/amosbird)).
* Fix a bug in clickhouse-keeper which can lead to corrupted compressed log files in case of small load and restarts. [#36910](https://github.com/ClickHouse/ClickHouse/pull/36910) ([alesapin](https://github.com/alesapin)).
* Fix bugs when using multiple columns in WindowView, by adding converting actions to make it possible to call `writeIntoWindowView` with a slightly different schema. [#36928](https://github.com/ClickHouse/ClickHouse/pull/36928) ([vxider](https://github.com/Vxider)).
* Fix issue [#36671](https://github.com/ClickHouse/ClickHouse/issues/36671). [#36929](https://github.com/ClickHouse/ClickHouse/pull/36929) ([李扬](https://github.com/taiyang-li)).
* Fix getting stuck when dropping the source table in WindowView. Closes [#35678](https://github.com/ClickHouse/ClickHouse/issues/35678). [#36967](https://github.com/ClickHouse/ClickHouse/pull/36967) ([vxider](https://github.com/Vxider)).
* Fixed a logical error on `TRUNCATE` query in `Replicated` database. Fixes [#33747](https://github.com/ClickHouse/ClickHouse/issues/33747). [#36976](https://github.com/ClickHouse/ClickHouse/pull/36976) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix sending external tables data in HedgedConnections with max_parallel_replicas != 1. [#36981](https://github.com/ClickHouse/ClickHouse/pull/36981) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixed a problem with infs in `quantileTDigest`. Fixes [#32107](https://github.com/ClickHouse/ClickHouse/issues/32107). [#37021](https://github.com/ClickHouse/ClickHouse/pull/37021) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Fix LowCardinality->ArrowDictionary invalid output when the type of indexes is not UInt8. Closes [#36832](https://github.com/ClickHouse/ClickHouse/issues/36832). [#37043](https://github.com/ClickHouse/ClickHouse/pull/37043) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix in-order `GROUP BY` (`optimize_aggregation_in_order=1`) with `*Array` (`groupArrayArray`/...) aggregate functions. [#37046](https://github.com/ClickHouse/ClickHouse/pull/37046) ([Azat Khuzhin](https://github.com/azat)).
* Fixed performance degradation of some INSERT SELECT queries with implicit aggregation. Fixes [#36792](https://github.com/ClickHouse/ClickHouse/issues/36792). [#37047](https://github.com/ClickHouse/ClickHouse/pull/37047) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix optimize_aggregation_in_order with prefix GROUP BY and *Array aggregate functions. [#37050](https://github.com/ClickHouse/ClickHouse/pull/37050) ([Azat Khuzhin](https://github.com/azat)).
#### NO CL ENTRY

* NO CL ENTRY: 'Revert "Minor refactor to prefer C++ Standard Algorithms"'. [#36511](https://github.com/ClickHouse/ClickHouse/pull/36511) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Strict taskstats parser"'. [#36591](https://github.com/ClickHouse/ClickHouse/pull/36591) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Translate docs/zh/sql-reference/data-types/map.md"'. [#36594](https://github.com/ClickHouse/ClickHouse/pull/36594) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Revert "Update setting.md"'. [#36595](https://github.com/ClickHouse/ClickHouse/pull/36595) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* NO CL ENTRY: 'Documentation: Add a missing **ESTIMATE** in explain syntax'. [#36717](https://github.com/ClickHouse/ClickHouse/pull/36717) ([小蝌蚪](https://github.com/kayhaw)).
* NO CL ENTRY: '[Snyk] Security upgrade numpy from 1.16.6 to 1.22.2'. [#36729](https://github.com/ClickHouse/ClickHouse/pull/36729) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* NO CL ENTRY: 'Translate playground.md to Chinese'. [#36821](https://github.com/ClickHouse/ClickHouse/pull/36821) ([小蝌蚪](https://github.com/kayhaw)).
* NO CL ENTRY: 'Revert "Memory overcommit: continue query execution if memory is available"'. [#36858](https://github.com/ClickHouse/ClickHouse/pull/36858) ([alesapin](https://github.com/alesapin)).
* NO CL ENTRY: 'Revert "Revert "Memory overcommit: continue query execution if memory is available""'. [#36859](https://github.com/ClickHouse/ClickHouse/pull/36859) ([Dmitry Novik](https://github.com/novikd)).
* NO CL ENTRY: 'Revert "BLAKE3 hash function documentation"'. [#37092](https://github.com/ClickHouse/ClickHouse/pull/37092) ([Rich Raposa](https://github.com/rfraposa)).
* NO CL ENTRY: 'Revert "Remove height restrictions from the query div in play web tool."'. [#37261](https://github.com/ClickHouse/ClickHouse/pull/37261) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@ -170,6 +170,7 @@ Hierarchy of privileges:
- `SYSTEM FLUSH`
- `SYSTEM FLUSH DISTRIBUTED`
- `SYSTEM FLUSH LOGS`
- `CLUSTER` (see also `access_control_improvements.on_cluster_queries_require_cluster_grant` configuration directive)
- [INTROSPECTION](#grant-introspection)
- `addressToLine`
- `addressToLineWithInlines`
@ -551,6 +551,9 @@
if this setting is true the user B will see all rows, and if this setting is false the user B will see no rows.
By default this setting is false for compatibility with earlier access configurations. -->
<users_without_row_policies_can_read_rows>false</users_without_row_policies_can_read_rows>
<!-- By default, for backward compatibility ON CLUSTER queries ignore CLUSTER grant,
however you can change this behaviour by setting this to true -->
<on_cluster_queries_require_cluster_grant>false</on_cluster_queries_require_cluster_grant>
</access_control_improvements>

<!-- Default profile of settings. -->
@ -163,6 +163,10 @@ void AccessControl::setUpFromMainConfig(const Poco::Util::AbstractConfiguration
"access_control_improvements.users_without_row_policies_can_read_rows",
false /* false because we need to be compatible with earlier access configurations */));

setOnClusterQueriesRequireClusterGrant(config_.getBool(
"access_control_improvements.on_cluster_queries_require_cluster_grant",
false /* false because we need to be compatible with earlier access configurations */));

addStoragesFromMainConfig(config_, config_path_, get_zookeeper_function_);
}
@ -131,6 +131,10 @@ public:
void setEnabledUsersWithoutRowPoliciesCanReadRows(bool enable) { users_without_row_policies_can_read_rows = enable; }
bool isEnabledUsersWithoutRowPoliciesCanReadRows() const { return users_without_row_policies_can_read_rows; }

/// Require CLUSTER grant for ON CLUSTER queries.
void setOnClusterQueriesRequireClusterGrant(bool enable) { on_cluster_queries_require_cluster_grant = enable; }
bool doesOnClusterQueriesRequireClusterGrant() const { return on_cluster_queries_require_cluster_grant; }

UUID authenticate(const Credentials & credentials, const Poco::Net::IPAddress & address) const;
void setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config);

@ -188,6 +192,7 @@ private:
std::atomic_bool allow_plaintext_password = true;
std::atomic_bool allow_no_password = true;
std::atomic_bool users_without_row_policies_can_read_rows = false;
std::atomic_bool on_cluster_queries_require_cluster_grant = false;
};

}
@ -188,6 +188,8 @@ enum class AccessType
M(HIVE, "", GLOBAL, SOURCES) \
M(SOURCES, "", GROUP, ALL) \
\
M(CLUSTER, "", GLOBAL, ALL) /* ON CLUSTER queries */ \
\
M(ALL, "ALL PRIVILEGES", GROUP, NONE) /* full access */ \
M(NONE, "USAGE, NO PRIVILEGES", GROUP, NONE) /* no access */
@ -359,7 +359,7 @@ std::shared_ptr<const AccessRights> ContextAccess::getAccessRightsWithImplicit()

template <bool throw_if_denied, bool grant_option, typename... Args>
bool ContextAccess::checkAccessImplHelper(const AccessFlags & flags, const Args &... args) const
bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... args) const
{
auto access_granted = [&]
{

@ -379,6 +379,9 @@ bool ContextAccess::checkAccessImplHelper(const AccessFlags & flags, const Args
return false;
};

if (flags & AccessType::CLUSTER && !access_control->doesOnClusterQueriesRequireClusterGrant())
flags &= ~AccessType::CLUSTER;

if (!flags || is_full_access)
return access_granted();
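The new check above drops the CLUSTER bit from the requested flags when the server is configured not to require a CLUSTER grant, so the rest of the access check never sees it. A minimal standalone sketch of this mask-out pattern follows; the enum values and function are illustrative stand-ins, not ClickHouse's real access types:

#include <cstdint>
#include <iostream>

// Hypothetical privilege bits, for illustration only.
enum Privilege : uint32_t
{
    SELECT  = 1u << 0,
    INSERT  = 1u << 1,
    CLUSTER = 1u << 2,
};

// If the deployment does not require a CLUSTER grant, drop the bit
// before the generic check so legacy configurations keep working.
bool isGranted(uint32_t requested, uint32_t granted, bool cluster_grant_required)
{
    if ((requested & CLUSTER) && !cluster_grant_required)
        requested &= ~static_cast<uint32_t>(CLUSTER);

    if (requested == 0)
        return true; // nothing left to check => allowed

    return (requested & granted) == requested;
}

int main()
{
    uint32_t granted = SELECT; // user has only SELECT
    // ON CLUSTER query: allowed when the grant is not required, denied otherwise.
    std::cout << isGranted(SELECT | CLUSTER, granted, false) << '\n'; // 1
    std::cout << isGranted(SELECT | CLUSTER, granted, true) << '\n';  // 0
}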
@ -179,7 +179,7 @@ private:
bool checkAccessImpl(const AccessRightsElements & elements) const;

template <bool throw_if_denied, bool grant_option, typename... Args>
bool checkAccessImplHelper(const AccessFlags & flags, const Args &... args) const;
bool checkAccessImplHelper(AccessFlags flags, const Args &... args) const;

template <bool throw_if_denied, bool grant_option>
bool checkAccessImplHelper(const AccessRightsElement & element) const;
@ -93,37 +93,58 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
{
try
{
if (connected)
disconnect();

LOG_TRACE(log_wrapper.get(), "Connecting. Database: {}. User: {}{}{}",
default_database.empty() ? "(not specified)" : default_database,
user,
static_cast<bool>(secure) ? ". Secure" : "",
static_cast<bool>(compression) ? "" : ". Uncompressed");

if (static_cast<bool>(secure))
{
#if USE_SSL
socket = std::make_unique<Poco::Net::SecureStreamSocket>();

/// we resolve the ip when we open SecureStreamSocket, so to make Server Name Indication (SNI)
/// work we need to pass host name separately. It will be send into TLS Hello packet to let
/// the server know which host we want to talk with (single IP can process requests for multiple hosts using SNI).
static_cast<Poco::Net::SecureStreamSocket*>(socket.get())->setPeerHostName(host);
#else
throw Exception{"tcp_secure protocol is disabled because poco library was built without NetSSL support.", ErrorCodes::SUPPORT_IS_DISABLED};
#endif
}
else
{
socket = std::make_unique<Poco::Net::StreamSocket>();
}

current_resolved_address = DNSResolver::instance().resolveAddress(host, port);

auto addresses = DNSResolver::instance().resolveAddressList(host, port);
const auto & connection_timeout = static_cast<bool>(secure) ? timeouts.secure_connection_timeout : timeouts.connection_timeout;
socket->connect(*current_resolved_address, connection_timeout);

for (auto it = addresses.begin(); it != addresses.end();)
{
if (connected)
disconnect();

if (static_cast<bool>(secure))
{
#if USE_SSL
socket = std::make_unique<Poco::Net::SecureStreamSocket>();

/// we resolve the ip when we open SecureStreamSocket, so to make Server Name Indication (SNI)
/// work we need to pass host name separately. It will be send into TLS Hello packet to let
/// the server know which host we want to talk with (single IP can process requests for multiple hosts using SNI).
static_cast<Poco::Net::SecureStreamSocket*>(socket.get())->setPeerHostName(host);
#else
throw Exception{"tcp_secure protocol is disabled because poco library was built without NetSSL support.", ErrorCodes::SUPPORT_IS_DISABLED};
#endif
}
else
{
socket = std::make_unique<Poco::Net::StreamSocket>();
}

try
{
socket->connect(*it, connection_timeout);
current_resolved_address = *it;
break;
}
catch (Poco::Net::NetException &)
{
if (++it == addresses.end())
throw;
continue;
}
catch (Poco::TimeoutException &)
{
if (++it == addresses.end())
throw;
continue;
}
}

socket->setReceiveTimeout(timeouts.receive_timeout);
socket->setSendTimeout(timeouts.send_timeout);
socket->setNoDelay(true);
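The rewritten connect() walks every address returned by the resolver and falls back to the next candidate on a network or timeout error, rethrowing only when the last candidate fails. A minimal sketch of that fallback loop, with a placeholder tryConnect standing in for the Poco socket calls:

#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

struct ConnectError : std::runtime_error { using std::runtime_error::runtime_error; };

// Stand-in for a real connect attempt; assumed to throw ConnectError on failure.
void tryConnect(const std::string & address)
{
    if (address != "127.0.0.3")
        throw ConnectError("cannot connect to " + address);
}

// Try each resolved address in turn; rethrow only if the last one fails too.
std::string connectToAny(const std::vector<std::string> & addresses)
{
    for (auto it = addresses.begin(); it != addresses.end();)
    {
        try
        {
            tryConnect(*it);
            return *it; // success: remember which address worked
        }
        catch (const ConnectError &)
        {
            if (++it == addresses.end())
                throw; // all candidates exhausted
        }
    }
    throw ConnectError("no addresses to try");
}

int main()
{
    std::cout << connectToAny({"127.0.0.1", "127.0.0.2", "127.0.0.3"}) << '\n';
}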
@ -83,25 +83,8 @@ static void splitHostAndPort(const std::string & host_and_port, std::string & ou
throw Exception("Port must be numeric", ErrorCodes::BAD_ARGUMENTS);
}

static DNSResolver::IPAddresses resolveIPAddressImpl(const std::string & host)
static DNSResolver::IPAddresses hostByName(const std::string & host)
{
Poco::Net::IPAddress ip;

/// NOTE:
/// - Poco::Net::DNS::resolveOne(host) doesn't work for IP addresses like 127.0.0.2
/// - Poco::Net::IPAddress::tryParse() expect hex string for IPv6 (without brackets)
if (host.starts_with('['))
{
assert(host.ends_with(']'));
if (Poco::Net::IPAddress::tryParse(host.substr(1, host.size() - 2), ip))
return DNSResolver::IPAddresses(1, ip);
}
else
{
if (Poco::Net::IPAddress::tryParse(host, ip))
return DNSResolver::IPAddresses(1, ip);
}

/// Family: AF_UNSPEC
/// AI_ALL is required for checking if client is allowed to connect from an address
auto flags = Poco::Net::DNS::DNS_HINT_AI_V4MAPPED | Poco::Net::DNS::DNS_HINT_AI_ALL;

@ -131,6 +114,30 @@ static DNSResolver::IPAddresses resolveIPAddressImpl(const std::string & host)
return addresses;
}

static DNSResolver::IPAddresses resolveIPAddressImpl(const std::string & host)
{
Poco::Net::IPAddress ip;

/// NOTE:
/// - Poco::Net::DNS::resolveOne(host) doesn't work for IP addresses like 127.0.0.2
/// - Poco::Net::IPAddress::tryParse() expect hex string for IPv6 (without brackets)
if (host.starts_with('['))
{
assert(host.ends_with(']'));
if (Poco::Net::IPAddress::tryParse(host.substr(1, host.size() - 2), ip))
return DNSResolver::IPAddresses(1, ip);
}
else
{
if (Poco::Net::IPAddress::tryParse(host, ip))
return DNSResolver::IPAddresses(1, ip);
}

DNSResolver::IPAddresses addresses = hostByName(host);

return addresses;
}

static String reverseResolveImpl(const Poco::Net::IPAddress & address)
{
Poco::Net::SocketAddress sock_addr(address, 0);

@ -208,6 +215,26 @@ Poco::Net::SocketAddress DNSResolver::resolveAddress(const std::string & host, U
return Poco::Net::SocketAddress(impl->cache_host(host).front(), port);
}

std::vector<Poco::Net::SocketAddress> DNSResolver::resolveAddressList(const std::string & host, UInt16 port)
{
if (Poco::Net::IPAddress ip; Poco::Net::IPAddress::tryParse(host, ip))
return std::vector<Poco::Net::SocketAddress>{{ip, port}};

std::vector<Poco::Net::SocketAddress> addresses;

if (!impl->disable_cache)
addToNewHosts(host);

std::vector<Poco::Net::IPAddress> ips = impl->disable_cache ? hostByName(host) : impl->cache_host(host);
auto ips_end = std::unique(ips.begin(), ips.end());

addresses.reserve(ips_end - ips.begin());
for (auto ip = ips.begin(); ip != ips_end; ++ip)
addresses.emplace_back(*ip, port);

return addresses;
}

String DNSResolver::reverseResolve(const Poco::Net::IPAddress & address)
{
if (impl->disable_cache)
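One detail worth noting in resolveAddressList: std::unique removes only adjacent duplicates, which is enough here on the assumption that the resolver returns duplicate IPs next to each other. A small standalone sketch of the dedupe-and-pair-with-port step under that assumption:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main()
{
    // Pretend these came back from the resolver; duplicates are adjacent.
    std::vector<std::string> ips{"10.0.0.1", "10.0.0.1", "10.0.0.2"};
    uint16_t port = 9000;

    // std::unique collapses only adjacent duplicates; sort first if the
    // resolver may interleave them (assumption: ours does not).
    auto ips_end = std::unique(ips.begin(), ips.end());

    std::vector<std::pair<std::string, uint16_t>> addresses;
    addresses.reserve(ips_end - ips.begin());
    for (auto ip = ips.begin(); ip != ips_end; ++ip)
        addresses.emplace_back(*ip, port);

    for (const auto & [ip, p] : addresses)
        std::cout << ip << ':' << p << '\n'; // 10.0.0.1:9000, 10.0.0.2:9000
}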
@ -34,6 +34,8 @@ public:

Poco::Net::SocketAddress resolveAddress(const std::string & host, UInt16 port);

std::vector<Poco::Net::SocketAddress> resolveAddressList(const std::string & host, UInt16 port);

/// Accepts host IP and resolves its host name
String reverseResolve(const Poco::Net::IPAddress & address);
@ -276,7 +276,7 @@ DECLARE_AVX512F_SPECIFIC_CODE(
\
FUNCTION_HEADER \
\
AVX2_FUNCTION_SPECIFIC_ATTRIBUTE \
SSE42_FUNCTION_SPECIFIC_ATTRIBUTE \
name##SSE42 \
FUNCTION_BODY \
\
@ -1,11 +1,11 @@
#include <city.h>
#include <cstring>

#include <base/unaligned.h>
#include <base/types.h>
#include <base/unaligned.h>

#include "CompressedWriteBuffer.h"
#include <Compression/CompressionFactory.h>
#include "CompressedWriteBuffer.h"


namespace DB
@ -22,14 +22,29 @@ void CompressedWriteBuffer::nextImpl()
if (!offset())
return;

UInt32 compressed_size = 0;
size_t decompressed_size = offset();
UInt32 compressed_reserve_size = codec->getCompressedReserveSize(decompressed_size);
compressed_buffer.resize(compressed_reserve_size);
UInt32 compressed_size = codec->compress(working_buffer.begin(), decompressed_size, compressed_buffer.data());

CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(compressed_buffer.data(), compressed_size);
out.write(reinterpret_cast<const char *>(&checksum), CHECKSUM_SIZE);
out.write(compressed_buffer.data(), compressed_size);
if (out.available() > compressed_reserve_size + CHECKSUM_SIZE)
{
char * out_checksum_ptr = out.position();
char * out_compressed_ptr = out.position() + CHECKSUM_SIZE;
compressed_size = codec->compress(working_buffer.begin(), decompressed_size, out_compressed_ptr);

CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(out_compressed_ptr, compressed_size);
memcpy(out_checksum_ptr, reinterpret_cast<const char *>(&checksum), CHECKSUM_SIZE);
out.position() += CHECKSUM_SIZE + compressed_size;
}
else
{
compressed_buffer.resize(compressed_reserve_size);
compressed_size = codec->compress(working_buffer.begin(), decompressed_size, compressed_buffer.data());

CityHash_v1_0_2::uint128 checksum = CityHash_v1_0_2::CityHash128(compressed_buffer.data(), compressed_size);
out.write(reinterpret_cast<const char *>(&checksum), CHECKSUM_SIZE);
out.write(compressed_buffer.data(), compressed_size);
}
}

CompressedWriteBuffer::~CompressedWriteBuffer()

@ -37,10 +52,7 @@ CompressedWriteBuffer::~CompressedWriteBuffer()
finalize();
}

CompressedWriteBuffer::CompressedWriteBuffer(
WriteBuffer & out_,
CompressionCodecPtr codec_,
size_t buf_size)
CompressedWriteBuffer::CompressedWriteBuffer(WriteBuffer & out_, CompressionCodecPtr codec_, size_t buf_size)
: BufferWithOwnMemory<WriteBuffer>(buf_size), out(out_), codec(std::move(codec_))
{
}
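The rewritten nextImpl avoids an extra copy by compressing straight into the output buffer whenever it can hold the worst-case compressed size plus the checksum, and only falls back to the staging buffer otherwise. A simplified sketch of the two paths, with memcpy standing in for a real codec and a zeroed placeholder checksum:

#include <cstring>
#include <iostream>
#include <string>
#include <vector>

constexpr size_t CHECKSUM_SIZE = 16;

// Stand-ins for a real codec: a worst-case bound and "compression".
size_t compressBound(size_t n) { return n + 8; }
size_t compress(const char * src, size_t n, char * dst) { std::memcpy(dst, src, n); return n; }

void writeCompressed(std::vector<char> & out, size_t & pos, const std::string & chunk)
{
    size_t reserve = compressBound(chunk.size());
    if (out.size() - pos > reserve + CHECKSUM_SIZE)
    {
        // Fast path: compress directly into the destination, then patch
        // the checksum in front of the compressed bytes.
        char * checksum_ptr = out.data() + pos;
        char * compressed_ptr = checksum_ptr + CHECKSUM_SIZE;
        size_t compressed_size = compress(chunk.data(), chunk.size(), compressed_ptr);
        std::memset(checksum_ptr, 0, CHECKSUM_SIZE); // placeholder checksum
        pos += CHECKSUM_SIZE + compressed_size;
    }
    else
    {
        // Slow path: stage in a scratch buffer, then write it out piecewise.
        std::vector<char> scratch(reserve);
        size_t compressed_size = compress(chunk.data(), chunk.size(), scratch.data());
        (void)compressed_size; // ... checksum + scratch would be appended here ...
    }
}

int main()
{
    std::vector<char> out(1024);
    size_t pos = 0;
    writeCompressed(out, pos, "hello");
    std::cout << pos << '\n'; // 16 + 5 = 21
}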
@ -1,7 +1,6 @@
#pragma once

#include <vector>
#include <string>


namespace DB
@ -13,6 +13,7 @@
#include <Functions/castTypeToEither.h>

#include <Common/config.h>
#include <Common/TargetSpecific.h>

#if USE_EMBEDDED_COMPILER
# pragma GCC diagnostic push

@ -41,11 +42,31 @@ struct UnaryOperationImpl
using ArrayA = typename ColVecA::Container;
using ArrayC = typename ColVecC::Container;

static void NO_INLINE vector(const ArrayA & a, ArrayC & c)
MULTITARGET_FUNCTION_WRAPPER_AVX2_SSE42(vectorImpl,
MULTITARGET_FH(
static void NO_INLINE), /*vectorImpl*/ MULTITARGET_FB((const ArrayA & a, ArrayC & c) /// NOLINT
{
size_t size = a.size();
for (size_t i = 0; i < size; ++i)
c[i] = Op::apply(a[i]);
}))

static void NO_INLINE vector(const ArrayA & a, ArrayC & c)
{
#if USE_MULTITARGET_CODE
if (isArchSupported(TargetArch::AVX2))
{
vectorImplAVX2(a, c);
return;
}
else if (isArchSupported(TargetArch::SSE42))
{
vectorImplSSE42(a, c);
return;
}
#endif

vectorImpl(a, c);
}

static void constant(A a, ResultType & c)

@ -58,11 +79,31 @@ struct UnaryOperationImpl
template <typename Op>
struct FixedStringUnaryOperationImpl
{
static void NO_INLINE vector(const ColumnFixedString::Chars & a, ColumnFixedString::Chars & c)
MULTITARGET_FUNCTION_WRAPPER_AVX2_SSE42(vectorImpl,
MULTITARGET_FH(
static void NO_INLINE), /*vectorImpl*/ MULTITARGET_FB((const ColumnFixedString::Chars & a, ColumnFixedString::Chars & c) /// NOLINT
{
size_t size = a.size();
for (size_t i = 0; i < size; ++i)
c[i] = Op::apply(a[i]);
}))

static void NO_INLINE vector(const ColumnFixedString::Chars & a, ColumnFixedString::Chars & c)
{
#if USE_MULTITARGET_CODE
if (isArchSupported(TargetArch::AVX2))
{
vectorImplAVX2(a, c);
return;
}
else if (isArchSupported(TargetArch::SSE42))
{
vectorImplSSE42(a, c);
return;
}
#endif

vectorImpl(a, c);
}
};
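The MULTITARGET_* macros above compile the same loop body several times with different target attributes and pick the best supported variant at run time. A hand-written equivalent of that dispatch, using the GCC/Clang target attribute and __builtin_cpu_supports (so this sketch is specific to GCC/Clang on x86):

#include <cstddef>
#include <iostream>
#include <vector>

__attribute__((target("avx2")))
void negateImplAVX2(const std::vector<int> & a, std::vector<int> & c)
{
    for (size_t i = 0; i < a.size(); ++i)
        c[i] = -a[i]; // the compiler may auto-vectorize this with AVX2
}

__attribute__((target("sse4.2")))
void negateImplSSE42(const std::vector<int> & a, std::vector<int> & c)
{
    for (size_t i = 0; i < a.size(); ++i)
        c[i] = -a[i];
}

void negateImpl(const std::vector<int> & a, std::vector<int> & c)
{
    for (size_t i = 0; i < a.size(); ++i)
        c[i] = -a[i]; // portable fallback
}

void negate(const std::vector<int> & a, std::vector<int> & c)
{
    // Dispatch based on the CPU we are actually running on.
    if (__builtin_cpu_supports("avx2"))
        return negateImplAVX2(a, c);
    if (__builtin_cpu_supports("sse4.2"))
        return negateImplSSE42(a, c);
    negateImpl(a, c);
}

int main()
{
    std::vector<int> a{1, 2, 3}, c(3);
    negate(a, c);
    std::cout << c[0] << ' ' << c[1] << ' ' << c[2] << '\n'; // -1 -2 -3
}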
@ -6,6 +6,7 @@
#include <Access/AccessControl.h>
#include <Access/CachedAccessChecking.h>
#include <Access/ContextAccess.h>
#include <Access/EnabledRolesInfo.h>
#include <Access/Role.h>
#include <Access/RolesOrUsersSet.h>
#include <Access/User.h>

@ -147,13 +148,22 @@ std::vector<AccessEntityPtr> InterpreterShowGrantsQuery::getEntities() const
CachedAccessChecking show_roles(access, AccessType::SHOW_ROLES);
bool throw_if_access_denied = !show_query.for_roles->all;

auto current_user = access->getUser();
auto roles_info = access->getRolesInfo();

std::vector<AccessEntityPtr> entities;
for (const auto & id : ids)
{
auto entity = access_control.tryRead(id);
if (!entity)
continue;
if ((id == access->getUserID() /* Any user can see his own grants */)

bool is_current_user = (id == access->getUserID());
bool is_enabled_or_granted_role = entity->isTypeOf<Role>()
&& ((current_user && current_user->granted_roles.isGranted(id)) || roles_info->enabled_roles.contains(id));

if ((is_current_user /* Any user can see his own grants */)
|| (is_enabled_or_granted_role /* and grants from the granted roles */)
|| (entity->isTypeOf<User>() && show_users.checkAccess(throw_if_access_denied))
|| (entity->isTypeOf<Role>() && show_roles.checkAccess(throw_if_access_denied)))
entities.push_back(entity);
@ -2358,8 +2358,11 @@ void InterpreterSelectQuery::executeTotalsAndHaving(
{
const Settings & settings = context->getSettingsRef();

const auto & header_before = query_plan.getCurrentDataStream().header;

auto totals_having_step = std::make_unique<TotalsHavingStep>(
query_plan.getCurrentDataStream(),
getAggregatesMask(header_before, query_analyzer->aggregates()),
overflow_row,
expression,
has_having ? getSelectQuery().having()->getColumnName() : "",
@ -82,6 +82,10 @@ BlockIO executeDDLQueryOnCluster(const ASTPtr & query_ptr_, ContextPtr context,
}

query->cluster = context->getMacros()->expand(query->cluster);

/// TODO: support per-cluster grant
context->checkAccess(AccessType::CLUSTER);

ClusterPtr cluster = params.cluster ? params.cluster : context->getCluster(query->cluster);
DDLWorker & ddl_worker = context->getDDLWorker();
@ -1,7 +1,7 @@
#include <Processors/QueryPlan/TotalsHavingStep.h>
#include <Processors/Transforms/DistinctTransform.h>
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <Processors/Transforms/TotalsHavingTransform.h>
#include <QueryPipeline/QueryPipelineBuilder.h>
#include <Interpreters/ExpressionActions.h>
#include <IO/Operators.h>
#include <Common/JSONBuilder.h>

@ -27,6 +27,7 @@ static ITransformingStep::Traits getTraits(bool has_filter)

TotalsHavingStep::TotalsHavingStep(
const DataStream & input_stream_,
const ColumnsMask & aggregates_mask_,
bool overflow_row_,
const ActionsDAGPtr & actions_dag_,
const std::string & filter_column_,

@ -41,8 +42,10 @@ TotalsHavingStep::TotalsHavingStep(
actions_dag_.get(),
filter_column_,
remove_filter_,
final_),
final_,
aggregates_mask_),
getTraits(!filter_column_.empty()))
, aggregates_mask(aggregates_mask_)
, overflow_row(overflow_row_)
, actions_dag(actions_dag_)
, filter_column_name(filter_column_)

@ -59,6 +62,7 @@ void TotalsHavingStep::transformPipeline(QueryPipelineBuilder & pipeline, const

auto totals_having = std::make_shared<TotalsHavingTransform>(
pipeline.getHeader(),
aggregates_mask,
overflow_row,
expression_actions,
filter_column_name,
@ -1,5 +1,6 @@
#pragma once
#include <Processors/QueryPlan/ITransformingStep.h>
#include <Processors/Transforms/finalizeChunk.h>

namespace DB
{

@ -15,6 +16,7 @@ class TotalsHavingStep : public ITransformingStep
public:
TotalsHavingStep(
const DataStream & input_stream_,
const ColumnsMask & aggregates_mask_,
bool overflow_row_,
const ActionsDAGPtr & actions_dag_,
const std::string & filter_column_,

@ -33,6 +35,7 @@ public:
const ActionsDAGPtr & getActions() const { return actions_dag; }

private:
const ColumnsMask aggregates_mask;
bool overflow_row;
ActionsDAGPtr actions_dag;
String filter_column_name;
@ -38,7 +38,7 @@ AggregatingInOrderTransform::AggregatingInOrderTransform(
, variants(*many_data->variants[current_variant])
{
/// We won't finalize states in order to merge same states (generated due to multi-thread execution) in AggregatingSortedTransform
res_header = params->getCustomHeader(false);
res_header = params->getCustomHeader(/* final_= */ false);

for (size_t i = 0; i < group_by_info->order_key_prefix_descr.size(); ++i)
{

@ -310,5 +310,23 @@ void AggregatingInOrderTransform::generate()
need_generate = false;
}

FinalizeAggregatedTransform::FinalizeAggregatedTransform(Block header, AggregatingTransformParamsPtr params_)
: ISimpleTransform({std::move(header)}, {params_->getHeader()}, true)
, params(params_)
, aggregates_mask(getAggregatesMask(params->getHeader(), params->params.aggregates))
{
}

void FinalizeAggregatedTransform::transform(Chunk & chunk)
{
if (params->final)
finalizeChunk(chunk, aggregates_mask);
else if (!chunk.getChunkInfo())
{
auto info = std::make_shared<AggregatedChunkInfo>();
chunk.setChunkInfo(std::move(info));
}
}

}
@ -4,7 +4,7 @@
#include <Interpreters/Aggregator.h>
#include <Processors/ISimpleTransform.h>
#include <Processors/Transforms/AggregatingTransform.h>
#include <Processors/Transforms/TotalsHavingTransform.h>
#include <Processors/Transforms/finalizeChunk.h>

namespace DB
{

@ -90,25 +90,14 @@ private:
class FinalizeAggregatedTransform : public ISimpleTransform
{
public:
FinalizeAggregatedTransform(Block header, AggregatingTransformParamsPtr params_)
: ISimpleTransform({std::move(header)}, {params_->getHeader()}, true)
, params(params_) {}

void transform(Chunk & chunk) override
{
if (params->final)
finalizeChunk(chunk);
else if (!chunk.getChunkInfo())
{
auto info = std::make_shared<AggregatedChunkInfo>();
chunk.setChunkInfo(std::move(info));
}
}
FinalizeAggregatedTransform(Block header, AggregatingTransformParamsPtr params_);

void transform(Chunk & chunk) override;
String getName() const override { return "FinalizeAggregatedTransform"; }

private:
AggregatingTransformParamsPtr params;
ColumnsMask aggregates_mask;
};
@ -12,6 +12,7 @@ CubeTransform::CubeTransform(Block header, AggregatingTransformParamsPtr params_
: IAccumulatingTransform(std::move(header), params_->getHeader())
, params(std::move(params_))
, keys(params->params.keys)
, aggregates_mask(getAggregatesMask(params->getHeader(), params->params.aggregates))
{
if (keys.size() >= 8 * sizeof(mask))
throw Exception("Too many keys are used for CubeTransform.", ErrorCodes::LOGICAL_ERROR);

@ -73,7 +74,7 @@ Chunk CubeTransform::generate()
cube_chunk = merge(std::move(chunks), false);
}

finalizeChunk(gen_chunk);
finalizeChunk(gen_chunk, aggregates_mask);
return gen_chunk;
}
@ -1,6 +1,7 @@
#pragma once
#include <Processors/IInflatingTransform.h>
#include <Processors/Transforms/AggregatingTransform.h>
#include <Processors/Transforms/finalizeChunk.h>

namespace DB

@ -20,7 +21,8 @@ protected:

private:
AggregatingTransformParamsPtr params;
ColumnNumbers keys;
const ColumnNumbers keys;
const ColumnsMask aggregates_mask;

Chunks consumed_chunks;
Chunk cube_chunk;
@ -8,6 +8,7 @@ RollupTransform::RollupTransform(Block header, AggregatingTransformParamsPtr par
: IAccumulatingTransform(std::move(header), params_->getHeader())
, params(std::move(params_))
, keys(params->params.keys)
, aggregates_mask(getAggregatesMask(params->getHeader(), params->params.aggregates))
{
}

@ -56,7 +57,7 @@ Chunk RollupTransform::generate()
rollup_chunk = merge(std::move(chunks), false);
}

finalizeChunk(gen_chunk);
finalizeChunk(gen_chunk, aggregates_mask);
return gen_chunk;
}
@ -1,6 +1,7 @@
#pragma once
#include <Processors/IAccumulatingTransform.h>
#include <Processors/Transforms/AggregatingTransform.h>
#include <Processors/Transforms/finalizeChunk.h>

namespace DB
{

@ -19,7 +20,9 @@ protected:

private:
AggregatingTransformParamsPtr params;
ColumnNumbers keys;
const ColumnNumbers keys;
const ColumnsMask aggregates_mask;

Chunks consumed_chunks;
Chunk rollup_chunk;
size_t last_removed_key = 0;
@ -17,33 +17,21 @@ namespace ErrorCodes
extern const int ILLEGAL_COLUMN;
}

void finalizeChunk(Chunk & chunk)
{
auto num_rows = chunk.getNumRows();
auto columns = chunk.detachColumns();

for (auto & column : columns)
if (typeid_cast<const ColumnAggregateFunction *>(column.get()))
column = ColumnAggregateFunction::convertToValues(IColumn::mutate(std::move(column)));

chunk.setColumns(std::move(columns), num_rows);
}

void finalizeBlock(Block & block)
static void finalizeBlock(Block & block, const ColumnsMask & aggregates_mask)
{
for (size_t i = 0; i < block.columns(); ++i)
{
ColumnWithTypeAndName & current = block.getByPosition(i);
const DataTypeAggregateFunction * unfinalized_type = typeid_cast<const DataTypeAggregateFunction *>(current.type.get());
if (!aggregates_mask[i])
continue;

if (unfinalized_type)
ColumnWithTypeAndName & current = block.getByPosition(i);
const DataTypeAggregateFunction & unfinalized_type = typeid_cast<const DataTypeAggregateFunction &>(*current.type);

current.type = unfinalized_type.getReturnType();
if (current.column)
{
current.type = unfinalized_type->getReturnType();
if (current.column)
{
auto mut_column = IColumn::mutate(std::move(current.column));
current.column = ColumnAggregateFunction::convertToValues(std::move(mut_column));
}
auto mut_column = IColumn::mutate(std::move(current.column));
current.column = ColumnAggregateFunction::convertToValues(std::move(mut_column));
}
}
}

@ -53,10 +41,11 @@ Block TotalsHavingTransform::transformHeader(
const ActionsDAG * expression,
const std::string & filter_column_name,
bool remove_filter,
bool final)
bool final,
const ColumnsMask & aggregates_mask)
{
if (final)
finalizeBlock(block);
finalizeBlock(block, aggregates_mask);

if (expression)
{

@ -70,6 +59,7 @@ Block TotalsHavingTransform::transformHeader(

TotalsHavingTransform::TotalsHavingTransform(
const Block & header,
const ColumnsMask & aggregates_mask_,
bool overflow_row_,
const ExpressionActionsPtr & expression_,
const std::string & filter_column_,

@ -77,7 +67,8 @@ TotalsHavingTransform::TotalsHavingTransform(
TotalsMode totals_mode_,
double auto_include_threshold_,
bool final_)
: ISimpleTransform(header, transformHeader(header, expression_ ? &expression_->getActionsDAG() : nullptr, filter_column_, remove_filter_, final_), true)
: ISimpleTransform(header, transformHeader(header, expression_ ? &expression_->getActionsDAG() : nullptr, filter_column_, remove_filter_, final_, aggregates_mask_), true)
, aggregates_mask(aggregates_mask_)
, overflow_row(overflow_row_)
, expression(expression_)
, filter_column_name(filter_column_)

@ -87,7 +78,7 @@ TotalsHavingTransform::TotalsHavingTransform(
, final(final_)
{
finalized_header = getInputPort().getHeader();
finalizeBlock(finalized_header);
finalizeBlock(finalized_header, aggregates_mask);

/// Port for Totals.
if (expression)

@ -179,7 +170,7 @@ void TotalsHavingTransform::transform(Chunk & chunk)

auto finalized = chunk.clone();
if (final)
finalizeChunk(finalized);
finalizeChunk(finalized, aggregates_mask);

total_keys += finalized.getNumRows();

@ -300,7 +291,7 @@ void TotalsHavingTransform::prepareTotals()
}

totals = Chunk(std::move(current_totals), 1);
finalizeChunk(totals);
finalizeChunk(totals, aggregates_mask);

if (expression)
{
@ -1,6 +1,7 @@
#pragma once

#include <Processors/ISimpleTransform.h>
#include <Processors/Transforms/finalizeChunk.h>
#include <Common/Arena.h>

namespace DB

@ -25,6 +26,7 @@ class TotalsHavingTransform : public ISimpleTransform
public:
TotalsHavingTransform(
const Block & header,
const ColumnsMask & aggregates_mask_,
bool overflow_row_,
const ExpressionActionsPtr & expression_,
const std::string & filter_column_,

@ -40,7 +42,7 @@ public:
Status prepare() override;
void work() override;

static Block transformHeader(Block block, const ActionsDAG * expression, const std::string & filter_column_name, bool remove_filter, bool final);
static Block transformHeader(Block block, const ActionsDAG * expression, const std::string & filter_column_name, bool remove_filter, bool final, const ColumnsMask & aggregates_mask);

protected:
void transform(Chunk & chunk) override;

@ -54,6 +56,7 @@ private:
void prepareTotals();

/// Params
const ColumnsMask aggregates_mask;
bool overflow_row;
ExpressionActionsPtr expression;
String filter_column_name;

@ -77,6 +80,4 @@ private:
MutableColumns current_totals;
};

void finalizeChunk(Chunk & chunk);

}
32
src/Processors/Transforms/finalizeChunk.cpp
Normal file

@ -0,0 +1,32 @@
#include <Processors/Transforms/finalizeChunk.h>
#include <Columns/ColumnAggregateFunction.h>

namespace DB
{

ColumnsMask getAggregatesMask(const Block & header, const AggregateDescriptions & aggregates)
{
ColumnsMask mask(header.columns());
for (const auto & aggregate : aggregates)
mask[header.getPositionByName(aggregate.column_name)] = true;
return mask;
}

void finalizeChunk(Chunk & chunk, const ColumnsMask & aggregates_mask)
{
auto num_rows = chunk.getNumRows();
auto columns = chunk.detachColumns();

for (size_t i = 0; i < columns.size(); ++i)
{
if (!aggregates_mask[i])
continue;

auto & column = columns[i];
column = ColumnAggregateFunction::convertToValues(IColumn::mutate(std::move(column)));
}

chunk.setColumns(std::move(columns), num_rows);
}

}
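getAggregatesMask records which column positions hold aggregate-function states, and finalizeChunk then converts only those positions. A small self-contained sketch of the mask-driven pass, with strings and ints standing in for Block headers and columns:

#include <iostream>
#include <string>
#include <vector>

using ColumnsMask = std::vector<bool>;

// Build a mask of positions whose names appear in `aggregates`
// (a stand-in for getAggregatesMask over a Block header).
ColumnsMask buildMask(const std::vector<std::string> & names,
                      const std::vector<std::string> & aggregates)
{
    ColumnsMask mask(names.size(), false);
    for (const auto & agg : aggregates)
        for (size_t i = 0; i < names.size(); ++i)
            if (names[i] == agg)
                mask[i] = true;
    return mask;
}

int main()
{
    std::vector<std::string> header{"key", "sum(x)", "value"};
    ColumnsMask mask = buildMask(header, {"sum(x)"});

    std::vector<int> columns{10, 20, 30};
    for (size_t i = 0; i < columns.size(); ++i)
    {
        if (!mask[i])
            continue; // leave non-aggregate columns untouched
        columns[i] = -columns[i]; // stand-in for convertToValues()
    }
    std::cout << columns[0] << ' ' << columns[1] << ' ' << columns[2] << '\n'; // 10 -20 30
}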
20
src/Processors/Transforms/finalizeChunk.h
Normal file

@ -0,0 +1,20 @@
#pragma once

#include <Interpreters/AggregateDescription.h>
#include <Core/Block.h>
#include <Processors/Chunk.h>
#include <vector>

namespace DB
{

using ColumnsMask = std::vector<bool>;

ColumnsMask getAggregatesMask(const Block & header, const AggregateDescriptions & aggregates);

/// Convert ColumnAggregateFunction to real values.
///
/// @param aggregates_mask columns to convert (see getAggregatesMask())
void finalizeChunk(Chunk & chunk, const ColumnsMask & aggregates_mask);

}
@ -68,7 +68,7 @@ void MergeTreeIndexReader::seek(size_t mark)
MergeTreeIndexGranulePtr MergeTreeIndexReader::read()
{
auto granule = index->createIndexGranule();
granule->deserializeBinary(*stream->data_buffer, version);
granule->deserializeBinary(*stream->getDataBuffer(), version);
return granule;
}
@ -17,25 +17,35 @@ namespace ErrorCodes
MergeTreeReaderStream::MergeTreeReaderStream(
DiskPtr disk_,
const String & path_prefix_, const String & data_file_extension_, size_t marks_count_,
const MarkRanges & all_mark_ranges,
const MergeTreeReaderSettings & settings,
const MarkRanges & all_mark_ranges_,
const MergeTreeReaderSettings & settings_,
MarkCache * mark_cache_,
UncompressedCache * uncompressed_cache, size_t file_size_,
UncompressedCache * uncompressed_cache_, size_t file_size_,
const MergeTreeIndexGranularityInfo * index_granularity_info_,
const ReadBufferFromFileBase::ProfileCallback & profile_callback, clockid_t clock_type,
const ReadBufferFromFileBase::ProfileCallback & profile_callback_, clockid_t clock_type_,
bool is_low_cardinality_dictionary_)
: disk(std::move(disk_))
: settings(settings_)
, profile_callback(profile_callback_)
, clock_type(clock_type_)
, all_mark_ranges(all_mark_ranges_)
, file_size(file_size_)
, uncompressed_cache(uncompressed_cache_)
, disk(std::move(disk_))
, path_prefix(path_prefix_)
, data_file_extension(data_file_extension_)
, is_low_cardinality_dictionary(is_low_cardinality_dictionary_)
, marks_count(marks_count_)
, file_size(file_size_)
, mark_cache(mark_cache_)
, save_marks_in_cache(settings.save_marks_in_cache)
, index_granularity_info(index_granularity_info_)
, marks_loader(disk, mark_cache, index_granularity_info->getMarksFilePath(path_prefix),
marks_count, *index_granularity_info, save_marks_in_cache)
marks_count, *index_granularity_info, save_marks_in_cache) {}

void MergeTreeReaderStream::init()
{
if (initialized)
return;
initialized = true;
/// Compute the size of the buffer.
size_t max_mark_range_bytes = 0;
size_t sum_mark_range_bytes = 0;

@ -192,6 +202,7 @@ size_t MergeTreeReaderStream::getRightOffset(size_t right_mark_non_included)

void MergeTreeReaderStream::seekToMark(size_t index)
{
init();
MarkInCompressedFile mark = marks_loader.getMark(index);

try

@ -214,6 +225,7 @@ void MergeTreeReaderStream::seekToMark(size_t index)

void MergeTreeReaderStream::seekToStart()
{
init();
try
{
compressed_data_buffer->seek(0, 0);

@ -236,6 +248,7 @@ void MergeTreeReaderStream::adjustRightMark(size_t right_mark)
* read from stream, but we must update last_right_offset only if it is bigger than
* the last one to avoid redundantly cancelling prefetches.
*/
init();
auto right_offset = getRightOffset(right_mark);
if (!right_offset)
{

@ -255,4 +268,16 @@ void MergeTreeReaderStream::adjustRightMark(size_t right_mark)
}
}

ReadBuffer * MergeTreeReaderStream::getDataBuffer()
{
init();
return data_buffer;
}

CompressedReadBufferBase * MergeTreeReaderStream::getCompressedDataBuffer()
{
init();
return compressed_data_buffer;
}

}
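The stream now defers buffer construction to the first access: every public entry point funnels through an idempotent init() guarded by a flag, so the constructor stays cheap. A minimal sketch of that lazy-initialization pattern (single-threaded, as the reader stream is used from one thread; a std::once_flag would be needed for concurrent use):

#include <iostream>
#include <memory>
#include <string>

class LazyStream
{
public:
    const std::string * getDataBuffer()
    {
        init();
        return buffer.get();
    }

private:
    void init()
    {
        if (initialized)
            return;
        initialized = true;
        // Expensive work (opening files, sizing buffers) is deferred
        // until somebody actually reads from the stream.
        buffer = std::make_unique<std::string>("data");
        std::cout << "initialized\n";
    }

    bool initialized = false;
    std::unique_ptr<std::string> buffer;
};

int main()
{
    LazyStream s;                            // constructor stays cheap
    std::cout << *s.getDataBuffer() << '\n'; // "initialized" printed once
    std::cout << *s.getDataBuffer() << '\n'; // no re-initialization
}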
@ -1,4 +1,5 @@
#pragma once
#include <tuple>
#include <Storages/MarkCache.h>
#include <Storages/MergeTree/MarkRange.h>
#include <Storages/MergeTree/MergeTreeData.h>

@ -37,12 +38,20 @@ public:
*/
void adjustRightMark(size_t right_mark);

ReadBuffer * data_buffer;
CompressedReadBufferBase * compressed_data_buffer;
ReadBuffer * getDataBuffer();
CompressedReadBufferBase * getCompressedDataBuffer();

private:
void init();
size_t getRightOffset(size_t right_mark_non_included);

const MergeTreeReaderSettings settings;
const ReadBufferFromFileBase::ProfileCallback profile_callback;
clockid_t clock_type;
const MarkRanges all_mark_ranges;
size_t file_size;
UncompressedCache * uncompressed_cache;

DiskPtr disk;
std::string path_prefix;
std::string data_file_extension;

@ -50,10 +59,13 @@ private:
bool is_low_cardinality_dictionary = false;

size_t marks_count;
size_t file_size;

ReadBuffer * data_buffer;
CompressedReadBufferBase * compressed_data_buffer;
MarkCache * mark_cache;
bool save_marks_in_cache;
bool initialized = false;

std::optional<size_t> last_right_offset;
@ -221,7 +221,7 @@ static ReadBuffer * getStream(
else if (seek_to_mark)
stream.seekToMark(from_mark);

return stream.data_buffer;
return stream.getDataBuffer();
}

void MergeTreeReaderWide::deserializePrefix(
@ -19,6 +19,7 @@ const char * auto_contributors[] {
"Albert Kidrachev",
"Alberto",
"Aleksandr Karo",
"Aleksandr Razumov",
"Aleksandr Shalimov",
"Aleksandra (Ася)",
"Aleksandrov Vladimir",

@ -179,6 +180,7 @@ const char * auto_contributors[] {
"Boris Granveaud",
"Boris Kuschel",
"Bowen Masco",
"Brandon",
"Braulio Valdivielso",
"Brendan Cox",
"Brett Hoerner",

@ -205,6 +207,7 @@ const char * auto_contributors[] {
"CurtizJ",
"DF5HSE",
"DIAOZHAFENG",
"Dale McDiarmid",
"Dan Roscigno",
"Daniel Bershatsky",
"Daniel Dao",

@ -392,6 +395,7 @@ const char * auto_contributors[] {
"João Figueiredo",
"Julian Gilyadov",
"Julian Zhou",
"Julio Jimenez",
"Justin Hilliard",
"Kang Liu",
"Karl Pietrzak",

@ -425,6 +429,7 @@ const char * auto_contributors[] {
"LAL2211",
"LB",
"LIJINGBO",
"Ladislav Snizek",
"Larry Luo",
"Lars Eidnes",
"Latysheva Alexandra",

@ -451,6 +456,7 @@ const char * auto_contributors[] {
"Maksim Kita",
"Malte",
"Marat IDRISOV",
"Marcelo Rodriguez",
"Marek Vavrusa",
"Marek Vavruša",
"Marek Vavruša",

@ -510,6 +516,7 @@ const char * auto_contributors[] {
"Mike Kot",
"Mikhail",
"Mikhail Andreev",
"Mikhail Artemenko",
"Mikhail Cheshkov",
"Mikhail Fandyushin",
"Mikhail Filimonov",

@ -615,6 +622,7 @@ const char * auto_contributors[] {
"Philippe Ombredanne",
"Potya",
"Pradeep Chhetri",
"Prashant Shahi",
"Pxl",
"Pysaoke",
"Quid37",

@ -652,6 +660,7 @@ const char * auto_contributors[] {
"Russ Frank",
"Ruzal Ibragimov",
"Ryad ZENINE",
"Ryadh DAHIMENE",
"S.M.A. Djawadi",
"Saad Ur Rahman",
"Sabyanin Maxim",

@ -661,6 +670,7 @@ const char * auto_contributors[] {
"Samuel Chou",
"Saulius Valatka",
"Sean Haynes",
"Sean Lafferty",
"Serg Kulakov",
"Serge Rider",
"Sergei Bocharov",

@ -677,6 +687,7 @@ const char * auto_contributors[] {
"Sergey Mirvoda",
"Sergey Ryzhkov",
"Sergey Shtykov",
"Sergey Tulentsev",
"Sergey V. Galtsev",
"Sergey Zaikin",
"Sergi Almacellas Abellana",

@ -727,6 +738,7 @@ const char * auto_contributors[] {
"The-Alchemist",
"Thom O'Connor",
"Thomas Berdy",
"Tian Xinhui",
"Tiaonmmn",
"Tigran Khudaverdyan",
"Timur Magomedov",

@ -804,11 +816,13 @@ const char * auto_contributors[] {
"Weiqing Xu",
"William Shallum",
"Winter Zhang",
"XenoAmess",
"Xianda Ke",
"Xiang Zhou",
"Xin Wang",
"Xudong Zhang",
"Y Lu",
"Yakko Majuri",
"Yakov Olkhovskiy",
"Yangkuan Liu",
"Yatian Xu",

@ -821,6 +835,7 @@ const char * auto_contributors[] {
"Yiğit Konur",
"Yohann Jardin",
"Yong Wang",
"Yong-Hao Zou",
"Youenn Lebras",
"Yuntao Wu",
"Yuri Dyachenko",

@ -884,6 +899,7 @@ const char * auto_contributors[] {
"benbiti",
"bgranvea",
"bharatnc",
"bkuschel",
"blazerer",
"bluebirddm",
"bo zeng",

@ -936,6 +952,7 @@ const char * auto_contributors[] {
"dmi-feo",
"dmitrii",
"dmitriiut",
"dmitriy",
"dmitry kuzmin",
"dongyifeng",
"eaxdev",

@ -986,9 +1003,13 @@ const char * auto_contributors[] {
"grantovsky",
"gulige",
"guoleiyi",
"guomaolin",
"guov100",
"guykohen",
"gyuton",
"hanqf-git",
"hao.he",
"hardstep33",
"hchen9",
"hcz",
"heleihelei",

@ -997,6 +1018,7 @@ const char * auto_contributors[] {
"hermano",
"hexiaoting",
"hhell",
"homeward",
"hotid",
"huangzhaowei",
"hustnn",

@ -1025,6 +1047,7 @@ const char * auto_contributors[] {
"jennyma",
"jetgm",
"jewisliu",
"jiahui-97",
"jianmei zhang",
"jkuklis",
"jus1096",

@ -1045,6 +1068,7 @@ const char * auto_contributors[] {
"l",
"l1tsolaiki",
"lalex",
"lanfz",
"larryluogit",
"laurieliyang",
"lehasm",

@ -1054,6 +1078,7 @@ const char * auto_contributors[] {
"levushkin aleksej",
"levysh",
"lgbo",
"lgbo-usstc",
"lgbo-ustc",
"lhuang0928",
"lhuang09287750",

@ -1066,6 +1091,7 @@ const char * auto_contributors[] {
"listar",
"litao91",
"liu-bov",
"liumaojing",
"liuneng1994",
"liuyangkuan",
"liuyimin",

@ -1120,8 +1146,10 @@ const char * auto_contributors[] {
"nagorny",
"nauta",
"nautaa",
"ndchikin",
"neng.liu",
"never lee",
"ni1l",
"nicelulu",
"nickzhwang",
"nikitamikhaylov",

@ -1134,6 +1162,7 @@ const char * auto_contributors[] {
"ogorbacheva",
"olegkv",
"olevino",
"olevino999",
"olgarev",
"orantius",
"p0ny",

@ -1205,6 +1234,7 @@ const char * auto_contributors[] {
"tangjiangling",
"tao jiang",
"tavplubix",
"tchepavel",
"tcoyvwac",
"tekeri",
"templarzq",

@ -1237,10 +1267,12 @@ const char * auto_contributors[] {
"vzakaznikov",
"wangchao",
"weeds085490",
"wuxiaobai24",
"wzl",
"xPoSx",
"xiedeyantu",
"xinhuitian",
"yakkomajuri",
"yakov-olkhovskiy",
"yandd",
"yang",

@ -1276,6 +1308,7 @@ const char * auto_contributors[] {
"zhukai",
"zkun",
"zlx19950903",
"zombee0",
"zvonand",
"zvrr",
"zvvr",

@ -1296,6 +1329,7 @@ const char * auto_contributors[] {
"何李夫",
"凌涛",
"吴健",
"小蝌蚪",
"小路",
"张中南",
"张健",
@ -89,6 +89,11 @@ class Release:
self._git.update()
self.version = get_version_from_repo(git=self._git)

def get_stable_release_type(self) -> str:
if self.version.minor % 5 == 3:  # our 3 and 8 are LTS
return VersionType.LTS
return VersionType.STABLE

def check_prerequisites(self):
"""
Check tooling installed in the system

@ -96,7 +101,7 @@ class Release:
self.run("gh auth status")
self.run("git status")

def do(self, check_dirty: bool, check_branch: bool, with_prestable: bool):
def do(self, check_dirty: bool, check_branch: bool, with_release_branch: bool):
self.check_prerequisites()

if check_dirty:

@ -115,14 +120,22 @@ class Release:
with self._checkout(self.release_commit, True):
if self.release_type in self.BIG:
# Checkout to the commit, it will provide the correct current version
if with_prestable:
with self.prestable():
if with_release_branch:
with self.create_release_branch():
logging.info("Prestable part of the releasing is done")
else:
logging.info("Skipping prestable stage")
logging.info("Skipping creating release branch stage")

with self.testing():
logging.info("Testing part of the releasing is done")
rollback = self._rollback_stack.copy()
try:
with self.testing():
logging.info("Testing part of the releasing is done")
except (Exception, KeyboardInterrupt):
logging.fatal("Testing part failed, rollback previous steps")
rollback.reverse()
for cmd in rollback:
self.run(cmd)
raise

elif self.release_type in self.SMALL:
with self.stable():

@ -152,7 +165,10 @@ class Release:
)

# Prefetch the branch to have it updated
self.run(f"git fetch {self.repo.url} {branch}:{branch}")
if self._git.branch == branch:
self.run("git pull")
else:
self.run(f"git fetch {self.repo.url} {branch}:{branch}")
output = self.run(f"git branch --contains={self.release_commit} {branch}")
if branch not in output:
raise Exception(

@ -170,16 +186,16 @@ class Release:
)

@contextmanager
def prestable(self):
def create_release_branch(self):
self.check_no_tags_after()
# Create release branch
self.read_version()
with self._create_branch(self.release_branch, self.release_commit):
with self._checkout(self.release_branch, True):
self.read_version()
self.version.with_description(VersionType.PRESTABLE)
with self._create_gh_release(True):
with self._bump_prestable_version():
self.version.with_description(self.get_stable_release_type())
with self._create_gh_release(False):
with self._bump_release_branch():
# At this point everything will rollback automatically
yield

@ -187,9 +203,7 @@ class Release:
def stable(self):
self.check_no_tags_after()
self.read_version()
version_type = VersionType.STABLE
if self.version.minor % 5 == 3:  # our 3 and 8 are LTS
version_type = VersionType.LTS
version_type = self.get_stable_release_type()
self.version.with_description(version_type)
with self._create_gh_release(False):
self.version = self.version.update(self.release_type)

@ -198,7 +212,7 @@ class Release:
update_contributors(raise_error=True)
# Checking out the commit of the branch and not the branch itself,
# then we are able to skip rollback
with self._checkout(f"{self.release_branch}@{{0}}", False):
with self._checkout(f"{self.release_branch}^0", False):
current_commit = self.run("git rev-parse HEAD")
self.run(
f"git commit -m "

@ -254,11 +268,15 @@ class Release:
self._release_commit = commit(release_commit)

@contextmanager
def _bump_prestable_version(self):
def _bump_release_branch(self):
# Update only git, original version stays the same
self._git.update()
new_version = self.version.patch_update()
new_version.with_description("prestable")
version_type = self.get_stable_release_type()
pr_labels = "--label release"
if version_type == VersionType.LTS:
pr_labels += " --label release-lts"
new_version.with_description(version_type)
update_cmake_version(new_version)
update_contributors(raise_error=True)
self.run(

@ -272,22 +290,23 @@ class Release:
with self._create_gh_label(
f"v{self.release_branch}-affected", "c2bfff"
):
# The following command is rolled back by self._push
self.run(
f"gh pr create --repo {self.repo} --title "
f"'Release pull request for branch {self.release_branch}' "
f"--head {self.release_branch} --label release "
f"--head {self.release_branch} {pr_labels} "
"--body 'This PullRequest is a part of ClickHouse release "
"cycle. It is used by CI system only. Do not perform any "
"changes with it.'"
)
# Here the prestable part is done
# Here the release branch part is done
yield

@contextmanager
def _bump_testing_version(self, helper_branch: str):
self.read_version()
self.version = self.version.update(self.release_type)
self.version.with_description("testing")
self.version.with_description(VersionType.TESTING)
update_cmake_version(self.version)
update_contributors(raise_error=True)
self.run(

@ -300,7 +319,7 @@ class Release:
f"gh pr create --repo {self.repo} --title 'Update version after "
f"release' --head {helper_branch} --body-file '{body_file}'"
)
# Here the prestable part is done
# Here the testing part is done
yield

@contextmanager

@ -314,7 +333,7 @@ class Release:
rollback_cmd = f"git checkout {orig_ref}"
try:
yield
except BaseException:
except (Exception, KeyboardInterrupt):
logging.warning("Rolling back checked out %s for %s", ref, orig_ref)
self.run(f"git reset --hard; git checkout {orig_ref}")
raise

@ -329,7 +348,7 @@ class Release:
self._rollback_stack.append(rollback_cmd)
try:
yield
except BaseException:
except (Exception, KeyboardInterrupt):
logging.warning("Rolling back created branch %s", name)
self.run(rollback_cmd)
raise

@ -344,7 +363,7 @@ class Release:
self._rollback_stack.append(rollback_cmd)
try:
yield
except BaseException:
except (Exception, KeyboardInterrupt):
logging.warning("Rolling back label %s", label)
self.run(rollback_cmd)
raise

@ -358,14 +377,14 @@ class Release:
if as_prerelease:
prerelease = "--prerelease"
self.run(
f"gh release create {prerelease} --draft --repo {self.repo} "
f"gh release create {prerelease} --repo {self.repo} "
f"--title 'Release {tag}' '{tag}'"
)
rollback_cmd = f"gh release delete --yes --repo {self.repo} '{tag}'"
self._rollback_stack.append(rollback_cmd)
try:
yield
except BaseException:
except (Exception, KeyboardInterrupt):
logging.warning("Rolling back release publishing")
self.run(rollback_cmd)
raise

@ -379,7 +398,7 @@ class Release:
try:
with self._push(f"'{tag}'"):
yield
except BaseException:
except (Exception, KeyboardInterrupt):
logging.warning("Rolling back tag %s", tag)
self.run(rollback_cmd)
raise

@ -396,7 +415,7 @@ class Release:

try:
yield
except BaseException:
except (Exception, KeyboardInterrupt):
if with_rollback_on_fail:
logging.warning("Rolling back pushed ref %s", ref)
self.run(rollback_cmd)

@ -437,14 +456,13 @@ def parse_args() -> argparse.Namespace:
dest="release_type",
help="a release type, new branch is created only for 'major' and 'minor'",
)
parser.add_argument("--with-prestable", default=True, help=argparse.SUPPRESS)
parser.add_argument("--with-release-branch", default=True, help=argparse.SUPPRESS)
parser.add_argument(
"--no-prestable",
dest="with_prestable",
"--no-release-branch",
dest="with_release_branch",
action="store_false",
default=argparse.SUPPRESS,
help=f"if set, for release types in {Release.BIG} skip creating prestable "
"release and release branch",
help=f"if set, for release types in {Release.BIG} skip creating release branch",
)
parser.add_argument("--check-dirty", default=True, help=argparse.SUPPRESS)
parser.add_argument(

@ -475,7 +493,7 @@ def main():
repo = Repo(args.repo, args.remote_protocol)
release = Release(repo, args.commit, args.release_type)

release.do(args.check_dirty, args.check_branch, args.with_prestable)
release.do(args.check_dirty, args.check_branch, args.with_release_branch)


if __name__ == "__main__":
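The do() method above keeps a stack of shell commands that undo each completed step and replays it in reverse when the testing stage fails. The same idea expressed as a small C++ sketch, with a vector of callbacks standing in for the shell commands:

#include <functional>
#include <iostream>
#include <stdexcept>
#include <vector>

int main()
{
    std::vector<std::function<void()>> rollback;

    try
    {
        std::cout << "create branch\n";
        rollback.push_back([] { std::cout << "rollback: delete branch\n"; });

        std::cout << "create release\n";
        rollback.push_back([] { std::cout << "rollback: delete release\n"; });

        throw std::runtime_error("testing stage failed");
    }
    catch (const std::exception & e)
    {
        std::cout << e.what() << '\n';
        // Undo completed steps in reverse order of execution.
        for (auto it = rollback.rbegin(); it != rollback.rend(); ++it)
            (*it)();
        return 1;
    }
}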
@ -1,5 +1,6 @@
<clickhouse>
<access_control_improvements>
<users_without_row_policies_can_read_rows>true</users_without_row_policies_can_read_rows>
<on_cluster_queries_require_cluster_grant>true</on_cluster_queries_require_cluster_grant>
</access_control_improvements>
</clickhouse>
@ -1,4 +1,5 @@
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV

@ -260,6 +261,11 @@ def test_introspection():
assert instance.query("SHOW GRANTS", user="A") == TSV(
["GRANT SELECT ON test.table TO A", "GRANT R1 TO A"]
)

assert instance.query("SHOW GRANTS FOR R1", user="A") == TSV([])
with pytest.raises(QueryRuntimeException, match="Not enough privileges"):
assert instance.query("SHOW GRANTS FOR R2", user="A")

assert instance.query("SHOW GRANTS", user="B") == TSV(
[
"GRANT CREATE ON *.* TO B WITH GRANT OPTION",
@ -1,19 +0,0 @@
<test>


    <substitutions>
        <substitution>
            <name>expr</name>
            <values>
                <value>number</value>
                <value>toUInt32(number)</value>
                <value>toUInt16(number)</value>
                <value>toUInt8(number)</value>
                <value>toInt32(number)</value>
                <value>toFloat64(number)</value>
            </values>
        </substitution>
    </substitutions>

    <query>SELECT bitCount({expr}) FROM numbers(100000000) FORMAT Null</query>
</test>
42
tests/performance/unary_arithmetic_functions.xml
Normal file
@ -0,0 +1,42 @@
<test>


    <substitutions>
        <substitution>
            <name>func</name>
            <values>
                <value>bitCount</value>
                <value>bitNot</value>
                <value>abs</value>
                <value>intExp2</value>
                <value>intExp10</value>
                <value>negate</value>
                <value>roundAge</value>
                <value>roundDuration</value>
                <value>roundToExp2</value>
                <value>sign</value>
            </values>
        </substitution>

        <substitution>
            <name>expr</name>

            <values>
                <value>number</value>
                <value>toUInt32(number)</value>
                <value>toUInt16(number)</value>
                <value>toUInt8(number)</value>

                <value>toInt64(number)</value>
                <value>toInt32(number)</value>
                <value>toInt16(number)</value>
                <value>toInt8(number)</value>

                <value>toFloat64(number)</value>
                <value>toFloat32(number)</value>
            </values>
        </substitution>
    </substitutions>

    <query>SELECT {func}({expr}) FROM numbers(100000000) FORMAT Null</query>
</test>
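For readers unfamiliar with the performance-test framework: each `{name}` placeholder in `<query>` is expanded with the cross product of all substitution values, so this single query line stands for 10 × 10 = 100 benchmark queries. A rough Python illustration of that expansion (values copied from the XML above; the framework's real expansion logic is more involved):

    from itertools import product

    funcs = ["bitCount", "bitNot", "abs", "intExp2", "intExp10",
             "negate", "roundAge", "roundDuration", "roundToExp2", "sign"]
    exprs = ["number", "toUInt32(number)", "toUInt16(number)", "toUInt8(number)",
             "toInt64(number)", "toInt32(number)", "toInt16(number)", "toInt8(number)",
             "toFloat64(number)", "toFloat32(number)"]

    # Each (func, expr) pair becomes one benchmark query, 100 in total.
    for func, expr in product(funcs, exprs):
        print(f"SELECT {func}({expr}) FROM numbers(100000000) FORMAT Null")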
@ -138,5 +138,6 @@ HDFS [] GLOBAL SOURCES
S3 [] GLOBAL SOURCES
HIVE [] GLOBAL SOURCES
SOURCES [] \N ALL
CLUSTER [] GLOBAL ALL
ALL ['ALL PRIVILEGES'] \N \N
NONE ['USAGE','NO PRIVILEGES'] \N \N
@ -276,7 +276,7 @@ CREATE TABLE system.grants
(
    `user_name` Nullable(String),
    `role_name` Nullable(String),
`access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'KILL QUERY' = 61, 'KILL TRANSACTION' = 62, 'MOVE PARTITION BETWEEN SHARDS' = 63, 'CREATE USER' = 64, 'ALTER USER' = 65, 'DROP USER' = 66, 'CREATE ROLE' = 67, 'ALTER ROLE' = 68, 'DROP ROLE' = 69, 'ROLE ADMIN' = 70, 'CREATE ROW POLICY' = 71, 'ALTER ROW POLICY' = 72, 'DROP ROW POLICY' = 73, 'CREATE QUOTA' = 74, 'ALTER QUOTA' = 75, 'DROP QUOTA' = 76, 'CREATE SETTINGS PROFILE' = 77, 'ALTER SETTINGS PROFILE' = 78, 'DROP SETTINGS PROFILE' = 79, 'SHOW USERS' = 80, 'SHOW ROLES' = 81, 'SHOW ROW POLICIES' = 82, 'SHOW QUOTAS' = 83, 'SHOW SETTINGS PROFILES' = 84, 'SHOW ACCESS' = 85, 'ACCESS MANAGEMENT' = 86, 'SYSTEM SHUTDOWN' = 87, 'SYSTEM DROP DNS CACHE' = 88, 'SYSTEM DROP MARK CACHE' = 89, 'SYSTEM DROP UNCOMPRESSED CACHE' = 90, 'SYSTEM DROP MMAP CACHE' = 91, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 92, 'SYSTEM DROP CACHE' = 93, 'SYSTEM RELOAD CONFIG' = 94, 'SYSTEM RELOAD SYMBOLS' = 95, 'SYSTEM RELOAD DICTIONARY' = 96, 'SYSTEM RELOAD MODEL' = 97, 'SYSTEM RELOAD FUNCTION' = 98, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 99, 'SYSTEM RELOAD' = 100, 'SYSTEM RESTART DISK' = 101, 'SYSTEM MERGES' = 102, 'SYSTEM TTL MERGES' = 103, 'SYSTEM FETCHES' = 104, 'SYSTEM MOVES' = 105, 'SYSTEM DISTRIBUTED SENDS' = 106, 'SYSTEM REPLICATED SENDS' = 107, 'SYSTEM SENDS' = 108, 'SYSTEM REPLICATION QUEUES' = 109, 'SYSTEM DROP REPLICA' = 110, 'SYSTEM SYNC REPLICA' = 111, 'SYSTEM RESTART REPLICA' = 112, 'SYSTEM RESTORE REPLICA' = 113, 'SYSTEM SYNC DATABASE REPLICA' = 114, 'SYSTEM FLUSH DISTRIBUTED' = 115, 'SYSTEM FLUSH LOGS' = 116, 'SYSTEM FLUSH' = 117, 'SYSTEM THREAD FUZZER' = 118, 'SYSTEM' = 119, 'dictGet' = 120, 'addressToLine' = 121, 'addressToLineWithInlines' = 122, 'addressToSymbol' = 123, 'demangle' = 124, 'INTROSPECTION' = 125, 'FILE' = 126, 'URL' = 127, 'REMOTE' = 128, 'MONGO' = 129, 'MEILISEARCH' = 130, 'MYSQL' = 131, 'POSTGRES' = 132, 'SQLITE' = 133, 'ODBC' = 134, 'JDBC' = 135, 'HDFS' = 136, 'S3' = 137, 'HIVE' = 138, 'SOURCES' = 139, 'ALL' = 140, 'NONE' = 141),
`access_type` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'KILL QUERY' = 61, 'KILL TRANSACTION' = 62, 'MOVE PARTITION BETWEEN SHARDS' = 63, 'CREATE USER' = 64, 'ALTER USER' = 65, 'DROP USER' = 66, 'CREATE ROLE' = 67, 'ALTER ROLE' = 68, 'DROP ROLE' = 69, 'ROLE ADMIN' = 70, 'CREATE ROW POLICY' = 71, 'ALTER ROW POLICY' = 72, 'DROP ROW POLICY' = 73, 'CREATE QUOTA' = 74, 'ALTER QUOTA' = 75, 'DROP QUOTA' = 76, 'CREATE SETTINGS PROFILE' = 77, 'ALTER SETTINGS PROFILE' = 78, 'DROP SETTINGS PROFILE' = 79, 'SHOW USERS' = 80, 'SHOW ROLES' = 81, 'SHOW ROW POLICIES' = 82, 'SHOW QUOTAS' = 83, 'SHOW SETTINGS PROFILES' = 84, 'SHOW ACCESS' = 85, 'ACCESS MANAGEMENT' = 86, 'SYSTEM SHUTDOWN' = 87, 'SYSTEM DROP DNS CACHE' = 88, 'SYSTEM DROP MARK CACHE' = 89, 'SYSTEM DROP UNCOMPRESSED CACHE' = 90, 'SYSTEM DROP MMAP CACHE' = 91, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 92, 'SYSTEM DROP CACHE' = 93, 'SYSTEM RELOAD CONFIG' = 94, 'SYSTEM RELOAD SYMBOLS' = 95, 'SYSTEM RELOAD DICTIONARY' = 96, 'SYSTEM RELOAD MODEL' = 97, 'SYSTEM RELOAD FUNCTION' = 98, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 99, 'SYSTEM RELOAD' = 100, 'SYSTEM RESTART DISK' = 101, 'SYSTEM MERGES' = 102, 'SYSTEM TTL MERGES' = 103, 'SYSTEM FETCHES' = 104, 'SYSTEM MOVES' = 105, 'SYSTEM DISTRIBUTED SENDS' = 106, 'SYSTEM REPLICATED SENDS' = 107, 'SYSTEM SENDS' = 108, 'SYSTEM REPLICATION QUEUES' = 109, 'SYSTEM DROP REPLICA' = 110, 'SYSTEM SYNC REPLICA' = 111, 'SYSTEM RESTART REPLICA' = 112, 'SYSTEM RESTORE REPLICA' = 113, 'SYSTEM SYNC DATABASE REPLICA' = 114, 'SYSTEM FLUSH DISTRIBUTED' = 115, 'SYSTEM FLUSH LOGS' = 116, 'SYSTEM FLUSH' = 117, 'SYSTEM THREAD FUZZER' = 118, 'SYSTEM' = 119, 'dictGet' = 120, 'addressToLine' = 121, 'addressToLineWithInlines' = 122, 'addressToSymbol' = 123, 'demangle' = 124, 'INTROSPECTION' = 125, 'FILE' = 126, 'URL' = 127, 'REMOTE' = 128, 'MONGO' = 129, 'MEILISEARCH' = 130, 'MYSQL' = 131, 'POSTGRES' = 132, 'SQLITE' = 133, 'ODBC' = 134, 'JDBC' = 135, 'HDFS' = 136, 'S3' = 137, 'HIVE' = 138, 'SOURCES' = 139, 'CLUSTER' = 140, 'ALL' = 141, 'NONE' = 142),
    `database` Nullable(String),
    `table` Nullable(String),
    `column` Nullable(String),
@ -549,10 +549,10 @@ ENGINE = SystemPartsColumns()
COMMENT 'SYSTEM TABLE is built on the fly.'
CREATE TABLE system.privileges
(
`privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'KILL QUERY' = 61, 'KILL TRANSACTION' = 62, 'MOVE PARTITION BETWEEN SHARDS' = 63, 'CREATE USER' = 64, 'ALTER USER' = 65, 'DROP USER' = 66, 'CREATE ROLE' = 67, 'ALTER ROLE' = 68, 'DROP ROLE' = 69, 'ROLE ADMIN' = 70, 'CREATE ROW POLICY' = 71, 'ALTER ROW POLICY' = 72, 'DROP ROW POLICY' = 73, 'CREATE QUOTA' = 74, 'ALTER QUOTA' = 75, 'DROP QUOTA' = 76, 'CREATE SETTINGS PROFILE' = 77, 'ALTER SETTINGS PROFILE' = 78, 'DROP SETTINGS PROFILE' = 79, 'SHOW USERS' = 80, 'SHOW ROLES' = 81, 'SHOW ROW POLICIES' = 82, 'SHOW QUOTAS' = 83, 'SHOW SETTINGS PROFILES' = 84, 'SHOW ACCESS' = 85, 'ACCESS MANAGEMENT' = 86, 'SYSTEM SHUTDOWN' = 87, 'SYSTEM DROP DNS CACHE' = 88, 'SYSTEM DROP MARK CACHE' = 89, 'SYSTEM DROP UNCOMPRESSED CACHE' = 90, 'SYSTEM DROP MMAP CACHE' = 91, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 92, 'SYSTEM DROP CACHE' = 93, 'SYSTEM RELOAD CONFIG' = 94, 'SYSTEM RELOAD SYMBOLS' = 95, 'SYSTEM RELOAD DICTIONARY' = 96, 'SYSTEM RELOAD MODEL' = 97, 'SYSTEM RELOAD FUNCTION' = 98, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 99, 'SYSTEM RELOAD' = 100, 'SYSTEM RESTART DISK' = 101, 'SYSTEM MERGES' = 102, 'SYSTEM TTL MERGES' = 103, 'SYSTEM FETCHES' = 104, 'SYSTEM MOVES' = 105, 'SYSTEM DISTRIBUTED SENDS' = 106, 'SYSTEM REPLICATED SENDS' = 107, 'SYSTEM SENDS' = 108, 'SYSTEM REPLICATION QUEUES' = 109, 'SYSTEM DROP REPLICA' = 110, 'SYSTEM SYNC REPLICA' = 111, 'SYSTEM RESTART REPLICA' = 112, 'SYSTEM RESTORE REPLICA' = 113, 'SYSTEM SYNC DATABASE REPLICA' = 114, 'SYSTEM FLUSH DISTRIBUTED' = 115, 'SYSTEM FLUSH LOGS' = 116, 'SYSTEM FLUSH' = 117, 'SYSTEM THREAD FUZZER' = 118, 'SYSTEM' = 119, 'dictGet' = 120, 'addressToLine' = 121, 'addressToLineWithInlines' = 122, 'addressToSymbol' = 123, 'demangle' = 124, 'INTROSPECTION' = 125, 'FILE' = 126, 'URL' = 127, 'REMOTE' = 128, 'MONGO' = 129, 'MEILISEARCH' = 130, 'MYSQL' = 131, 'POSTGRES' = 132, 'SQLITE' = 133, 'ODBC' = 134, 'JDBC' = 135, 'HDFS' = 136, 'S3' = 137, 'HIVE' = 138, 'SOURCES' = 139, 'ALL' = 140, 'NONE' = 141),
`privilege` Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'KILL QUERY' = 61, 'KILL TRANSACTION' = 62, 'MOVE PARTITION BETWEEN SHARDS' = 63, 'CREATE USER' = 64, 'ALTER USER' = 65, 'DROP USER' = 66, 'CREATE ROLE' = 67, 'ALTER ROLE' = 68, 'DROP ROLE' = 69, 'ROLE ADMIN' = 70, 'CREATE ROW POLICY' = 71, 'ALTER ROW POLICY' = 72, 'DROP ROW POLICY' = 73, 'CREATE QUOTA' = 74, 'ALTER QUOTA' = 75, 'DROP QUOTA' = 76, 'CREATE SETTINGS PROFILE' = 77, 'ALTER SETTINGS PROFILE' = 78, 'DROP SETTINGS PROFILE' = 79, 'SHOW USERS' = 80, 'SHOW ROLES' = 81, 'SHOW ROW POLICIES' = 82, 'SHOW QUOTAS' = 83, 'SHOW SETTINGS PROFILES' = 84, 'SHOW ACCESS' = 85, 'ACCESS MANAGEMENT' = 86, 'SYSTEM SHUTDOWN' = 87, 'SYSTEM DROP DNS CACHE' = 88, 'SYSTEM DROP MARK CACHE' = 89, 'SYSTEM DROP UNCOMPRESSED CACHE' = 90, 'SYSTEM DROP MMAP CACHE' = 91, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 92, 'SYSTEM DROP CACHE' = 93, 'SYSTEM RELOAD CONFIG' = 94, 'SYSTEM RELOAD SYMBOLS' = 95, 'SYSTEM RELOAD DICTIONARY' = 96, 'SYSTEM RELOAD MODEL' = 97, 'SYSTEM RELOAD FUNCTION' = 98, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 99, 'SYSTEM RELOAD' = 100, 'SYSTEM RESTART DISK' = 101, 'SYSTEM MERGES' = 102, 'SYSTEM TTL MERGES' = 103, 'SYSTEM FETCHES' = 104, 'SYSTEM MOVES' = 105, 'SYSTEM DISTRIBUTED SENDS' = 106, 'SYSTEM REPLICATED SENDS' = 107, 'SYSTEM SENDS' = 108, 'SYSTEM REPLICATION QUEUES' = 109, 'SYSTEM DROP REPLICA' = 110, 'SYSTEM SYNC REPLICA' = 111, 'SYSTEM RESTART REPLICA' = 112, 'SYSTEM RESTORE REPLICA' = 113, 'SYSTEM SYNC DATABASE REPLICA' = 114, 'SYSTEM FLUSH DISTRIBUTED' = 115, 'SYSTEM FLUSH LOGS' = 116, 'SYSTEM FLUSH' = 117, 'SYSTEM THREAD FUZZER' = 118, 'SYSTEM' = 119, 'dictGet' = 120, 'addressToLine' = 121, 'addressToLineWithInlines' = 122, 'addressToSymbol' = 123, 'demangle' = 124, 'INTROSPECTION' = 125, 'FILE' = 126, 'URL' = 127, 'REMOTE' = 128, 'MONGO' = 129, 'MEILISEARCH' = 130, 'MYSQL' = 131, 'POSTGRES' = 132, 'SQLITE' = 133, 'ODBC' = 134, 'JDBC' = 135, 'HDFS' = 136, 'S3' = 137, 'HIVE' = 138, 'SOURCES' = 139, 'CLUSTER' = 140, 'ALL' = 141, 'NONE' = 142),
    `aliases` Array(String),
    `level` Nullable(Enum8('GLOBAL' = 0, 'DATABASE' = 1, 'TABLE' = 2, 'DICTIONARY' = 3, 'VIEW' = 4, 'COLUMN' = 5)),
`parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'KILL QUERY' = 61, 'KILL TRANSACTION' = 62, 'MOVE PARTITION BETWEEN SHARDS' = 63, 'CREATE USER' = 64, 'ALTER USER' = 65, 'DROP USER' = 66, 'CREATE ROLE' = 67, 'ALTER ROLE' = 68, 'DROP ROLE' = 69, 'ROLE ADMIN' = 70, 'CREATE ROW POLICY' = 71, 'ALTER ROW POLICY' = 72, 'DROP ROW POLICY' = 73, 'CREATE QUOTA' = 74, 'ALTER QUOTA' = 75, 'DROP QUOTA' = 76, 'CREATE SETTINGS PROFILE' = 77, 'ALTER SETTINGS PROFILE' = 78, 'DROP SETTINGS PROFILE' = 79, 'SHOW USERS' = 80, 'SHOW ROLES' = 81, 'SHOW ROW POLICIES' = 82, 'SHOW QUOTAS' = 83, 'SHOW SETTINGS PROFILES' = 84, 'SHOW ACCESS' = 85, 'ACCESS MANAGEMENT' = 86, 'SYSTEM SHUTDOWN' = 87, 'SYSTEM DROP DNS CACHE' = 88, 'SYSTEM DROP MARK CACHE' = 89, 'SYSTEM DROP UNCOMPRESSED CACHE' = 90, 'SYSTEM DROP MMAP CACHE' = 91, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 92, 'SYSTEM DROP CACHE' = 93, 'SYSTEM RELOAD CONFIG' = 94, 'SYSTEM RELOAD SYMBOLS' = 95, 'SYSTEM RELOAD DICTIONARY' = 96, 'SYSTEM RELOAD MODEL' = 97, 'SYSTEM RELOAD FUNCTION' = 98, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 99, 'SYSTEM RELOAD' = 100, 'SYSTEM RESTART DISK' = 101, 'SYSTEM MERGES' = 102, 'SYSTEM TTL MERGES' = 103, 'SYSTEM FETCHES' = 104, 'SYSTEM MOVES' = 105, 'SYSTEM DISTRIBUTED SENDS' = 106, 'SYSTEM REPLICATED SENDS' = 107, 'SYSTEM SENDS' = 108, 'SYSTEM REPLICATION QUEUES' = 109, 'SYSTEM DROP REPLICA' = 110, 'SYSTEM SYNC REPLICA' = 111, 'SYSTEM RESTART REPLICA' = 112, 'SYSTEM RESTORE REPLICA' = 113, 'SYSTEM SYNC DATABASE REPLICA' = 114, 'SYSTEM FLUSH DISTRIBUTED' = 115, 'SYSTEM FLUSH LOGS' = 116, 'SYSTEM FLUSH' = 117, 'SYSTEM THREAD FUZZER' = 118, 'SYSTEM' = 119, 'dictGet' = 120, 'addressToLine' = 121, 'addressToLineWithInlines' = 122, 'addressToSymbol' = 123, 'demangle' = 124, 'INTROSPECTION' = 125, 'FILE' = 126, 'URL' = 127, 'REMOTE' = 128, 'MONGO' = 129, 'MEILISEARCH' = 130, 'MYSQL' = 131, 'POSTGRES' = 132, 'SQLITE' = 133, 'ODBC' = 134, 'JDBC' = 135, 'HDFS' = 136, 'S3' = 137, 'HIVE' = 138, 'SOURCES' = 139, 'ALL' = 140, 'NONE' = 141))
`parent_group` Nullable(Enum16('SHOW DATABASES' = 0, 'SHOW TABLES' = 1, 'SHOW COLUMNS' = 2, 'SHOW DICTIONARIES' = 3, 'SHOW' = 4, 'SELECT' = 5, 'INSERT' = 6, 'ALTER UPDATE' = 7, 'ALTER DELETE' = 8, 'ALTER ADD COLUMN' = 9, 'ALTER MODIFY COLUMN' = 10, 'ALTER DROP COLUMN' = 11, 'ALTER COMMENT COLUMN' = 12, 'ALTER CLEAR COLUMN' = 13, 'ALTER RENAME COLUMN' = 14, 'ALTER MATERIALIZE COLUMN' = 15, 'ALTER COLUMN' = 16, 'ALTER MODIFY COMMENT' = 17, 'ALTER ORDER BY' = 18, 'ALTER SAMPLE BY' = 19, 'ALTER ADD INDEX' = 20, 'ALTER DROP INDEX' = 21, 'ALTER MATERIALIZE INDEX' = 22, 'ALTER CLEAR INDEX' = 23, 'ALTER INDEX' = 24, 'ALTER ADD PROJECTION' = 25, 'ALTER DROP PROJECTION' = 26, 'ALTER MATERIALIZE PROJECTION' = 27, 'ALTER CLEAR PROJECTION' = 28, 'ALTER PROJECTION' = 29, 'ALTER ADD CONSTRAINT' = 30, 'ALTER DROP CONSTRAINT' = 31, 'ALTER CONSTRAINT' = 32, 'ALTER TTL' = 33, 'ALTER MATERIALIZE TTL' = 34, 'ALTER SETTINGS' = 35, 'ALTER MOVE PARTITION' = 36, 'ALTER FETCH PARTITION' = 37, 'ALTER FREEZE PARTITION' = 38, 'ALTER DATABASE SETTINGS' = 39, 'ALTER TABLE' = 40, 'ALTER DATABASE' = 41, 'ALTER VIEW REFRESH' = 42, 'ALTER VIEW MODIFY QUERY' = 43, 'ALTER VIEW' = 44, 'ALTER' = 45, 'CREATE DATABASE' = 46, 'CREATE TABLE' = 47, 'CREATE VIEW' = 48, 'CREATE DICTIONARY' = 49, 'CREATE TEMPORARY TABLE' = 50, 'CREATE FUNCTION' = 51, 'CREATE' = 52, 'DROP DATABASE' = 53, 'DROP TABLE' = 54, 'DROP VIEW' = 55, 'DROP DICTIONARY' = 56, 'DROP FUNCTION' = 57, 'DROP' = 58, 'TRUNCATE' = 59, 'OPTIMIZE' = 60, 'KILL QUERY' = 61, 'KILL TRANSACTION' = 62, 'MOVE PARTITION BETWEEN SHARDS' = 63, 'CREATE USER' = 64, 'ALTER USER' = 65, 'DROP USER' = 66, 'CREATE ROLE' = 67, 'ALTER ROLE' = 68, 'DROP ROLE' = 69, 'ROLE ADMIN' = 70, 'CREATE ROW POLICY' = 71, 'ALTER ROW POLICY' = 72, 'DROP ROW POLICY' = 73, 'CREATE QUOTA' = 74, 'ALTER QUOTA' = 75, 'DROP QUOTA' = 76, 'CREATE SETTINGS PROFILE' = 77, 'ALTER SETTINGS PROFILE' = 78, 'DROP SETTINGS PROFILE' = 79, 'SHOW USERS' = 80, 'SHOW ROLES' = 81, 'SHOW ROW POLICIES' = 82, 'SHOW QUOTAS' = 83, 'SHOW SETTINGS PROFILES' = 84, 'SHOW ACCESS' = 85, 'ACCESS MANAGEMENT' = 86, 'SYSTEM SHUTDOWN' = 87, 'SYSTEM DROP DNS CACHE' = 88, 'SYSTEM DROP MARK CACHE' = 89, 'SYSTEM DROP UNCOMPRESSED CACHE' = 90, 'SYSTEM DROP MMAP CACHE' = 91, 'SYSTEM DROP COMPILED EXPRESSION CACHE' = 92, 'SYSTEM DROP CACHE' = 93, 'SYSTEM RELOAD CONFIG' = 94, 'SYSTEM RELOAD SYMBOLS' = 95, 'SYSTEM RELOAD DICTIONARY' = 96, 'SYSTEM RELOAD MODEL' = 97, 'SYSTEM RELOAD FUNCTION' = 98, 'SYSTEM RELOAD EMBEDDED DICTIONARIES' = 99, 'SYSTEM RELOAD' = 100, 'SYSTEM RESTART DISK' = 101, 'SYSTEM MERGES' = 102, 'SYSTEM TTL MERGES' = 103, 'SYSTEM FETCHES' = 104, 'SYSTEM MOVES' = 105, 'SYSTEM DISTRIBUTED SENDS' = 106, 'SYSTEM REPLICATED SENDS' = 107, 'SYSTEM SENDS' = 108, 'SYSTEM REPLICATION QUEUES' = 109, 'SYSTEM DROP REPLICA' = 110, 'SYSTEM SYNC REPLICA' = 111, 'SYSTEM RESTART REPLICA' = 112, 'SYSTEM RESTORE REPLICA' = 113, 'SYSTEM SYNC DATABASE REPLICA' = 114, 'SYSTEM FLUSH DISTRIBUTED' = 115, 'SYSTEM FLUSH LOGS' = 116, 'SYSTEM FLUSH' = 117, 'SYSTEM THREAD FUZZER' = 118, 'SYSTEM' = 119, 'dictGet' = 120, 'addressToLine' = 121, 'addressToLineWithInlines' = 122, 'addressToSymbol' = 123, 'demangle' = 124, 'INTROSPECTION' = 125, 'FILE' = 126, 'URL' = 127, 'REMOTE' = 128, 'MONGO' = 129, 'MEILISEARCH' = 130, 'MYSQL' = 131, 'POSTGRES' = 132, 'SQLITE' = 133, 'ODBC' = 134, 'JDBC' = 135, 'HDFS' = 136, 'S3' = 137, 'HIVE' = 138, 'SOURCES' = 139, 'CLUSTER' = 140, 'ALL' = 141, 'NONE' = 142))
)
ENGINE = SystemPrivileges()
COMMENT 'SYSTEM TABLE is built on the fly.'
@ -0,0 +1,3 @@
with_on_cluster_02250_ON_CLUSTER_grant_default
without_on_cluster_02250_ON_CLUSTER_grant_default
Not enough privileges. To execute this query it's necessary to have grant CLUSTER ON *.*. (ACCESS_DENIED)
31
tests/queries/0_stateless/02250_ON_CLUSTER_grant.sh
Executable file
@ -0,0 +1,31 @@
#!/usr/bin/env bash

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

function cleanup()
{
    $CLICKHOUSE_CLIENT -nmq "
        DROP USER IF EXISTS with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME;
        DROP USER IF EXISTS without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME;
        DROP DATABASE IF EXISTS db_with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME;
    "
}
cleanup
trap cleanup EXIT

$CLICKHOUSE_CLIENT -nmq "
    CREATE USER with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME;
    CREATE USER without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME;

    GRANT CLUSTER, CREATE ON *.* TO with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME;
    GRANT CREATE ON *.* TO without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME;
"

echo "with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME"
$CLICKHOUSE_CLIENT --user "with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME" -q "CREATE DATABASE IF NOT EXISTS db_with_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME ON CLUSTER test_shard_localhost" >/dev/null
echo "without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME"
$CLICKHOUSE_CLIENT --user "without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME" -q "CREATE DATABASE IF NOT EXISTS db_without_on_cluster_$CLICKHOUSE_TEST_UNIQUE_NAME ON CLUSTER test_shard_localhost" |& {
    grep -m1 -F -o "Not enough privileges. To execute this query it's necessary to have grant CLUSTER ON *.*. (ACCESS_DENIED)"
}
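This test exercises the `on_cluster_queries_require_cluster_grant` setting enabled in the config hunk earlier: once it is on, `ON CLUSTER` DDL needs an explicit `GRANT CLUSTER ON *.*`. As a rough interactive equivalent, here is a sketch using the third-party clickhouse_driver package (the package choice, host, and user name are assumptions, and a cluster named `test_shard_localhost` must be defined on the server):

    from clickhouse_driver import Client  # assumption: third-party driver installed

    admin = Client("localhost")
    admin.execute("CREATE USER IF NOT EXISTS demo_on_cluster_user")
    admin.execute("GRANT CREATE ON *.* TO demo_on_cluster_user")

    restricted = Client("localhost", user="demo_on_cluster_user")
    try:
        restricted.execute(
            "CREATE DATABASE IF NOT EXISTS demo_db ON CLUSTER test_shard_localhost"
        )
    except Exception as err:
        print(err)  # expected: ACCESS_DENIED, missing grant CLUSTER ON *.*

    # After the grant, the same ON CLUSTER statement should succeed.
    admin.execute("GRANT CLUSTER ON *.* TO demo_on_cluster_user")
    restricted.execute(
        "CREATE DATABASE IF NOT EXISTS demo_db ON CLUSTER test_shard_localhost"
    )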
1
tests/queries/0_stateless/02263_lazy_mark_load.reference
Normal file
@ -0,0 +1 @@
2
34
tests/queries/0_stateless/02263_lazy_mark_load.sh
Executable file
@ -0,0 +1,34 @@
#!/usr/bin/env bash
set -eo pipefail

CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CURDIR"/../shell_config.sh

QUERY_ID=$(${CLICKHOUSE_CLIENT} -q "select lower(hex(reverse(reinterpretAsString(generateUUIDv4()))))")

${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS lazy_mark_test;"
${CLICKHOUSE_CLIENT} <<EOF
CREATE TABLE lazy_mark_test
(
    n0 UInt64,
    n1 UInt64,
    n2 UInt64,
    n3 UInt64,
    n4 UInt64,
    n5 UInt64,
    n6 UInt64,
    n7 UInt64,
    n8 UInt64,
    n9 UInt64
)
ENGINE = MergeTree
ORDER BY n0 SETTINGS min_bytes_for_wide_part = 0;
EOF

${CLICKHOUSE_CLIENT} -q "INSERT INTO lazy_mark_test select number, number % 3, number % 5, number % 10, number % 13, number % 15, number % 17, number % 18, number % 22, number % 25 from numbers(1000000)"
${CLICKHOUSE_CLIENT} -q "SYSTEM DROP MARK CACHE"
${CLICKHOUSE_CLIENT} --log_queries=1 --query_id "${QUERY_ID}" -q "SELECT * FROM lazy_mark_test WHERE n3==11"
${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH LOGS"

${CLICKHOUSE_CLIENT} -q "select ProfileEvents['FileOpen'] from system.query_log where query_id = '${QUERY_ID}' and type = 'QueryFinish' and current_database = currentDatabase()"
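Why the reference file expects exactly 2: `n3` is populated as `number % 10`, so the predicate `n3 == 11` can never match, and with lazy mark loading only the filter column's files are ever opened. A back-of-the-envelope check (the one-data-plus-one-marks accounting per column in a wide part is an assumption here):

    # n3 = number % 10 is always in [0, 9], so WHERE n3 == 11 selects nothing,
    # and lazily loaded marks mean the other nine columns are never touched.
    files_opened = 1   # data file for the filter column n3
    files_opened += 1  # marks file for n3, loaded on first use, not up front
    assert files_opened == 2  # matches the .reference value above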
@ -0,0 +1,30 @@
-- { echoOn }
SELECT grp_aggreg FROM data_02295 GROUP BY a, grp_aggreg SETTINGS optimize_aggregation_in_order = 0 FORMAT JSONEachRow;
{"grp_aggreg":"\u0002\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000"}
{"grp_aggreg":"\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000"}
SELECT grp_aggreg FROM data_02295 GROUP BY a, grp_aggreg SETTINGS optimize_aggregation_in_order = 1 FORMAT JSONEachRow;
{"grp_aggreg":"\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000"}
{"grp_aggreg":"\u0002\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000"}
SELECT grp_aggreg FROM data_02295 GROUP BY a, grp_aggreg WITH TOTALS SETTINGS optimize_aggregation_in_order = 0 FORMAT JSONEachRow;
{"grp_aggreg":"\u0002\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000"}
{"grp_aggreg":"\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000"}
SELECT grp_aggreg FROM data_02295 GROUP BY a, grp_aggreg WITH TOTALS SETTINGS optimize_aggregation_in_order = 1 FORMAT JSONEachRow;
{"grp_aggreg":"\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0001\u0000\u0000\u0000\u0000\u0000\u0000\u0000"}
{"grp_aggreg":"\u0002\u0002\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0003\u0000\u0000\u0000\u0000\u0000\u0000\u0000"}
-- regression for incorrect positions passed to finalizeChunk()
SELECT a, min(b), max(b) FROM data_02295 GROUP BY a ORDER BY a, count() SETTINGS optimize_aggregation_in_order = 1;
0 0 0
1 0 0
SELECT a, min(b), max(b) FROM data_02295 GROUP BY a ORDER BY a, count() SETTINGS optimize_aggregation_in_order = 1, max_threads = 1;
0 0 0
1 0 0
SELECT a, min(b), max(b) FROM data_02295 GROUP BY a WITH TOTALS ORDER BY a, count() SETTINGS optimize_aggregation_in_order = 1;
0 0 0
1 0 0

0 0 0
SELECT a, min(b), max(b) FROM data_02295 GROUP BY a WITH TOTALS ORDER BY a, count() SETTINGS optimize_aggregation_in_order = 1, max_threads = 1;
0 0 0
1 0 0

0 0 0
@ -0,0 +1,24 @@
drop table if exists data_02295;

create table data_02295 (
    -- the order of "a" and "b" is important here
    -- (since finalizeChunk() accepts positions and they may be wrong)
    b Int64,
    a Int64,
    grp_aggreg AggregateFunction(groupArrayArray, Array(UInt64))
) engine = MergeTree() order by a;
insert into data_02295 select 0 b, intDiv(number, 2) a, groupArrayArrayState([toUInt64(number)]) from numbers(4) group by a, b;

-- { echoOn }
SELECT grp_aggreg FROM data_02295 GROUP BY a, grp_aggreg SETTINGS optimize_aggregation_in_order = 0 FORMAT JSONEachRow;
SELECT grp_aggreg FROM data_02295 GROUP BY a, grp_aggreg SETTINGS optimize_aggregation_in_order = 1 FORMAT JSONEachRow;
SELECT grp_aggreg FROM data_02295 GROUP BY a, grp_aggreg WITH TOTALS SETTINGS optimize_aggregation_in_order = 0 FORMAT JSONEachRow;
SELECT grp_aggreg FROM data_02295 GROUP BY a, grp_aggreg WITH TOTALS SETTINGS optimize_aggregation_in_order = 1 FORMAT JSONEachRow;
-- regression for incorrect positions passed to finalizeChunk()
SELECT a, min(b), max(b) FROM data_02295 GROUP BY a ORDER BY a, count() SETTINGS optimize_aggregation_in_order = 1;
SELECT a, min(b), max(b) FROM data_02295 GROUP BY a ORDER BY a, count() SETTINGS optimize_aggregation_in_order = 1, max_threads = 1;
SELECT a, min(b), max(b) FROM data_02295 GROUP BY a WITH TOTALS ORDER BY a, count() SETTINGS optimize_aggregation_in_order = 1;
SELECT a, min(b), max(b) FROM data_02295 GROUP BY a WITH TOTALS ORDER BY a, count() SETTINGS optimize_aggregation_in_order = 1, max_threads = 1;
-- { echoOff }

drop table data_02295;
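A note on the comment inside the CREATE: declaring `b` before `a` makes the physical column order differ from the GROUP BY key order, which is exactly the condition under which wrong positions handed to finalizeChunk() would finalize the wrong column. An illustrative Python sketch of the invariant being tested (names hypothetical; the real function is C++):

    # Block layout in the test: physical order differs from key order.
    header = ["b", "a", "grp_aggreg"]
    # finalizeChunk() must receive the position of the aggregate-state column
    # computed against this header, not against the GROUP BY key order.
    aggregate_positions = [header.index("grp_aggreg")]
    assert header[aggregate_positions[0]] == "grp_aggreg"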
@ -1,23 +1,28 @@
-- Tags: no-tsan, no-parallel

DROP TABLE IF EXISTS test.hits_1m;
CREATE TABLE test.hits_1m as test.hits;
INSERT INTO test.hits_1m SELECT * FROM test.hits LIMIT 1000000;

CREATE DATABASE IF NOT EXISTS db_dict;
DROP DICTIONARY IF EXISTS db_dict.cache_hits;

CREATE DICTIONARY db_dict.cache_hits
(WatchID UInt64, UserID UInt64, SearchPhrase String)
PRIMARY KEY WatchID
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'hits' PASSWORD '' DB 'test'))
SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'hits_1m' PASSWORD '' DB 'test'))
LIFETIME(MIN 1 MAX 10)
LAYOUT(CACHE(SIZE_IN_CELLS 1 QUERY_WAIT_TIMEOUT_MILLISECONDS 60000));

SELECT count() FROM (SELECT WatchID, arrayDistinct(groupArray(dictGetUInt64( 'db_dict.cache_hits', 'UserID', toUInt64(WatchID)))) as arr
FROM test.hits PREWHERE WatchID % 5 == 0 GROUP BY WatchID order by length(arr) desc) WHERE arr = [0];
FROM test.hits_1m PREWHERE WatchID % 5 == 0 GROUP BY WatchID order by length(arr) desc) WHERE arr = [0];

SELECT count() FROM (SELECT WatchID, arrayDistinct(groupArray(dictGetUInt64( 'db_dict.cache_hits', 'UserID', toUInt64(WatchID)))) as arr
FROM test.hits PREWHERE WatchID % 7 == 0 GROUP BY WatchID order by length(arr) desc) WHERE arr = [0];
FROM test.hits_1m PREWHERE WatchID % 7 == 0 GROUP BY WatchID order by length(arr) desc) WHERE arr = [0];

SELECT count() FROM (SELECT WatchID, arrayDistinct(groupArray(dictGetUInt64( 'db_dict.cache_hits', 'UserID', toUInt64(WatchID)))) as arr
FROM test.hits PREWHERE WatchID % 13 == 0 GROUP BY WatchID order by length(arr) desc) WHERE arr = [0];
FROM test.hits_1m PREWHERE WatchID % 13 == 0 GROUP BY WatchID order by length(arr) desc) WHERE arr = [0];

DROP DICTIONARY IF EXISTS db_dict.cache_hits;
DROP DATABASE IF EXISTS db_dict;
DROP TABLE IF EXISTS hits_1m;
@ -33,6 +33,7 @@ categories_preferred_order = (

FROM_REF = ""
TO_REF = ""
SHA_IN_CHANGELOG = []  # type: List[str]


class Description:
@ -99,19 +100,7 @@ class Worker(Thread):
                logging.info("PR %s does not belong to the repo", api_pr.number)
                continue

            try:
                runner.run(
                    f"git merge-base --is-ancestor '{merge_commit}' '{TO_REF}'",
                    stderr=DEVNULL,
                )
                runner.run(
                    f"git merge-base --is-ancestor '{FROM_REF}' '{merge_commit}'",
                    stderr=DEVNULL,
                )
                in_changelog = True
            except CalledProcessError:
                # Commit is not between from and to refs
                continue
            in_changelog = merge_commit in SHA_IN_CHANGELOG
            if in_changelog:
                desc = generate_description(api_pr, self.repo)
                if desc is not None:
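The hunk above trades two `git merge-base --is-ancestor` subprocess calls per pull request for a single membership test against `SHA_IN_CHANGELOG`, which is filled once for the whole interval. A sketch of the idea (the two refs are placeholders taken from the version list later in this commit):

    import subprocess

    def git_output(cmd: str) -> str:
        return subprocess.check_output(cmd, shell=True, text=True).strip()

    # One subprocess for the whole run instead of two per PR.
    sha_in_changelog = set(
        git_output(
            "git log --format=format:%H v22.4.3.3-stable..v22.5.1.2079-stable"
        ).split("\n")
    )

    def pr_in_changelog(merge_commit: str) -> bool:
        return merge_commit in sha_in_changelog  # O(1) per PR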
@ -298,7 +287,7 @@ def write_changelog(fd: TextIO, descriptions: Dict[str, List[Description]]):

        fd.write("\n")

    for category in descriptions:
    for category in sorted(descriptions):
        if category not in seen_categories:
            fd.write(f"#### {category}\n\n")
            for desc in descriptions[category]:
@ -316,25 +305,23 @@ def check_refs(from_ref: Optional[str], to_ref: str):

    # Check from_ref
    if from_ref is None:
        FROM_REF = runner.run(f"git describe --abbrev=0 --tags '{TO_REF}~'")
        # Check if the previous tag is different for merge commits
        # I __assume__ we won't have octopus merges, at least for the tagged commits
        try:
            alternative_tag = runner.run(
                f"git describe --abbrev=0 --tags '{TO_REF}^2'", stderr=DEVNULL
            )
            if FROM_REF != alternative_tag:
                raise Exception(
                    f"Unable to get unified parent tag for {TO_REF}, "
                    f"define it manually, get {FROM_REF} and {alternative_tag}"
                )
        except CalledProcessError:
            pass
        # Get all tags pointing to TO_REF
        tags = runner.run(f"git tag --points-at '{TO_REF}^{{}}'")
        logging.info("All tags pointing to %s:\n%s", TO_REF, tags)
        exclude = " ".join([f"--exclude='{tag}'" for tag in tags.split("\n")])
        FROM_REF = runner.run(f"git describe --abbrev=0 --tags {exclude} '{TO_REF}'")
    else:
        runner.run(f"git rev-parse {FROM_REF}")
        FROM_REF = from_ref


def set_sha_in_changelog():
    global SHA_IN_CHANGELOG
    SHA_IN_CHANGELOG = runner.run(
        f"git log --format=format:%H {FROM_REF}..{TO_REF}"
    ).split("\n")


def main():
    log_levels = [logging.CRITICAL, logging.WARN, logging.INFO, logging.DEBUG]
    args = parse_args()
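The new FROM_REF logic replaces the fragile `TO_REF~` / `TO_REF^2` probing: it lists every tag on the target commit (peeling annotated tags via `^{}`), excludes them all, and lets `git describe` fall back to the previous tag in history. A standalone sketch of the same steps (requires git >= 2.13 for `--exclude`; the ref is a placeholder):

    import subprocess

    def run(cmd: str) -> str:
        return subprocess.check_output(cmd, shell=True, text=True).strip()

    to_ref = "v22.5.1.2079-stable"  # placeholder tag
    # '^{}' peels an annotated tag to the commit it points at, so *all* tags
    # on that commit are listed, not just the one we started from.
    tags = run(f"git tag --points-at '{to_ref}^{{}}'").split("\n")
    exclude = " ".join(f"--exclude='{tag}'" for tag in tags)
    # With every tag on TO_REF excluded, describe must reach the previous tag.
    from_ref = run(f"git describe --abbrev=0 --tags {exclude} '{to_ref}'")
    print(f"changelog interval: {from_ref}..{to_ref}")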
@ -350,6 +337,7 @@ def main():
    runner.run("git fetch --tags", stderr=DEVNULL)

    check_refs(args.from_ref, args.to_ref)
    set_sha_in_changelog()

    logging.info("Using %s..%s as changelog interval", FROM_REF, TO_REF)
@ -1,3 +1,4 @@
v22.5.1.2079-stable 2022-05-19
v22.4.5.9-stable 2022-05-06
v22.4.4.7-stable 2022-04-29
v22.4.3.3-stable 2022-04-26