mirror of
https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-26 01:22:04 +00:00
Merge branch 'master' of https://github.com/ClickHouse/ClickHouse
This commit is contained in: commit 00dc104025
12	.gitmodules	vendored
@@ -142,9 +142,6 @@
 [submodule "contrib/replxx"]
 	path = contrib/replxx
 	url = https://github.com/ClickHouse-Extras/replxx.git
-[submodule "contrib/ryu"]
-	path = contrib/ryu
-	url = https://github.com/ClickHouse-Extras/ryu.git
 [submodule "contrib/avro"]
 	path = contrib/avro
 	url = https://github.com/ClickHouse-Extras/avro.git
@@ -200,8 +197,7 @@
 	url = https://github.com/danlark1/miniselect
 [submodule "contrib/rocksdb"]
 	path = contrib/rocksdb
-	url = https://github.com/facebook/rocksdb
-	branch = v6.14.5
+	url = https://github.com/ClickHouse-Extras/rocksdb.git
 [submodule "contrib/xz"]
 	path = contrib/xz
 	url = https://github.com/xz-mirror/xz
@@ -209,3 +205,9 @@
 	path = contrib/abseil-cpp
 	url = https://github.com/ClickHouse-Extras/abseil-cpp.git
 	branch = lts_2020_02_25
+[submodule "contrib/dragonbox"]
+	path = contrib/dragonbox
+	url = https://github.com/ClickHouse-Extras/dragonbox.git
+[submodule "contrib/fast_float"]
+	path = contrib/fast_float
+	url = https://github.com/fastfloat/fast_float
128	CHANGELOG.md
@@ -15,7 +15,7 @@
 * Restrict to use of non-comparable data types (like `AggregateFunction`) in keys (Sorting key, Primary key, Partition key, and so on). [#16601](https://github.com/ClickHouse/ClickHouse/pull/16601) ([alesapin](https://github.com/alesapin)).
 * Remove `ANALYZE` and `AST` queries, and make the setting `enable_debug_queries` obsolete since now it is the part of full featured `EXPLAIN` query. [#16536](https://github.com/ClickHouse/ClickHouse/pull/16536) ([Ivan](https://github.com/abyss7)).
 * Aggregate functions `boundingRatio`, `rankCorr`, `retention`, `timeSeriesGroupSum`, `timeSeriesGroupRateSum`, `windowFunnel` were erroneously made case-insensitive. Now their names are made case sensitive as designed. Only functions that are specified in SQL standard or made for compatibility with other DBMS or functions similar to those should be case-insensitive. [#16407](https://github.com/ClickHouse/ClickHouse/pull/16407) ([alexey-milovidov](https://github.com/alexey-milovidov)).
-* Make `rankCorr` function return nan on insufficient data https://github.com/ClickHouse/ClickHouse/issues/16124. [#16135](https://github.com/ClickHouse/ClickHouse/pull/16135) ([hexiaoting](https://github.com/hexiaoting)).
+* Make `rankCorr` function return nan on insufficient data [#16124](https://github.com/ClickHouse/ClickHouse/issues/16124). [#16135](https://github.com/ClickHouse/ClickHouse/pull/16135) ([hexiaoting](https://github.com/hexiaoting)).
 * When upgrading from versions older than 20.5, if rolling update is performed and cluster contains both versions 20.5 or greater and less than 20.5, if ClickHouse nodes with old versions are restarted and old version has been started up in presence of newer versions, it may lead to `Part ... intersects previous part` errors. To prevent this error, first install newer clickhouse-server packages on all cluster nodes and then do restarts (so, when clickhouse-server is restarted, it will start up with the new version).

 #### New Feature
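As an editor's illustration (not part of the commit), the `rankCorr` behaviour above can be checked with a minimal query; a single (x, y) pair is insufficient for a rank correlation, so the function should return `nan` rather than throw:

```sql
-- One (x, y) pair is not enough to compute a rank correlation,
-- so rankCorr is expected to return nan instead of raising an error.
SELECT rankCorr(x, y) FROM (SELECT 1 AS x, 1 AS y);
```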
@@ -33,7 +33,7 @@
 * Now we can provide identifiers via query parameters. And these parameters can be used as table objects or columns. [#16594](https://github.com/ClickHouse/ClickHouse/pull/16594) ([Amos Bird](https://github.com/amosbird)).
 * Added big integers (UInt256, Int128, Int256) and UUID data types support for MergeTree BloomFilter index. Big integers is an experimental feature. [#16642](https://github.com/ClickHouse/ClickHouse/pull/16642) ([Maksim Kita](https://github.com/kitaisreal)).
 * Add `farmFingerprint64` function (non-cryptographic string hashing). [#16570](https://github.com/ClickHouse/ClickHouse/pull/16570) ([Jacob Hayes](https://github.com/JacobHayes)).
-* Add `log_queries_min_query_duration_ms`, only queries slower then the value of this setting will go to `query_log`/`query_thread_log` (i.e. something like `slow_query_log` in mysql). [#16529](https://github.com/ClickHouse/ClickHouse/pull/16529) ([Azat Khuzhin](https://github.com/azat)).
+* Add `log_queries_min_query_duration_ms`, only queries slower than the value of this setting will go to `query_log`/`query_thread_log` (i.e. something like `slow_query_log` in mysql). [#16529](https://github.com/ClickHouse/ClickHouse/pull/16529) ([Azat Khuzhin](https://github.com/azat)).
 * Ability to create a docker image on the top of `Alpine`. Uses precompiled binary and glibc components from ubuntu 20.04. [#16479](https://github.com/ClickHouse/ClickHouse/pull/16479) ([filimonov](https://github.com/filimonov)).
 * Added `toUUIDOrNull`, `toUUIDOrZero` cast functions. [#16337](https://github.com/ClickHouse/ClickHouse/pull/16337) ([Maksim Kita](https://github.com/kitaisreal)).
 * Add `max_concurrent_queries_for_all_users` setting, see [#6636](https://github.com/ClickHouse/ClickHouse/issues/6636) for use cases. [#16154](https://github.com/ClickHouse/ClickHouse/pull/16154) ([nvartolomei](https://github.com/nvartolomei)).
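A minimal usage sketch for the new `log_queries_min_query_duration_ms` setting described above; the 1000 ms threshold is illustrative:

```sql
SET log_queries = 1;
-- Only queries that run for at least 1000 ms should reach
-- system.query_log / system.query_thread_log.
SET log_queries_min_query_duration_ms = 1000;
SELECT count() FROM system.numbers LIMIT 100000000;
```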
@@ -178,7 +178,7 @@
 * Add `JSONStrings` format which output data in arrays of strings. [#14333](https://github.com/ClickHouse/ClickHouse/pull/14333) ([hcz](https://github.com/hczhcz)).
 * Add support for "Raw" column format for `Regexp` format. It allows to simply extract subpatterns as a whole without any escaping rules. [#15363](https://github.com/ClickHouse/ClickHouse/pull/15363) ([alexey-milovidov](https://github.com/alexey-milovidov)).
 * Allow configurable `NULL` representation for `TSV` output format. It is controlled by the setting `output_format_tsv_null_representation` which is `\N` by default. This closes [#9375](https://github.com/ClickHouse/ClickHouse/issues/9375). Note that the setting only controls output format and `\N` is the only supported `NULL` representation for `TSV` input format. [#14586](https://github.com/ClickHouse/ClickHouse/pull/14586) ([Kruglov Pavel](https://github.com/Avogar)).
-* Support Decimal data type for `MaterializedMySQL`. `MaterializedMySQL` is an experimental feature. [#14535](https://github.com/ClickHouse/ClickHouse/pull/14535) ([Winter Zhang](https://github.com/zhang2014)).
+* Support Decimal data type for `MaterializeMySQL`. `MaterializeMySQL` is an experimental feature. [#14535](https://github.com/ClickHouse/ClickHouse/pull/14535) ([Winter Zhang](https://github.com/zhang2014)).
 * Add new feature: `SHOW DATABASES LIKE 'xxx'`. [#14521](https://github.com/ClickHouse/ClickHouse/pull/14521) ([hexiaoting](https://github.com/hexiaoting)).
 * Added a script to import (arbitrary) git repository to ClickHouse as a sample dataset. [#14471](https://github.com/ClickHouse/ClickHouse/pull/14471) ([alexey-milovidov](https://github.com/alexey-milovidov)).
 * Now insert statements can have asterisk (or variants) with column transformers in the column list. [#14453](https://github.com/ClickHouse/ClickHouse/pull/14453) ([Amos Bird](https://github.com/amosbird)).
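A short sketch of the configurable TSV `NULL` representation mentioned above (output only; `\N` stays the sole representation accepted on input):

```sql
-- Default output is \N; override it for the TSV output format:
SET output_format_tsv_null_representation = 'NULL';
SELECT NULL AS x FORMAT TSV;
```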
@@ -200,18 +200,18 @@
 * Fix a very wrong code in TwoLevelStringHashTable implementation, which might lead to memory leak. [#16264](https://github.com/ClickHouse/ClickHouse/pull/16264) ([Amos Bird](https://github.com/amosbird)).
 * Fix segfault in some cases of wrong aggregation in lambdas. [#16082](https://github.com/ClickHouse/ClickHouse/pull/16082) ([Anton Popov](https://github.com/CurtizJ)).
 * Fix `ALTER MODIFY ... ORDER BY` query hang for `ReplicatedVersionedCollapsingMergeTree`. This fixes [#15980](https://github.com/ClickHouse/ClickHouse/issues/15980). [#16011](https://github.com/ClickHouse/ClickHouse/pull/16011) ([alesapin](https://github.com/alesapin)).
-* `MaterializedMySQL` (experimental feature): Fix collate name & charset name parser and support `length = 0` for string type. [#16008](https://github.com/ClickHouse/ClickHouse/pull/16008) ([Winter Zhang](https://github.com/zhang2014)).
+* `MaterializeMySQL` (experimental feature): Fix collate name & charset name parser and support `length = 0` for string type. [#16008](https://github.com/ClickHouse/ClickHouse/pull/16008) ([Winter Zhang](https://github.com/zhang2014)).
 * Allow to use `direct` layout for dictionaries with complex keys. [#16007](https://github.com/ClickHouse/ClickHouse/pull/16007) ([Anton Popov](https://github.com/CurtizJ)).
 * Prevent replica hang for 5-10 mins when replication error happens after a period of inactivity. [#15987](https://github.com/ClickHouse/ClickHouse/pull/15987) ([filimonov](https://github.com/filimonov)).
 * Fix rare segfaults when inserting into or selecting from MaterializedView and concurrently dropping target table (for Atomic database engine). [#15984](https://github.com/ClickHouse/ClickHouse/pull/15984) ([tavplubix](https://github.com/tavplubix)).
 * Fix ambiguity in parsing of settings profiles: `CREATE USER ... SETTINGS profile readonly` is now considered as using a profile named `readonly`, not a setting named `profile` with the readonly constraint. This fixes [#15628](https://github.com/ClickHouse/ClickHouse/issues/15628). [#15982](https://github.com/ClickHouse/ClickHouse/pull/15982) ([Vitaly Baranov](https://github.com/vitlibar)).
-* `MaterializedMySQL` (experimental feature): Fix crash on create database failure. [#15954](https://github.com/ClickHouse/ClickHouse/pull/15954) ([Winter Zhang](https://github.com/zhang2014)).
+* `MaterializeMySQL` (experimental feature): Fix crash on create database failure. [#15954](https://github.com/ClickHouse/ClickHouse/pull/15954) ([Winter Zhang](https://github.com/zhang2014)).
 * Fixed `DROP TABLE IF EXISTS` failure with `Table ... doesn't exist` error when table is concurrently renamed (for Atomic database engine). Fixed rare deadlock when concurrently executing some DDL queries with multiple tables (like `DROP DATABASE` and `RENAME TABLE`) - Fixed `DROP/DETACH DATABASE` failure with `Table ... doesn't exist` when concurrently executing `DROP/DETACH TABLE`. [#15934](https://github.com/ClickHouse/ClickHouse/pull/15934) ([tavplubix](https://github.com/tavplubix)).
 * Fix incorrect empty result for query from `Distributed` table if query has `WHERE`, `PREWHERE` and `GLOBAL IN`. Fixes [#15792](https://github.com/ClickHouse/ClickHouse/issues/15792). [#15933](https://github.com/ClickHouse/ClickHouse/pull/15933) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
 * Fixes [#12513](https://github.com/ClickHouse/ClickHouse/issues/12513): difference expressions with same alias when query is reanalyzed. [#15886](https://github.com/ClickHouse/ClickHouse/pull/15886) ([Winter Zhang](https://github.com/zhang2014)).
 * Fix possible very rare deadlocks in RBAC implementation. [#15875](https://github.com/ClickHouse/ClickHouse/pull/15875) ([Vitaly Baranov](https://github.com/vitlibar)).
 * Fix exception `Block structure mismatch` in `SELECT ... ORDER BY DESC` queries which were executed after `ALTER MODIFY COLUMN` query. Fixes [#15800](https://github.com/ClickHouse/ClickHouse/issues/15800). [#15852](https://github.com/ClickHouse/ClickHouse/pull/15852) ([alesapin](https://github.com/alesapin)).
-* `MaterializedMySQL` (experimental feature): Fix `select count()` inaccuracy. [#15767](https://github.com/ClickHouse/ClickHouse/pull/15767) ([tavplubix](https://github.com/tavplubix)).
+* `MaterializeMySQL` (experimental feature): Fix `select count()` inaccuracy. [#15767](https://github.com/ClickHouse/ClickHouse/pull/15767) ([tavplubix](https://github.com/tavplubix)).
 * Fix some cases of queries, in which only virtual columns are selected. Previously `Not found column _nothing in block` exception may be thrown. Fixes [#12298](https://github.com/ClickHouse/ClickHouse/issues/12298). [#15756](https://github.com/ClickHouse/ClickHouse/pull/15756) ([Anton Popov](https://github.com/CurtizJ)).
 * Fix drop of materialized view with inner table in Atomic database (hangs all subsequent DROP TABLE due to hang of the worker thread, due to recursive DROP TABLE for inner table of MV). [#15743](https://github.com/ClickHouse/ClickHouse/pull/15743) ([Azat Khuzhin](https://github.com/azat)).
 * Possibility to move part to another disk/volume if the first attempt was failed. [#15723](https://github.com/ClickHouse/ClickHouse/pull/15723) ([Pavel Kovalenko](https://github.com/Jokser)).
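A brief sketch of the settings-profile parsing fix above; `u1` and `u2` are hypothetical users:

```sql
-- Now parsed as "assign the settings profile named readonly":
CREATE USER u1 SETTINGS PROFILE 'readonly';
-- To constrain an individual setting instead, name it explicitly:
CREATE USER u2 SETTINGS max_memory_usage = 10000000000 READONLY;
```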
@@ -243,37 +243,37 @@
 * Fix hang of queries with a lot of subqueries to same table of `MySQL` engine. Previously, if there were more than 16 subqueries to same `MySQL` table in query, it hang forever. [#15299](https://github.com/ClickHouse/ClickHouse/pull/15299) ([Anton Popov](https://github.com/CurtizJ)).
 * Fix MSan report in QueryLog. Uninitialized memory can be used for the field `memory_usage`. [#15258](https://github.com/ClickHouse/ClickHouse/pull/15258) ([alexey-milovidov](https://github.com/alexey-milovidov)).
 * Fix 'Unknown identifier' in GROUP BY when query has JOIN over Merge table. [#15242](https://github.com/ClickHouse/ClickHouse/pull/15242) ([Artem Zuikov](https://github.com/4ertus2)).
-* Fix instance crash when using `joinGet` with `LowCardinality` types. This fixes https://github.com/ClickHouse/ClickHouse/issues/15214. [#15220](https://github.com/ClickHouse/ClickHouse/pull/15220) ([Amos Bird](https://github.com/amosbird)).
+* Fix instance crash when using `joinGet` with `LowCardinality` types. This fixes [#15214](https://github.com/ClickHouse/ClickHouse/issues/15214). [#15220](https://github.com/ClickHouse/ClickHouse/pull/15220) ([Amos Bird](https://github.com/amosbird)).
 * Fix bug in table engine `Buffer` which doesn't allow to insert data of new structure into `Buffer` after `ALTER` query. Fixes [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117). [#15192](https://github.com/ClickHouse/ClickHouse/pull/15192) ([alesapin](https://github.com/alesapin)).
 * Adjust Decimal field size in MySQL column definition packet. [#15152](https://github.com/ClickHouse/ClickHouse/pull/15152) ([maqroll](https://github.com/maqroll)).
 * Fixes `Data compressed with different methods` in `join_algorithm='auto'`. Keep LowCardinality as type for left table join key in `join_algorithm='partial_merge'`. [#15088](https://github.com/ClickHouse/ClickHouse/pull/15088) ([Artem Zuikov](https://github.com/4ertus2)).
 * Update `jemalloc` to fix `percpu_arena` with affinity mask. [#15035](https://github.com/ClickHouse/ClickHouse/pull/15035) ([Azat Khuzhin](https://github.com/azat)). [#14957](https://github.com/ClickHouse/ClickHouse/pull/14957) ([Azat Khuzhin](https://github.com/azat)).
-* We already use padded comparison between String and FixedString (https://github.com/ClickHouse/ClickHouse/blob/master/src/Functions/FunctionsComparison.h#L333). This PR applies the same logic to field comparison which corrects the usage of FixedString as primary keys. This fixes https://github.com/ClickHouse/ClickHouse/issues/14908. [#15033](https://github.com/ClickHouse/ClickHouse/pull/15033) ([Amos Bird](https://github.com/amosbird)).
+* We already use padded comparison between String and FixedString (https://github.com/ClickHouse/ClickHouse/blob/master/src/Functions/FunctionsComparison.h#L333). This PR applies the same logic to field comparison which corrects the usage of FixedString as primary keys. This fixes [#14908](https://github.com/ClickHouse/ClickHouse/issues/14908). [#15033](https://github.com/ClickHouse/ClickHouse/pull/15033) ([Amos Bird](https://github.com/amosbird)).
 * If function `bar` was called with specifically crafted arguments, buffer overflow was possible. This closes [#13926](https://github.com/ClickHouse/ClickHouse/issues/13926). [#15028](https://github.com/ClickHouse/ClickHouse/pull/15028) ([alexey-milovidov](https://github.com/alexey-milovidov)).
 * Fixed `Cannot rename ... errno: 22, strerror: Invalid argument` error on DDL query execution in Atomic database when running clickhouse-server in Docker on Mac OS. [#15024](https://github.com/ClickHouse/ClickHouse/pull/15024) ([tavplubix](https://github.com/tavplubix)).
 * Fix crash in RIGHT or FULL JOIN with join_algorith='auto' when memory limit exceeded and we should change HashJoin with MergeJoin. [#15002](https://github.com/ClickHouse/ClickHouse/pull/15002) ([Artem Zuikov](https://github.com/4ertus2)).
 * Now settings `number_of_free_entries_in_pool_to_execute_mutation` and `number_of_free_entries_in_pool_to_lower_max_size_of_merge` can be equal to `background_pool_size`. [#14975](https://github.com/ClickHouse/ClickHouse/pull/14975) ([alesapin](https://github.com/alesapin)).
 * Fix to make predicate push down work when subquery contains `finalizeAggregation` function. Fixes [#14847](https://github.com/ClickHouse/ClickHouse/issues/14847). [#14937](https://github.com/ClickHouse/ClickHouse/pull/14937) ([filimonov](https://github.com/filimonov)).
-* Publish CPU frequencies per logical core in `system.asynchronous_metrics`. This fixes https://github.com/ClickHouse/ClickHouse/issues/14923. [#14924](https://github.com/ClickHouse/ClickHouse/pull/14924) ([Alexander Kuzmenkov](https://github.com/akuzm)).
-* `MaterializedMySQL` (experimental feature): Fixed `.metadata.tmp File exists` error. [#14898](https://github.com/ClickHouse/ClickHouse/pull/14898) ([Winter Zhang](https://github.com/zhang2014)).
+* Publish CPU frequencies per logical core in `system.asynchronous_metrics`. This fixes [#14923](https://github.com/ClickHouse/ClickHouse/issues/14923). [#14924](https://github.com/ClickHouse/ClickHouse/pull/14924) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* `MaterializeMySQL` (experimental feature): Fixed `.metadata.tmp File exists` error. [#14898](https://github.com/ClickHouse/ClickHouse/pull/14898) ([Winter Zhang](https://github.com/zhang2014)).
 * Fix the issue when some invocations of `extractAllGroups` function may trigger "Memory limit exceeded" error. This fixes [#13383](https://github.com/ClickHouse/ClickHouse/issues/13383). [#14889](https://github.com/ClickHouse/ClickHouse/pull/14889) ([alexey-milovidov](https://github.com/alexey-milovidov)).
 * Fix SIGSEGV for an attempt to INSERT into StorageFile with file descriptor. [#14887](https://github.com/ClickHouse/ClickHouse/pull/14887) ([Azat Khuzhin](https://github.com/azat)).
 * Fixed segfault in `cache` dictionary [#14837](https://github.com/ClickHouse/ClickHouse/issues/14837). [#14879](https://github.com/ClickHouse/ClickHouse/pull/14879) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* `MaterializedMySQL` (experimental feature): Fixed bug in parsing MySQL binlog events, which causes `Attempt to read after eof` and `Packet payload is not fully read` in `MaterializeMySQL` database engine. [#14852](https://github.com/ClickHouse/ClickHouse/pull/14852) ([Winter Zhang](https://github.com/zhang2014)).
+* `MaterializeMySQL` (experimental feature): Fixed bug in parsing MySQL binlog events, which causes `Attempt to read after eof` and `Packet payload is not fully read` in `MaterializeMySQL` database engine. [#14852](https://github.com/ClickHouse/ClickHouse/pull/14852) ([Winter Zhang](https://github.com/zhang2014)).
 * Fix rare error in `SELECT` queries when the queried column has `DEFAULT` expression which depends on the other column which also has `DEFAULT` and not present in select query and not exists on disk. Partially fixes [#14531](https://github.com/ClickHouse/ClickHouse/issues/14531). [#14845](https://github.com/ClickHouse/ClickHouse/pull/14845) ([alesapin](https://github.com/alesapin)).
 * Fix a problem where the server may get stuck on startup while talking to ZooKeeper, if the configuration files have to be fetched from ZK (using the `from_zk` include option). This fixes [#14814](https://github.com/ClickHouse/ClickHouse/issues/14814). [#14843](https://github.com/ClickHouse/ClickHouse/pull/14843) ([Alexander Kuzmenkov](https://github.com/akuzm)).
 * Fix wrong monotonicity detection for shrunk `Int -> Int` cast of signed types. It might lead to incorrect query result. This bug is unveiled in [#14513](https://github.com/ClickHouse/ClickHouse/issues/14513). [#14783](https://github.com/ClickHouse/ClickHouse/pull/14783) ([Amos Bird](https://github.com/amosbird)).
-* `Replace` column transformer should replace identifiers with cloned ASTs. This fixes https://github.com/ClickHouse/ClickHouse/issues/14695 . [#14734](https://github.com/ClickHouse/ClickHouse/pull/14734) ([Amos Bird](https://github.com/amosbird)).
+* `Replace` column transformer should replace identifiers with cloned ASTs. This fixes [#14695](https://github.com/ClickHouse/ClickHouse/issues/14695) . [#14734](https://github.com/ClickHouse/ClickHouse/pull/14734) ([Amos Bird](https://github.com/amosbird)).
 * Fixed missed default database name in metadata of materialized view when executing `ALTER ... MODIFY QUERY`. [#14664](https://github.com/ClickHouse/ClickHouse/pull/14664) ([tavplubix](https://github.com/tavplubix)).
 * Fix bug when `ALTER UPDATE` mutation with `Nullable` column in assignment expression and constant value (like `UPDATE x = 42`) leads to incorrect value in column or segfault. Fixes [#13634](https://github.com/ClickHouse/ClickHouse/issues/13634), [#14045](https://github.com/ClickHouse/ClickHouse/issues/14045). [#14646](https://github.com/ClickHouse/ClickHouse/pull/14646) ([alesapin](https://github.com/alesapin)).
 * Fix wrong Decimal multiplication result caused wrong decimal scale of result column. [#14603](https://github.com/ClickHouse/ClickHouse/pull/14603) ([Artem Zuikov](https://github.com/4ertus2)).
 * Fix function `has` with `LowCardinality` of `Nullable`. [#14591](https://github.com/ClickHouse/ClickHouse/pull/14591) ([Mike](https://github.com/myrrc)).
 * Cleanup data directory after Zookeeper exceptions during CreateQuery for StorageReplicatedMergeTree Engine. [#14563](https://github.com/ClickHouse/ClickHouse/pull/14563) ([Bharat Nallan](https://github.com/bharatnc)).
 * Fix rare segfaults in functions with combinator `-Resample`, which could appear in result of overflow with very large parameters. [#14562](https://github.com/ClickHouse/ClickHouse/pull/14562) ([Anton Popov](https://github.com/CurtizJ)).
-* Fix a bug when converting `Nullable(String)` to Enum. Introduced by https://github.com/ClickHouse/ClickHouse/pull/12745. This fixes https://github.com/ClickHouse/ClickHouse/issues/14435. [#14530](https://github.com/ClickHouse/ClickHouse/pull/14530) ([Amos Bird](https://github.com/amosbird)).
+* Fix a bug when converting `Nullable(String)` to Enum. Introduced by [#12745](https://github.com/ClickHouse/ClickHouse/pull/12745). This fixes [#14435](https://github.com/ClickHouse/ClickHouse/issues/14435). [#14530](https://github.com/ClickHouse/ClickHouse/pull/14530) ([Amos Bird](https://github.com/amosbird)).
 * Fixed the incorrect sorting order of `Nullable` column. This fixes [#14344](https://github.com/ClickHouse/ClickHouse/issues/14344). [#14495](https://github.com/ClickHouse/ClickHouse/pull/14495) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
 * Fix `currentDatabase()` function cannot be used in `ON CLUSTER` ddl query. [#14211](https://github.com/ClickHouse/ClickHouse/pull/14211) ([Winter Zhang](https://github.com/zhang2014)).
-* `MaterializedMySQL` (experimental feature): Fixed `Packet payload is not fully read` error in `MaterializeMySQL` database engine. [#14696](https://github.com/ClickHouse/ClickHouse/pull/14696) ([BohuTANG](https://github.com/BohuTANG)).
+* `MaterializeMySQL` (experimental feature): Fixed `Packet payload is not fully read` error in `MaterializeMySQL` database engine. [#14696](https://github.com/ClickHouse/ClickHouse/pull/14696) ([BohuTANG](https://github.com/BohuTANG)).

 #### Improvement

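A minimal sketch of the padded `String`/`FixedString` comparison applied to primary keys, assuming a scratch table `t`:

```sql
CREATE TABLE t (k FixedString(5)) ENGINE = MergeTree ORDER BY k;
INSERT INTO t VALUES ('ab');
-- Index analysis now pads the shorter String literal with zero bytes,
-- matching the stored FixedString(5) key, so the row is found:
SELECT count() FROM t WHERE k = 'ab';
```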
@@ -308,7 +308,7 @@
 * Add an option to skip access checks for `DiskS3`. `s3` disk is an experimental feature. [#14497](https://github.com/ClickHouse/ClickHouse/pull/14497) ([Pavel Kovalenko](https://github.com/Jokser)).
 * Speed up server shutdown process if there are ongoing S3 requests. [#14496](https://github.com/ClickHouse/ClickHouse/pull/14496) ([Pavel Kovalenko](https://github.com/Jokser)).
 * `SYSTEM RELOAD CONFIG` now throws an exception if failed to reload and continues using the previous users.xml. The background periodic reloading also continues using the previous users.xml if failed to reload. [#14492](https://github.com/ClickHouse/ClickHouse/pull/14492) ([Vitaly Baranov](https://github.com/vitlibar)).
-* For INSERTs with inline data in VALUES format in the script mode of `clickhouse-client`, support semicolon as the data terminator, in addition to the new line. Closes https://github.com/ClickHouse/ClickHouse/issues/12288. [#13192](https://github.com/ClickHouse/ClickHouse/pull/13192) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* For INSERTs with inline data in VALUES format in the script mode of `clickhouse-client`, support semicolon as the data terminator, in addition to the new line. Closes [#12288](https://github.com/ClickHouse/ClickHouse/issues/12288). [#13192](https://github.com/ClickHouse/ClickHouse/pull/13192) ([Alexander Kuzmenkov](https://github.com/akuzm)).
 * Support custom codecs in compact parts. [#12183](https://github.com/ClickHouse/ClickHouse/pull/12183) ([Anton Popov](https://github.com/CurtizJ)).

 #### Performance Improvement
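For the `clickhouse-client` script-mode change above, a hedged example with a hypothetical table `t`:

```sql
-- Inline VALUES data may now be terminated by a semicolon,
-- not only by a new line:
INSERT INTO t VALUES (1, 'a'), (2, 'b');
```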
@@ -320,7 +320,7 @@
 * Improve performance of 256-bit types using (u)int64_t as base type for wide integers. Original wide integers use 8-bit types as base. [#14859](https://github.com/ClickHouse/ClickHouse/pull/14859) ([Artem Zuikov](https://github.com/4ertus2)).
 * Explicitly use a temporary disk to store vertical merge temporary data. [#15639](https://github.com/ClickHouse/ClickHouse/pull/15639) ([Grigory Pervakov](https://github.com/GrigoryPervakov)).
 * Use one S3 DeleteObjects request instead of multiple DeleteObject in a loop. No any functionality changes, so covered by existing tests like integration/test_log_family_s3. [#15238](https://github.com/ClickHouse/ClickHouse/pull/15238) ([ianton-ru](https://github.com/ianton-ru)).
-* Fix `DateTime <op> DateTime` mistakenly choosing the slow generic implementation. This fixes https://github.com/ClickHouse/ClickHouse/issues/15153. [#15178](https://github.com/ClickHouse/ClickHouse/pull/15178) ([Amos Bird](https://github.com/amosbird)).
+* Fix `DateTime <op> DateTime` mistakenly choosing the slow generic implementation. This fixes [#15153](https://github.com/ClickHouse/ClickHouse/issues/15153). [#15178](https://github.com/ClickHouse/ClickHouse/pull/15178) ([Amos Bird](https://github.com/amosbird)).
 * Improve performance of GROUP BY key of type `FixedString`. [#15034](https://github.com/ClickHouse/ClickHouse/pull/15034) ([Amos Bird](https://github.com/amosbird)).
 * Only `mlock` code segment when starting clickhouse-server. In previous versions, all mapped regions were locked in memory, including debug info. Debug info is usually splitted to a separate file but if it isn't, it led to +2..3 GiB memory usage. [#14929](https://github.com/ClickHouse/ClickHouse/pull/14929) ([alexey-milovidov](https://github.com/alexey-milovidov)).
 * ClickHouse binary become smaller due to link time optimization.
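A sketch of the `DateTime <op> DateTime` fast path noted above; the comparison is expected to take the specialized numeric implementation rather than the generic one:

```sql
-- Compare two DateTime expressions row by row:
SELECT sum(a < b) FROM (SELECT now() AS a, now() + 1 AS b FROM numbers(10));
```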
@@ -387,7 +387,7 @@
 * Allow to use direct layout for dictionaries with complex keys. [#16007](https://github.com/ClickHouse/ClickHouse/pull/16007) ([Anton Popov](https://github.com/CurtizJ)).
 * Prevent replica hang for 5-10 mins when replication error happens after a period of inactivity. [#15987](https://github.com/ClickHouse/ClickHouse/pull/15987) ([filimonov](https://github.com/filimonov)).
 * Fix rare segfaults when inserting into or selecting from MaterializedView and concurrently dropping target table (for Atomic database engine). [#15984](https://github.com/ClickHouse/ClickHouse/pull/15984) ([tavplubix](https://github.com/tavplubix)).
-* Fix ambiguity in parsing of settings profiles: `CREATE USER ... SETTINGS profile readonly` is now considered as using a profile named `readonly`, not a setting named `profile` with the readonly constraint. This fixes https://github.com/ClickHouse/ClickHouse/issues/15628. [#15982](https://github.com/ClickHouse/ClickHouse/pull/15982) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix ambiguity in parsing of settings profiles: `CREATE USER ... SETTINGS profile readonly` is now considered as using a profile named `readonly`, not a setting named `profile` with the readonly constraint. This fixes [#15628](https://github.com/ClickHouse/ClickHouse/issues/15628). [#15982](https://github.com/ClickHouse/ClickHouse/pull/15982) ([Vitaly Baranov](https://github.com/vitlibar)).
 * Fix a crash when database creation fails. [#15954](https://github.com/ClickHouse/ClickHouse/pull/15954) ([Winter Zhang](https://github.com/zhang2014)).
 * Fixed `DROP TABLE IF EXISTS` failure with `Table ... doesn't exist` error when table is concurrently renamed (for Atomic database engine). Fixed rare deadlock when concurrently executing some DDL queries with multiple tables (like `DROP DATABASE` and `RENAME TABLE`) Fixed `DROP/DETACH DATABASE` failure with `Table ... doesn't exist` when concurrently executing `DROP/DETACH TABLE`. [#15934](https://github.com/ClickHouse/ClickHouse/pull/15934) ([tavplubix](https://github.com/tavplubix)).
 * Fix incorrect empty result for query from `Distributed` table if query has `WHERE`, `PREWHERE` and `GLOBAL IN`. Fixes [#15792](https://github.com/ClickHouse/ClickHouse/issues/15792). [#15933](https://github.com/ClickHouse/ClickHouse/pull/15933) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
@@ -398,7 +398,7 @@
 * Fixed too low default value of `max_replicated_logs_to_keep` setting, which might cause replicas to become lost too often. Improve lost replica recovery process by choosing the most up-to-date replica to clone. Also do not remove old parts from lost replica, detach them instead. [#15701](https://github.com/ClickHouse/ClickHouse/pull/15701) ([tavplubix](https://github.com/tavplubix)).
 * Fix error `Cannot add simple transform to empty Pipe` which happened while reading from `Buffer` table which has different structure than destination table. It was possible if destination table returned empty result for query. Fixes [#15529](https://github.com/ClickHouse/ClickHouse/issues/15529). [#15662](https://github.com/ClickHouse/ClickHouse/pull/15662) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
 * Fixed bug with globs in S3 table function, region from URL was not applied to S3 client configuration. [#15646](https://github.com/ClickHouse/ClickHouse/pull/15646) ([Vladimir Chebotarev](https://github.com/excitoon)).
-* Decrement the `ReadonlyReplica` metric when detaching read-only tables. This fixes https://github.com/ClickHouse/ClickHouse/issues/15598. [#15592](https://github.com/ClickHouse/ClickHouse/pull/15592) ([sundyli](https://github.com/sundy-li)).
+* Decrement the `ReadonlyReplica` metric when detaching read-only tables. This fixes [#15598](https://github.com/ClickHouse/ClickHouse/issues/15598). [#15592](https://github.com/ClickHouse/ClickHouse/pull/15592) ([sundyli](https://github.com/sundy-li)).
 * Throw an error when a single parameter is passed to ReplicatedMergeTree instead of ignoring it. [#15516](https://github.com/ClickHouse/ClickHouse/pull/15516) ([nvartolomei](https://github.com/nvartolomei)).

 #### Improvement
@@ -422,11 +422,11 @@
 * Fix `Missing columns` errors when selecting columns which absent in data, but depend on other columns which also absent in data. Fixes [#15530](https://github.com/ClickHouse/ClickHouse/issues/15530). [#15532](https://github.com/ClickHouse/ClickHouse/pull/15532) ([alesapin](https://github.com/alesapin)).
 * Fix bug with event subscription in DDLWorker which rarely may lead to query hangs in `ON CLUSTER`. Introduced in [#13450](https://github.com/ClickHouse/ClickHouse/issues/13450). [#15477](https://github.com/ClickHouse/ClickHouse/pull/15477) ([alesapin](https://github.com/alesapin)).
 * Report proper error when the second argument of `boundingRatio` aggregate function has a wrong type. [#15407](https://github.com/ClickHouse/ClickHouse/pull/15407) ([detailyang](https://github.com/detailyang)).
-* Fix bug where queries like SELECT toStartOfDay(today()) fail complaining about empty time_zone argument. [#15319](https://github.com/ClickHouse/ClickHouse/pull/15319) ([Bharat Nallan](https://github.com/bharatnc)).
+* Fix bug where queries like `SELECT toStartOfDay(today())` fail complaining about empty time_zone argument. [#15319](https://github.com/ClickHouse/ClickHouse/pull/15319) ([Bharat Nallan](https://github.com/bharatnc)).
 * Fix race condition during MergeTree table rename and background cleanup. [#15304](https://github.com/ClickHouse/ClickHouse/pull/15304) ([alesapin](https://github.com/alesapin)).
 * Fix rare race condition on server startup when system.logs are enabled. [#15300](https://github.com/ClickHouse/ClickHouse/pull/15300) ([alesapin](https://github.com/alesapin)).
 * Fix MSan report in QueryLog. Uninitialized memory can be used for the field `memory_usage`. [#15258](https://github.com/ClickHouse/ClickHouse/pull/15258) ([alexey-milovidov](https://github.com/alexey-milovidov)).
-* Fix instance crash when using joinGet with LowCardinality types. This fixes https://github.com/ClickHouse/ClickHouse/issues/15214. [#15220](https://github.com/ClickHouse/ClickHouse/pull/15220) ([Amos Bird](https://github.com/amosbird)).
+* Fix instance crash when using joinGet with LowCardinality types. This fixes [#15214](https://github.com/ClickHouse/ClickHouse/issues/15214). [#15220](https://github.com/ClickHouse/ClickHouse/pull/15220) ([Amos Bird](https://github.com/amosbird)).
 * Fix bug in table engine `Buffer` which doesn't allow to insert data of new structure into `Buffer` after `ALTER` query. Fixes [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117). [#15192](https://github.com/ClickHouse/ClickHouse/pull/15192) ([alesapin](https://github.com/alesapin)).
 * Adjust decimals field size in mysql column definition packet. [#15152](https://github.com/ClickHouse/ClickHouse/pull/15152) ([maqroll](https://github.com/maqroll)).
 * Fixed `Cannot rename ... errno: 22, strerror: Invalid argument` error on DDL query execution in Atomic database when running clickhouse-server in docker on Mac OS. [#15024](https://github.com/ClickHouse/ClickHouse/pull/15024) ([tavplubix](https://github.com/tavplubix)).
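The `toStartOfDay(today())` fix above can be checked directly: `today()` returns a `Date`, which carries no time zone, and the function now falls back to the server time zone instead of failing on an empty time_zone argument:

```sql
SELECT toStartOfDay(today());
```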
@@ -455,10 +455,10 @@
 * Fix bug when `ALTER UPDATE` mutation with Nullable column in assignment expression and constant value (like `UPDATE x = 42`) leads to incorrect value in column or segfault. Fixes [#13634](https://github.com/ClickHouse/ClickHouse/issues/13634), [#14045](https://github.com/ClickHouse/ClickHouse/issues/14045). [#14646](https://github.com/ClickHouse/ClickHouse/pull/14646) ([alesapin](https://github.com/alesapin)).
 * Fix wrong Decimal multiplication result caused wrong decimal scale of result column. [#14603](https://github.com/ClickHouse/ClickHouse/pull/14603) ([Artem Zuikov](https://github.com/4ertus2)).
 * Fixed the incorrect sorting order of `Nullable` column. This fixes [#14344](https://github.com/ClickHouse/ClickHouse/issues/14344). [#14495](https://github.com/ClickHouse/ClickHouse/pull/14495) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Fixed inconsistent comparison with primary key of type `FixedString` on index analysis if they're compered with a string of less size. This fixes https://github.com/ClickHouse/ClickHouse/issues/14908. [#15033](https://github.com/ClickHouse/ClickHouse/pull/15033) ([Amos Bird](https://github.com/amosbird)).
+* Fixed inconsistent comparison with primary key of type `FixedString` on index analysis if they're compered with a string of less size. This fixes [#14908](https://github.com/ClickHouse/ClickHouse/issues/14908). [#15033](https://github.com/ClickHouse/ClickHouse/pull/15033) ([Amos Bird](https://github.com/amosbird)).
 * Fix bug which leads to wrong merges assignment if table has partitions with a single part. [#14444](https://github.com/ClickHouse/ClickHouse/pull/14444) ([alesapin](https://github.com/alesapin)).
 * If function `bar` was called with specifically crafted arguments, buffer overflow was possible. This closes [#13926](https://github.com/ClickHouse/ClickHouse/issues/13926). [#15028](https://github.com/ClickHouse/ClickHouse/pull/15028) ([alexey-milovidov](https://github.com/alexey-milovidov)).
-* Publish CPU frequencies per logical core in `system.asynchronous_metrics`. This fixes https://github.com/ClickHouse/ClickHouse/issues/14923. [#14924](https://github.com/ClickHouse/ClickHouse/pull/14924) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Publish CPU frequencies per logical core in `system.asynchronous_metrics`. This fixes [#14923](https://github.com/ClickHouse/ClickHouse/issues/14923). [#14924](https://github.com/ClickHouse/ClickHouse/pull/14924) ([Alexander Kuzmenkov](https://github.com/akuzm)).
 * Fixed `.metadata.tmp File exists` error when using `MaterializeMySQL` database engine. [#14898](https://github.com/ClickHouse/ClickHouse/pull/14898) ([Winter Zhang](https://github.com/zhang2014)).
 * Fix the issue when some invocations of `extractAllGroups` function may trigger "Memory limit exceeded" error. This fixes [#13383](https://github.com/ClickHouse/ClickHouse/issues/13383). [#14889](https://github.com/ClickHouse/ClickHouse/pull/14889) ([alexey-milovidov](https://github.com/alexey-milovidov)).
 * Fix SIGSEGV for an attempt to INSERT into StorageFile(fd). [#14887](https://github.com/ClickHouse/ClickHouse/pull/14887) ([Azat Khuzhin](https://github.com/azat)).
@@ -501,7 +501,7 @@

 #### Performance Improvement

-* Optimize queries with LIMIT/LIMIT BY/ORDER BY for distributed with GROUP BY sharding_key (under optimize_skip_unused_shards and optimize_distributed_group_by_sharding_key). [#10373](https://github.com/ClickHouse/ClickHouse/pull/10373) ([Azat Khuzhin](https://github.com/azat)).
+* Optimize queries with LIMIT/LIMIT BY/ORDER BY for distributed with GROUP BY sharding_key (under `optimize_skip_unused_shards` and `optimize_distributed_group_by_sharding_key`). [#10373](https://github.com/ClickHouse/ClickHouse/pull/10373) ([Azat Khuzhin](https://github.com/azat)).
 * Creating sets for multiple `JOIN` and `IN` in parallel. It may slightly improve performance for queries with several different `IN subquery` expressions. [#14412](https://github.com/ClickHouse/ClickHouse/pull/14412) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
 * Improve Kafka engine performance by providing independent thread for each consumer. Separate thread pool for streaming engines (like Kafka). [#13939](https://github.com/ClickHouse/ClickHouse/pull/13939) ([fastio](https://github.com/fastio)).

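A hedged configuration sketch for the distributed `GROUP BY` optimization above; `dist_table` is a hypothetical `Distributed` table sharded by `user_id`:

```sql
SET optimize_skip_unused_shards = 1;
SET optimize_distributed_group_by_sharding_key = 1;
-- Grouping by the sharding key lets LIMIT/ORDER BY be applied per shard:
SELECT user_id, count() FROM dist_table GROUP BY user_id ORDER BY user_id LIMIT 10;
```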
@@ -579,15 +579,15 @@
 * Fix race condition during MergeTree table rename and background cleanup. [#15304](https://github.com/ClickHouse/ClickHouse/pull/15304) ([alesapin](https://github.com/alesapin)).
 * Fix rare race condition on server startup when system.logs are enabled. [#15300](https://github.com/ClickHouse/ClickHouse/pull/15300) ([alesapin](https://github.com/alesapin)).
 * Fix MSan report in QueryLog. Uninitialized memory can be used for the field `memory_usage`. [#15258](https://github.com/ClickHouse/ClickHouse/pull/15258) ([alexey-milovidov](https://github.com/alexey-milovidov)).
-* Fix instance crash when using joinGet with LowCardinality types. This fixes https://github.com/ClickHouse/ClickHouse/issues/15214. [#15220](https://github.com/ClickHouse/ClickHouse/pull/15220) ([Amos Bird](https://github.com/amosbird)).
+* Fix instance crash when using joinGet with LowCardinality types. This fixes [#15214](https://github.com/ClickHouse/ClickHouse/issues/15214). [#15220](https://github.com/ClickHouse/ClickHouse/pull/15220) ([Amos Bird](https://github.com/amosbird)).
 * Fix bug in table engine `Buffer` which doesn't allow to insert data of new structure into `Buffer` after `ALTER` query. Fixes [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117). [#15192](https://github.com/ClickHouse/ClickHouse/pull/15192) ([alesapin](https://github.com/alesapin)).
 * Adjust decimals field size in mysql column definition packet. [#15152](https://github.com/ClickHouse/ClickHouse/pull/15152) ([maqroll](https://github.com/maqroll)).
-* We already use padded comparison between String and FixedString (https://github.com/ClickHouse/ClickHouse/blob/master/src/Functions/FunctionsComparison.h#L333). This PR applies the same logic to field comparison which corrects the usage of FixedString as primary keys. This fixes https://github.com/ClickHouse/ClickHouse/issues/14908. [#15033](https://github.com/ClickHouse/ClickHouse/pull/15033) ([Amos Bird](https://github.com/amosbird)).
-* If function `bar` was called with specifically crafter arguments, buffer overflow was possible. This closes [#13926](https://github.com/ClickHouse/ClickHouse/issues/13926). [#15028](https://github.com/ClickHouse/ClickHouse/pull/15028) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* We already use padded comparison between String and FixedString (https://github.com/ClickHouse/ClickHouse/blob/master/src/Functions/FunctionsComparison.h#L333). This PR applies the same logic to field comparison which corrects the usage of FixedString as primary keys. This fixes [#14908](https://github.com/ClickHouse/ClickHouse/issues/14908). [#15033](https://github.com/ClickHouse/ClickHouse/pull/15033) ([Amos Bird](https://github.com/amosbird)).
+* If function `bar` was called with specifically crafted arguments, buffer overflow was possible. This closes [#13926](https://github.com/ClickHouse/ClickHouse/issues/13926). [#15028](https://github.com/ClickHouse/ClickHouse/pull/15028) ([alexey-milovidov](https://github.com/alexey-milovidov)).
 * Fixed `Cannot rename ... errno: 22, strerror: Invalid argument` error on DDL query execution in Atomic database when running clickhouse-server in docker on Mac OS. [#15024](https://github.com/ClickHouse/ClickHouse/pull/15024) ([tavplubix](https://github.com/tavplubix)).
 * Now settings `number_of_free_entries_in_pool_to_execute_mutation` and `number_of_free_entries_in_pool_to_lower_max_size_of_merge` can be equal to `background_pool_size`. [#14975](https://github.com/ClickHouse/ClickHouse/pull/14975) ([alesapin](https://github.com/alesapin)).
 * Fix to make predicate push down work when subquery contains finalizeAggregation function. Fixes [#14847](https://github.com/ClickHouse/ClickHouse/issues/14847). [#14937](https://github.com/ClickHouse/ClickHouse/pull/14937) ([filimonov](https://github.com/filimonov)).
-* Publish CPU frequencies per logical core in `system.asynchronous_metrics`. This fixes https://github.com/ClickHouse/ClickHouse/issues/14923. [#14924](https://github.com/ClickHouse/ClickHouse/pull/14924) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Publish CPU frequencies per logical core in `system.asynchronous_metrics`. This fixes [#14923](https://github.com/ClickHouse/ClickHouse/issues/14923). [#14924](https://github.com/ClickHouse/ClickHouse/pull/14924) ([Alexander Kuzmenkov](https://github.com/akuzm)).
 * Fixed `.metadata.tmp File exists` error when using `MaterializeMySQL` database engine. [#14898](https://github.com/ClickHouse/ClickHouse/pull/14898) ([Winter Zhang](https://github.com/zhang2014)).
 * Fix a problem where the server may get stuck on startup while talking to ZooKeeper, if the configuration files have to be fetched from ZK (using the `from_zk` include option). This fixes [#14814](https://github.com/ClickHouse/ClickHouse/issues/14814). [#14843](https://github.com/ClickHouse/ClickHouse/pull/14843) ([Alexander Kuzmenkov](https://github.com/akuzm)).
 * Fix wrong monotonicity detection for shrunk `Int -> Int` cast of signed types. It might lead to incorrect query result. This bug is unveiled in [#14513](https://github.com/ClickHouse/ClickHouse/issues/14513). [#14783](https://github.com/ClickHouse/ClickHouse/pull/14783) ([Amos Bird](https://github.com/amosbird)).
@@ -647,16 +647,16 @@

 * Fix visible data clobbering by progress bar in client in interactive mode. This fixes [#12562](https://github.com/ClickHouse/ClickHouse/issues/12562) and [#13369](https://github.com/ClickHouse/ClickHouse/issues/13369) and [#13584](https://github.com/ClickHouse/ClickHouse/issues/13584) and fixes [#12964](https://github.com/ClickHouse/ClickHouse/issues/12964). [#13691](https://github.com/ClickHouse/ClickHouse/pull/13691) ([alexey-milovidov](https://github.com/alexey-milovidov)).
 * Fixed incorrect sorting order if `LowCardinality` column when sorting by multiple columns. This fixes [#13958](https://github.com/ClickHouse/ClickHouse/issues/13958). [#14223](https://github.com/ClickHouse/ClickHouse/pull/14223) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
-* Check for array size overflow in `topK` aggregate function. Without this check the user may send a query with carefully crafter parameters that will lead to server crash. This closes [#14452](https://github.com/ClickHouse/ClickHouse/issues/14452). [#14467](https://github.com/ClickHouse/ClickHouse/pull/14467) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Check for array size overflow in `topK` aggregate function. Without this check the user may send a query with carefully crafted parameters that will lead to server crash. This closes [#14452](https://github.com/ClickHouse/ClickHouse/issues/14452). [#14467](https://github.com/ClickHouse/ClickHouse/pull/14467) ([alexey-milovidov](https://github.com/alexey-milovidov)).
 * Fix bug which can lead to wrong merges assignment if table has partitions with a single part. [#14444](https://github.com/ClickHouse/ClickHouse/pull/14444) ([alesapin](https://github.com/alesapin)).
 * Stop query execution if exception happened in `PipelineExecutor` itself. This could prevent rare possible query hung. Continuation of [#14334](https://github.com/ClickHouse/ClickHouse/issues/14334). [#14402](https://github.com/ClickHouse/ClickHouse/pull/14402) [#14334](https://github.com/ClickHouse/ClickHouse/pull/14334) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
 * Fix crash during `ALTER` query for table which was created `AS table_function`. Fixes [#14212](https://github.com/ClickHouse/ClickHouse/issues/14212). [#14326](https://github.com/ClickHouse/ClickHouse/pull/14326) ([alesapin](https://github.com/alesapin)).
 * Fix exception during ALTER LIVE VIEW query with REFRESH command. Live view is an experimental feature. [#14320](https://github.com/ClickHouse/ClickHouse/pull/14320) ([Bharat Nallan](https://github.com/bharatnc)).
 * Fix QueryPlan lifetime (for EXPLAIN PIPELINE graph=1) for queries with nested interpreter. [#14315](https://github.com/ClickHouse/ClickHouse/pull/14315) ([Azat Khuzhin](https://github.com/azat)).
-* Fix segfault in `clickhouse-odbc-bridge` during schema fetch from some external sources. This PR fixes https://github.com/ClickHouse/ClickHouse/issues/13861. [#14267](https://github.com/ClickHouse/ClickHouse/pull/14267) ([Vitaly Baranov](https://github.com/vitlibar)).
-* Fix crash in mark inclusion search introduced in https://github.com/ClickHouse/ClickHouse/pull/12277. [#14225](https://github.com/ClickHouse/ClickHouse/pull/14225) ([Amos Bird](https://github.com/amosbird)).
+* Fix segfault in `clickhouse-odbc-bridge` during schema fetch from some external sources. This PR fixes [#13861](https://github.com/ClickHouse/ClickHouse/issues/13861). [#14267](https://github.com/ClickHouse/ClickHouse/pull/14267) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix crash in mark inclusion search introduced in [#12277](https://github.com/ClickHouse/ClickHouse/pull/12277). [#14225](https://github.com/ClickHouse/ClickHouse/pull/14225) ([Amos Bird](https://github.com/amosbird)).
 * Fix creation of tables with named tuples. This fixes [#13027](https://github.com/ClickHouse/ClickHouse/issues/13027). [#14143](https://github.com/ClickHouse/ClickHouse/pull/14143) ([alexey-milovidov](https://github.com/alexey-milovidov)).
-* Fix formatting of minimal negative decimal numbers. This fixes https://github.com/ClickHouse/ClickHouse/issues/14111. [#14119](https://github.com/ClickHouse/ClickHouse/pull/14119) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Fix formatting of minimal negative decimal numbers. This fixes [#14111](https://github.com/ClickHouse/ClickHouse/issues/14111). [#14119](https://github.com/ClickHouse/ClickHouse/pull/14119) ([Alexander Kuzmenkov](https://github.com/akuzm)).
 * Fix `DistributedFilesToInsert` metric (zeroed when it should not). [#14095](https://github.com/ClickHouse/ClickHouse/pull/14095) ([Azat Khuzhin](https://github.com/azat)).
 * Fix `pointInPolygon` with const 2d array as polygon. [#14079](https://github.com/ClickHouse/ClickHouse/pull/14079) ([Alexey Ilyukhov](https://github.com/livace)).
 * Fixed wrong mount point in extra info for `Poco::Exception: no space left on device`. [#14050](https://github.com/ClickHouse/ClickHouse/pull/14050) ([tavplubix](https://github.com/tavplubix)).
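A minimal example of `pointInPolygon` with a constant polygon, as referenced above:

```sql
-- The polygon is a constant array of (x, y) tuples; returns 1 when the
-- point lies inside the ring:
SELECT pointInPolygon((0.5, 0.5), [(0., 0.), (0., 1.), (1., 1.), (1., 0.)]);
```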
@@ -685,10 +685,10 @@
 * Fix wrong code in function `netloc`. This fixes [#13335](https://github.com/ClickHouse/ClickHouse/issues/13335). [#13446](https://github.com/ClickHouse/ClickHouse/pull/13446) ([alexey-milovidov](https://github.com/alexey-milovidov)).
 * Fix possible race in `StorageMemory`. [#13416](https://github.com/ClickHouse/ClickHouse/pull/13416) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
 * Fix missing or excessive headers in `TSV/CSVWithNames` formats in HTTP protocol. This fixes [#12504](https://github.com/ClickHouse/ClickHouse/issues/12504). [#13343](https://github.com/ClickHouse/ClickHouse/pull/13343) ([Azat Khuzhin](https://github.com/azat)).
-* Fix parsing row policies from users.xml when names of databases or tables contain dots. This fixes https://github.com/ClickHouse/ClickHouse/issues/5779, https://github.com/ClickHouse/ClickHouse/issues/12527. [#13199](https://github.com/ClickHouse/ClickHouse/pull/13199) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix parsing row policies from users.xml when names of databases or tables contain dots. This fixes [#5779](https://github.com/ClickHouse/ClickHouse/issues/5779), [#12527](https://github.com/ClickHouse/ClickHouse/issues/12527). [#13199](https://github.com/ClickHouse/ClickHouse/pull/13199) ([Vitaly Baranov](https://github.com/vitlibar)).
 * Fix access to `redis` dictionary after connection was dropped once. It may happen with `cache` and `direct` dictionary layouts. [#13082](https://github.com/ClickHouse/ClickHouse/pull/13082) ([Anton Popov](https://github.com/CurtizJ)).
 * Removed wrong auth access check when using ClickHouseDictionarySource to query remote tables. [#12756](https://github.com/ClickHouse/ClickHouse/pull/12756) ([sundyli](https://github.com/sundy-li)).
-* Properly distinguish subqueries in some cases for common subexpression elimination. https://github.com/ClickHouse/ClickHouse/issues/8333. [#8367](https://github.com/ClickHouse/ClickHouse/pull/8367) ([Amos Bird](https://github.com/amosbird)).
+* Properly distinguish subqueries in some cases for common subexpression elimination. [#8333](https://github.com/ClickHouse/ClickHouse/issues/8333). [#8367](https://github.com/ClickHouse/ClickHouse/pull/8367) ([Amos Bird](https://github.com/amosbird)).

 #### Improvement

@@ -756,7 +756,7 @@
 * Updating LDAP user authentication suite to check that it works with RBAC. [#13656](https://github.com/ClickHouse/ClickHouse/pull/13656) ([vzakaznikov](https://github.com/vzakaznikov)).
 * Removed `-DENABLE_CURL_CLIENT` for `contrib/aws`. [#13628](https://github.com/ClickHouse/ClickHouse/pull/13628) ([Vladimir Chebotarev](https://github.com/excitoon)).
 * Increasing health-check timeouts for ClickHouse nodes and adding support to dump docker-compose logs if unhealthy containers found. [#13612](https://github.com/ClickHouse/ClickHouse/pull/13612) ([vzakaznikov](https://github.com/vzakaznikov)).
-* Make sure https://github.com/ClickHouse/ClickHouse/issues/10977 is invalid. [#13539](https://github.com/ClickHouse/ClickHouse/pull/13539) ([Amos Bird](https://github.com/amosbird)).
+* Make sure [#10977](https://github.com/ClickHouse/ClickHouse/issues/10977) is invalid. [#13539](https://github.com/ClickHouse/ClickHouse/pull/13539) ([Amos Bird](https://github.com/amosbird)).
 * Skip PR's from robot-clickhouse. [#13489](https://github.com/ClickHouse/ClickHouse/pull/13489) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
 * Move Dockerfiles from integration tests to `docker/test` directory. docker_compose files are available in `runner` docker container. Docker images are built in CI and not in integration tests. [#13448](https://github.com/ClickHouse/ClickHouse/pull/13448) ([Ilya Yatsishin](https://github.com/qoega)).

@@ -788,7 +788,7 @@
 * Add `FROM_UNIXTIME` function for compatibility with MySQL, related to [12149](https://github.com/ClickHouse/ClickHouse/issues/12149). [#12484](https://github.com/ClickHouse/ClickHouse/pull/12484) ([flynn](https://github.com/ucasFL)).
 * Allow Nullable types as keys in MergeTree tables if `allow_nullable_key` table setting is enabled. Closes [#5319](https://github.com/ClickHouse/ClickHouse/issues/5319). [#12433](https://github.com/ClickHouse/ClickHouse/pull/12433) ([Amos Bird](https://github.com/amosbird)).
 * Integration with [COS](https://intl.cloud.tencent.com/product/cos). [#12386](https://github.com/ClickHouse/ClickHouse/pull/12386) ([fastio](https://github.com/fastio)).
-* Add mapAdd and mapSubtract functions for adding/subtracting key-mapped values. [#11735](https://github.com/ClickHouse/ClickHouse/pull/11735) ([Ildus Kurbangaliev](https://github.com/ildus)).
+* Add `mapAdd` and `mapSubtract` functions for adding/subtracting key-mapped values. [#11735](https://github.com/ClickHouse/ClickHouse/pull/11735) ([Ildus Kurbangaliev](https://github.com/ildus)).

 #### Bug Fix

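Hedged usage sketches for two of the additions above, `FROM_UNIXTIME` and `mapAdd`; exact result types may vary by version:

```sql
-- MySQL-compatible conversion from a Unix timestamp:
SELECT FROM_UNIXTIME(1604160000);
-- mapAdd sums values of matching keys across (keys, values) tuples:
SELECT mapAdd(([1, 2], [10, 10]), ([1, 3], [5, 5]));  -- ([1, 2, 3], [15, 10, 5])
```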
@ -1071,7 +1071,7 @@
|
||||
|
||||
* Improved performace of 'ORDER BY' and 'GROUP BY' by prefix of sorting key (enabled with `optimize_aggregation_in_order` setting, disabled by default). [#11696](https://github.com/ClickHouse/ClickHouse/pull/11696) ([Anton Popov](https://github.com/CurtizJ)).
|
||||
* Removed injective functions inside `uniq*()` if `set optimize_injective_functions_inside_uniq=1`. [#12337](https://github.com/ClickHouse/ClickHouse/pull/12337) ([Ruslan Kamalov](https://github.com/kamalov-ruslan)).
|
||||
* Index not used for IN operator with literals", performance regression introduced around v19.3. This fixes "[#10574](https://github.com/ClickHouse/ClickHouse/issues/10574). [#12062](https://github.com/ClickHouse/ClickHouse/pull/12062) ([nvartolomei](https://github.com/nvartolomei)).
|
||||
* Index not used for IN operator with literals, performance regression introduced around v19.3. This fixes [#10574](https://github.com/ClickHouse/ClickHouse/issues/10574). [#12062](https://github.com/ClickHouse/ClickHouse/pull/12062) ([nvartolomei](https://github.com/nvartolomei)).
|
||||
* Implemented single part uploads for DiskS3 (experimental feature). [#12026](https://github.com/ClickHouse/ClickHouse/pull/12026) ([Vladimir Chebotarev](https://github.com/excitoon)).
|
||||
|
||||
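A minimal sketch of the two optimizations above, assuming a hypothetical table `hits` whose sorting key begins with `CounterID`:

```sql
-- GROUP BY by a prefix of the sorting key can be executed in order (#11696).
SELECT CounterID, count()
FROM hits
GROUP BY CounterID
SETTINGS optimize_aggregation_in_order = 1;

-- With the setting enabled, the injective toString() is eliminated from
-- inside uniq(), so this is evaluated as uniq(UserID) (#12337).
SELECT uniq(toString(UserID))
FROM hits
SETTINGS optimize_injective_functions_inside_uniq = 1;
```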
#### Experimental Feature

@ -1133,7 +1133,7 @@

#### Performance Improvement

* Index not used for IN operator with literals", performance regression introduced around v19.3. This fixes "[#10574](https://github.com/ClickHouse/ClickHouse/issues/10574). [#12062](https://github.com/ClickHouse/ClickHouse/pull/12062) ([nvartolomei](https://github.com/nvartolomei)).
* Index not used for IN operator with literals, performance regression introduced around v19.3. This fixes [#10574](https://github.com/ClickHouse/ClickHouse/issues/10574). [#12062](https://github.com/ClickHouse/ClickHouse/pull/12062) ([nvartolomei](https://github.com/nvartolomei)).

#### Build/Testing/Packaging Improvement
@ -1213,7 +1213,7 @@

* Fix wrong result of comparison of FixedString with constant String. This fixes [#11393](https://github.com/ClickHouse/ClickHouse/issues/11393). This bug appeared in version 20.4. [#11828](https://github.com/ClickHouse/ClickHouse/pull/11828) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix wrong result for `if` with NULLs in condition. [#11807](https://github.com/ClickHouse/ClickHouse/pull/11807) ([Artem Zuikov](https://github.com/4ertus2)).
* Fix using too many threads for queries. [#11788](https://github.com/ClickHouse/ClickHouse/pull/11788) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed `Scalar doesn't exist` exception when using `WITH <scalar subquery> ...` in `SELECT ... FROM merge_tree_table ...` https://github.com/ClickHouse/ClickHouse/issues/11621. [#11767](https://github.com/ClickHouse/ClickHouse/pull/11767) ([Amos Bird](https://github.com/amosbird)).
* Fixed `Scalar doesn't exist` exception when using `WITH <scalar subquery> ...` in `SELECT ... FROM merge_tree_table ...` [#11621](https://github.com/ClickHouse/ClickHouse/issues/11621); see the example below. [#11767](https://github.com/ClickHouse/ClickHouse/pull/11767) ([Amos Bird](https://github.com/amosbird)).
* Fix unexpected behaviour of queries like `SELECT *, xyz.*`, which succeeded while an error was expected. [#11753](https://github.com/ClickHouse/ClickHouse/pull/11753) ([hexiaoting](https://github.com/hexiaoting)).
* Now replicated fetches will be cancelled during metadata alter. [#11744](https://github.com/ClickHouse/ClickHouse/pull/11744) ([alesapin](https://github.com/alesapin)).
* Parse metadata stored in zookeeper before checking for equality. [#11739](https://github.com/ClickHouse/ClickHouse/pull/11739) ([Azat Khuzhin](https://github.com/azat)).
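For reference, the query shape that used to trigger the `Scalar doesn't exist` exception looks like this (table and column names are placeholders):

```sql
-- A scalar subquery in WITH, used against a MergeTree table (#11767).
WITH (SELECT max(v) FROM t) AS max_v
SELECT k, v
FROM t            -- t: ENGINE = MergeTree
WHERE v = max_v;
```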
@ -1264,8 +1264,8 @@

* Fix potential uninitialized memory in conversion. Example: `SELECT toIntervalSecond(now64())`. [#11311](https://github.com/ClickHouse/ClickHouse/pull/11311) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix the issue when index analysis cannot work if a table has Array column in primary key and if a query is filtering by this column with `empty` or `notEmpty` functions. This fixes [#11286](https://github.com/ClickHouse/ClickHouse/issues/11286). [#11303](https://github.com/ClickHouse/ClickHouse/pull/11303) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix bug when query speed estimation can be incorrect and the limit of `min_execution_speed` may not work or work incorrectly if the query is throttled by `max_network_bandwidth`, `max_execution_speed` or `priority` settings. Change the default value of `timeout_before_checking_execution_speed` to non-zero, because otherwise the settings `min_execution_speed` and `max_execution_speed` have no effect (see the example below). This fixes [#11297](https://github.com/ClickHouse/ClickHouse/issues/11297). This fixes [#5732](https://github.com/ClickHouse/ClickHouse/issues/5732). This fixes [#6228](https://github.com/ClickHouse/ClickHouse/issues/6228). Usability improvement: avoid concatenation of exception message with progress bar in `clickhouse-client`. [#11296](https://github.com/ClickHouse/ClickHouse/pull/11296) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix crash when `SET DEFAULT ROLE` is called with wrong arguments. This fixes https://github.com/ClickHouse/ClickHouse/issues/10586. [#11278](https://github.com/ClickHouse/ClickHouse/pull/11278) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix crash while reading malformed data in `Protobuf` format. This fixes https://github.com/ClickHouse/ClickHouse/issues/5957, fixes https://github.com/ClickHouse/ClickHouse/issues/11203. [#11258](https://github.com/ClickHouse/ClickHouse/pull/11258) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix crash when `SET DEFAULT ROLE` is called with wrong arguments. This fixes [#10586](https://github.com/ClickHouse/ClickHouse/issues/10586). [#11278](https://github.com/ClickHouse/ClickHouse/pull/11278) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix crash while reading malformed data in `Protobuf` format. This fixes [#5957](https://github.com/ClickHouse/ClickHouse/issues/5957), fixes [#11203](https://github.com/ClickHouse/ClickHouse/issues/11203). [#11258](https://github.com/ClickHouse/ClickHouse/pull/11258) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fixed a bug when `cache` dictionary could return default value instead of normal (when there are only expired keys). This affects only string fields. [#11233](https://github.com/ClickHouse/ClickHouse/pull/11233) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix error `Block structure mismatch in QueryPipeline` while reading from `VIEW` with constants in inner query. Fixes [#11181](https://github.com/ClickHouse/ClickHouse/issues/11181). [#11205](https://github.com/ClickHouse/ClickHouse/pull/11205) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible exception `Invalid status for associated output`. [#11200](https://github.com/ClickHouse/ClickHouse/pull/11200) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
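How the speed-limit settings interact, as a sketch (the table name `big_table` and the numbers are arbitrary):

```sql
-- min_execution_speed is only enforced after
-- timeout_before_checking_execution_speed has elapsed (#11296).
SELECT count()
FROM big_table
SETTINGS
    min_execution_speed = 1000000,                 -- rows/s lower bound
    timeout_before_checking_execution_speed = 10;  -- grace period, seconds
```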
@ -1331,7 +1331,7 @@

* Fix error `the BloomFilter false positive must be a double number between 0 and 1` [#10551](https://github.com/ClickHouse/ClickHouse/issues/10551); see the example below. [#10569](https://github.com/ClickHouse/ClickHouse/pull/10569) ([Winter Zhang](https://github.com/zhang2014)).
* Fix SELECT of column ALIAS whose default expression type is different from the column type. [#10563](https://github.com/ClickHouse/ClickHouse/pull/10563) ([Azat Khuzhin](https://github.com/azat)).
* Implemented comparison between DateTime64 and String values (just like for DateTime). [#10560](https://github.com/ClickHouse/ClickHouse/pull/10560) ([Vasily Nemkov](https://github.com/Enmk)).
* Fix index corruption, which may accur in some cases after merge compact parts into another compact part. [#10531](https://github.com/ClickHouse/ClickHouse/pull/10531) ([Anton Popov](https://github.com/CurtizJ)).
* Fix index corruption, which may occur in some cases after merge compact parts into another compact part. [#10531](https://github.com/ClickHouse/ClickHouse/pull/10531) ([Anton Popov](https://github.com/CurtizJ)).
* Disable GROUP BY sharding_key optimization by default (`optimize_distributed_group_by_sharding_key` had been introduced and turned off by default, due to the tricky nature of sharding_key analysis; a simple example is `if` in the sharding key) and fix it for WITH ROLLUP/CUBE/TOTALS. [#10516](https://github.com/ClickHouse/ClickHouse/pull/10516) ([Azat Khuzhin](https://github.com/azat)).
* Fixes: [#10263](https://github.com/ClickHouse/ClickHouse/issues/10263) (after that PR, distributed send via INSERT had been postponed on each INSERT) Fixes: [#8756](https://github.com/ClickHouse/ClickHouse/issues/8756) (that PR breaks distributed sends with all of the following conditions met (unlikely setup for now I guess): `internal_replication == false`, multiple local shards (activates the hardlinking code) and `distributed_storage_policy` (makes `link(2)` fail with `EXDEV`)). [#10486](https://github.com/ClickHouse/ClickHouse/pull/10486) ([Azat Khuzhin](https://github.com/azat)).
* Fixed error with the `max_rows_to_sort` limit. [#10268](https://github.com/ClickHouse/ClickHouse/pull/10268) ([alexey-milovidov](https://github.com/alexey-milovidov)).
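A sketch of a skipping index whose false-positive rate must lie strictly between 0 and 1 (table, column, and index names are placeholders):

```sql
-- Valid: the rate 0.025 is a double in (0, 1) (#10569).
ALTER TABLE t ADD INDEX ix_v v TYPE bloom_filter(0.025) GRANULARITY 4;

-- Invalid: would be rejected with
-- "the BloomFilter false positive must be a double number between 0 and 1".
-- ALTER TABLE t ADD INDEX ix_bad v TYPE bloom_filter(2.0) GRANULARITY 4;
```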
@ -1488,7 +1488,7 @@

* Lower memory usage in tests. [#10617](https://github.com/ClickHouse/ClickHouse/pull/10617) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixing hard coded timeouts in new live view tests. [#10604](https://github.com/ClickHouse/ClickHouse/pull/10604) ([vzakaznikov](https://github.com/vzakaznikov)).
* Increasing timeout when opening a client in tests/queries/0_stateless/helpers/client.py. [#10599](https://github.com/ClickHouse/ClickHouse/pull/10599) ([vzakaznikov](https://github.com/vzakaznikov)).
* Enable ThinLTO for clang builds, continuation of https://github.com/ClickHouse/ClickHouse/pull/10435. [#10585](https://github.com/ClickHouse/ClickHouse/pull/10585) ([Amos Bird](https://github.com/amosbird)).
* Enable ThinLTO for clang builds, continuation of [#10435](https://github.com/ClickHouse/ClickHouse/pull/10435). [#10585](https://github.com/ClickHouse/ClickHouse/pull/10585) ([Amos Bird](https://github.com/amosbird)).
* Adding fuzzers and preparing for oss-fuzz integration. [#10546](https://github.com/ClickHouse/ClickHouse/pull/10546) ([kyprizel](https://github.com/kyprizel)).
* Fix FreeBSD build. [#10150](https://github.com/ClickHouse/ClickHouse/pull/10150) ([Ivan](https://github.com/abyss7)).
* Add new build for query tests using pytest framework. [#10039](https://github.com/ClickHouse/ClickHouse/pull/10039) ([Ivan](https://github.com/abyss7)).
@ -1563,7 +1563,7 @@

#### Performance Improvement

* Index not used for IN operator with literals", performance regression introduced around v19.3. This fixes "[#10574](https://github.com/ClickHouse/ClickHouse/issues/10574). [#12062](https://github.com/ClickHouse/ClickHouse/pull/12062) ([nvartolomei](https://github.com/nvartolomei)).
* Index not used for IN operator with literals, performance regression introduced around v19.3. This fixes [#10574](https://github.com/ClickHouse/ClickHouse/issues/10574). [#12062](https://github.com/ClickHouse/ClickHouse/pull/12062) ([nvartolomei](https://github.com/nvartolomei)).

#### Build/Testing/Packaging Improvement
@ -1617,7 +1617,7 @@

* Fix the error `Data compressed with different methods` that can happen if `min_bytes_to_use_direct_io` is enabled and PREWHERE is active and using SAMPLE or high number of threads. This fixes [#11539](https://github.com/ClickHouse/ClickHouse/issues/11539). [#11540](https://github.com/ClickHouse/ClickHouse/pull/11540) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix return compressed size for codecs. [#11448](https://github.com/ClickHouse/ClickHouse/pull/11448) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix server crash when a column has compression codec with non-literal arguments. Fixes [#11365](https://github.com/ClickHouse/ClickHouse/issues/11365). [#11431](https://github.com/ClickHouse/ClickHouse/pull/11431) ([alesapin](https://github.com/alesapin)).
* Fix pointInPolygon with nan as point. Fixes https://github.com/ClickHouse/ClickHouse/issues/11375. [#11421](https://github.com/ClickHouse/ClickHouse/pull/11421) ([Alexey Ilyukhov](https://github.com/livace)).
* Fix pointInPolygon with nan as point. Fixes [#11375](https://github.com/ClickHouse/ClickHouse/issues/11375); see the example below. [#11421](https://github.com/ClickHouse/ClickHouse/pull/11421) ([Alexey Ilyukhov](https://github.com/livace)).
* Fix potential uninitialized memory read in MergeTree shutdown if table was not created successfully. [#11420](https://github.com/ClickHouse/ClickHouse/pull/11420) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed geohashesInBox with arguments outside of latitude/longitude range. [#11403](https://github.com/ClickHouse/ClickHouse/pull/11403) ([Vasily Nemkov](https://github.com/Enmk)).
* Fix possible `Pipeline stuck` error for queries with external sort and limit. Fixes [#11359](https://github.com/ClickHouse/ClickHouse/issues/11359). [#11366](https://github.com/ClickHouse/ClickHouse/pull/11366) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
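The shape of a `pointInPolygon` call, for orientation. The coordinates are arbitrary, and the exact value returned for a NaN coordinate after the fix is an assumption based on the entry; the point is that it no longer crashes:

```sql
-- Point inside the polygon: returns 1.
SELECT pointInPolygon((3., 3.), [(6., 0.), (8., 4.), (5., 8.), (0., 2.)]);

-- A NaN coordinate is now handled gracefully instead of crashing (#11421).
SELECT pointInPolygon((nan, 3.), [(6., 0.), (8., 4.), (5., 8.), (0., 2.)]);
```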
@ -1633,8 +1633,8 @@

* Fix potential uninitialized memory in conversion. Example: `SELECT toIntervalSecond(now64())`. [#11311](https://github.com/ClickHouse/ClickHouse/pull/11311) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix the issue when index analysis cannot work if a table has Array column in primary key and if a query is filtering by this column with `empty` or `notEmpty` functions. This fixes [#11286](https://github.com/ClickHouse/ClickHouse/issues/11286). [#11303](https://github.com/ClickHouse/ClickHouse/pull/11303) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix bug when query speed estimation can be incorrect and the limit of `min_execution_speed` may not work or work incorrectly if the query is throttled by `max_network_bandwidth`, `max_execution_speed` or `priority` settings. Change the default value of `timeout_before_checking_execution_speed` to non-zero, because otherwise the settings `min_execution_speed` and `max_execution_speed` have no effect. This fixes [#11297](https://github.com/ClickHouse/ClickHouse/issues/11297). This fixes [#5732](https://github.com/ClickHouse/ClickHouse/issues/5732). This fixes [#6228](https://github.com/ClickHouse/ClickHouse/issues/6228). Usability improvement: avoid concatenation of exception message with progress bar in `clickhouse-client`. [#11296](https://github.com/ClickHouse/ClickHouse/pull/11296) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix crash when SET DEFAULT ROLE is called with wrong arguments. This fixes https://github.com/ClickHouse/ClickHouse/issues/10586. [#11278](https://github.com/ClickHouse/ClickHouse/pull/11278) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix crash while reading malformed data in Protobuf format. This fixes https://github.com/ClickHouse/ClickHouse/issues/5957, fixes https://github.com/ClickHouse/ClickHouse/issues/11203. [#11258](https://github.com/ClickHouse/ClickHouse/pull/11258) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix crash when SET DEFAULT ROLE is called with wrong arguments. This fixes [#10586](https://github.com/ClickHouse/ClickHouse/issues/10586). [#11278](https://github.com/ClickHouse/ClickHouse/pull/11278) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix crash while reading malformed data in Protobuf format. This fixes [#5957](https://github.com/ClickHouse/ClickHouse/issues/5957), fixes [#11203](https://github.com/ClickHouse/ClickHouse/issues/11203). [#11258](https://github.com/ClickHouse/ClickHouse/pull/11258) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fixed a bug when cache-dictionary could return default value instead of normal (when there are only expired keys). This affects only string fields. [#11233](https://github.com/ClickHouse/ClickHouse/pull/11233) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix error `Block structure mismatch in QueryPipeline` while reading from `VIEW` with constants in inner query. Fixes [#11181](https://github.com/ClickHouse/ClickHouse/issues/11181). [#11205](https://github.com/ClickHouse/ClickHouse/pull/11205) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible exception `Invalid status for associated output`. [#11200](https://github.com/ClickHouse/ClickHouse/pull/11200) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
@ -1679,7 +1679,7 @@ No changes compared to v20.4.3.16-stable.

* Now constraints are updated if the column participating in `CONSTRAINT` expression was renamed. Fixes [#10844](https://github.com/ClickHouse/ClickHouse/issues/10844). [#10847](https://github.com/ClickHouse/ClickHouse/pull/10847) ([alesapin](https://github.com/alesapin)).
* Fixed potential read of uninitialized memory in cache-dictionary. [#10834](https://github.com/ClickHouse/ClickHouse/pull/10834) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed columns order after `Block::sortColumns()`. [#10826](https://github.com/ClickHouse/ClickHouse/pull/10826) ([Azat Khuzhin](https://github.com/azat)).
* Fixed the issue with `ODBC` bridge when no quoting of identifiers is requested. Fixes [#7984] (https://github.com/ClickHouse/ClickHouse/issues/7984). [#10821](https://github.com/ClickHouse/ClickHouse/pull/10821) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed the issue with `ODBC` bridge when no quoting of identifiers is requested. Fixes [#7984](https://github.com/ClickHouse/ClickHouse/issues/7984). [#10821](https://github.com/ClickHouse/ClickHouse/pull/10821) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed `UBSan` and `MSan` report in `DateLUT`. [#10798](https://github.com/ClickHouse/ClickHouse/pull/10798) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed incorrect type conversion in key conditions. Fixes [#6287](https://github.com/ClickHouse/ClickHouse/issues/6287). [#10791](https://github.com/ClickHouse/ClickHouse/pull/10791) ([Andrew Onyshchuk](https://github.com/oandrew)).
* Fixed `parallel_view_processing` behavior. Now all insertions into `MATERIALIZED VIEW` without exception should be finished if exception happened. Fixes [#10241](https://github.com/ClickHouse/ClickHouse/issues/10241). [#10757](https://github.com/ClickHouse/ClickHouse/pull/10757) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
@ -1707,15 +1707,15 @@ No changes compared to v20.4.3.16-stable.

#### New Feature

* Add support for secured connection from ClickHouse to Zookeeper [#10184](https://github.com/ClickHouse/ClickHouse/pull/10184) ([Konstantin Lebedev](https://github.com/xzkostyan))
* Support custom HTTP handlers. See ISSUES-5436 for description. [#7572](https://github.com/ClickHouse/ClickHouse/pull/7572) ([Winter Zhang](https://github.com/zhang2014))
* Support custom HTTP handlers. See [#5436](https://github.com/ClickHouse/ClickHouse/issues/5436) for description. [#7572](https://github.com/ClickHouse/ClickHouse/pull/7572) ([Winter Zhang](https://github.com/zhang2014))
* Add MessagePack Input/Output format. [#9889](https://github.com/ClickHouse/ClickHouse/pull/9889) ([Kruglov Pavel](https://github.com/Avogar))
* Add Regexp input format. [#9196](https://github.com/ClickHouse/ClickHouse/pull/9196) ([Kruglov Pavel](https://github.com/Avogar))
* Added output format `Markdown` for embedding tables in markdown documents; see the example below. [#10317](https://github.com/ClickHouse/ClickHouse/pull/10317) ([Kruglov Pavel](https://github.com/Avogar))
* Added support for custom settings section in dictionaries. Also fixes issue [#2829](https://github.com/ClickHouse/ClickHouse/issues/2829). [#10137](https://github.com/ClickHouse/ClickHouse/pull/10137) ([Artem Streltsov](https://github.com/kekekekule))
* Added custom settings support in DDL-queries for CREATE DICTIONARY [#10465](https://github.com/ClickHouse/ClickHouse/pull/10465) ([Artem Streltsov](https://github.com/kekekekule))
* Added custom settings support in DDL-queries for `CREATE DICTIONARY` [#10465](https://github.com/ClickHouse/ClickHouse/pull/10465) ([Artem Streltsov](https://github.com/kekekekule))
* Add simple server-wide memory profiler that will collect allocation contexts when server memory usage becomes higher than the next allocation threshold. [#10444](https://github.com/ClickHouse/ClickHouse/pull/10444) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Add setting `always_fetch_merged_part` which restricts a replica from merging parts by itself and always prefers downloading from other replicas. [#10379](https://github.com/ClickHouse/ClickHouse/pull/10379) ([alesapin](https://github.com/alesapin))
* Add function JSONExtractKeysAndValuesRaw which extracts raw data from JSON objects [#10378](https://github.com/ClickHouse/ClickHouse/pull/10378) ([hcz](https://github.com/hczhcz))
* Add function `JSONExtractKeysAndValuesRaw` which extracts raw data from JSON objects [#10378](https://github.com/ClickHouse/ClickHouse/pull/10378) ([hcz](https://github.com/hczhcz))
* Add memory usage from OS to `system.asynchronous_metrics`. [#10361](https://github.com/ClickHouse/ClickHouse/pull/10361) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Added generic variants for functions `least` and `greatest`. Now they work with arbitrary number of arguments of arbitrary types. This fixes [#4767](https://github.com/ClickHouse/ClickHouse/issues/4767) [#10318](https://github.com/ClickHouse/ClickHouse/pull/10318) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Now ClickHouse controls timeouts of dictionary sources on its side. Two new settings added to cache dictionary configuration: `strict_max_lifetime_seconds`, which is `max_lifetime` by default, and `query_wait_timeout_milliseconds`, which is one minute by default. The first setting is also useful with `allow_read_expired_keys` settings (to forbid reading very expired keys). [#10337](https://github.com/ClickHouse/ClickHouse/pull/10337) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov))
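A few of the new features above in action; the queries are illustrative (the JSON literal and the numbers are made up):

```sql
-- Markdown output format (#10317): renders the result as a markdown table.
SELECT number, number * 2 AS doubled
FROM system.numbers LIMIT 3
FORMAT Markdown;

-- Raw keys and values of a JSON object (#10378):
-- returns [('a','[1,2,3]'), ('b','"hello"')].
SELECT JSONExtractKeysAndValuesRaw('{"a": [1, 2, 3], "b": "hello"}');

-- least/greatest with more than two arguments of mixed types (#10318).
SELECT least(1, 2.5, -3), greatest(1, 2.5, -3);
```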
@ -1728,7 +1728,7 @@ No changes compared to v20.4.3.16-stable.

* Add ability to query Distributed over Distributed (w/o `distributed_group_by_no_merge`) ... [#9923](https://github.com/ClickHouse/ClickHouse/pull/9923) ([Azat Khuzhin](https://github.com/azat))
* Add function `arrayReduceInRanges` which aggregates array elements in given ranges. [#9598](https://github.com/ClickHouse/ClickHouse/pull/9598) ([hcz](https://github.com/hczhcz))
* Add Dictionary Status on prometheus exporter. [#9622](https://github.com/ClickHouse/ClickHouse/pull/9622) ([Guillaume Tassery](https://github.com/YiuRULE))
* Add function arrayAUC [#8698](https://github.com/ClickHouse/ClickHouse/pull/8698) ([taiyang-li](https://github.com/taiyang-li))
* Add function `arrayAUC` [#8698](https://github.com/ClickHouse/ClickHouse/pull/8698) ([taiyang-li](https://github.com/taiyang-li))
* Support `DROP VIEW` statement for better TPC-H compatibility. [#9831](https://github.com/ClickHouse/ClickHouse/pull/9831) ([Amos Bird](https://github.com/amosbird))
* Add 'strict_order' option to windowFunnel() [#9773](https://github.com/ClickHouse/ClickHouse/pull/9773) ([achimbab](https://github.com/achimbab))
* Support `DATE` and `TIMESTAMP` SQL operators, e.g. `SELECT date '2001-01-01'`; see the example below. [#9691](https://github.com/ClickHouse/ClickHouse/pull/9691) ([Artem Zuikov](https://github.com/4ertus2))
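Short illustrations of three of the additions above (`events`, `ts` and `event` in the funnel query are placeholder names):

```sql
-- SQL-standard literal operators (#9691).
SELECT DATE '2001-01-01', TIMESTAMP '2001-01-01 00:00:00';

-- Area under the ROC curve from scores and binary labels (#8698): 0.75 here.
SELECT arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]);

-- 'strict_order' forbids interleaving events between funnel steps (#9773).
SELECT windowFunnel(3600, 'strict_order')(ts, event = 'view', event = 'click')
FROM events;
```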
@ -1932,7 +1932,7 @@ No changes compared to v20.4.3.16-stable.

* Move integration tests docker files to docker/ directory. [#10335](https://github.com/ClickHouse/ClickHouse/pull/10335) ([Ilya Yatsishin](https://github.com/qoega))
* Allow to use `clang-10` in CI. It ensures that [#10238](https://github.com/ClickHouse/ClickHouse/issues/10238) is fixed. [#10384](https://github.com/ClickHouse/ClickHouse/pull/10384) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Update OpenSSL to upstream master. Fixed the issue when TLS connections may fail with the message `OpenSSL SSL_read: error:14094438:SSL routines:ssl3_read_bytes:tlsv1 alert internal error` and `SSL Exception: error:2400006E:random number generator::error retrieving entropy`. The issue was present in version 20.1. [#8956](https://github.com/ClickHouse/ClickHouse/pull/8956) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Fix clang-10 build. https://github.com/ClickHouse/ClickHouse/issues/10238 [#10370](https://github.com/ClickHouse/ClickHouse/pull/10370) ([Amos Bird](https://github.com/amosbird))
* Fix clang-10 build. [#10238](https://github.com/ClickHouse/ClickHouse/issues/10238) [#10370](https://github.com/ClickHouse/ClickHouse/pull/10370) ([Amos Bird](https://github.com/amosbird))
* Add performance test for [Parallel INSERT for materialized view](https://github.com/ClickHouse/ClickHouse/pull/10052). [#10345](https://github.com/ClickHouse/ClickHouse/pull/10345) ([vxider](https://github.com/Vxider))
* Fix flaky test `test_settings_constraints_distributed.test_insert_clamps_settings`. [#10346](https://github.com/ClickHouse/ClickHouse/pull/10346) ([Vitaly Baranov](https://github.com/vitlibar))
* Add util to test results upload in CI ClickHouse [#10330](https://github.com/ClickHouse/ClickHouse/pull/10330) ([Ilya Yatsishin](https://github.com/qoega))

@ -2106,7 +2106,7 @@ No changes compared to v20.4.3.16-stable.

#### Performance Improvement

* Index not used for IN operator with literals", performance regression introduced around v19.3. This fixes "[#10574](https://github.com/ClickHouse/ClickHouse/issues/10574). [#12062](https://github.com/ClickHouse/ClickHouse/pull/12062) ([nvartolomei](https://github.com/nvartolomei)).
* Index not used for IN operator with literals, performance regression introduced around v19.3. This fixes [#10574](https://github.com/ClickHouse/ClickHouse/issues/10574). [#12062](https://github.com/ClickHouse/ClickHouse/pull/12062) ([nvartolomei](https://github.com/nvartolomei)).


### ClickHouse release v20.3.12.112-lts 2020-06-25
@ -2148,7 +2148,7 @@ No changes compared to v20.4.3.16-stable.

* Fix the error `Data compressed with different methods` that can happen if `min_bytes_to_use_direct_io` is enabled and PREWHERE is active and using SAMPLE or high number of threads. This fixes [#11539](https://github.com/ClickHouse/ClickHouse/issues/11539). [#11540](https://github.com/ClickHouse/ClickHouse/pull/11540) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix return compressed size for codecs. [#11448](https://github.com/ClickHouse/ClickHouse/pull/11448) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix server crash when a column has compression codec with non-literal arguments. Fixes [#11365](https://github.com/ClickHouse/ClickHouse/issues/11365). [#11431](https://github.com/ClickHouse/ClickHouse/pull/11431) ([alesapin](https://github.com/alesapin)).
* Fix pointInPolygon with nan as point. Fixes https://github.com/ClickHouse/ClickHouse/issues/11375. [#11421](https://github.com/ClickHouse/ClickHouse/pull/11421) ([Alexey Ilyukhov](https://github.com/livace)).
* Fix pointInPolygon with nan as point. Fixes [#11375](https://github.com/ClickHouse/ClickHouse/issues/11375). [#11421](https://github.com/ClickHouse/ClickHouse/pull/11421) ([Alexey Ilyukhov](https://github.com/livace)).
* Fix crash in JOIN over LowCardinality(T) and Nullable(T). [#11380](https://github.com/ClickHouse/ClickHouse/issues/11380). [#11414](https://github.com/ClickHouse/ClickHouse/pull/11414) ([Artem Zuikov](https://github.com/4ertus2)).
* Fix error code for wrong `USING` key. [#11373](https://github.com/ClickHouse/ClickHouse/issues/11373). [#11404](https://github.com/ClickHouse/ClickHouse/pull/11404) ([Artem Zuikov](https://github.com/4ertus2)).
* Fixed geohashesInBox with arguments outside of latitude/longitude range. [#11403](https://github.com/ClickHouse/ClickHouse/pull/11403) ([Vasily Nemkov](https://github.com/Enmk)).
@ -2165,7 +2165,7 @@ No changes compared to v20.4.3.16-stable.

* Fix potential uninitialized memory in conversion. Example: `SELECT toIntervalSecond(now64())`. [#11311](https://github.com/ClickHouse/ClickHouse/pull/11311) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix the issue when index analysis cannot work if a table has Array column in primary key and if a query is filtering by this column with `empty` or `notEmpty` functions. This fixes [#11286](https://github.com/ClickHouse/ClickHouse/issues/11286). [#11303](https://github.com/ClickHouse/ClickHouse/pull/11303) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix bug when query speed estimation can be incorrect and the limit of `min_execution_speed` may not work or work incorrectly if the query is throttled by `max_network_bandwidth`, `max_execution_speed` or `priority` settings. Change the default value of `timeout_before_checking_execution_speed` to non-zero, because otherwise the settings `min_execution_speed` and `max_execution_speed` have no effect. This fixes [#11297](https://github.com/ClickHouse/ClickHouse/issues/11297). This fixes [#5732](https://github.com/ClickHouse/ClickHouse/issues/5732). This fixes [#6228](https://github.com/ClickHouse/ClickHouse/issues/6228). Usability improvement: avoid concatenation of exception message with progress bar in `clickhouse-client`. [#11296](https://github.com/ClickHouse/ClickHouse/pull/11296) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix crash while reading malformed data in Protobuf format. This fixes https://github.com/ClickHouse/ClickHouse/issues/5957, fixes https://github.com/ClickHouse/ClickHouse/issues/11203. [#11258](https://github.com/ClickHouse/ClickHouse/pull/11258) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix crash while reading malformed data in Protobuf format. This fixes [#5957](https://github.com/ClickHouse/ClickHouse/issues/5957), fixes [#11203](https://github.com/ClickHouse/ClickHouse/issues/11203). [#11258](https://github.com/ClickHouse/ClickHouse/pull/11258) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fixed a bug when cache-dictionary could return default value instead of normal (when there are only expired keys). This affects only string fields. [#11233](https://github.com/ClickHouse/ClickHouse/pull/11233) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix error `Block structure mismatch in QueryPipeline` while reading from `VIEW` with constants in inner query. Fixes [#11181](https://github.com/ClickHouse/ClickHouse/issues/11181). [#11205](https://github.com/ClickHouse/ClickHouse/pull/11205) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible exception `Invalid status for associated output`. [#11200](https://github.com/ClickHouse/ClickHouse/pull/11200) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

@ -2196,7 +2196,7 @@ No changes compared to v20.4.3.16-stable.

* Fixed `SIGSEGV` in `StringHashTable` if such a key does not exist. [#10870](https://github.com/ClickHouse/ClickHouse/pull/10870) ([Azat Khuzhin](https://github.com/azat)).
* Fixed bug in `ReplicatedMergeTree` which might cause some `ALTER` on `OPTIMIZE` query to hang waiting for some replica after it becomes inactive. [#10849](https://github.com/ClickHouse/ClickHouse/pull/10849) ([tavplubix](https://github.com/tavplubix)).
* Fixed columns order after `Block::sortColumns()`. [#10826](https://github.com/ClickHouse/ClickHouse/pull/10826) ([Azat Khuzhin](https://github.com/azat)).
* Fixed the issue with `ODBC` bridge when no quoting of identifiers is requested. Fixes [#7984] (https://github.com/ClickHouse/ClickHouse/issues/7984). [#10821](https://github.com/ClickHouse/ClickHouse/pull/10821) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed the issue with `ODBC` bridge when no quoting of identifiers is requested. Fixes [#7984](https://github.com/ClickHouse/ClickHouse/issues/7984). [#10821](https://github.com/ClickHouse/ClickHouse/pull/10821) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed `UBSan` and `MSan` report in `DateLUT`. [#10798](https://github.com/ClickHouse/ClickHouse/pull/10798) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed incorrect type conversion in key conditions. Fixes [#6287](https://github.com/ClickHouse/ClickHouse/issues/6287). [#10791](https://github.com/ClickHouse/ClickHouse/pull/10791) ([Andrew Onyshchuk](https://github.com/oandrew))
* Fixed `parallel_view_processing` behavior. Now all insertions into `MATERIALIZED VIEW` without exception should be finished if exception happened. Fixes [#10241](https://github.com/ClickHouse/ClickHouse/issues/10241). [#10757](https://github.com/ClickHouse/ClickHouse/pull/10757) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
@ -2215,7 +2215,7 @@ No changes compared to v20.4.3.16-stable.

* Fixed incorrect scalar results inside inner query of `MATERIALIZED VIEW` in case this query contained a dependent table. [#10603](https://github.com/ClickHouse/ClickHouse/pull/10603) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed `SELECT` of column `ALIAS` whose default expression type is different from the column type. [#10563](https://github.com/ClickHouse/ClickHouse/pull/10563) ([Azat Khuzhin](https://github.com/azat)).
* Implemented comparison between DateTime64 and String values; see the example below. [#10560](https://github.com/ClickHouse/ClickHouse/pull/10560) ([Vasily Nemkov](https://github.com/Enmk)).
* Fixed index corruption, which may accur in some cases after merge compact parts into another compact part. [#10531](https://github.com/ClickHouse/ClickHouse/pull/10531) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed index corruption, which may occur in some cases after merge compact parts into another compact part. [#10531](https://github.com/ClickHouse/ClickHouse/pull/10531) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed the situation when a mutation finished all parts but hung up with `is_done=0`. [#10526](https://github.com/ClickHouse/ClickHouse/pull/10526) ([alesapin](https://github.com/alesapin)).
* Fixed overflow at beginning of unix epoch for timezones with fractional offset from `UTC`. This fixes [#9335](https://github.com/ClickHouse/ClickHouse/issues/9335). [#10513](https://github.com/ClickHouse/ClickHouse/pull/10513) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed improper shutdown of `Distributed` storage. [#10491](https://github.com/ClickHouse/ClickHouse/pull/10491) ([Azat Khuzhin](https://github.com/azat)).
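The DateTime64/String comparison entry, as a one-liner (the value is chosen arbitrarily):

```sql
-- Comparison with a String now works for DateTime64 just like for DateTime
-- (#10560); returns 1.
SELECT toDateTime64('2020-01-01 00:00:00', 3) = '2020-01-01 00:00:00';
```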
@ -2225,14 +2225,14 @@ No changes compared to v20.4.3.16-stable.

#### Build/Testing/Packaging Improvement

* Fix UBSan report in LZ4 library. [#10631](https://github.com/ClickHouse/ClickHouse/pull/10631) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix clang-10 build. https://github.com/ClickHouse/ClickHouse/issues/10238. [#10370](https://github.com/ClickHouse/ClickHouse/pull/10370) ([Amos Bird](https://github.com/amosbird)).
* Fix clang-10 build. [#10238](https://github.com/ClickHouse/ClickHouse/issues/10238). [#10370](https://github.com/ClickHouse/ClickHouse/pull/10370) ([Amos Bird](https://github.com/amosbird)).
* Added failing tests about `max_rows_to_sort` setting. [#10268](https://github.com/ClickHouse/ClickHouse/pull/10268) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Added some improvements in printing diagnostic info in input formats. Fixes [#10204](https://github.com/ClickHouse/ClickHouse/issues/10204). [#10418](https://github.com/ClickHouse/ClickHouse/pull/10418) ([tavplubix](https://github.com/tavplubix)).
* Added CA certificates to clickhouse-server docker image. [#10476](https://github.com/ClickHouse/ClickHouse/pull/10476) ([filimonov](https://github.com/filimonov)).

#### Bug fix

* #10551. [#10569](https://github.com/ClickHouse/ClickHouse/pull/10569) ([Winter Zhang](https://github.com/zhang2014)).
* Fix error `the BloomFilter false positive must be a double number between 0 and 1` [#10551](https://github.com/ClickHouse/ClickHouse/issues/10551). [#10569](https://github.com/ClickHouse/ClickHouse/pull/10569) ([Winter Zhang](https://github.com/zhang2014)).
### ClickHouse release v20.3.8.53, 2020-04-23

@ -2424,7 +2424,7 @@ No changes compared to v20.4.3.16-stable.

* Fixed the behaviour of `match` and `extract` functions when haystack has zero bytes. The behaviour was wrong when haystack was constant. This fixes [#9160](https://github.com/ClickHouse/ClickHouse/issues/9160) [#9163](https://github.com/ClickHouse/ClickHouse/pull/9163) ([alexey-milovidov](https://github.com/alexey-milovidov)) [#9345](https://github.com/ClickHouse/ClickHouse/pull/9345) ([alexey-milovidov](https://github.com/alexey-milovidov))
* Avoid throwing from destructor in Apache Avro 3rd-party library. [#9066](https://github.com/ClickHouse/ClickHouse/pull/9066) ([Andrew Onyshchuk](https://github.com/oandrew))
* Don't commit a batch polled from `Kafka` partially as it can lead to holes in data. [#8876](https://github.com/ClickHouse/ClickHouse/pull/8876) ([filimonov](https://github.com/filimonov))
* Fix `joinGet` with nullable return types. https://github.com/ClickHouse/ClickHouse/issues/8919 [#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) ([Amos Bird](https://github.com/amosbird))
* Fix `joinGet` with nullable return types. [#8919](https://github.com/ClickHouse/ClickHouse/issues/8919); see the example below. [#9014](https://github.com/ClickHouse/ClickHouse/pull/9014) ([Amos Bird](https://github.com/amosbird))
* Fix data incompatibility when compressed with `T64` codec. [#9016](https://github.com/ClickHouse/ClickHouse/pull/9016) ([Artem Zuikov](https://github.com/4ertus2)) Fix data type ids in `T64` compression codec that leads to wrong (de)compression in affected versions. [#9033](https://github.com/ClickHouse/ClickHouse/pull/9033) ([Artem Zuikov](https://github.com/4ertus2))
* Add setting `enable_early_constant_folding` and disable it in some cases that leads to errors. [#9010](https://github.com/ClickHouse/ClickHouse/pull/9010) ([Artem Zuikov](https://github.com/4ertus2))
* Fix pushdown predicate optimizer with VIEW and enable the test [#9011](https://github.com/ClickHouse/ClickHouse/pull/9011) ([Winter Zhang](https://github.com/zhang2014))
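What a nullable `joinGet` lookup looks like, as a minimal sketch (the table `j` and its data are hypothetical):

```sql
-- Join-engine table with a Nullable value column (#9014).
CREATE TABLE j (k UInt32, v Nullable(String)) ENGINE = Join(ANY, LEFT, k);
INSERT INTO j VALUES (1, 'one'), (2, NULL);

SELECT
    joinGet('j', 'v', toUInt32(1)),  -- 'one'
    joinGet('j', 'v', toUInt32(2));  -- NULL
```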
@ -2626,7 +2626,7 @@ No changes compared to v20.4.3.16-stable.

* Fix the error `Data compressed with different methods` that can happen if `min_bytes_to_use_direct_io` is enabled and PREWHERE is active and using SAMPLE or high number of threads. This fixes [#11539](https://github.com/ClickHouse/ClickHouse/issues/11539). [#11540](https://github.com/ClickHouse/ClickHouse/pull/11540) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix return compressed size for codecs. [#11448](https://github.com/ClickHouse/ClickHouse/pull/11448) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix server crash when a column has compression codec with non-literal arguments. Fixes [#11365](https://github.com/ClickHouse/ClickHouse/issues/11365). [#11431](https://github.com/ClickHouse/ClickHouse/pull/11431) ([alesapin](https://github.com/alesapin)).
* Fix pointInPolygon with nan as point. Fixes https://github.com/ClickHouse/ClickHouse/issues/11375. [#11421](https://github.com/ClickHouse/ClickHouse/pull/11421) ([Alexey Ilyukhov](https://github.com/livace)).
* Fix pointInPolygon with nan as point. Fixes [#11375](https://github.com/ClickHouse/ClickHouse/issues/11375). [#11421](https://github.com/ClickHouse/ClickHouse/pull/11421) ([Alexey Ilyukhov](https://github.com/livace)).
* Fixed geohashesInBox with arguments outside of latitude/longitude range. [#11403](https://github.com/ClickHouse/ClickHouse/pull/11403) ([Vasily Nemkov](https://github.com/Enmk)).
* Fix possible `Pipeline stuck` error for queries with external sort and limit. Fixes [#11359](https://github.com/ClickHouse/ClickHouse/issues/11359). [#11366](https://github.com/ClickHouse/ClickHouse/pull/11366) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix crash in `quantilesExactWeightedArray`. [#11337](https://github.com/ClickHouse/ClickHouse/pull/11337) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

@ -2636,7 +2636,7 @@ No changes compared to v20.4.3.16-stable.

* Fix potential uninitialized memory in conversion. Example: `SELECT toIntervalSecond(now64())`. [#11311](https://github.com/ClickHouse/ClickHouse/pull/11311) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix the issue when index analysis cannot work if a table has Array column in primary key and if a query is filtering by this column with `empty` or `notEmpty` functions. This fixes [#11286](https://github.com/ClickHouse/ClickHouse/issues/11286). [#11303](https://github.com/ClickHouse/ClickHouse/pull/11303) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix bug when query speed estimation can be incorrect and the limit of `min_execution_speed` may not work or work incorrectly if the query is throttled by `max_network_bandwidth`, `max_execution_speed` or `priority` settings. Change the default value of `timeout_before_checking_execution_speed` to non-zero, because otherwise the settings `min_execution_speed` and `max_execution_speed` have no effect. This fixes [#11297](https://github.com/ClickHouse/ClickHouse/issues/11297). This fixes [#5732](https://github.com/ClickHouse/ClickHouse/issues/5732). This fixes [#6228](https://github.com/ClickHouse/ClickHouse/issues/6228). Usability improvement: avoid concatenation of exception message with progress bar in `clickhouse-client`. [#11296](https://github.com/ClickHouse/ClickHouse/pull/11296) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix crash while reading malformed data in Protobuf format. This fixes https://github.com/ClickHouse/ClickHouse/issues/5957, fixes https://github.com/ClickHouse/ClickHouse/issues/11203. [#11258](https://github.com/ClickHouse/ClickHouse/pull/11258) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix crash while reading malformed data in Protobuf format. This fixes [#5957](https://github.com/ClickHouse/ClickHouse/issues/5957), fixes [#11203](https://github.com/ClickHouse/ClickHouse/issues/11203). [#11258](https://github.com/ClickHouse/ClickHouse/pull/11258) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix possible error `Cannot capture column` for higher-order functions with `Array(Array(LowCardinality))` captured argument. [#11185](https://github.com/ClickHouse/ClickHouse/pull/11185) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* If data skipping index is dependent on columns that are going to be modified during background merge (for SummingMergeTree, AggregatingMergeTree as well as for TTL GROUP BY), it was calculated incorrectly. This issue is fixed by moving index calculation after merge so the index is calculated on merged data. [#11162](https://github.com/ClickHouse/ClickHouse/pull/11162) ([Azat Khuzhin](https://github.com/azat)).
* Remove logging from mutation finalization task if nothing was finalized. [#11109](https://github.com/ClickHouse/ClickHouse/pull/11109) ([alesapin](https://github.com/alesapin)).

@ -2914,7 +2914,7 @@ No changes compared to v20.4.3.16-stable.

* Several improvements to ClickHouse grammar in `.g4` file. [#8294](https://github.com/ClickHouse/ClickHouse/pull/8294) ([taiyang-li](https://github.com/taiyang-li))
* Fix bug that leads to crashes in `JOIN`s with tables with engine `Join`. This fixes [#7556](https://github.com/ClickHouse/ClickHouse/issues/7556) [#8254](https://github.com/ClickHouse/ClickHouse/issues/8254) [#7915](https://github.com/ClickHouse/ClickHouse/issues/7915) [#8100](https://github.com/ClickHouse/ClickHouse/issues/8100). [#8298](https://github.com/ClickHouse/ClickHouse/pull/8298) ([Artem Zuikov](https://github.com/4ertus2))
* Fix redundant dictionaries reload on `CREATE DATABASE`. [#7916](https://github.com/ClickHouse/ClickHouse/pull/7916) ([Azat Khuzhin](https://github.com/azat))
* Limit maximum number of streams for read from `StorageFile` and `StorageHDFS`. Fixes https://github.com/ClickHouse/ClickHouse/issues/7650. [#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
* Limit maximum number of streams for read from `StorageFile` and `StorageHDFS`. Fixes [#7650](https://github.com/ClickHouse/ClickHouse/issues/7650). [#7981](https://github.com/ClickHouse/ClickHouse/pull/7981) ([alesapin](https://github.com/alesapin))
* Fix bug in `ALTER ... MODIFY ... CODEC` query, when user specify both default expression and codec. Fixes [#8593](https://github.com/ClickHouse/ClickHouse/issues/8593). [#8614](https://github.com/ClickHouse/ClickHouse/pull/8614) ([alesapin](https://github.com/alesapin))
* Fix error in background merge of columns with `SimpleAggregateFunction(LowCardinality)` type. [#8613](https://github.com/ClickHouse/ClickHouse/pull/8613) ([Nikolai Kochetov](https://github.com/KochetovNicolai))
* Fixed type check in function `toDateTime64`. [#8375](https://github.com/ClickHouse/ClickHouse/pull/8375) ([Vasily Nemkov](https://github.com/Enmk))
@ -2998,7 +2998,7 @@ No changes compared to v20.4.3.16-stable.

* Added check for extra parts of `MergeTree` at different disks, in order to not allow to miss data parts at undefined disks. [#8118](https://github.com/ClickHouse/ClickHouse/pull/8118) ([Vladimir Chebotarev](https://github.com/excitoon))
* Enable SSL support for Mac client and server. [#8297](https://github.com/ClickHouse/ClickHouse/pull/8297) ([Ivan](https://github.com/abyss7))
* Now ClickHouse can work as MySQL federated server (see https://dev.mysql.com/doc/refman/5.7/en/federated-create-server.html). [#7717](https://github.com/ClickHouse/ClickHouse/pull/7717) ([Maxim Fedotov](https://github.com/MaxFedotov))
* `clickhouse-client` now only enable `bracketed-paste` when multiquery is on and multiline is off. This fixes (#7757)[https://github.com/ClickHouse/ClickHouse/issues/7757]. [#7761](https://github.com/ClickHouse/ClickHouse/pull/7761) ([Amos Bird](https://github.com/amosbird))
* `clickhouse-client` now only enable `bracketed-paste` when multiquery is on and multiline is off. This fixes [#7757](https://github.com/ClickHouse/ClickHouse/issues/7757). [#7761](https://github.com/ClickHouse/ClickHouse/pull/7761) ([Amos Bird](https://github.com/amosbird))
* Support `Array(Decimal)` in `if` function. [#7721](https://github.com/ClickHouse/ClickHouse/pull/7721) ([Artem Zuikov](https://github.com/4ertus2))
* Support Decimals in `arrayDifference`, `arrayCumSum` and `arrayCumSumNegative` functions; see the example below. [#7724](https://github.com/ClickHouse/ClickHouse/pull/7724) ([Artem Zuikov](https://github.com/4ertus2))
* Added `lifetime` column to `system.dictionaries` table. [#6820](https://github.com/ClickHouse/ClickHouse/issues/6820) [#7727](https://github.com/ClickHouse/ClickHouse/pull/7727) ([kekekekule](https://github.com/kekekekule))
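Decimal arrays in the functions mentioned above, as a quick sketch (scales and values are arbitrary):

```sql
-- Decimal support in array arithmetic (#7724).
SELECT arrayCumSum([toDecimal32(1.50, 2), toDecimal32(2.25, 2)]);     -- [1.5, 3.75]
SELECT arrayDifference([toDecimal32(1.50, 2), toDecimal32(2.25, 2)]); -- [0, 0.75]

-- Array(Decimal) accepted by `if` (#7721).
SELECT if(1, [toDecimal32(1.0, 2)], [toDecimal32(2.0, 2)]);
```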
@ -223,8 +223,8 @@ if (ARCH_NATIVE)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
endif ()

if (UNBUNDLED AND (COMPILER_GCC OR COMPILER_CLANG))
# to make numeric_limits<__int128> works for unbundled build
if (COMPILER_GCC OR COMPILER_CLANG)
# to make numeric_limits<__int128> works with GCC
set (_CXX_STANDARD "-std=gnu++2a")
else()
set (_CXX_STANDARD "-std=c++2a")

@ -457,6 +457,7 @@ include (cmake/find/s3.cmake)
include (cmake/find/base64.cmake)
include (cmake/find/parquet.cmake)
include (cmake/find/simdjson.cmake)
include (cmake/find/fast_float.cmake)
include (cmake/find/rapidjson.cmake)
include (cmake/find/fastops.cmake)
include (cmake/find/odbc.cmake)
@ -58,8 +58,7 @@ public:
using signed_base_type = int64_t;

// ctors
integer() = default;
constexpr integer() noexcept;
template <typename T>
constexpr integer(T rhs) noexcept;
template <typename T>

@ -916,6 +916,11 @@ public:

// Members

template <size_t Bits, typename Signed>
constexpr integer<Bits, Signed>::integer() noexcept
    : items{}
{}

template <size_t Bits, typename Signed>
template <typename T>
constexpr integer<Bits, Signed>::integer(T rhs) noexcept
@ -761,14 +761,14 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
static KillingErrorHandler killing_error_handler;
Poco::ErrorHandler::set(&killing_error_handler);

signal_pipe.setNonBlocking();
signal_pipe.setNonBlockingWrite();
signal_pipe.tryIncreaseSize(1 << 20);

signal_listener = std::make_unique<SignalListener>(*this);
signal_listener_thread.start(*signal_listener);

#if defined(__ELF__) && !defined(__FreeBSD__)
String build_id_hex = DB::SymbolIndex::instance().getBuildIDHex();
String build_id_hex = DB::SymbolIndex::instance()->getBuildIDHex();
if (build_id_hex.empty())
    build_id_info = "no build id";
else

@ -179,7 +179,7 @@ void SentryWriter::onFault(int sig, const std::string & error_message, const Sta
sentry_set_extra("signal_number", sentry_value_new_int32(sig));

#if defined(__ELF__) && !defined(__FreeBSD__)
const String & build_id_hex = DB::SymbolIndex::instance().getBuildIDHex();
const String & build_id_hex = DB::SymbolIndex::instance()->getBuildIDHex();
sentry_set_tag("build_id", build_id_hex.c_str());
#endif
@ -248,7 +248,7 @@ bool Pool::Entry::tryForceConnected() const
if (prev_connection_id != current_connection_id)
{
auto & logger = Poco::Util::Application::instance().logger();
logger.information("Connection to mysql server has been reestablished. Connection id changed: %d -> %d",
logger.information("Connection to mysql server has been reestablished. Connection id changed: %lu -> %lu",
    prev_connection_id, current_connection_id);
}
return true;

@ -22,4 +22,12 @@ ResultBase::~ResultBase()
mysql_free_result(res);
}

std::string ResultBase::getFieldName(size_t n) const
{
if (num_fields <= n)
    throw Exception(std::string("Unknown column position ") + std::to_string(n));

return fields[n].name;
}

}

@ -31,6 +31,8 @@ public:
MYSQL_RES * getRes() { return res; }
const Query * getQuery() const { return query; }

std::string getFieldName(size_t n) const;

virtual ~ResultBase();

protected:
cmake/find/fast_float.cmake (new file, +6)
@ -0,0 +1,6 @@
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/fast_float/include/fast_float/fast_float.h")
message (FATAL_ERROR "submodule contrib/fast_float is missing. to fix try run: \n git submodule update --init --recursive")
endif ()

set(FAST_FLOAT_LIBRARY fast_float)
set(FAST_FLOAT_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/fast_float/include/")
contrib/CMakeLists.txt (vendored, +5)
@ -26,6 +26,7 @@ add_subdirectory (boost-cmake)
add_subdirectory (cctz-cmake)
add_subdirectory (consistent-hashing-sumbur)
add_subdirectory (consistent-hashing)
add_subdirectory (dragonbox-cmake)
add_subdirectory (FastMemcpy)
add_subdirectory (hyperscan-cmake)
add_subdirectory (jemalloc-cmake)
@ -35,7 +36,6 @@ add_subdirectory (libmetrohash)
add_subdirectory (lz4-cmake)
add_subdirectory (murmurhash)
add_subdirectory (replxx-cmake)
add_subdirectory (ryu-cmake)
add_subdirectory (unixodbc-cmake)
add_subdirectory (xz)

@ -322,3 +322,6 @@
if (USE_INTERNAL_ROCKSDB_LIBRARY)
add_subdirectory(rocksdb-cmake)
endif()

add_subdirectory(fast_float)
contrib/boost (vendored submodule)
@ -1 +1 @@
Subproject commit a04e72c0464f0c31d3384f18f0c0db36a05538e0
Subproject commit a7ceabe4747ecc3309dd3dcd9de4b29660dfd298

contrib/dragonbox (new vendored submodule)
@ -0,0 +1 @@
Subproject commit b2751c65c0592c0239aec3becd53d0ea2fde9329

contrib/dragonbox-cmake/CMakeLists.txt (new file, +5)
@ -0,0 +1,5 @@
set(LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/dragonbox")

add_library(dragonbox_to_chars "${LIBRARY_DIR}/source/dragonbox_to_chars.cpp")

target_include_directories(dragonbox_to_chars SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}/include/")

contrib/fast_float (new vendored submodule)
@ -0,0 +1 @@
Subproject commit 7eae925b51fd0f570ccd5c880c12e3e27a23b86f

contrib/poco (vendored submodule)
@ -1 +1 @@
Subproject commit f3d791f6568b99366d089b4479f76a515beb66d5
Subproject commit 08974cc024b2e748f5b1d45415396706b3521d0f

contrib/rocksdb (vendored submodule)
@ -1 +1 @@
Subproject commit 35d8e36ef1b8e3e0759ca81215f855226a0a54bd
Subproject commit 8b966f0ca298fc1475bd09d9775f32dff0fdce0a
@ -345,6 +345,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_cache.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc
${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_reader.cc
@ -460,6 +461,7 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc
${ROCKSDB_SOURCE_DIR}/options/cf_options.cc
${ROCKSDB_SOURCE_DIR}/options/configurable.cc
${ROCKSDB_SOURCE_DIR}/options/customizable.cc
${ROCKSDB_SOURCE_DIR}/options/db_options.cc
${ROCKSDB_SOURCE_DIR}/options/options.cc
${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
@ -583,8 +585,9 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_tracker.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point_lock_tracker.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_manager.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_tracker.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point/point_lock_manager.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc
@ -592,7 +595,6 @@ set(SOURCES
${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_lock_mgr.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc
${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc
contrib/ryu (vendored submodule, removed)
@ -1 +0,0 @@
Subproject commit 5b4a853534b47438b4d97935370f6b2397137c2b

contrib/ryu-cmake/CMakeLists.txt (deleted file)
@ -1,10 +0,0 @@
SET(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/ryu)

add_library(ryu
${LIBRARY_DIR}/ryu/d2fixed.c
${LIBRARY_DIR}/ryu/d2s.c
${LIBRARY_DIR}/ryu/f2s.c
${LIBRARY_DIR}/ryu/generic_128.c
)

target_include_directories(ryu SYSTEM BEFORE PUBLIC "${LIBRARY_DIR}")
@ -137,7 +137,6 @@ function clone_submodules
contrib/libxml2
contrib/poco
contrib/libunwind
contrib/ryu
contrib/fmtlib
contrib/base64
contrib/cctz
@ -155,6 +154,8 @@ function clone_submodules
contrib/croaring
contrib/miniselect
contrib/xz
contrib/dragonbox
contrib/fast_float
)

git submodule sync
@ -318,6 +319,9 @@ function run_tests
01545_system_errors
# Checks system.errors
01563_distributed_query_finish

# nc - command not found
01601_proxy_protocol
)

time clickhouse-test -j 8 --order=random --no-long --testname --shard --zookeeper --skip "${TESTS_TO_SKIP[@]}" -- "$FASTTEST_FOCUS" 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/test_log.txt"
@@ -7,6 +7,11 @@ trap 'kill $(jobs -pr) ||:' EXIT
stage=${stage:-}
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

+# upstream/master
+LEFT_SERVER_PORT=9001
+# patched version
+RIGHT_SERVER_PORT=9002
+
function wait_for_server # port, pid
{
    for _ in {1..60}
@@ -37,25 +42,32 @@ function configure
    rm right/config/config.d/text_log.xml ||:
    cp -rv right/config left ||:

-   sed -i 's/<tcp_port>900./<tcp_port>9001/g' left/config/config.xml
-   sed -i 's/<tcp_port>900./<tcp_port>9002/g' right/config/config.xml

    # Start a temporary server to rename the tables
    while killall clickhouse-server; do echo . ; sleep 1 ; done
    echo all killed

    set -m # Spawn temporary in its own process groups
-   left/clickhouse-server --config-file=left/config/config.xml -- --path db0 --user_files_path db0/user_files &> setup-server-log.log &
+   local setup_left_server_opts=(
+       # server options
+       --config-file=left/config/config.xml
+       --
+       # server *config* directives overrides
+       --path db0
+       --user_files_path db0/user_files
+       --tcp_port $LEFT_SERVER_PORT
+   )
+   left/clickhouse-server "${setup_left_server_opts[@]}" &> setup-server-log.log &
    left_pid=$!
    kill -0 $left_pid
    disown $left_pid
    set +m

-   wait_for_server 9001 $left_pid
+   wait_for_server $LEFT_SERVER_PORT $left_pid
    echo Server for setup started

-   clickhouse-client --port 9001 --query "create database test" ||:
-   clickhouse-client --port 9001 --query "rename table datasets.hits_v1 to test.hits" ||:
+   clickhouse-client --port $LEFT_SERVER_PORT --query "create database test" ||:
+   clickhouse-client --port $LEFT_SERVER_PORT --query "rename table datasets.hits_v1 to test.hits" ||:

    while killall clickhouse-server; do echo . ; sleep 1 ; done
    echo all killed
@@ -83,16 +95,30 @@ function restart

    set -m # Spawn servers in their own process groups

-   left/clickhouse-server --config-file=left/config/config.xml \
-       -- --path left/db --user_files_path left/db/user_files \
-       &>> left-server-log.log &
+   local left_server_opts=(
+       # server options
+       --config-file=left/config/config.xml
+       --
+       # server *config* directives overrides
+       --path left/db
+       --user_files_path left/db/user_files
+       --tcp_port $LEFT_SERVER_PORT
+   )
+   left/clickhouse-server "${left_server_opts[@]}" &>> left-server-log.log &
    left_pid=$!
    kill -0 $left_pid
    disown $left_pid

-   right/clickhouse-server --config-file=right/config/config.xml \
-       -- --path right/db --user_files_path right/db/user_files \
-       &>> right-server-log.log &
+   local right_server_opts=(
+       # server options
+       --config-file=right/config/config.xml
+       --
+       # server *config* directives overrides
+       --path right/db
+       --user_files_path right/db/user_files
+       --tcp_port $RIGHT_SERVER_PORT
+   )
+   right/clickhouse-server "${right_server_opts[@]}" &>> right-server-log.log &
    right_pid=$!
    kill -0 $right_pid
    disown $right_pid
@@ -101,16 +127,16 @@ function restart

    unset MALLOC_CONF

-   wait_for_server 9001 $left_pid
+   wait_for_server $LEFT_SERVER_PORT $left_pid
    echo left ok

-   wait_for_server 9002 $right_pid
+   wait_for_server $RIGHT_SERVER_PORT $right_pid
    echo right ok

-   clickhouse-client --port 9001 --query "select * from system.tables where database != 'system'"
-   clickhouse-client --port 9001 --query "select * from system.build_options"
-   clickhouse-client --port 9002 --query "select * from system.tables where database != 'system'"
-   clickhouse-client --port 9002 --query "select * from system.build_options"
+   clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.tables where database != 'system'"
+   clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.build_options"
+   clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.tables where database != 'system'"
+   clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.build_options"

    # Check again that both servers we started are running -- this is important
    # for running locally, when there might be some other servers started and we
@@ -199,9 +225,9 @@ function run_tests
    for test in $test_files
    do
        # Check that both servers are alive, and restart them if they die.
-       clickhouse-client --port 9001 --query "select 1 format Null" \
+       clickhouse-client --port $LEFT_SERVER_PORT --query "select 1 format Null" \
            || { echo $test_name >> left-server-died.log ; restart ; }
-       clickhouse-client --port 9002 --query "select 1 format Null" \
+       clickhouse-client --port $RIGHT_SERVER_PORT --query "select 1 format Null" \
            || { echo $test_name >> right-server-died.log ; restart ; }

        test_name=$(basename "$test" ".xml")
@@ -215,7 +241,7 @@ function run_tests
        # The grep is to filter out set -x output and keep only time output.
        # The '2>&1 >/dev/null' redirects stderr to stdout, and discards stdout.
        { \
-           time "$script_dir/perf.py" --host localhost localhost --port 9001 9002 \
+           time "$script_dir/perf.py" --host localhost localhost --port $LEFT_SERVER_PORT $RIGHT_SERVER_PORT \
                --runs "$CHPC_RUNS" --max-queries "$CHPC_MAX_QUERIES" \
                --profile-seconds "$profile_seconds" \
                -- "$test" > "$test_name-raw.tsv" 2> "$test_name-err.log" ; \
@@ -257,36 +283,36 @@ function get_profiles_watchdog
function get_profiles
{
    # Collect the profiles
-   clickhouse-client --port 9001 --query "set query_profiler_cpu_time_period_ns = 0"
-   clickhouse-client --port 9001 --query "set query_profiler_real_time_period_ns = 0"
-   clickhouse-client --port 9001 --query "system flush logs" &
+   clickhouse-client --port $LEFT_SERVER_PORT --query "set query_profiler_cpu_time_period_ns = 0"
+   clickhouse-client --port $LEFT_SERVER_PORT --query "set query_profiler_real_time_period_ns = 0"
+   clickhouse-client --port $LEFT_SERVER_PORT --query "system flush logs" &

-   clickhouse-client --port 9002 --query "set query_profiler_cpu_time_period_ns = 0"
-   clickhouse-client --port 9002 --query "set query_profiler_real_time_period_ns = 0"
-   clickhouse-client --port 9002 --query "system flush logs" &
+   clickhouse-client --port $RIGHT_SERVER_PORT --query "set query_profiler_cpu_time_period_ns = 0"
+   clickhouse-client --port $RIGHT_SERVER_PORT --query "set query_profiler_real_time_period_ns = 0"
+   clickhouse-client --port $RIGHT_SERVER_PORT --query "system flush logs" &

    wait

-   clickhouse-client --port 9001 --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
-   clickhouse-client --port 9001 --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > left-query-thread-log.tsv ||: &
-   clickhouse-client --port 9001 --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: &
-   clickhouse-client --port 9001 --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: &
-   clickhouse-client --port 9001 --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: &
-   clickhouse-client --port 9001 --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: &
+   clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > left-query-log.tsv ||: &
+   clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > left-query-thread-log.tsv ||: &
+   clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > left-trace-log.tsv ||: &
+   clickhouse-client --port $LEFT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > left-addresses.tsv ||: &
+   clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > left-metric-log.tsv ||: &
+   clickhouse-client --port $LEFT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > left-async-metric-log.tsv ||: &

-   clickhouse-client --port 9002 --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
-   clickhouse-client --port 9002 --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > right-query-thread-log.tsv ||: &
-   clickhouse-client --port 9002 --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: &
-   clickhouse-client --port 9002 --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: &
-   clickhouse-client --port 9002 --query "select * from system.metric_log format TSVWithNamesAndTypes" > right-metric-log.tsv ||: &
-   clickhouse-client --port 9002 --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > right-async-metric-log.tsv ||: &
+   clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_log where type = 2 format TSVWithNamesAndTypes" > right-query-log.tsv ||: &
+   clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.query_thread_log format TSVWithNamesAndTypes" > right-query-thread-log.tsv ||: &
+   clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.trace_log format TSVWithNamesAndTypes" > right-trace-log.tsv ||: &
+   clickhouse-client --port $RIGHT_SERVER_PORT --query "select arrayJoin(trace) addr, concat(splitByChar('/', addressToLine(addr))[-1], '#', demangle(addressToSymbol(addr)) ) name from system.trace_log group by addr format TSVWithNamesAndTypes" > right-addresses.tsv ||: &
+   clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.metric_log format TSVWithNamesAndTypes" > right-metric-log.tsv ||: &
+   clickhouse-client --port $RIGHT_SERVER_PORT --query "select * from system.asynchronous_metric_log format TSVWithNamesAndTypes" > right-async-metric-log.tsv ||: &

    wait

    # Just check that the servers are alive so that we return a proper exit code.
    # We don't consistently check the return codes of the above background jobs.
-   clickhouse-client --port 9001 --query "select 1"
-   clickhouse-client --port 9002 --query "select 1"
+   clickhouse-client --port $LEFT_SERVER_PORT --query "select 1"
+   clickhouse-client --port $RIGHT_SERVER_PORT --query "select 1"
}

function build_log_column_definitions

@@ -2,6 +2,7 @@
    <http_port remove="remove"/>
    <mysql_port remove="remove"/>
    <interserver_http_port remove="remove"/>
+   <tcp_with_proxy_port remove="remove"/>
    <listen_host>::</listen_host>

    <logger>

@@ -177,8 +177,6 @@ When you `INSERT` a bunch of data into `MergeTree`, that bunch is sorted by prim

`MergeTree` is not an LSM tree because it does not contain a “memtable” or a “log”: inserted data is written directly to the filesystem. This makes it suitable only for inserting data in batches, not by individual row and not very frequently – about once per second is fine, but a thousand times a second is not. We did it this way for simplicity’s sake, and because we are already inserting data in batches in our applications.
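
As a hypothetical illustration of the batching advice above (the table and the values are invented for this sketch, not taken from the document):

``` sql
-- One INSERT carrying a whole batch of rows creates a single new part;
-- thousands of single-row INSERTs per second would create a flood of tiny parts.
CREATE TABLE hits_batch (dt DateTime, user_id UInt64)
ENGINE = MergeTree ORDER BY dt;

INSERT INTO hits_batch VALUES
    ('2020-12-01 00:00:00', 1),
    ('2020-12-01 00:00:01', 2),
    ('2020-12-01 00:00:02', 3);
```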

-> MergeTree tables can only have one (primary) index: there aren’t any secondary indices. It would be nice to allow multiple physical representations under one logical table, for example, to store data in more than one physical order or even to allow representations with pre-aggregated data along with original data.
-
There are MergeTree engines that are doing additional work during background merges. Examples are `CollapsingMergeTree` and `AggregatingMergeTree`. This could be treated as special support for updates. Keep in mind that these are not real updates because users usually have no control over the time when background merges are executed, and data in a `MergeTree` table is almost always stored in more than one part, not in completely merged form.

## Replication {#replication}

docs/en/getting-started/example-datasets/github-events.md (new file, 11 lines):
@@ -0,0 +1,11 @@
+---
+toc_priority: 11
+toc_title: GitHub Events
+---
+
+# GitHub Events Dataset
+
+The dataset contains all events on GitHub from 2011 through Dec 6 2020; its size is 3.1 billion records. The download size is 75 GB, and it requires up to 200 GB of space on disk if stored in a table with lz4 compression.
+
+The full dataset description, insights, download instructions, and interactive queries are posted [here](https://github-sql.github.io/explorer/).

@@ -1,6 +1,6 @@
---
toc_folder_title: Example Datasets
-toc_priority: 14
+toc_priority: 10
toc_title: Introduction
---

@@ -18,4 +18,4 @@ The list of documented datasets:
- [New York Taxi Data](../../getting-started/example-datasets/nyc-taxi.md)
- [OnTime](../../getting-started/example-datasets/ontime.md)

-[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets) <!--hide-->
+[Original article](https://clickhouse.tech/docs/en/getting_started/example_datasets) <!--hide-->

@@ -117,8 +117,8 @@ You can compile packages and install them or use programs without installing pac

You’ll need to create data and metadata folders and `chown` them for the desired user. Their paths can be changed in the server config (src/programs/server/config.xml); by default they are:

-   /opt/clickhouse/data/default/
-   /opt/clickhouse/metadata/default/
+   /var/lib/clickhouse/data/default/
+   /var/lib/clickhouse/metadata/default/

On Gentoo, you can just use `emerge clickhouse` to install ClickHouse from sources.

@@ -113,7 +113,8 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
- `--port` – The port to connect to. Default value: 9000. Note that the HTTP interface and the native interface use different ports.
- `--user, -u` – The username. Default value: default.
- `--password` – The password. Default value: empty string.
-- `--query, -q` – The query to process when using non-interactive mode.
+- `--query, -q` – The query to process when using non-interactive mode. You must specify either the `query` or the `queries-file` option.
+- `--queries-file, -qf` – file path with queries to execute. You must specify either the `query` or the `queries-file` option.
- `--database, -d` – Select the current default database. Default value: the current database from the server settings (‘default’ by default).
- `--multiline, -m` – If specified, allow multiline queries (do not send the query on Enter).
- `--multiquery, -n` – If specified, allow processing multiple queries separated by semicolons.

@@ -58,6 +58,7 @@ The supported formats are:
| [XML](#xml) | ✗ | ✔ |
| [CapnProto](#capnproto) | ✔ | ✗ |
| [LineAsString](#lineasstring) | ✔ | ✗ |
+| [RawBLOB](#rawblob) | ✔ | ✔ |

You can control some format processing parameters with the ClickHouse settings. For more information read the [Settings](../operations/settings/settings.md) section.

@@ -457,7 +458,10 @@ This format is only appropriate for outputting a query result, but not for parsi

ClickHouse supports [NULL](../sql-reference/syntax.md), which is displayed as `null` in the JSON output. To enable `+nan`, `-nan`, `+inf`, `-inf` values in the output, set the [output_format_json_quote_denormals](../operations/settings/settings.md#settings-output_format_json_quote_denormals) setting to 1.

-See also the [JSONEachRow](#jsoneachrow) format.
+**See Also**
+
+- [JSONEachRow](#jsoneachrow) format
+- [output_format_json_array_of_rows](../operations/settings/settings.md#output-format-json-array-of-rows) setting

## JSONString {#jsonstring}

@@ -1367,4 +1371,45 @@
└───────────────────────────────────────────────────┘
```

+## RawBLOB {#rawblob}
+
+In this format, all input data is read into a single value. It is possible to parse only a table with a single field of type [String](../sql-reference/data-types/string.md) or similar.
+The result is output in binary format without delimiters and escaping. If more than one value is output, the format is ambiguous, and it will be impossible to read the data back.
+
+Below is a comparison of the formats `RawBLOB` and [TabSeparatedRaw](#tabseparatedraw).
+`RawBLOB`:
+- data is output in binary format, no escaping;
+- there are no delimiters between values;
+- no newline at the end of each value.
+[TabSeparatedRaw](#tabseparatedraw):
+- data is output without escaping;
+- the rows contain values separated by tabs;
+- there is a line feed after the last value in every row.
+
+The following is a comparison of the `RawBLOB` and [RowBinary](#rowbinary) formats.
+`RawBLOB`:
+- String fields are output without being prefixed by length.
+`RowBinary`:
+- String fields are represented as a length in varint format (unsigned [LEB128](https://en.wikipedia.org/wiki/LEB128)), followed by the bytes of the string.
+
+When empty data is passed to the `RawBLOB` input, ClickHouse throws an exception:
+
+``` text
+Code: 108. DB::Exception: No data to insert
+```
+
+**Example**
+
+``` bash
+$ clickhouse-client --query "CREATE TABLE {some_table} (a String) ENGINE = Memory;"
+$ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT RawBLOB"
+$ clickhouse-client --query "SELECT * FROM {some_table} FORMAT RawBLOB" | md5sum
+```
+
+Result:
+
+``` text
+f9725a22f9191e064120d718e26862a9 -
+```
+
[Original article](https://clickhouse.tech/docs/en/interfaces/formats/) <!--hide-->

@@ -48,6 +48,7 @@ toc_title: Adopters
| <a href="https://www.flipkart.com/" class="favicon">Flipkart</a> | e-Commerce | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=239) |
| <a href="https://fun.co/rp" class="favicon">FunCorp</a> | Games | | — | — | [Article](https://www.altinity.com/blog/migrating-from-redshift-to-clickhouse) |
| <a href="https://geniee.co.jp" class="favicon">Geniee</a> | Ad network | Main product | — | — | [Blog post in Japanese, July 2017](https://tech.geniee.co.jp/entry/2017/07/20/160100) |
+| <a href="https://www.genotek.ru/" class="favicon">Genotek</a> | Bioinformatics | Main product | — | — | [Video, August 2020](https://youtu.be/v3KyZbz9lEE) |
| <a href="https://www.huya.com/" class="favicon">HUYA</a> | Video Streaming | Analytics | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/7.%20ClickHouse万亿数据分析实践%20李本旺(sundy-li)%20虎牙.pdf) |
| <a href="https://www.the-ica.com/" class="favicon">ICA</a> | FinTech | Risk Management | — | — | [Blog Post in English, Sep 2020](https://altinity.com/blog/clickhouse-vs-redshift-performance-for-fintech-risk-management?utm_campaign=ClickHouse%20vs%20RedShift&utm_content=143520807&utm_medium=social&utm_source=twitter&hss_channel=tw-3894792263) |
| <a href="https://www.idealista.com" class="favicon">Idealista</a> | Real Estate | Analytics | — | — | [Blog Post in English, April 2019](https://clickhouse.tech/blog/en/clickhouse-meetup-in-madrid-on-april-2-2019) |

@@ -2360,10 +2360,89 @@ Default value: `1`.

## output_format_tsv_null_representation {#output_format_tsv_null_representation}

-Allows a configurable `NULL` representation for the [TSV](../../interfaces/formats.md#tabseparated) output format. The setting only controls the output format; `\N` is the only supported `NULL` representation for the TSV input format.
+Defines the representation of `NULL` for the [TSV](../../interfaces/formats.md#tabseparated) output format. A user can set any string as a value, for example, `My NULL`.

Default value: `\N`.
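
Note that the examples below read from a table named `tsv_custom_null` that the page never defines; a minimal sketch of a setup that would produce exactly this output (the table name, engine, and values are assumptions) is:

```sql
CREATE TABLE tsv_custom_null (a Nullable(UInt32)) ENGINE = TinyLog;
INSERT INTO tsv_custom_null VALUES (788), (NULL), (NULL);
```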

+**Examples**
+
+Query
+
+```sql
+SELECT * FROM tsv_custom_null FORMAT TSV;
+```
+
+Result
+
+```text
+788
+\N
+\N
+```
+
+Query
+
+```sql
+SET output_format_tsv_null_representation = 'My NULL';
+SELECT * FROM tsv_custom_null FORMAT TSV;
+```
+
+Result
+
+```text
+788
+My NULL
+My NULL
+```
+
+## output_format_json_array_of_rows {#output-format-json-array-of-rows}
+
+Enables the ability to output all rows as a JSON array in the [JSONEachRow](../../interfaces/formats.md#jsoneachrow) format.
+
+Possible values:
+
+- 1 — ClickHouse outputs all rows as an array, each row in the `JSONEachRow` format.
+- 0 — ClickHouse outputs each row separately in the `JSONEachRow` format.
+
+Default value: `0`.
+
+**Example of a query with the enabled setting**
+
+Query:
+
+```sql
+SET output_format_json_array_of_rows = 1;
+SELECT number FROM numbers(3) FORMAT JSONEachRow;
+```
+
+Result:
+
+```text
+[
+{"number":"0"},
+{"number":"1"},
+{"number":"2"}
+]
+```
+
+**Example of a query with the disabled setting**
+
+Query:
+
+```sql
+SET output_format_json_array_of_rows = 0;
+SELECT number FROM numbers(3) FORMAT JSONEachRow;
+```
+
+Result:
+
+```text
+{"number":"0"}
+{"number":"1"}
+{"number":"2"}
+```
+
## allow_nullable_key {#allow-nullable-key}

Allows the use of [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable)-typed values in the sorting key and the primary key of [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree) tables.
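
A short sketch of the setting in use (the table name and schema are assumptions; once the setting is enabled, the key column may hold `NULL`):

```sql
SET allow_nullable_key = 1;

CREATE TABLE events_nullable_key (d Nullable(Date), v UInt32)
ENGINE = MergeTree ORDER BY d;

INSERT INTO events_nullable_key VALUES (NULL, 1), (toDate('2020-12-01'), 2);
```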

docs/en/operations/system-tables/distribution_queue.md (new file, 42 lines):
@@ -0,0 +1,42 @@
+# system.distribution_queue {#system_tables-distribution_queue}
+
+Contains information about local files that are in the queue to be sent to the shards. These local files contain new parts that are created by inserting new data into the Distributed table in asynchronous mode.
+
+Columns:
+
+- `database` ([String](../../sql-reference/data-types/string.md)) — Name of the database.
+
+- `table` ([String](../../sql-reference/data-types/string.md)) — Name of the table.
+
+- `data_path` ([String](../../sql-reference/data-types/string.md)) — Path to the folder with local files.
+
+- `is_blocked` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Flag indicating whether sending local files to the server is blocked.
+
+- `error_count` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of errors.
+
+- `data_files` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of local files in a folder.
+
+- `data_compressed_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Size of compressed data in local files, in bytes.
+
+- `last_exception` ([String](../../sql-reference/data-types/string.md)) — Text message about the last error that occurred (if any).
+
+**Example**
+
+``` sql
+SELECT * FROM system.distribution_queue LIMIT 1 FORMAT Vertical;
+```
+
+``` text
+Row 1:
+──────
+database:              default
+table:                 dist
+data_path:             ./store/268/268bc070-3aad-4b1a-9cf2-4987580161af/default@127%2E0%2E0%2E2:9000/
+is_blocked:            1
+error_count:           0
+data_files:            1
+data_compressed_bytes: 499
+last_exception:
+```
+
+[Original article](https://clickhouse.tech/docs/en/operations/system_tables/distribution_queue) <!--hide-->
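
As a hedged sketch of how rows end up in this queue (the cluster name, table names, and column are assumptions; by default, `INSERT` into a Distributed table is asynchronous):

``` sql
-- Assumed setup: a cluster 'test_cluster' and a local table default.hits_local (id UInt64).
CREATE TABLE dist (id UInt64)
ENGINE = Distributed(test_cluster, default, hits_local);

-- The inserted block is first written to a local folder under the Distributed
-- table's data path; until it is sent to the shard, it shows up here:
INSERT INTO dist VALUES (1);
SELECT * FROM system.distribution_queue FORMAT Vertical;
```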

@@ -9,6 +9,12 @@ Connects to a ClickHouse server and repeatedly sends specified queries.

Syntax:

+``` bash
+$ clickhouse-benchmark --query ["single query"] [keys]
+```
+
+or
+
``` bash
$ echo "single query" | clickhouse-benchmark [keys]
```
@@ -34,6 +40,7 @@ clickhouse-benchmark [keys] < queries_file

## Keys {#clickhouse-benchmark-keys}

+- `--query=WORD` — Query to execute. If this parameter is not passed, `clickhouse-benchmark` will read queries from standard input.
- `-c N`, `--concurrency=N` — Number of queries that `clickhouse-benchmark` sends simultaneously. Default value: 1.
- `-d N`, `--delay=N` — Interval in seconds between intermediate reports (set 0 to disable reports). Default value: 1.
- `-h WORD`, `--host=WORD` — Server host. Default value: `localhost`. For the [comparison mode](#clickhouse-benchmark-comparison-mode) you can use multiple `-h` keys.
@@ -32,7 +32,8 @@ Arguments:

- `-S`, `--structure` — table structure for input data.
- `-if`, `--input-format` — input format, `TSV` by default.
- `-f`, `--file` — path to data, `stdin` by default.
-- `-q`, `--query` — queries to execute with `;` as delimiter.
+- `-q`, `--query` — queries to execute with `;` as delimiter. You must specify either the `query` or the `queries-file` option.
+- `-qf`, `--queries-file` — file path with queries to execute. You must specify either the `query` or the `queries-file` option.
- `-N`, `--table` — table name where to put output data, `table` by default.
- `-of`, `--format`, `--output-format` — output format, `TSV` by default.
- `--stacktrace` — whether to dump debug output in case of exception.

@@ -93,6 +93,8 @@ Setting fields:

- `path` – The absolute path to the file.
- `format` – The file format. All the formats described in “[Formats](../../../interfaces/formats.md#formats)” are supported.

+When a dictionary with the FILE source is created via a DDL command (`CREATE DICTIONARY ...`), the dictionary source file has to be located in the `user_files` directory to prevent DB users from accessing arbitrary files on the ClickHouse node.
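
For instance, a hedged sketch of such a DDL-created dictionary (the dictionary name, schema, layout, and lifetime below are assumptions; only the `user_files` path requirement comes from the text above):

``` sql
CREATE DICTIONARY countries
(
    id UInt64,
    name String
)
PRIMARY KEY id
SOURCE(FILE(path '/var/lib/clickhouse/user_files/countries.tsv' format 'TabSeparated'))
LAYOUT(FLAT())
LIFETIME(MIN 300 MAX 360);
```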

## Executable File {#dicts-external_dicts_dict_sources-executable}

Working with executable files depends on [how the dictionary is stored in memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request to the executable file’s STDIN. Otherwise, ClickHouse starts the executable file and treats its output as dictionary data.
@@ -108,17 +110,13 @@ Example of settings:
</source>
```

-or
-
-``` sql
-SOURCE(EXECUTABLE(command 'cat /opt/dictionaries/os.tsv' format 'TabSeparated'))
-```
-
Setting fields:

- `command` – The absolute path to the executable file, or the file name (if the program directory is written to `PATH`).
- `format` – The file format. All the formats described in “[Formats](../../../interfaces/formats.md#formats)” are supported.

+That dictionary source can be configured only via the XML configuration. Creating dictionaries with an executable source via DDL is disabled; otherwise, the DB user would be able to execute an arbitrary binary on the ClickHouse node.
+
## Http(s) {#dicts-external_dicts_dict_sources-http}

Working with an HTTP(s) server depends on [how the dictionary is stored in memory](../../../sql-reference/dictionaries/external-dictionaries/external-dicts-dict-layout.md). If the dictionary is stored using `cache` and `complex_key_cache`, ClickHouse requests the necessary keys by sending a request via the `POST` method.
@@ -169,6 +167,8 @@ Setting fields:

- `name` – Identifier used for the header sent with the request.
- `value` – Value set for a specific identifier.

+When creating a dictionary using the DDL command (`CREATE DICTIONARY ...`), remote hosts for HTTP dictionaries are checked against the `remote_url_allow_hosts` section of the config to prevent database users from accessing an arbitrary HTTP server.
+
## ODBC {#dicts-external_dicts_dict_sources-odbc}

You can use this method to connect any database that has an ODBC driver.

@@ -366,7 +366,7 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
└────────────┴───────────┴───────────┴───────────┘
```

-## date_trunc {#date_trunc}
+## date\_trunc {#date_trunc}

Truncates date and time data to the specified part of date.

@@ -435,7 +435,7 @@ Result:

- [toStartOfInterval](#tostartofintervaltime-or-data-interval-x-unit-time-zone)

-# now {#now}
+## now {#now}

Returns the current date and time.

@@ -662,7 +662,7 @@ Result:

[Original article](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) <!--hide-->

-## FROM_UNIXTIME
+## FROM\_UNIXTIME {#fromunixtime}

When there is only a single argument of integer type, it acts in the same way as `toDateTime` and returns the [DateTime](../../sql-reference/data-types/datetime.md) type.
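
For example (a small sketch; the exact result depends on the server timezone, so the value shown is an assumption):

``` sql
SELECT FROM_UNIXTIME(423543535);
-- e.g. 1983-06-04 10:58:55, interpreted in the server's timezone
```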

@@ -692,3 +692,147 @@ SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime
│ 2009-02-11 14:42:23 │
└─────────────────────┘
```

+## toModifiedJulianDay {#tomodifiedjulianday}
+
+Converts a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD` to a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number in Int32. This function supports dates from `0000-01-01` to `9999-12-31`. It raises an exception if the argument cannot be parsed as a date, or if the date is invalid.
+
+**Syntax**
+
+``` sql
+toModifiedJulianDay(date)
+```
+
+**Parameters**
+
+- `date` — Date in text form. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
+
+**Returned value**
+
+- Modified Julian Day number.
+
+Type: [Int32](../../sql-reference/data-types/int-uint.md).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT toModifiedJulianDay('2020-01-01');
+```
+
+Result:
+
+``` text
+┌─toModifiedJulianDay('2020-01-01')─┐
+│                             58849 │
+└───────────────────────────────────┘
+```
+
+## toModifiedJulianDayOrNull {#tomodifiedjuliandayornull}
+
+Similar to [toModifiedJulianDay()](#tomodifiedjulianday), but instead of raising exceptions it returns `NULL`.
+
+**Syntax**
+
+``` sql
+toModifiedJulianDayOrNull(date)
+```
+
+**Parameters**
+
+- `date` — Date in text form. [String](../../sql-reference/data-types/string.md) or [FixedString](../../sql-reference/data-types/fixedstring.md).
+
+**Returned value**
+
+- Modified Julian Day number.
+
+Type: [Nullable(Int32)](../../sql-reference/data-types/int-uint.md).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT toModifiedJulianDayOrNull('2020-01-01');
+```
+
+Result:
+
+``` text
+┌─toModifiedJulianDayOrNull('2020-01-01')─┐
+│                                   58849 │
+└─────────────────────────────────────────┘
+```
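
To see the `OrNull` fallback itself (a sketch; the malformed date below is an arbitrary assumption):

``` sql
SELECT toModifiedJulianDayOrNull('2020-13-32');
-- Returns NULL instead of raising an exception, since the date is invalid.
```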

+## fromModifiedJulianDay {#frommodifiedjulianday}
+
+Converts a [Modified Julian Day](https://en.wikipedia.org/wiki/Julian_day#Variants) number to a [Proleptic Gregorian calendar](https://en.wikipedia.org/wiki/Proleptic_Gregorian_calendar) date in text form `YYYY-MM-DD`. This function supports day numbers from `-678941` to `2973119` (which represent 0000-01-01 and 9999-12-31 respectively). It raises an exception if the day number is outside of the supported range.
+
+**Syntax**
+
+``` sql
+fromModifiedJulianDay(day)
+```
+
+**Parameters**
+
+- `day` — Modified Julian Day number. [Any integral type](../../sql-reference/data-types/int-uint.md).
+
+**Returned value**
+
+- Date in text form.
+
+Type: [String](../../sql-reference/data-types/string.md)
+
+**Example**
+
+Query:
+
+``` sql
+SELECT fromModifiedJulianDay(58849);
+```
+
+Result:
+
+``` text
+┌─fromModifiedJulianDay(58849)─┐
+│ 2020-01-01                   │
+└──────────────────────────────┘
+```
+
+## fromModifiedJulianDayOrNull {#frommodifiedjuliandayornull}
+
+Similar to [fromModifiedJulianDay()](#frommodifiedjulianday), but instead of raising exceptions it returns `NULL`.
+
+**Syntax**
+
+``` sql
+fromModifiedJulianDayOrNull(day)
+```
+
+**Parameters**
+
+- `day` — Modified Julian Day number. [Any integral type](../../sql-reference/data-types/int-uint.md).
+
+**Returned value**
+
+- Date in text form.
+
+Type: [Nullable(String)](../../sql-reference/data-types/string.md)
+
+**Example**
+
+Query:
+
+``` sql
+SELECT fromModifiedJulianDayOrNull(58849);
+```
+
+Result:
+
+``` text
+┌─fromModifiedJulianDayOrNull(58849)─┐
+│ 2020-01-01                         │
+└────────────────────────────────────┘
+```
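
Similarly, a day number outside the documented range yields `NULL` rather than an exception (the value below is an arbitrary out-of-range assumption):

``` sql
SELECT fromModifiedJulianDayOrNull(3000000);
-- NULL: 3000000 is above the supported maximum of 2973119.
```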
@@ -111,4 +111,306 @@ Accepts a numeric argument and returns a UInt64 number close to 2 to the power o

Accepts a numeric argument and returns a UInt64 number close to 10 to the power of x.

+## cosh(x) {#coshx}
+
+[Hyperbolic cosine](https://in.mathworks.com/help/matlab/ref/cosh.html).
+
+**Syntax**
+
+``` sql
+cosh(x)
+```
+
+**Parameters**
+
+- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Returned value**
+
+- Values from the interval: `1 <= cosh(x) < +∞`.
+
+Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT cosh(0);
+```
+
+Result:
+
+``` text
+┌─cosh(0)──┐
+│        1 │
+└──────────┘
+```
+
+## acosh(x) {#acoshx}
+
+[Inverse hyperbolic cosine](https://www.mathworks.com/help/matlab/ref/acosh.html).
+
+**Syntax**
+
+``` sql
+acosh(x)
+```
+
+**Parameters**
+
+- `x` — Hyperbolic cosine of the angle. Values from the interval: `1 <= x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Returned value**
+
+- The angle, in radians. Values from the interval: `0 <= acosh(x) < +∞`.
+
+Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT acosh(1);
+```
+
+Result:
+
+``` text
+┌─acosh(1)─┐
+│        0 │
+└──────────┘
+```
+
+**See Also**
+
+- [cosh(x)](../../sql-reference/functions/math-functions.md#coshx)
+
+## sinh(x) {#sinhx}
+
+[Hyperbolic sine](https://www.mathworks.com/help/matlab/ref/sinh.html).
+
+**Syntax**
+
+``` sql
+sinh(x)
+```
+
+**Parameters**
+
+- `x` — The angle, in radians. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Returned value**
+
+- Values from the interval: `-∞ < sinh(x) < +∞`.
+
+Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT sinh(0);
+```
+
+Result:
+
+``` text
+┌─sinh(0)──┐
+│        0 │
+└──────────┘
+```
+
+## asinh(x) {#asinhx}
+
+[Inverse hyperbolic sine](https://www.mathworks.com/help/matlab/ref/asinh.html).
+
+**Syntax**
+
+``` sql
+asinh(x)
+```
+
+**Parameters**
+
+- `x` — Hyperbolic sine of the angle. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Returned value**
+
+- The angle, in radians. Values from the interval: `-∞ < asinh(x) < +∞`.
+
+Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT asinh(0);
+```
+
+Result:
+
+``` text
+┌─asinh(0)─┐
+│        0 │
+└──────────┘
+```
+
+**See Also**
+
+- [sinh(x)](../../sql-reference/functions/math-functions.md#sinhx)
+
+## atanh(x) {#atanhx}
+
+[Inverse hyperbolic tangent](https://www.mathworks.com/help/matlab/ref/atanh.html).
+
+**Syntax**
+
+``` sql
+atanh(x)
+```
+
+**Parameters**
+
+- `x` — Hyperbolic tangent of the angle. Values from the interval: `–1 < x < 1`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Returned value**
+
+- The angle, in radians. Values from the interval: `-∞ < atanh(x) < +∞`.
+
+Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT atanh(0);
+```
+
+Result:
+
+``` text
+┌─atanh(0)─┐
+│        0 │
+└──────────┘
+```
+
+## atan2(y, x) {#atan2yx}
+
+The [function](https://en.wikipedia.org/wiki/Atan2) calculates the angle in the Euclidean plane, given in radians, between the positive x axis and the ray to the point `(x, y) ≠ (0, 0)`.
+
+**Syntax**
+
+``` sql
+atan2(y, x)
+```
+
+**Parameters**
+
+- `y` — y-coordinate of the point through which the ray passes. [Float64](../../sql-reference/data-types/float.md#float32-float64).
+- `x` — x-coordinate of the point through which the ray passes. [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Returned value**
+
+- The angle `θ` such that `−π < θ ≤ π`, in radians.
+
+Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT atan2(1, 1);
+```
+
+Result:
+
+``` text
+┌────────atan2(1, 1)─┐
+│ 0.7853981633974483 │
+└────────────────────┘
+```
+
+## hypot(x, y) {#hypotxy}
+
+Calculates the length of the hypotenuse of a right-angle triangle. The [function](https://en.wikipedia.org/wiki/Hypot) avoids problems that occur when squaring very large or very small numbers.
+
+**Syntax**
+
+``` sql
+hypot(x, y)
+```
+
+**Parameters**
+
+- `x` — The first cathetus of a right-angle triangle. [Float64](../../sql-reference/data-types/float.md#float32-float64).
+- `y` — The second cathetus of a right-angle triangle. [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Returned value**
+
+- The length of the hypotenuse of a right-angle triangle.
+
+Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT hypot(1, 1);
+```
+
+Result:
+
+``` text
+┌────────hypot(1, 1)─┐
+│ 1.4142135623730951 │
+└────────────────────┘
+```
+
+## log1p(x) {#log1px}
+
+Calculates `log(1+x)`. The [function](https://en.wikipedia.org/wiki/Natural_logarithm#lnp1) `log1p(x)` is more accurate than `log(1+x)` for small values of x.
+
+**Syntax**
+
+``` sql
+log1p(x)
+```
+
+**Parameters**
+
+- `x` — Values from the interval: `-1 < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Returned value**
+
+- Values from the interval: `-∞ < log1p(x) < +∞`.
+
+Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT log1p(0);
+```
+
+Result:
+
+``` text
+┌─log1p(0)─┐
+│        0 │
+└──────────┘
+```
+
+**See Also**
+
+- [log(x)](../../sql-reference/functions/math-functions.md#logx-lnx)
+
[Original article](https://clickhouse.tech/docs/en/query_language/functions/math_functions/) <!--hide-->

@@ -11,7 +11,7 @@ Syntax:

``` sql
CREATE QUOTA [IF NOT EXISTS | OR REPLACE] name [ON CLUSTER cluster_name]
-    [KEYED BY {'none' | 'user name' | 'ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
+    [KEYED BY {'none' | 'user name' | 'ip address' | 'forwarded ip address' | 'client key' | 'client key or user name' | 'client key or ip address'}]
    [FOR [RANDOMIZED] INTERVAL number {SECOND | MINUTE | HOUR | DAY | WEEK | MONTH | QUARTER | YEAR}
        {MAX { {QUERIES | ERRORS | RESULT ROWS | RESULT BYTES | READ ROWS | READ BYTES | EXECUTION TIME} = number } [,...] |
        NO LIMITS | TRACKING ONLY} [,...]]

@@ -53,7 +53,7 @@ KILL MUTATION [ON CLUSTER cluster]

Tries to cancel and remove [mutations](../../sql-reference/statements/alter/index.md#alter-mutations) that are currently executing. Mutations to cancel are selected from the [`system.mutations`](../../operations/system-tables/mutations.md#system_tables-mutations) table using the filter specified by the `WHERE` clause of the `KILL` query.

-A test query (`TEST`) only checks the user’s rights and displays a list of queries to stop.
+A test query (`TEST`) only checks the user’s rights and displays a list of mutations to stop.

Examples:

@@ -9,6 +9,7 @@ ClickHouse can accept (`INSERT`) and return (`SELECT`) data in various formats.

The supported formats, and whether they can be used in `INSERT` and `SELECT` queries, are listed in the table below.

| Format | INSERT | SELECT |
|-----------------------------------------------------------------------------------------|--------|--------|
| [TabSeparated](#tabseparated) | ✔ | ✔ |
@@ -56,6 +57,7 @@ ClickHouse can accept (`INSERT`) and return (`SELECT`) data in various formats
| [XML](#xml) | ✗ | ✔ |
| [CapnProto](#capnproto) | ✔ | ✗ |
| [LineAsString](#lineasstring) | ✔ | ✗ |
+| [RawBLOB](#rawblob) | ✔ | ✔ |

You can control some parameters of format processing with the ClickHouse settings. For more information, see the [Settings](../operations/settings/settings.md) section.

@@ -434,7 +436,10 @@ JSON is compatible with JavaScript. To ensure this, some additional

ClickHouse supports [NULL](../sql-reference/syntax.md), which is displayed as `null` in the JSON output. To enable `+nan`, `-nan`, `+inf`, `-inf` values in the output, set the [output_format_json_quote_denormals](../operations/settings/settings.md#settings-output_format_json_quote_denormals) setting to 1.

-See also the [JSONEachRow](#jsoneachrow) format.
+**See Also**
+
+- The [JSONEachRow](#jsoneachrow) format
+- The [output_format_json_array_of_rows](../operations/settings/settings.md#output-format-json-array-of-rows) setting

## JSONString {#jsonstring}

@@ -1245,4 +1250,45 @@ SELECT * FROM line_as_string;
└───────────────────────────────────────────────────┘
```

+## RawBLOB {#rawblob}
+
+In this format, all input data is read into a single value. It is possible to parse only a table with a single field of type [String](../sql-reference/data-types/string.md) or similar.
+The result is output in binary form without delimiters or escaping. If more than one value is output, the format is ambiguous and it will be impossible to read the data back.
+
+Below is a comparison of the `RawBLOB` and [TabSeparatedRaw](#tabseparatedraw) formats.
+`RawBLOB`:
+- data is output in binary form, without escaping;
+- there are no delimiters between values;
+- there is no newline at the end of each value.
+[TabSeparatedRaw](#tabseparatedraw):
+- data is output without escaping;
+- a row contains values separated by tabs;
+- there is a line feed after the last value in every row.
+
+Next is a comparison of the `RawBLOB` and [RowBinary](#rowbinary) formats.
+`RawBLOB`:
+- String values are output without a length prefix.
+`RowBinary`:
+- String values are represented as a length in varint format (unsigned [LEB128](https://en.wikipedia.org/wiki/LEB128)), followed by the bytes of the string.
+
+When empty data is passed to the `RawBLOB` input, ClickHouse throws an exception:
+
+``` text
+Code: 108. DB::Exception: No data to insert
+```
+
+**Example**
+
+``` bash
+$ clickhouse-client --query "CREATE TABLE {some_table} (a String) ENGINE = Memory;"
+$ cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT RawBLOB"
+$ clickhouse-client --query "SELECT * FROM {some_table} FORMAT RawBLOB" | md5sum
+```
+
+Result:
+
+``` text
+f9725a22f9191e064120d718e26862a9 -
+```
+
[Original article](https://clickhouse.tech/docs/ru/interfaces/formats/) <!--hide-->

@@ -2231,10 +2231,88 @@ SELECT CAST(toNullable(toInt32(0)) AS Int32) as x, toTypeName(x);

## output_format_tsv_null_representation {#output_format_tsv_null_representation}

-Allows configuring the `NULL` representation for the [TSV](../../interfaces/formats.md#tabseparated) output format. The setting only controls the output format; `\N` is the only supported `NULL` representation for the TSV input format.
+Defines the representation of `NULL` for the [TSV](../../interfaces/formats.md#tabseparated) output format. A user can set any string as a value.

Default value: `\N`.
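
As on the English page, the examples below assume a small table such as the following (an assumption; it is not defined on the page):

```sql
CREATE TABLE tsv_custom_null (a Nullable(UInt32)) ENGINE = TinyLog;
INSERT INTO tsv_custom_null VALUES (788), (NULL), (NULL);
```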

+**Examples**
+
+Query
+
+```sql
+SELECT * FROM tsv_custom_null FORMAT TSV;
+```
+
+Result
+
+```text
+788
+\N
+\N
+```
+
+Query
+
+```sql
+SET output_format_tsv_null_representation = 'My NULL';
+SELECT * FROM tsv_custom_null FORMAT TSV;
+```
+
+Result
+
+```text
+788
+My NULL
+My NULL
+```
+
+## output_format_json_array_of_rows {#output-format-json-array-of-rows}
+
+Enables outputting all rows as a JSON array in the [JSONEachRow](../../interfaces/formats.md#jsoneachrow) format.
+
+Possible values:
+
+- 1 — ClickHouse outputs all rows as an array, each row in the `JSONEachRow` format.
+- 0 — ClickHouse outputs each row separately in the `JSONEachRow` format.
+
+Default value: `0`.
+
+**Example of a query with the setting enabled**
+
+Query:
+
+```sql
+SET output_format_json_array_of_rows = 1;
+SELECT number FROM numbers(3) FORMAT JSONEachRow;
+```
+
+Result:
+
+```text
+[
+{"number":"0"},
+{"number":"1"},
+{"number":"2"}
+]
+```
+
+**Example of a query with the setting disabled**
+
+Query:
+
+```sql
+SET output_format_json_array_of_rows = 0;
+SELECT number FROM numbers(3) FORMAT JSONEachRow;
+```
+
+Result:
+
+```text
+{"number":"0"}
+{"number":"1"}
+{"number":"2"}
+```
+
## allow_nullable_key {#allow-nullable-key}

Enables or disables support for [Nullable](../../sql-reference/data-types/nullable.md#data_type-nullable)-typed keys in [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md#table_engines-mergetree) tables.
|
@ -63,10 +63,18 @@ int32samoa: 1546300800
|
||||
|
||||
Переводит дату или дату-с-временем в число типа UInt16, содержащее номер года (AD).
|
||||
|
||||
## toQuarter {#toquarter}
|
||||
|
||||
Переводит дату или дату-с-временем в число типа UInt8, содержащее номер квартала.
|
||||
|
||||
## toMonth {#tomonth}
|
||||
|
||||
Переводит дату или дату-с-временем в число типа UInt8, содержащее номер месяца (1-12).
|
||||
|
||||
## toDayOfYear {#todayofyear}
|
||||
|
||||
Переводит дату или дату-с-временем в число типа UInt16, содержащее номер дня года (1-366).
|
||||
|
||||
## toDayOfMonth {#todayofmonth}
|
||||
|
||||
Переводит дату или дату-с-временем в число типа UInt8, содержащее номер дня в месяце (1-31).
|
||||
@ -128,6 +136,22 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp
|
||||
Округляет дату или дату-с-временем вниз до первого дня года.
|
||||
Возвращается дата.
|
||||
|
||||
## toStartOfISOYear {#tostartofisoyear}
|
||||
|
||||
Округляет дату или дату-с-временем вниз до первого дня ISO года. Возвращается дата.
|
||||
Начало ISO года отличается от начала обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) первая неделя года - это неделя с четырьмя или более днями в этом году.
|
||||
|
||||
1 Января 2017 г. - воскресение, т.е. первая ISO неделя 2017 года началась в понедельник 2 января, поэтому 1 января 2017 это 2016 ISO-год, который начался 2016-01-04.
|
||||
|
||||
```sql
|
||||
SELECT toStartOfISOYear(toDate('2017-01-01')) AS ISOYear20170101;
|
||||
```
|
||||
```text
|
||||
┌─ISOYear20170101─┐
|
||||
│ 2016-01-04 │
|
||||
└─────────────────┘
|
||||
```
|
||||
|
||||
## toStartOfQuarter {#tostartofquarter}
|
||||
|
||||
Округляет дату или дату-с-временем вниз до первого дня квартала.
|
||||
@ -147,6 +171,12 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp
|
||||
Округляет дату или дату-с-временем вниз до ближайшего понедельника.
|
||||
Возвращается дата.
|
||||
|
||||
## toStartOfWeek(t[,mode]) {#tostartofweek}
|
||||
|
||||
Округляет дату или дату со временем до ближайшего воскресенья или понедельника в соответствии с mode.
|
||||
Возвращается дата.
|
||||
Аргумент mode работает точно так же, как аргумент mode [toWeek()](#toweek). Если аргумент mode опущен, то используется режим 0.
|
||||
|
||||
## toStartOfDay {#tostartofday}
|
||||
|
||||
Округляет дату-с-временем вниз до начала дня. Возвращается дата-с-временем.
|
||||
@ -243,6 +273,10 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
|
||||
|
||||
Переводит дату-с-временем или дату в номер года, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeQuarterNum {#torelativequarternum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер квартала, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toRelativeMonthNum {#torelativemonthnum}
|
||||
|
||||
Переводит дату-с-временем или дату в номер месяца, начиная с некоторого фиксированного момента в прошлом.
|
||||
@ -267,6 +301,102 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d
|
||||
|
||||
Переводит дату-с-временем в номер секунды, начиная с некоторого фиксированного момента в прошлом.
|
||||
|
||||
## toISOYear {#toisoyear}
|
||||
|
||||
Переводит дату-с-временем или дату в число типа UInt16, содержащее номер ISO года. ISO год отличается от обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) ISO год начинается необязательно первого января.
|
||||
|
||||
Пример:
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
toDate('2017-01-01') AS date,
|
||||
toYear(date),
|
||||
toISOYear(date)
|
||||
```
|
||||
```text
|
||||
┌───────date─┬─toYear(toDate('2017-01-01'))─┬─toISOYear(toDate('2017-01-01'))─┐
|
||||
│ 2017-01-01 │ 2017 │ 2016 │
|
||||
└────────────┴──────────────────────────────┴─────────────────────────────────┘
|
||||
```
|
||||
|
||||
## toISOWeek {#toisoweek}
|
||||
|
||||
Переводит дату-с-временем или дату в число типа UInt8, содержащее номер ISO недели.
|
||||
Начало ISO года отличается от начала обычного года, потому что в соответствии с [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601) первая неделя года - это неделя с четырьмя или более днями в этом году.
|
||||
|
||||
1 Января 2017 г. - воскресение, т.е. первая ISO неделя 2017 года началась в понедельник 2 января, поэтому 1 января 2017 это последняя неделя 2016 года.
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
toISOWeek(toDate('2017-01-01')) AS ISOWeek20170101,
|
||||
toISOWeek(toDate('2017-01-02')) AS ISOWeek20170102
|
||||
```
|
||||
|
||||
```text
|
||||
┌─ISOWeek20170101─┬─ISOWeek20170102─┐
|
||||
│ 52 │ 1 │
|
||||
└─────────────────┴─────────────────┘
|
||||
```
|
||||
|
||||
## toWeek(date\[, mode\]\[, timezone\]) {#toweek}

Converts a date-with-time or date to a UInt8 number containing the week number. The second argument, mode, specifies whether the week starts on Sunday or Monday and whether the return value should be in the range 0 to 53 or 1 to 53. If the mode argument is omitted, mode 0 is used.

`toISOWeek()` is equivalent to `toWeek(date,3)`.

Description of the modes:

| Mode | First day of week | Range | Week 1 is the first week …    |
|------|-------------------|-------|-------------------------------|
| 0    | Sunday            | 0-53  | with a Sunday in this year    |
| 1    | Monday            | 0-53  | with 4 or more days this year |
| 2    | Sunday            | 1-53  | with a Sunday in this year    |
| 3    | Monday            | 1-53  | with 4 or more days this year |
| 4    | Sunday            | 0-53  | with 4 or more days this year |
| 5    | Monday            | 0-53  | with a Monday in this year    |
| 6    | Sunday            | 1-53  | with 4 or more days this year |
| 7    | Monday            | 1-53  | with a Monday in this year    |
| 8    | Sunday            | 1-53  | containing January 1          |
| 9    | Monday            | 1-53  | containing January 1          |

For modes with the meaning "with 4 or more days this year", weeks are numbered according to ISO 8601:1988:

- If the week containing January 1 has 4 or more days in the new year, it is week 1.

- Otherwise, it is the last week of the previous year, and the next week is week 1.

For modes with the meaning "containing January 1", week 1 is the week that contains January 1. It does not matter how many days of the new year the week contains, even if it contains only one day.

**Example**

```sql
SELECT toDate('2016-12-27') AS date, toWeek(date) AS week0, toWeek(date,1) AS week1, toWeek(date,9) AS week9;
```

```text
┌───────date─┬─week0─┬─week1─┬─week9─┐
│ 2016-12-27 │    52 │    52 │     1 │
└────────────┴───────┴───────┴───────┘
```
## toYearWeek(date[,mode]) {#toyearweek}

Returns the year and week for a date. The year in the result may differ from the year in the date argument for the first and last weeks of the year.

The mode argument works exactly like the mode argument of [toWeek()](#toweek). If mode is not specified, mode 0 is used.

`toISOYear()` is equivalent to `intDiv(toYearWeek(date,3),100)`.

**Example**

```sql
SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9;
```

```text
┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┐
│ 2016-12-27 │    201652 │    201652 │    201701 │
└────────────┴───────────┴───────────┴───────────┘
```
## date_trunc {#date_trunc}

Truncates the parts of a date-and-time value that are smaller than the specified part.
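A minimal sketch, assuming the truncation unit is passed as the first argument:

```sql
SELECT date_trunc('hour', toDateTime('2020-01-01 10:20:30'))  -- 2020-01-01 10:00:00
```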
@ -103,4 +103,306 @@ SELECT erf(3 / sqrt(2))
Takes two numeric arguments x and y. Returns a Float64 number close to x raised to the power of y.
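A minimal illustration:

``` sql
SELECT pow(2, 3)  -- 8
```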
## cosh(x) {#coshx}

[Hyperbolic cosine](https://help.scilab.org/docs/5.4.0/ru_RU/cosh.html).

**Syntax**

``` sql
cosh(x)
```

**Parameters**

- `x` — the angle, in radians. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Returned value**

- Values from the interval: `1 <= cosh(x) < +∞`.

Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Example**

Query:

``` sql
SELECT cosh(0);
```

Result:

``` text
┌─cosh(0)──┐
│        1 │
└──────────┘
```
## acosh(x) {#acoshx}

[Inverse hyperbolic cosine](https://help.scilab.org/docs/5.4.0/ru_RU/acosh.html).

**Syntax**

``` sql
acosh(x)
```

**Parameters**

- `x` — the hyperbolic cosine of the angle. Values from the interval: `1 <= x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Returned value**

- The angle, in radians. Values from the interval: `0 <= acosh(x) < +∞`.

Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Example**

Query:

``` sql
SELECT acosh(1);
```

Result:

``` text
┌─acosh(1)─┐
│        0 │
└──────────┘
```

**See Also**

- [cosh(x)](../../sql-reference/functions/math-functions.md#coshx)
## sinh(x) {#sinhx}

[Hyperbolic sine](https://help.scilab.org/docs/5.4.0/ru_RU/sinh.html).

**Syntax**

``` sql
sinh(x)
```

**Parameters**

- `x` — the angle, in radians. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Returned value**

- Values from the interval: `-∞ < sinh(x) < +∞`.

Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Example**

Query:

``` sql
SELECT sinh(0);
```

Result:

``` text
┌─sinh(0)──┐
│        0 │
└──────────┘
```
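As a quick sanity check, the hyperbolic identity `cosh²x - sinh²x = 1` can be verified directly (floating-point rounding may leave a tiny residual):

``` sql
SELECT cosh(1) * cosh(1) - sinh(1) * sinh(1)  -- ≈ 1
```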
## asinh(x) {#asinhx}

[Inverse hyperbolic sine](https://help.scilab.org/docs/5.4.0/ru_RU/asinh.html).

**Syntax**

``` sql
asinh(x)
```

**Parameters**

- `x` — the hyperbolic sine of the angle. Values from the interval: `-∞ < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Returned value**

- The angle, in radians. Values from the interval: `-∞ < asinh(x) < +∞`.

Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Example**

Query:

``` sql
SELECT asinh(0);
```

Result:

``` text
┌─asinh(0)─┐
│        0 │
└──────────┘
```

**See Also**

- [sinh(x)](../../sql-reference/functions/math-functions.md#sinhx)
## atanh(x) {#atanhx}

[Inverse hyperbolic tangent](https://help.scilab.org/docs/5.4.0/ru_RU/atanh.html).

**Syntax**

``` sql
atanh(x)
```

**Parameters**

- `x` — the hyperbolic tangent of the angle. Values from the interval: `-1 < x < 1`. [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Returned value**

- The angle, in radians. Values from the interval: `-∞ < atanh(x) < +∞`.

Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Example**

Query:

``` sql
SELECT atanh(0);
```

Result:

``` text
┌─atanh(0)─┐
│        0 │
└──────────┘
```
## atan2(y, x) {#atan2yx}

The [function](https://msoffice-prowork.com/ref/excel/excelfunc/math/atan2/) calculates the angle, in radians, between the positive x axis and the ray drawn from the origin to the point `(x, y) ≠ (0, 0)`.

**Syntax**

``` sql
atan2(y, x)
```

**Parameters**

- `y` — the y coordinate of the point the ray passes through. [Float64](../../sql-reference/data-types/float.md#float32-float64).
- `x` — the x coordinate of the point the ray passes through. [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Returned value**

- The angle `θ`, in radians, from the interval: `−π < θ ≤ π`.

Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Example**

Query:

``` sql
SELECT atan2(1, 1);
```

Result:

``` text
┌────────atan2(1, 1)─┐
│ 0.7853981633974483 │
└────────────────────┘
```
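Unlike `atan(y/x)`, `atan2` distinguishes quadrants by the signs of both arguments; a sketch:

``` sql
SELECT atan2(1, -1)  -- ≈ 2.356 (3π/4), while atan(1 / -1) would give -π/4
```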
## hypot(x, y) {#hypotxy}

Calculates the length of the hypotenuse of a right triangle. When using this [function](https://php.ru/manual/function.hypot.html), no problems arise from squaring very large or very small numbers.

**Syntax**

``` sql
hypot(x, y)
```

**Parameters**

- `x` — the first cathetus of the right triangle. [Float64](../../sql-reference/data-types/float.md#float32-float64).
- `y` — the second cathetus of the right triangle. [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Returned value**

- The length of the hypotenuse of the right triangle.

Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Example**

Query:

``` sql
SELECT hypot(1, 1);
```

Result:

``` text
┌────────hypot(1, 1)─┐
│ 1.4142135623730951 │
└────────────────────┘
```
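The claim about very large numbers can be checked by comparing `hypot` with the naive formula, which overflows on squaring (a sketch):

``` sql
SELECT
    hypot(1e200, 1e200) AS safe,                   -- ≈ 1.414e200
    sqrt(pow(1e200, 2) + pow(1e200, 2)) AS naive   -- inf: the squares overflow Float64
```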
## log1p(x) {#log1px}

Calculates `log(1+x)`. The [function](https://help.scilab.org/docs/6.0.1/ru_RU/log1p.html) `log1p(x)` is more accurate than `log(1+x)` for small values of x.

**Syntax**

``` sql
log1p(x)
```

**Parameters**

- `x` — values from the interval: `-1 < x < +∞`. [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Returned value**

- Values from the interval: `-∞ < log1p(x) < +∞`.

Type: [Float64](../../sql-reference/data-types/float.md#float32-float64).

**Example**

Query:

``` sql
SELECT log1p(0);
```

Result:

``` text
┌─log1p(0)─┐
│        0 │
└──────────┘
```
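As a quick check of the precision claim, compare the two forms for a small argument (exact digits depend on floating-point rounding):

``` sql
SELECT
    log1p(1e-15)   AS precise,  -- very close to 1e-15
    log(1 + 1e-15) AS naive     -- less accurate: 1 + 1e-15 is rounded in Float64 first
```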
**See Also**

- [log(x)](../../sql-reference/functions/math-functions.md#logx)

[Original article](https://clickhouse.tech/docs/ru/query_language/functions/math_functions/) <!--hide-->
@ -12,6 +12,7 @@ toc_title: SYSTEM
- [DROP MARK CACHE](#query_language-system-drop-mark-cache)
- [DROP UNCOMPRESSED CACHE](#query_language-system-drop-uncompressed-cache)
- [DROP COMPILED EXPRESSION CACHE](#query_language-system-drop-compiled-expression-cache)
- [DROP REPLICA](#query_language-system-drop-replica)
- [FLUSH LOGS](#query_language-system-flush_logs)
- [RELOAD CONFIG](#query_language-system-reload-config)
- [SHUTDOWN](#query_language-system-shutdown)
@ -66,6 +67,24 @@ SELECT name, status FROM system.dictionaries;
Resets the mark cache. Used during ClickHouse development and in performance tests.
## DROP REPLICA {#query_language-system-drop-replica}

Dead replicas can be dropped using the following syntax:

``` sql
SYSTEM DROP REPLICA 'replica_name' FROM TABLE database.table;
SYSTEM DROP REPLICA 'replica_name' FROM DATABASE database;
SYSTEM DROP REPLICA 'replica_name';
SYSTEM DROP REPLICA 'replica_name' FROM ZKPATH '/path/to/table/in/zk';
```

Removes the replica's path from ZooKeeper. This is useful when the replica is dead and its metadata cannot be removed from ZooKeeper with `DROP TABLE`, because the table no longer exists. `DROP REPLICA` can only drop an inactive/stale replica; it cannot drop the local replica, use `DROP TABLE` for that. `DROP REPLICA` does not drop tables and does not remove any data or metadata from disk.

The first command removes the metadata of the `'replica_name'` replica of the `database.table` table.
The second command removes the metadata of the `'replica_name'` replica of all tables of the `database` database.
The third command removes the metadata of the `'replica_name'` replica of all tables that exist on the local server (the list of tables is generated from the local replica).
The fourth command is useful for removing the metadata of a dead replica when all other replicas of the table have already been dropped, so the table's ZooKeeper path must be specified explicitly. The ZooKeeper path is the first argument of the `ReplicatedMergeTree` engine when creating the table.
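For instance, to drop the metadata of a hypothetical dead replica named `replica_2` of the table `db.visits` (the names here are illustrative only):

``` sql
SYSTEM DROP REPLICA 'replica_2' FROM TABLE db.visits;
```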
## DROP UNCOMPRESSED CACHE {#query_language-system-drop-uncompressed-cache}

Resets the cache of uncompressed data. Used during ClickHouse development and in performance tests.
@ -1,12 +1,36 @@
---
toc_priority: 14
toc_title: Playground
---

# ClickHouse Playground {#clickhouse-playground}

[ClickHouse Playground](https://play.clickhouse.tech?file=welcome) allows people to experiment with ClickHouse by running queries instantly, without setting up their own server or cluster.
Several example datasets are available in the Playground, as well as sample queries that showcase ClickHouse features. There are also several ClickHouse LTS releases to try.

ClickHouse Playground provides a small cluster of [Managed Service for ClickHouse](https://cloud.yandex.com/services/managed-clickhouse) instances (4 vCPU, 32 GB RAM) hosted in [Yandex.Cloud](https://cloud.yandex.com/). More information about [cloud providers](../commercial/cloud.md).

You can make queries to the Playground using any HTTP client, for example [curl](https://curl.haxx.se) or [wget](https://www.gnu.org/software/wget/), or set up a connection using a [JDBC](../interfaces/jdbc.md) or [ODBC](../interfaces/odbc.md) driver. More information about software products that support ClickHouse is available [here](../interfaces/index.md).

## Credentials {#credentials}

| Parameter      | Value                                    |
|:---------------|:-----------------------------------------|
| HTTPS endpoint | `https://play-api.clickhouse.tech:8443`  |
| TCP endpoint   | `play-api.clickhouse.tech:9440`          |
| User           | `playground`                             |
| Password       | `clickhouse`                             |

There are additional endpoints with specific ClickHouse releases to experiment with the differences between them (ports and user/password are the same as above):

- 20.3 LTS: `play-api-v20-3.clickhouse.tech`
- 19.14 LTS: `play-api-v19-14.clickhouse.tech`

!!! note "Note"
    All these endpoints require a secure TLS connection.

## Limitations {#limitations}

The queries are executed as a read-only user. This implies some limitations:

@ -14,33 +38,34 @@ toc_title: "\u266A\u64CD\u573A\u266A"

- INSERT queries are not allowed

The following settings are also enforced:

- [max_result_bytes=10485760](../operations/settings/query-complexity/#max-result-bytes)
- [max_result_rows=2000](../operations/settings/query-complexity/#setting-max_result_rows)
- [result_overflow_mode=break](../operations/settings/query-complexity/#result-overflow-mode)
- [max_execution_time=60000](../operations/settings/query-complexity/#max-execution-time)

## Examples {#examples}

Example of using `curl` to connect to the HTTPS endpoint:

``` bash
curl "https://play-api.clickhouse.tech:8443/?query=SELECT+'Play+ClickHouse\!';&user=playground&password=clickhouse&database=datasets"
```

Example of a TCP connection using the [CLI](../interfaces/cli.md):

``` bash
clickhouse client --secure -h play-api.clickhouse.tech --port 9440 -u playground --password clickhouse -q "SELECT 'Play ClickHouse\!'"
```

## Implementation Details {#implementation-details}

The ClickHouse Playground web interface makes requests via the ClickHouse [HTTP API](../interfaces/http.md).
The Playground backend is just a ClickHouse cluster without any additional server-side application. As mentioned above, the ClickHouse HTTPS and TCP/TLS endpoints are also publicly available as part of the Playground; both are proxied through [Cloudflare Spectrum](https://www.cloudflare.com/products/cloudflare-spectrum/) to add an extra layer of protection and improved connectivity.

!!! warning "Warning"
    Exposing a ClickHouse server to the public internet in any other situation is **strongly discouraged**. Make sure it listens only on a private network and is protected by a properly configured firewall.
@ -46,7 +46,7 @@ ClickHouse是一个用于联机分析(OLAP)的列式数据库管理系统(DBMS)
- High throughput when processing a single query is required (up to billions of rows per second per server)
- Transactions are not necessary
- Low requirements for data consistency
- There is one large table per query; all tables except this one are small
- Query results are significantly smaller than the source data; in other words, the data is filtered or aggregated, so the result fits in the RAM of a single server

It is easy to see that the OLAP scenario is very different from other common business scenarios (such as OLTP or key-value), so trying to use OLTP or key-value databases to process analytical queries efficiently is not a perfect solution. For example, using an OLAP database to handle analytical requests usually outperforms using MongoDB or Redis for them.
@ -1,71 +1,105 @@
---
toc_priority: 17
toc_title: Command-Line Client
---

# Command-Line Client {#command-line-client}

ClickHouse provides the native command-line client `clickhouse-client`. For more information on the command-line options the client supports, see [Configuring](#interfaces_cli_configuration).

`clickhouse-client` is installed by default after [installation](../getting-started/index.md) (it ships in the `clickhouse-client` package).

``` bash
$ clickhouse-client
ClickHouse client version 19.17.1.1579 (official build).
Connecting to localhost:9000 as user default.
Connected to ClickHouse server version 19.17.1 revision 54428.

:)
```

Different client and server versions are compatible with each other, but some features may not be available in older clients. We recommend using a client of the same version as the server application. When you try to use an older client, the server shows the message:

    ClickHouse client version is older than ClickHouse server. It may lack support for new features.

## Usage {#cli_usage}

The client can be used in interactive and non-interactive (batch) mode. To use batch mode, specify the `query` parameter, or send data to `stdin` (it verifies that `stdin` is not a terminal), or both. Similar to the HTTP interface, when using the `query` parameter and sending data to `stdin`, the request is the `query` parameter, followed by a line feed, followed by the data from `stdin`. This is convenient for large INSERT queries.

Example of using the client to insert data:

``` bash
$ echo -ne "1, 'some text', '2016-08-14 00:00:00'\n2, 'some more text', '2016-08-14 00:00:01'" | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";

$ cat <<_EOF | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
3, 'some text', '2016-08-14 00:00:00'
4, 'some more text', '2016-08-14 00:00:01'
_EOF

$ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMAT CSV";
```

In batch mode, the default data format is `TabSeparated`. You can set the format in the FORMAT clause of the query.

By default, you can only process a single query in batch mode. To execute multiple queries from a script, use the `--multiquery` parameter. This works for all queries except INSERT. Query results are output consecutively without additional separators.
Similarly, to process a large number of queries, you can run `clickhouse-client` once for each query. Note that it may take tens of milliseconds to launch the `clickhouse-client` program.

In interactive mode, you can enter the next query right after the previous one completes.

If `multiline` is not specified (the default): to run a query, press Enter. A semicolon is not required at the end of the query. To enter a multi-line query, enter a backslash `\` before the line feed; after you press Enter, you can enter the next line of the query.

If `multiline` is specified: to run a query, end it with a semicolon and press Enter. If the semicolon is omitted at the end of the line, you are asked to continue entering the next line.

Only a single query is run, so everything after the semicolon is ignored.

You can specify `\G` instead of or after the semicolon. This indicates the `Vertical` format. In this format, each value is printed on a separate line, which is convenient for wide tables. This unusual feature was added for compatibility with the MySQL CLI.

The command line is based on `replxx` (similar to `readline`). In other words, it uses the familiar keyboard shortcuts and keeps a history.
The history is written to `~/.clickhouse-client-history`.

By default, the output format is `PrettyCompact`. You can change the format in the FORMAT clause of the query, by specifying `\G` at the end of the query, by using the `--format` or `--vertical` command-line argument, or by using the client configuration file.

To exit the client, press Ctrl+D (or Ctrl+C), or enter one of the following: `exit`, `quit`, `logout`, `учше`, `йгше`, `дщпщге`, `exit;`, `quit;`, `logout;`, `q`, `Q`, `:q`

When running a query, the client shows:

1. Progress, which is updated no more than 10 times per second (by default). For quick queries, the progress might not have time to be displayed.
2. The formatted query after parsing, for debugging.
3. The result in the specified format.
4. The number of lines in the result, the time elapsed, and the query processing speed.

You can cancel a long query by pressing Ctrl+C. However, you still need to wait for the server to abort the request. It is not possible to cancel a query at certain stages. If you do not wait and press Ctrl+C a second time, the client exits.

The command-line client allows passing external data (external temporary tables) for querying. For more information, see [External data for query processing](../engines/table-engines/special/external-data.md).

### Queries with Parameters {#cli-queries-with-parameters}

You can create a query with parameters and pass values for them from the client application. This avoids formatting a query with specific dynamic values on the client side. For example:

``` bash
$ clickhouse-client --param_parName="[1, 2]" -q "SELECT * FROM table WHERE a = {parName:Array(UInt16)}"
```

#### Query Syntax {#cli-queries-with-parameters-syntax}

Format a query as usual, then place the values that you want to pass from the app parameters to the query in braces in the following format:

``` sql
{<name>:<data type>}
```

- `name` — the placeholder identifier. In the console client it should be used in app parameters as `--param_<name> = value`.
- `data type` — the [data type](../sql-reference/data-types/index.md) of the parameter value. For example, a data structure like `(integer, ('string', integer))` can have the `Tuple(UInt8, Tuple(String, UInt8))` data type (you can also use another [integer](../sql-reference/data-types/int-uint.md) type).

#### Example {#example}

``` bash
$ clickhouse-client --param_tuple_in_tuple="(10, ('dt', 10))" -q "SELECT * FROM table WHERE val = {tuple_in_tuple:Tuple(UInt8, Tuple(String, UInt8))}"
```

## Configuring {#interfaces_cli_configuration}

You can pass parameters to `clickhouse-client` (all parameters have a default value) using:

- the command line

@ -75,29 +109,32@@ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMA

Settings from the configuration file override the default values.

### Command-Line Options {#command-line-options}

- `--host, -h` — the server host name, `localhost` by default. You can use either the host name or an IPv4 or IPv6 address.
- `--port` — the port to connect to, 9000 by default. Note that the HTTP interface and the native TCP interface use different ports.
- `--user, -u` — the username, `default` by default.
- `--password` — the password, an empty string by default.
- `--query, -q` — the query to process in non-interactive mode.
- `--database, -d` — the current default database; by default, the server's default setting (the `default` database).
- `--multiline, -m` — if specified, allows multi-line queries (Enter inserts a line feed instead of running the query).
- `--multiquery, -n` — if specified, allows processing multiple queries separated by `;`; only works in non-interactive mode.
- `--format, -f` — use the specified default format to output the result.
- `--vertical, -E` — if specified, use the `Vertical` format by default to output the result. This is the same as `–format=Vertical`. In this format, each value is printed on a separate line, which is helpful when displaying wide tables.
- `--time, -t` — if specified, print the query execution time to `stderr` in non-interactive mode.
- `--stacktrace` — if specified, print the stack trace if an exception occurs.
- `--config-file` — the name of the configuration file.
- `--secure` — if specified, connect to the server over a secure connection.
- `--history_file` — the path to the file with the command history.
- `--param_<name>` — the value of a [query parameter](#cli-queries-with-parameters).

### Configuration Files {#configuration_files}

`clickhouse-client` uses the first of the following configuration files that exists:

- the file specified with the `--config-file` parameter.
- `./clickhouse-client.xml`
- `~/.clickhouse-client/config.xml`
- `/etc/clickhouse-client/config.xml`

Example of a configuration file:

@ -106,6 +143,7 @@ cat file.csv | clickhouse-client --database=test --query="INSERT INTO test FORMA

``` xml
<config>
    <user>username</user>
    <password>password</password>
    <secure>False</secure>
</config>
```
@ -1,5 +1,10 @@
---
toc_priority: 24
toc_title: C++ Client Library
---

# C++ Client Library {#c-client-library}

See the README of the [clickhouse-cpp](https://github.com/ClickHouse/clickhouse-cpp) repository.

[Original article](https://clickhouse.tech/docs/zh/interfaces/cpp/) <!--hide-->
@ -1,23 +1,31 @@
---
toc_priority: 19
toc_title: HTTP Client
---

# HTTP Client {#http-interface}

The HTTP interface lets you use ClickHouse on any platform from any programming language. We use it for working from Java and Perl, as well as shell scripts. In other departments, the HTTP interface is used from Perl, Python, and Go. The HTTP interface is more limited than the native interface, but it has better compatibility.

By default, `clickhouse-server` listens for HTTP requests on port `8123` (this can be changed in the config).

If you make a `GET /` request without any parameters, it returns the response code 200 and the string defined in [http_server_default_response](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-http_server_default_response), `Ok.` by default (with a line feed at the end). You can use this in health-check scripts.

``` bash
$ curl 'http://localhost:8123/'
Ok.
```

Send the request as a URL `query` parameter, or as a POST, or send the beginning of the query in the `query` parameter and the rest in the POST body (we will explain later why this is necessary). The size of the URL is limited to 16 KB, so keep this in mind when sending large queries.

If the request succeeds, you receive the 200 response code and the result in the response body.
If an error occurs, you receive the 500 response code and an error description in the response body.

When using the GET method, `readonly` is set. In other words, for queries that modify data, you can only use the POST method. You can send the query itself either in the POST body or in the URL parameter.

Example:

``` bash
$ curl 'http://localhost:8123/?query=SELECT%201'
```
@ -26,13 +34,14 @@ $ curl 'http://localhost:8123/?query=SELECT%201'
``` bash
$ wget -nv -O- 'http://localhost:8123/?query=SELECT 1'
1

$ echo -ne 'GET /?query=SELECT%201 HTTP/1.0\r\n\r\n' | nc localhost 8123
HTTP/1.0 200 OK
Date: Wed, 27 Nov 2019 10:30:18 GMT
Connection: Close
Content-Type: text/tab-separated-values; charset=UTF-8
X-ClickHouse-Server-Display-Name: clickhouse.ru-central1.internal
X-ClickHouse-Query-Id: 5abe861c-239c-467f-b955-8a201abb8b7f
X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}

1
```
@ -50,7 +59,22 @@ $ echo '1' | curl 'http://localhost:8123/?query=SELECT' --data-binary @-
1
```

As you can see, curl is somewhat inconvenient because spaces must be URL-escaped.
Although wget escapes everything itself, we do not recommend using it, because it does not work well over HTTP 1.1 with keep-alive and the chunked transfer encoding.

``` bash
$ echo 'SELECT 1' | curl 'http://localhost:8123/' --data-binary @-
1

$ echo 'SELECT 1' | curl 'http://localhost:8123/?query=' --data-binary @-
1

$ echo '1' | curl 'http://localhost:8123/?query=SELECT' --data-binary @-
1
```

If part of the query is sent in the parameter and part in the POST body, a line feed is inserted between these two data parts.

Example of an error:

``` bash
@ -60,8 +84,11 @@ ECT 1
, expected One of: SHOW TABLES, SHOW DATABASES, SELECT, INSERT, CREATE, ATTACH, RENAME, DROP, DETACH, USE, SET, OPTIMIZE., e.what() = DB::Exception
```

By default, data is returned in the `TabSeparated` format. For more information, see the [Formats](../interfaces/formats/) section.

You can use the FORMAT clause of the query to request any other format.

You can also use the `default_format` URL parameter or the `X-ClickHouse-Format` header to specify a default format other than TabSeparated.

``` bash
$ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @-
@ -72,42 +99,42 @@ $ echo 'SELECT 1 FORMAT Pretty' | curl 'http://localhost:8123/?' --data-binary @
└───┘
```

You must use the POST method to INSERT data. In this case, you can write the beginning of the query in the URL parameter and use POST to pass the data to insert. The data to insert could be, for example, a tab-separated dump from MySQL. In this way, the INSERT query replaces LOAD DATA LOCAL INFILE from MySQL.

Example: creating a table:

``` bash
$ echo 'CREATE TABLE t (a UInt8) ENGINE = Memory' | curl 'http://localhost:8123/' --data-binary @-
```

Using the familiar INSERT query to insert data:

``` bash
$ echo 'INSERT INTO t VALUES (1),(2),(3)' | curl 'http://localhost:8123/' --data-binary @-
```

Data can be sent separately from the query:

``` bash
$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @-
```

You can specify any data format. The `Values` format is the same one that is used when writing `INSERT INTO t VALUES`:

``` bash
$ echo '(7),(8),(9)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20Values' --data-binary @-
```

To insert tab-separated data, specify the corresponding format:

``` bash
$ echo -ne '10\n11\n12\n' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20FORMAT%20TabSeparated' --data-binary @-
```

Reading the table contents. Data is output in random order because of parallel query processing:

``` bash
$ curl 'http://localhost:8123/?query=SELECT%20a%20FROM%20t'
7
8
9
@ -122,22 +149,37 @@ $ GET 'http://localhost:8123/?query=SELECT a FROM t'
6
```

Deleting the table:

``` bash
$ echo 'DROP TABLE t' | curl 'http://localhost:8123/' --data-binary @-
```

For successful requests that do not return a data table, an empty response body is returned.

You can use the internal ClickHouse compression format when transmitting data. The compressed data has a non-standard format, and you need the special `clickhouse-compressor` program to work with it (it is installed with the `clickhouse-client` package). To increase the efficiency of data insertion, you can disable server-side checksum verification by using the [http_native_compression_disable_checksumming_on_decompress](../operations/settings/settings.md#settings-http_native_compression_disable_checksumming_on_decompress) setting.

If you specify `compress=1` in the URL, the server returns compressed data.
If you specify `decompress=1` in the URL, the server decompresses the data that you pass with the POST method.

You can also choose to use [HTTP compression](https://en.wikipedia.org/wiki/HTTP_compression). To send a compressed POST request, append the request header `Content-Encoding: compression_method`. In order for ClickHouse to compress the response, append `Accept-Encoding: compression_method`. ClickHouse supports the `gzip`, `br`, and `deflate` [compression methods](https://en.wikipedia.org/wiki/HTTP_compression#Content-Encoding_tokens). To enable HTTP compression, you must use the ClickHouse [enable_http_compression](../operations/settings/settings.md#settings-enable_http_compression) setting. You can configure the data compression level for all compression methods with the [http_zlib_compression_level](#settings-http_zlib_compression_level) setting.

You can use this to reduce network traffic when transmitting a large amount of data, or for creating dumps that are immediately compressed.

Examples of sending data with compression:

``` bash
#Receiving a compressed response from the server:
$ curl -vsS "http://localhost:8123/?enable_http_compression=1" -d 'SELECT number FROM system.numbers LIMIT 10' -H 'Accept-Encoding: gzip'

#Sending compressed data to the server:
$ echo "SELECT 1" | gzip -c | curl -sS --data-binary @- -H 'Content-Encoding: gzip' 'http://localhost:8123/'
```

!!! note "Note"
    Some HTTP clients may decompress data from the server by default (with `gzip` and `deflate`), so you may get decompressed data even if you use the compression settings correctly.

You can use the `database` URL parameter or the `X-ClickHouse-Database` header to specify the default database.

``` bash
$ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?database=system' --data-binary @-
@ -153,30 +195,38 @@ $ echo 'SELECT number FROM numbers LIMIT 10' | curl 'http://localhost:8123/?data
9
```

By default, the database registered in the server settings is used as the default database; by default this is the database called `default`. Alternatively, you can always specify the database with a dot before the table name.

The username and password can be indicated in one of three ways:

1. Using HTTP Basic Authentication. Example:

<!-- -->

``` bash
$ echo 'SELECT 1' | curl 'http://user:password@localhost:8123/' -d @-
```

1. In the `user` and `password` URL parameters. Example:

<!-- -->

``` bash
$ echo 'SELECT 1' | curl 'http://localhost:8123/?user=user&password=password' -d @-
```

1. Using the `X-ClickHouse-User` and `X-ClickHouse-Key` headers. Example:

<!-- -->

``` bash
$ echo 'SELECT 1' | curl -H 'X-ClickHouse-User: user' -H 'X-ClickHouse-Key: password' 'http://localhost:8123/' -d @-
```

If the user name is not specified, the `default` name is used. If the password is not specified, an empty password is used.
You can also use the URL parameters to specify any settings for processing a single query, or entire profiles of settings. Example: `http://localhost:8123/?profile=web&max_rows_to_read=1000000000&query=SELECT+1`

For more information, see the [Settings](../operations/settings/index.md#settings) section.

``` bash
$ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:8123/?' --data-binary @-
@ -192,30 +242,386 @@ $ echo 'SELECT number FROM system.numbers LIMIT 10' | curl 'http://localhost:812
9
```

For information about other parameters, see the section "SET".

Similarly, you can use ClickHouse sessions in the HTTP protocol. To do this, add the `session_id` GET parameter to the request. You can use any string as the session ID. By default, the session is terminated after 60 seconds of inactivity. To change this timeout, modify the `default_session_timeout` setting in the server configuration, or add the `session_timeout` GET parameter to the request. To check the session status, use the `session_check=1` parameter. Only one query at a time can be executed within a single session.

You can receive information about the progress of a query in the `X-ClickHouse-Progress` response headers. To do this, enable [send_progress_in_http_headers](../operations/settings/settings.md#settings-send_progress_in_http_headers). Example:

``` text
X-ClickHouse-Progress: {"read_rows":"2752512","read_bytes":"240570816","total_rows_to_read":"8880128"}
X-ClickHouse-Progress: {"read_rows":"5439488","read_bytes":"482285394","total_rows_to_read":"8880128"}
X-ClickHouse-Progress: {"read_rows":"8783786","read_bytes":"819092887","total_rows_to_read":"8880128"}
```

Possible header fields:

- `read_rows` — the number of rows read.
- `read_bytes` — the volume of data read, in bytes.
- `total_rows_to_read` — the total number of rows to read.
- `written_rows` — the number of rows written.
- `written_bytes` — the volume of data written, in bytes.

Running requests are not stopped automatically if the HTTP connection is lost. Parsing and data formatting are performed on the server side, so using the network might be ineffective.

The optional `query_id` parameter can be passed as the query ID (any string). For more information, see [replace_running_query](../operations/settings/settings.md).

The optional `quota_key` parameter can be passed as the quota key (any string). For more information, see [Quotas](../operations/quotas.md#quotas).

The HTTP interface allows passing external data (external temporary tables) for querying. For more information, see [External data for query processing](../engines/table-engines/special/external-data.md).

## Response Buffering {#response-buffering}

You can enable response buffering on the server side. The `buffer_size` and `wait_end_of_query` URL parameters are provided for this purpose.

`buffer_size` determines the number of bytes of the result to buffer in the server memory. If the result body is larger than this threshold, the buffer is written to the HTTP channel, and the remaining data is sent directly to the HTTP channel.

To ensure that the entire response is buffered, set `wait_end_of_query=1`. In this case, the data that is not stored in memory is buffered in a temporary server file.

Example:

``` bash
$ curl -sS 'http://localhost:8123/?max_result_bytes=4000000&buffer_size=3000000&wait_end_of_query=1' -d 'SELECT toUInt8(number) FROM system.numbers LIMIT 9000000 FORMAT RowBinary'
```

Use buffering to avoid situations where a query processing error occurs after the response code and HTTP headers have already been sent to the client. In this situation, the error message is written at the end of the response body, and on the client side the error can only be detected at the parsing stage.

### Queries with Parameters {#cli-queries-with-parameters}

You can create a query with parameters and pass values for them from the corresponding HTTP request parameters. For more information, see [Queries with Parameters for CLI](../interfaces/cli.md#cli-queries-with-parameters).

### Example {#example}

``` bash
$ curl -sS "<address>?param_id=2&param_phrase=test" -d "SELECT * FROM table WHERE int_column = {id:UInt8} and string_column = {phrase:String}"
```

## Predefined HTTP Interface {#predefined_http_interface}

ClickHouse supports specific queries through the HTTP interface. For example, you can write data to a table as follows:

``` bash
$ echo '(4),(5),(6)' | curl 'http://localhost:8123/?query=INSERT%20INTO%20t%20VALUES' --data-binary @-
```

ClickHouse also supports a predefined HTTP interface, which can help you integrate with third-party tools more easily, such as the [Prometheus Exporter](https://github.com/percona-lab/clickhouse_exporter).

Example:

- First of all, add this section to the server configuration file:

<!-- -->

``` xml
<http_handlers>
    <rule>
        <url>/predefined_query</url>
        <methods>POST,GET</methods>
        <handler>
            <type>predefined_query_handler</type>
            <query>SELECT * FROM system.metrics LIMIT 5 FORMAT Template SETTINGS format_template_resultset = 'prometheus_template_output_format_resultset', format_template_row = 'prometheus_template_output_format_row', format_template_rows_between_delimiter = '\n'</query>
        </handler>
    </rule>
    <rule>...</rule>
    <rule>...</rule>
</http_handlers>
```

- You can now request the URL directly for data in the Prometheus format:

<!-- -->

``` bash
$ curl -v 'http://localhost:8123/predefined_query'
* Trying ::1...
* Connected to localhost (::1) port 8123 (#0)
> GET /predefined_query HTTP/1.1
> Host: localhost:8123
> User-Agent: curl/7.47.0
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Tue, 28 Apr 2020 08:52:56 GMT
< Connection: Keep-Alive
< Content-Type: text/plain; charset=UTF-8
< X-ClickHouse-Server-Display-Name: i-mloy5trc
< Transfer-Encoding: chunked
< X-ClickHouse-Query-Id: 96fe0052-01e6-43ce-b12a-6b7370de6e8a
< X-ClickHouse-Format: Template
< X-ClickHouse-Timezone: Asia/Shanghai
< Keep-Alive: timeout=3
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
<
# HELP "Query" "Number of executing queries"
# TYPE "Query" counter
"Query" 1

# HELP "Merge" "Number of executing background merges"
# TYPE "Merge" counter
"Merge" 0

# HELP "PartMutation" "Number of mutations (ALTER DELETE/UPDATE)"
# TYPE "PartMutation" counter
"PartMutation" 0

# HELP "ReplicatedFetch" "Number of data parts being fetched from replica"
# TYPE "ReplicatedFetch" counter
"ReplicatedFetch" 0

# HELP "ReplicatedSend" "Number of data parts being sent to replicas"
# TYPE "ReplicatedSend" counter
"ReplicatedSend" 0

* Connection #0 to host localhost left intact
```

As you can see from the example, if `http_handlers` is configured in the `config.xml` file, `http_handlers` can contain many `rules`. ClickHouse matches the received HTTP requests against the predefined types in `rule`, and the first matching rule runs the handler. If the match is successful, ClickHouse executes the corresponding predefined query.

A `rule` can configure `method`, `headers`, `url`, and `handler`:
- `method` is responsible for matching the method part of the HTTP request. `method` fully conforms to the definition of [method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) in the HTTP protocol. It is an optional configuration. If it is not defined in the configuration file, it does not match the method part of the HTTP request.

- `url` is responsible for matching the URL part of the HTTP request. It matches [RE2](https://github.com/google/re2) regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the URL part of the HTTP request.

- `headers` are responsible for matching the header part of the HTTP request. They are compatible with RE2 regular expressions. It is an optional configuration. If it is not defined in the configuration file, it does not match the header part of the HTTP request.

- `handler` contains the main processing part. A `handler` can configure `type`, `status`, `content_type`, `response_content`, `query`, and `query_param_name`.
    `type` currently supports three types: [predefined_query_handler](#predefined_query_handler), [dynamic_query_handler](#dynamic_query_handler), and [static](#static).

    - `query` — used with the `predefined_query_handler` type; executes the query when the handler is called.

    - `query_param_name` — used with the `dynamic_query_handler` type; extracts and executes the value corresponding to the `query_param_name` value in the HTTP request parameters.

    - `status` — used with the `static` type; the response status code.

    - `content_type` — used with the `static` type; the response [content-type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type).

    - `response_content` — used with the `static` type; the response content sent to the client. When using the prefix `file://` or `config://`, the content is found in a file or in the configuration and sent to the client.

Next are the configuration methods for the different `type`s.

### predefined_query_handler {#predefined_query_handler}

`predefined_query_handler` supports setting `Settings` and `query_params` values. You can configure `query` in the `predefined_query_handler` type.

`query` is a predefined query of `predefined_query_handler`. ClickHouse executes it when an HTTP request is matched and returns the result of the query. It is a required configuration.

The following example defines the values of the [max_threads](../operations/settings/settings.md#settings-max_threads) and `max_alter_threads` settings, then queries the system table to check whether these settings were set successfully.

Example:

``` xml
<http_handlers>
    <rule>
        <url><![CDATA[/query_param_with_url/\w+/(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></url>
        <method>GET</method>
        <headers>
            <XXX>TEST_HEADER_VALUE</XXX>
            <PARAMS_XXX><![CDATA[(?P<name_1>[^/]+)(/(?P<name_2>[^/]+))?]]></PARAMS_XXX>
        </headers>
        <handler>
            <type>predefined_query_handler</type>
            <query>SELECT value FROM system.settings WHERE name = {name_1:String}</query>
            <query>SELECT name, value FROM system.settings WHERE name = {name_2:String}</query>
        </handler>
    </rule>
</http_handlers>
```

``` bash
$ curl -H 'XXX:TEST_HEADER_VALUE' -H 'PARAMS_XXX:max_threads' 'http://localhost:8123/query_param_with_url/1/max_threads/max_alter_threads?max_threads=1&max_alter_threads=2'
1
max_alter_threads	2
```

!!! note "Note"
    In one `predefined_query_handler`, only one `query` of insert type is supported.

### dynamic_query_handler {#dynamic_query_handler}

In `dynamic_query_handler`, the query is written in the form of a parameter of the HTTP request. The difference is that in `predefined_query_handler`, the query is written in the configuration file. You can configure `query_param_name` in `dynamic_query_handler`.

ClickHouse extracts and executes the value corresponding to the `query_param_name` value in the URL of the HTTP request. The default value of `query_param_name` is `/query`. It is an optional configuration. If there is no definition in the configuration file, the parameter is not passed in.

To experiment with this functionality, the example defines the values of [max_threads](../operations/settings/settings.md#settings-max_threads) and `max_alter_threads` and queries whether the settings were set successfully.

Example:

``` xml
<http_handlers>
    <rule>
        <headers>
            <XXX>TEST_HEADER_VALUE_DYNAMIC</XXX>
        </headers>
        <handler>
            <type>dynamic_query_handler</type>
            <query_param_name>query_param</query_param_name>
        </handler>
    </rule>
</http_handlers>
```

``` bash
$ curl -H 'XXX:TEST_HEADER_VALUE_DYNAMIC' 'http://localhost:8123/own?max_threads=1&max_alter_threads=2&param_name_1=max_threads&param_name_2=max_alter_threads&query_param=SELECT%20name,value%20FROM%20system.settings%20where%20name%20=%20%7Bname_1:String%7D%20OR%20name%20=%20%7Bname_2:String%7D'
max_threads 1
max_alter_threads   2
```

### static {#static}

`static` can return [content_type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type), [status](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status), and `response_content`. `response_content` can return the specified content.

Example:

Return a message.

``` xml
<http_handlers>
    <rule>
        <methods>GET</methods>
        <headers><XXX>xxx</XXX></headers>
        <url>/hi</url>
        <handler>
            <type>static</type>
            <status>402</status>
            <content_type>text/html; charset=UTF-8</content_type>
            <response_content>Say Hi!</response_content>
        </handler>
    </rule>
</http_handlers>
```

``` bash
$ curl -vv -H 'XXX:xxx' 'http://localhost:8123/hi'
* Trying ::1...
* Connected to localhost (::1) port 8123 (#0)
> GET /hi HTTP/1.1
> Host: localhost:8123
> User-Agent: curl/7.47.0
> Accept: */*
> XXX:xxx
>
< HTTP/1.1 402 Payment Required
< Date: Wed, 29 Apr 2020 03:51:26 GMT
< Connection: Keep-Alive
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=3
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
<
* Connection #0 to host localhost left intact
Say Hi!%
```

Find the content from the configuration and send it to the client.

``` xml
<get_config_static_handler><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></get_config_static_handler>

<http_handlers>
    <rule>
        <methods>GET</methods>
        <headers><XXX>xxx</XXX></headers>
        <url>/get_config_static_handler</url>
        <handler>
            <type>static</type>
            <response_content>config://get_config_static_handler</response_content>
        </handler>
    </rule>
</http_handlers>
```

``` bash
$ curl -v -H 'XXX:xxx' 'http://localhost:8123/get_config_static_handler'
* Trying ::1...
* Connected to localhost (::1) port 8123 (#0)
> GET /get_config_static_handler HTTP/1.1
> Host: localhost:8123
> User-Agent: curl/7.47.0
> Accept: */*
> XXX:xxx
>
< HTTP/1.1 200 OK
< Date: Wed, 29 Apr 2020 04:01:24 GMT
< Connection: Keep-Alive
< Content-Type: text/plain; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=3
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
<
* Connection #0 to host localhost left intact
<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>%
```

Find the content from the file and send it to the client.

``` xml
<http_handlers>
    <rule>
        <methods>GET</methods>
        <headers><XXX>xxx</XXX></headers>
        <url>/get_absolute_path_static_handler</url>
        <handler>
            <type>static</type>
            <content_type>text/html; charset=UTF-8</content_type>
            <response_content>file:///absolute_path_file.html</response_content>
        </handler>
    </rule>
    <rule>
        <methods>GET</methods>
        <headers><XXX>xxx</XXX></headers>
        <url>/get_relative_path_static_handler</url>
        <handler>
            <type>static</type>
            <content_type>text/html; charset=UTF-8</content_type>
            <response_content>file://./relative_path_file.html</response_content>
        </handler>
    </rule>
</http_handlers>
```

``` bash
$ user_files_path='/var/lib/clickhouse/user_files'
$ sudo echo "<html><body>Relative Path File</body></html>" > $user_files_path/relative_path_file.html
$ sudo echo "<html><body>Absolute Path File</body></html>" > $user_files_path/absolute_path_file.html
$ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_absolute_path_static_handler'
* Trying ::1...
* Connected to localhost (::1) port 8123 (#0)
> GET /get_absolute_path_static_handler HTTP/1.1
> Host: localhost:8123
> User-Agent: curl/7.47.0
> Accept: */*
> XXX:xxx
>
< HTTP/1.1 200 OK
< Date: Wed, 29 Apr 2020 04:18:16 GMT
< Connection: Keep-Alive
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=3
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
<
<html><body>Absolute Path File</body></html>
* Connection #0 to host localhost left intact
$ curl -vv -H 'XXX:xxx' 'http://localhost:8123/get_relative_path_static_handler'
* Trying ::1...
* Connected to localhost (::1) port 8123 (#0)
> GET /get_relative_path_static_handler HTTP/1.1
> Host: localhost:8123
> User-Agent: curl/7.47.0
> Accept: */*
> XXX:xxx
>
< HTTP/1.1 200 OK
< Date: Wed, 29 Apr 2020 04:18:31 GMT
< Connection: Keep-Alive
< Content-Type: text/html; charset=UTF-8
< Transfer-Encoding: chunked
< Keep-Alive: timeout=3
< X-ClickHouse-Summary: {"read_rows":"0","read_bytes":"0","written_rows":"0","written_bytes":"0","total_rows_to_read":"0"}
<
<html><body>Relative Path File</body></html>
* Connection #0 to host localhost left intact
```

[Original article](https://clickhouse.tech/docs/zh/interfaces/http_interface/) <!--hide-->
@ -1,19 +1,27 @@
---
toc_folder_title: Interfaces
toc_priority: 14
toc_title: Interfaces
---

# Interfaces {#interfaces}

ClickHouse provides two network interfaces (both can optionally be wrapped in TLS for additional security):

- [HTTP](http.md), which is documented and easy to use.
- [Native TCP](../interfaces/tcp.md), which is simple and convenient to use.

In most cases it is recommended to use an appropriate tool or library instead of interacting with these directly. The following are officially supported by Yandex:

- [Command-line client](../interfaces/cli.md)
- [JDBC driver](../interfaces/jdbc.md)
- [ODBC driver](../interfaces/odbc.md)
- [C++ client library](../interfaces/cpp.md)

There is also a wide range of third-party libraries for working with ClickHouse:

- [Client libraries](../interfaces/third-party/client-libraries.md)
- [Integrations](../interfaces/third-party/integrations.md)
- [Visual interfaces](../interfaces/third-party/gui.md)

[Original article](https://clickhouse.tech/docs/en/interfaces/) <!--hide-->
@ -1,8 +1,13 @@
---
toc_priority: 22
toc_title: JDBC Driver
---

# JDBC Driver {#jdbc-driver}

- **[Official driver](https://github.com/ClickHouse/clickhouse-jdbc)**
- Third-party drivers:
    - [ClickHouse-Native-JDBC](https://github.com/housepower/ClickHouse-Native-JDBC)
    - [clickhouse4j](https://github.com/blynkkk/clickhouse4j)

[Original article](https://clickhouse.tech/docs/zh/interfaces/jdbc/) <!--hide-->
@ -1,19 +1,17 @@
---
machine_translated: true
machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
toc_priority: 20
toc_title: MySQL Interface
---

# MySQL Interface {#mysql-interface}

ClickHouse supports the MySQL wire protocol. It can be enabled by setting [mysql_port](../operations/server-configuration-parameters/settings.md#server_configuration_parameters-mysql_port) in the configuration file:

``` xml
<mysql_port>9004</mysql_port>
```

Example of connecting with the command-line tool `mysql`:

``` bash
$ mysql --protocol tcp -u default -P 9004
@ -37,12 +35,12 @@ Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql>
```

For compatibility with all MySQL clients, it is recommended to specify user passwords with [double SHA1](../operations/settings/settings-users.md#password_double_sha1_hex) in the configuration file.
If the user password is specified with [SHA256](../operations/settings/settings-users.md#password_sha256_hex), some clients will not be able to authenticate (for example, mysqljs and old versions of the command-line tool mysql).

Restrictions:

- prepared queries are not supported

- some data types are sent as strings
@ -1,5 +1,10 @@
---
toc_priority: 23
toc_title: ODBC Driver
---

# ODBC Driver {#odbc-driver}

- [Official driver](https://github.com/ClickHouse/clickhouse-odbc)

[Original article](https://clickhouse.tech/docs/zh/interfaces/odbc/) <!--hide-->
@ -1,5 +1,10 @@
|
||||
# 原生客户端接口(TCP) {#yuan-sheng-ke-hu-duan-jie-kou-tcp}
|
||||
---
|
||||
toc_priority: 18
|
||||
toc_title: 原生接口(TCP)
|
||||
---
|
||||
|
||||
本机协议用于 [命令行客户端](cli.md),用于分布式查询处理期间的服务器间通信,以及其他C ++程序。 不幸的是,本机ClickHouse协议还没有正式的规范,但它可以从ClickHouse源代码进行逆向工程 [从这里开始](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client))和/或拦截和分析TCP流量。
|
||||
# 原生接口(TCP){#native-interface-tcp}
|
||||
|
||||
原生接口用于[命令行客户端](cli.md)、分布式查询处理期间的服务器间通信,以及其他C++程序。可惜的是,原生的ClickHouse协议还没有正式的规范,但可以通过阅读ClickHouse[源代码](https://github.com/ClickHouse/ClickHouse/tree/master/src/Client)进行逆向工程,和/或通过拦截和分析TCP流量来了解它。
|
||||
|
||||
[来源文章](https://clickhouse.tech/docs/zh/interfaces/tcp/) <!--hide-->
|
||||
|
@ -20,7 +20,37 @@ SELECT
|
||||
|
||||
## toTimeZone {#totimezone}
|
||||
|
||||
将Date或DateTime转换为指定的时区。
|
||||
将Date或DateTime转换为指定的时区。 时区是Date/DateTime类型的属性。 表字段或结果集的列的内部值(秒数)不会更改,列的类型会更改,并且其字符串表示形式也会相应更改。
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
toDateTime('2019-01-01 00:00:00', 'UTC') AS time_utc,
|
||||
toTypeName(time_utc) AS type_utc,
|
||||
toInt32(time_utc) AS int32utc,
|
||||
toTimeZone(time_utc, 'Asia/Yekaterinburg') AS time_yekat,
|
||||
toTypeName(time_yekat) AS type_yekat,
|
||||
toInt32(time_yekat) AS int32yekat,
|
||||
toTimeZone(time_utc, 'US/Samoa') AS time_samoa,
|
||||
toTypeName(time_samoa) AS type_samoa,
|
||||
toInt32(time_samoa) AS int32samoa
|
||||
FORMAT Vertical;
|
||||
```
|
||||
|
||||
```text
|
||||
Row 1:
|
||||
──────
|
||||
time_utc: 2019-01-01 00:00:00
|
||||
type_utc: DateTime('UTC')
|
||||
int32utc: 1546300800
|
||||
time_yekat: 2019-01-01 05:00:00
|
||||
type_yekat: DateTime('Asia/Yekaterinburg')
|
||||
int32yekat: 1546300800
|
||||
time_samoa: 2018-12-31 13:00:00
|
||||
type_samoa: DateTime('US/Samoa')
|
||||
int32samoa: 1546300800
|
||||
```
|
||||
|
||||
`toTimeZone(time_utc, 'Asia/Yekaterinburg')` 把 `DateTime('UTC')` 类型转换为 `DateTime('Asia/Yekaterinburg')`。内部值(Unix时间戳)1546300800 保持不变,但字符串表示(`toString()` 函数的结果)由 `time_utc: 2019-01-01 00:00:00` 变为 `time_yekat: 2019-01-01 05:00:00`。
|
||||
|
||||
## toYear {#toyear}
|
||||
|
||||
@ -34,15 +64,15 @@ SELECT
|
||||
|
||||
将Date或DateTime转换为包含月份编号(1-12)的UInt8类型的数字。
|
||||
|
||||
## 今天一年 {#todayofyear}
|
||||
## toDayOfYear {#todayofyear}
|
||||
|
||||
将Date或DateTime转换为包含一年中的某一天的编号的UInt16(1-366)类型的数字。
|
||||
|
||||
## 今天月 {#todayofmonth}
|
||||
## toDayOfMonth {#todayofmonth}
|
||||
|
||||
将Date或DateTime转换为包含一月中的某一天的编号的UInt8(1-31)类型的数字。
|
||||
|
||||
## 今天一周 {#todayofweek}
|
||||
## toDayOfWeek {#todayofweek}
|
||||
|
||||
将Date或DateTime转换为包含一周中的某一天的编号的UInt8(周一是1, 周日是7)类型的数字。
|
||||
|
||||
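下面是一个最小的示例草图(日期为任意选取,注释中的结果为推算值),演示这几个 toDayOf* 函数的返回值:

``` sql
SELECT
    toDayOfYear(toDate('2019-01-01')),  -- 1
    toDayOfMonth(toDate('2019-01-01')), -- 1
    toDayOfWeek(toDate('2019-01-01'))   -- 2(2019-01-01 是星期二)
```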
@ -55,31 +85,61 @@ SELECT
|
||||
|
||||
将DateTime转换为包含一小时中分钟数(0-59)的UInt8数字。
|
||||
|
||||
## 秒 {#tosecond}
|
||||
## toSecond {#tosecond}
|
||||
|
||||
将DateTime转换为包含一分钟中秒数(0-59)的UInt8数字。
|
||||
闰秒不计算在内。
|
||||
|
||||
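一个最小的示例草图(时间值为任意选取),演示 toMinute 和 toSecond 提取的分量:

``` sql
SELECT
    toMinute(toDateTime('2019-01-01 12:34:56')), -- 34
    toSecond(toDateTime('2019-01-01 12:34:56'))  -- 56
```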
## toUnixTimestamp {#tounixtimestamp}
|
||||
## toUnixTimestamp {#to-unix-timestamp}
|
||||
|
||||
将DateTime转换为unix时间戳。
|
||||
对于DateTime参数:将值转换为UInt32类型的数字-Unix时间戳(https://en.wikipedia.org/wiki/Unix_time)。
|
||||
对于String参数:根据时区将输入字符串转换为日期时间(可选的第二个参数,默认使用服务器时区),并返回相应的unix时间戳。
|
||||
|
||||
## 开始一年 {#tostartofyear}
|
||||
**语法**
|
||||
|
||||
``` sql
|
||||
toUnixTimestamp(datetime)
|
||||
toUnixTimestamp(str, [timezone])
|
||||
```
|
||||
|
||||
**返回值**
|
||||
|
||||
- 返回Unix时间戳。
|
||||
|
||||
类型: `UInt32`.
|
||||
|
||||
**示例**
|
||||
|
||||
查询:
|
||||
|
||||
``` sql
|
||||
SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─unix_timestamp─┐
|
||||
│ 1509836867 │
|
||||
└────────────────┘
|
||||
```
|
||||
|
||||
## toStartOfYear {#tostartofyear}
|
||||
|
||||
将Date或DateTime向前取整到本年的第一天。
|
||||
返回Date类型。
|
||||
|
||||
## 今年开始 {#tostartofisoyear}
|
||||
## toStartOfISOYear {#tostartofisoyear}
|
||||
|
||||
将Date或DateTime向前取整到ISO本年的第一天。
|
||||
返回Date类型。
|
||||
|
||||
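下面的示例草图展示了ISO年与日历年不一致的情况:2017-01-01 属于 2016 年的最后一个ISO周,因此按此定义返回的应是ISO年2016的第一天:

``` sql
SELECT toStartOfISOYear(toDate('2017-01-01')) -- 推算结果:2016-01-04
```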
## 四分之一开始 {#tostartofquarter}
|
||||
## toStartOfQuarter {#tostartofquarter}
|
||||
|
||||
将Date或DateTime向前取整到本季度的第一天。
|
||||
返回Date类型。
|
||||
|
||||
## 到月份开始 {#tostartofmonth}
|
||||
## toStartOfMonth {#tostartofmonth}
|
||||
|
||||
将Date或DateTime向前取整到本月的第一天。
|
||||
返回Date类型。
|
||||
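一个汇总的示例草图(日期任意选取,注释为推算结果),对比年、季度、月份三个取整函数:

``` sql
SELECT
    toStartOfYear(toDate('2020-08-15')),    -- 2020-01-01
    toStartOfQuarter(toDate('2020-08-15')), -- 2020-07-01
    toStartOfMonth(toDate('2020-08-15'))    -- 2020-08-01
```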
@ -92,27 +152,90 @@ SELECT
|
||||
将Date或DateTime向前取整到本周的星期一。
|
||||
返回Date类型。
|
||||
|
||||
## 今天开始 {#tostartofday}
|
||||
## toStartOfWeek(t\[,mode\]) {#tostartofweek}
|
||||
|
||||
将DateTime向前取整到当日的开始。
|
||||
按mode将Date或DateTime向前取整到最近的星期日或星期一。
|
||||
返回Date类型。
|
||||
mode参数的工作方式与toWeek()的mode参数完全相同。 对于单参数语法,mode使用默认值0。
|
||||
|
||||
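一个示例草图(2016-12-27 是星期二),演示 mode 参数对取整起点的影响:

``` sql
SELECT
    toStartOfWeek(toDate('2016-12-27')),   -- 2016-12-25(默认 mode 0,取整到星期日)
    toStartOfWeek(toDate('2016-12-27'), 1) -- 2016-12-26(mode 1,取整到星期一)
```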
## 开始一小时 {#tostartofhour}
|
||||
## toStartOfDay {#tostartofday}
|
||||
|
||||
将DateTime向前取整到当日的开始。
|
||||
|
||||
## toStartOfHour {#tostartofhour}
|
||||
|
||||
将DateTime向前取整到当前小时的开始。
|
||||
|
||||
## to startofminute {#tostartofminute}
|
||||
## toStartOfMinute {#tostartofminute}
|
||||
|
||||
将DateTime向前取整到当前分钟的开始。
|
||||
|
||||
## to startoffiveminute {#tostartoffiveminute}
|
||||
## toStartOfSecond {#tostartofsecond}
|
||||
|
||||
将 DateTime64 向下取整到秒的开始,即截断亚秒部分。
|
||||
|
||||
**语法**
|
||||
|
||||
``` sql
|
||||
toStartOfSecond(value[, timezone])
|
||||
```
|
||||
|
||||
**参数**
|
||||
|
||||
- `value` — 日期和时间。[DateTime64](../../sql-reference/data-types/datetime64.md)。
|
||||
- `timezone` — 返回值的[Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (可选参数)。 如果未指定将使用 `value` 参数的时区。 [String](../../sql-reference/data-types/string.md)。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 亚秒部分被置为零的输入值。
|
||||
|
||||
类型: [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
**示例**
|
||||
|
||||
不指定时区查询:
|
||||
|
||||
``` sql
|
||||
WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64
|
||||
SELECT toStartOfSecond(dt64);
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌───toStartOfSecond(dt64)─┐
|
||||
│ 2020-01-01 10:20:30.000 │
|
||||
└─────────────────────────┘
|
||||
```
|
||||
|
||||
指定时区查询:
|
||||
|
||||
``` sql
|
||||
WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64
|
||||
SELECT toStartOfSecond(dt64, 'Europe/Moscow');
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─toStartOfSecond(dt64, 'Europe/Moscow')─┐
|
||||
│ 2020-01-01 13:20:30.000 │
|
||||
└────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**参考**
|
||||
|
||||
- [Timezone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) 服务器配置选项。
|
||||
|
||||
## toStartOfFiveMinute {#tostartoffiveminute}
|
||||
|
||||
将DateTime以五分钟为单位向前取整到最接近的时间点。
|
||||
|
||||
## 开始分钟 {#tostartoftenminutes}
|
||||
## toStartOfTenMinutes {#tostartoftenminutes}
|
||||
|
||||
将DateTime以十分钟为单位向前取整到最接近的时间点。
|
||||
|
||||
## 开始几分钟 {#tostartoffifteenminutes}
|
||||
## toStartOfFifteenMinutes {#tostartoffifteenminutes}
|
||||
|
||||
将DateTime以十五分钟为单位向前取整到最接近的时间点。
|
||||
|
||||
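一个对比性的示例草图(时间任意选取,注释为推算结果),展示三种步长的取整差异:

``` sql
WITH toDateTime('2020-01-01 10:23:45') AS t
SELECT
    toStartOfFiveMinute(t),    -- 2020-01-01 10:20:00
    toStartOfTenMinutes(t),    -- 2020-01-01 10:20:00
    toStartOfFifteenMinutes(t) -- 2020-01-01 10:15:00
```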
@ -168,31 +291,214 @@ SELECT
|
||||
|
||||
将Date或DateTime转换为包含ISO周数的UInt8类型的编号。
|
||||
|
||||
## 现在 {#now}
|
||||
## toWeek(date\[,mode\]) {#toweekdatemode}
|
||||
|
||||
不接受任何参数并在请求执行时的某一刻返回当前时间(DateTime)。
|
||||
此函数返回一个常量,即时请求需要很长时间能够完成。
|
||||
返回Date或DateTime的周数。两参数形式可以指定一周从星期日还是星期一开始,以及返回值的范围是0到53还是1到53。如果省略mode参数,则默认模式为0。
|
||||
`toISOWeek()`是一个兼容函数,等效于`toWeek(date,3)`。
|
||||
下表描述了mode参数的工作方式。
|
||||
|
||||
## 今天 {#today}
|
||||
| Mode | First day of week | Range | Week 1 is the first week … |
|
||||
|------|-------------------|-------|-------------------------------|
|
||||
| 0 | Sunday | 0-53 | with a Sunday in this year |
|
||||
| 1 | Monday | 0-53 | with 4 or more days this year |
|
||||
| 2 | Sunday | 1-53 | with a Sunday in this year |
|
||||
| 3 | Monday | 1-53 | with 4 or more days this year |
|
||||
| 4 | Sunday | 0-53 | with 4 or more days this year |
|
||||
| 5 | Monday | 0-53 | with a Monday in this year |
|
||||
| 6 | Sunday | 1-53 | with 4 or more days this year |
|
||||
| 7 | Monday | 1-53 | with a Monday in this year |
|
||||
| 8 | Sunday | 1-53 | contains January 1 |
|
||||
| 9 | Monday | 1-53 | contains January 1 |
|
||||
|
||||
对于像“with 4 or more days this year”这样的mode值,按ISO 8601:1988对周进行编号:
|
||||
|
||||
- 如果包含1月1日的一周在新的一年中有4天或更多天,则为第1周。
|
||||
|
||||
- 否则,它是上一年的最后一周,下一周是第1周。
|
||||
|
||||
对于像“contains January 1”的mode值, 包含1月1日的那周为本年度的第1周。
|
||||
|
||||
``` sql
|
||||
toWeek(date[, mode][, timezone])
|
||||
```
|
||||
|
||||
**参数**
|
||||
|
||||
- `date` – Date 或 DateTime.
|
||||
- `mode` – 可选参数, 取值范围 \[0,9\], 默认0。
|
||||
- `timezone` – 可选参数,行为与其他时间日期转换函数的时区参数一致。
|
||||
|
||||
**示例**
|
||||
|
||||
``` sql
|
||||
SELECT toDate('2016-12-27') AS date, toWeek(date) AS week0, toWeek(date,1) AS week1, toWeek(date,9) AS week9;
|
||||
```
|
||||
|
||||
``` text
|
||||
┌───────date─┬─week0─┬─week1─┬─week9─┐
|
||||
│ 2016-12-27 │ 52 │ 52 │ 1 │
|
||||
└────────────┴───────┴───────┴───────┘
|
||||
```
|
||||
|
||||
## toYearWeek(date\[,mode\]) {#toyearweekdatemode}
|
||||
|
||||
返回Date的年和周。结果中的年份可能与Date所在的年份不同:若该日期落在上一年的最后一周或下一年的第一周,则返回相应的那一年。
|
||||
|
||||
mode参数的工作方式与toWeek()的mode参数完全相同。 对于单参数语法,mode使用默认值0。
|
||||
|
||||
`toISOYear()`是一个兼容函数,等效于`intDiv(toYearWeek(date,3),100)`.
|
||||
|
||||
**示例**
|
||||
|
||||
``` sql
|
||||
SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(date,1) AS yearWeek1, toYearWeek(date,9) AS yearWeek9;
|
||||
```
|
||||
|
||||
``` text
|
||||
┌───────date─┬─yearWeek0─┬─yearWeek1─┬─yearWeek9─┐
|
||||
│ 2016-12-27 │ 201652 │ 201652 │ 201701 │
|
||||
└────────────┴───────────┴───────────┴───────────┘
|
||||
```
|
||||
|
||||
## date_trunc {#date_trunc}
|
||||
|
||||
将Date或DateTime按指定的单位向前取整到最接近的时间点。
|
||||
|
||||
**语法**
|
||||
|
||||
``` sql
|
||||
date_trunc(unit, value[, timezone])
|
||||
```
|
||||
|
||||
别名: `dateTrunc`.
|
||||
|
||||
**参数**
|
||||
|
||||
- `unit` — 单位. [String](../syntax.md#syntax-string-literal).
|
||||
可选值:
|
||||
|
||||
- `second`
|
||||
- `minute`
|
||||
- `hour`
|
||||
- `day`
|
||||
- `week`
|
||||
- `month`
|
||||
- `quarter`
|
||||
- `year`
|
||||
|
||||
- `value` — [DateTime](../../sql-reference/data-types/datetime.md) 或者 [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) 返回值的时区(可选值)。如果未指定将使用`value`的时区。 [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**返回值**
|
||||
|
||||
- 按指定的单位向前取整后的DateTime。
|
||||
|
||||
类型: [Datetime](../../sql-reference/data-types/datetime.md).
|
||||
|
||||
**示例**
|
||||
|
||||
不指定时区查询:
|
||||
|
||||
``` sql
|
||||
SELECT now(), date_trunc('hour', now());
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌───────────────now()─┬─date_trunc('hour', now())─┐
|
||||
│ 2020-09-28 10:40:45 │ 2020-09-28 10:00:00 │
|
||||
└─────────────────────┴───────────────────────────┘
|
||||
```
|
||||
|
||||
指定时区查询:
|
||||
|
||||
```sql
|
||||
SELECT now(), date_trunc('hour', now(), 'Europe/Moscow');
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
```text
|
||||
┌───────────────now()─┬─date_trunc('hour', now(), 'Europe/Moscow')─┐
|
||||
│ 2020-09-28 10:46:26 │ 2020-09-28 13:00:00 │
|
||||
└─────────────────────┴────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
**参考**
|
||||
|
||||
- [toStartOfInterval](#tostartofintervaltime-or-data-interval-x-unit-time-zone)
|
||||
|
||||
## now {#now}
|
||||
|
||||
返回当前日期和时间。
|
||||
|
||||
**语法**
|
||||
|
||||
``` sql
|
||||
now([timezone])
|
||||
```
|
||||
|
||||
**参数**
|
||||
|
||||
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) 返回结果的时区(可选参数)。[String](../../sql-reference/data-types/string.md)。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 当前日期和时间。
|
||||
|
||||
类型: [Datetime](../../sql-reference/data-types/datetime.md).
|
||||
|
||||
**示例**
|
||||
|
||||
不指定时区查询:
|
||||
|
||||
``` sql
|
||||
SELECT now();
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌───────────────now()─┐
|
||||
│ 2020-10-17 07:42:09 │
|
||||
└─────────────────────┘
|
||||
```
|
||||
|
||||
指定时区查询:
|
||||
|
||||
``` sql
|
||||
SELECT now('Europe/Moscow');
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─now('Europe/Moscow')─┐
|
||||
│ 2020-10-17 10:42:23 │
|
||||
└──────────────────────┘
|
||||
```
|
||||
|
||||
## today {#today}
|
||||
|
||||
不接受任何参数并在请求执行时的某一刻返回当前日期(Date)。
|
||||
其功能与 `toDate(now())` 相同。
|
||||
|
||||
## 昨天 {#yesterday}
|
||||
## yesterday {#yesterday}
|
||||
|
||||
不接受任何参数并在请求执行时的某一刻返回昨天的日期(Date)。
|
||||
其功能与 `today() - 1` 相同。
|
||||
|
||||
## 时隙 {#timeslot}
|
||||
## timeSlot {#timeslot}
|
||||
|
||||
将时间向前取整半小时。
|
||||
此函数用于Yandex.Metrica:如果某个跟踪标签显示的单个用户的连续页面浏览之间的时间间隔严格超过该值,则会话会被拆分为两个会话,半小时即是这种拆分的最小时长。这意味着 (tag id, user id, time slot) 元组可用于查找相应会话中包含的页面浏览量。
|
||||
|
||||
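一个最小的示例草图(时间任意选取):

``` sql
SELECT timeSlot(toDateTime('2012-01-01 12:20:00')) -- 返回 2012-01-01 12:00:00
```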
## toyyymm {#toyyyymm}
|
||||
## toYYYYMM {#toyyyymm}
|
||||
|
||||
将Date或DateTime转换为包含年份和月份编号的UInt32类型的数字(YYYY \* 100 + MM)。
|
||||
|
||||
## toyyymmdd {#toyyyymmdd}
|
||||
## toYYYYMMDD {#toyyyymmdd}
|
||||
|
||||
将Date或DateTime转换为包含年份、月份和日期编号的UInt32类型的数字(YYYY \* 10000 + MM \* 100 + DD)。
|
||||
|
||||
@ -200,7 +506,7 @@ SELECT
|
||||
|
||||
将Date或DateTime转换为包含年、月、日、时、分、秒的UInt64类型的数字(YYYY \* 10000000000 + MM \* 100000000 + DD \* 1000000 + hh \* 10000 + mm \* 100 + ss)。
|
||||
|
||||
## addYears, addMonths, addWeeks, addDays, addHours, addMinutes, addSeconds, addQuarters {#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters}
|
||||
## addYears, addMonths, addWeeks, addDays, addHours, addMinutes, addSeconds, addQuarters {#addyears-addmonths-addweeks-adddays-addhours-addminutes-addseconds-addquarters}
|
||||
|
||||
函数将一段时间间隔添加到Date/DateTime,然后返回Date/DateTime。例如:
|
||||
|
||||
@ -234,59 +540,145 @@ SELECT
|
||||
│ 2018-01-01 │ 2018-01-01 00:00:00 │
|
||||
└──────────────────────────┴───────────────────────────────┘
|
||||
|
||||
## dateDiff(‘unit’,t1,t2,\[时区\]) {#datediffunit-t1-t2-timezone}
|
||||
## dateDiff {#datediff}
|
||||
|
||||
返回以’unit’为单位表示的两个时间之间的差异,例如`'hours'`。 ‘t1’和’t2’可以是Date或DateTime,如果指定’timezone’,它将应用于两个参数。如果不是,则使用来自数据类型’t1’和’t2’的时区。如果时区不相同,则结果将是未定义的。
|
||||
返回两个Date或DateTime类型之间的时差。
|
||||
|
||||
支持的单位值:
|
||||
**语法**
|
||||
|
||||
| 单位 |
|
||||
|------|
|
||||
| 第二 |
|
||||
| 分钟 |
|
||||
| 小时 |
|
||||
| 日 |
|
||||
| 周 |
|
||||
| 月 |
|
||||
| 季 |
|
||||
| 年 |
|
||||
``` sql
|
||||
dateDiff('unit', startdate, enddate, [timezone])
|
||||
```
|
||||
|
||||
## 时隙(开始时间,持续时间,\[,大小\]) {#timeslotsstarttime-duration-size}
|
||||
**参数**
|
||||
|
||||
- `unit` — 返回结果的时间单位。 [String](../../sql-reference/syntax.md#syntax-string-literal).
|
||||
|
||||
支持的时间单位: second, minute, hour, day, week, month, quarter, year.
|
||||
|
||||
- `startdate` — 第一个待比较值。 [Date](../../sql-reference/data-types/date.md) 或 [DateTime](../../sql-reference/data-types/datetime.md).
|
||||
|
||||
- `enddate` — 第二个待比较值。 [Date](../../sql-reference/data-types/date.md) 或 [DateTime](../../sql-reference/data-types/datetime.md).
|
||||
|
||||
- `timezone` — 可选参数。 如果指定了,则同时适用于`startdate`和`enddate`。如果不指定,则使用`startdate`和`enddate`的时区。如果两个时区不一致,则结果不可预料。
|
||||
|
||||
**返回值**
|
||||
|
||||
以`unit`为单位的`startdate`和`enddate`之间的时差。
|
||||
|
||||
类型: `int`.
|
||||
|
||||
**示例**
|
||||
|
||||
查询:
|
||||
|
||||
``` sql
|
||||
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
|
||||
│ 25 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## timeSlots(StartTime, Duration,\[, Size\]) {#timeslotsstarttime-duration-size}
|
||||
|
||||
它返回一个时间数组,其中包含从«StartTime»开始、到«StartTime + Duration 秒»为止、以«Size»秒为步长的所有时间点。其中«Size»是一个可选参数,默认为1800。
|
||||
例如,`timeSlots(toDateTime('2012-01-01 12:20:00'),600) = [toDateTime('2012-01-01 12:00:00'),toDateTime('2012-01-01 12:30:00' )]`。
|
||||
这对于在相应会话中搜索页面浏览量非常有用。
|
||||
|
||||
## formatDateTime(时间,格式\[,时区\]) {#formatdatetimetime-format-timezone}
|
||||
## formatDateTime {#formatdatetime}
|
||||
|
||||
函数根据给定的格式字符串来格式化时间。请注意:格式字符串必须是常量表达式,例如:单个结果列不能有多种格式字符串。
|
||||
|
||||
支持的格式修饰符:
|
||||
(«Example» 列是对`2018-01-02 22:33:44`的格式化结果)
|
||||
**语法**
|
||||
|
||||
| 修饰符 | 产品描述 | 示例 |
|
||||
|--------|-------------------------------------------|------------|
|
||||
| %C | 年除以100并截断为整数(00-99) | 20 |
|
||||
| %d | 月中的一天,零填充(01-31) | 02 |
|
||||
| %D | 短MM/DD/YY日期,相当于%m/%d/%y | 01/02/2018 |
|
||||
| %e | 月中的一天,空格填充(1-31) | 2 |
|
||||
| %F | 短YYYY-MM-DD日期,相当于%Y-%m-%d | 2018-01-02 |
|
||||
| %H | 24小时格式(00-23) | 22 |
|
||||
| %I | 小时12h格式(01-12) | 10 |
|
||||
| %j | 一年(001-366) | 002 |
|
||||
| %m | 月份为十进制数(01-12) | 01 |
|
||||
| %M | 分钟(00-59) | 33 |
|
||||
| %n | 换行符(") | |
|
||||
| %p | AM或PM指定 | PM |
|
||||
| %R | 24小时HH:MM时间,相当于%H:%M | 22:33 |
|
||||
| %S | 第二(00-59) | 44 |
|
||||
| %t | 水平制表符(’) | |
|
||||
| %T | ISO8601时间格式(HH:MM:SS),相当于%H:%M:%S | 22:33:44 |
|
||||
| %u | ISO8601平日as编号,星期一为1(1-7) | 2 |
|
||||
| %V | ISO8601周编号(01-53) | 01 |
|
||||
| %w | 周日为十进制数,周日为0(0-6) | 2 |
|
||||
| %y | 年份,最后两位数字(00-99) | 18 |
|
||||
| %Y | 年 | 2018 |
|
||||
| %% | %符号 | % |
|
||||
``` sql
|
||||
formatDateTime(Time, Format\[, Timezone\])
|
||||
```
|
||||
|
||||
[来源文章](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) <!--hide-->
|
||||
**返回值**
|
||||
|
||||
根据指定格式返回的日期和时间。
|
||||
|
||||
**支持的格式修饰符**
|
||||
|
||||
使用格式修饰符来指定结果字符串的样式。«Example» 列是对`2018-01-02 22:33:44`的格式化结果。
|
||||
|
||||
| 修饰符 | 描述 | 示例 |
|
||||
|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|
|
||||
| %C | 年除以100并截断为整数(00-99) | 20 |
|
||||
| %d | 月中的一天,零填充(01-31) | 02 |
|
||||
| %D | 短MM/DD/YY日期,相当于%m/%d/%y | 01/02/2018 |
|
||||
| %e | 月中的一天,空格填充(1-31) | 2 |
|
||||
| %F | 短YYYY-MM-DD日期,相当于%Y-%m-%d | 2018-01-02 |
|
||||
| %G | ISO周号的四位数年份格式, 从基于周的年份[由ISO 8601定义](https://en.wikipedia.org/wiki/ISO_8601#Week_dates) 标准计算得出,通常仅对%V有用 | 2018 |
|
||||
| %g | 两位数的年份格式,与ISO 8601一致,四位数表示法的缩写 | 18 |
|
||||
| %H | 24小时格式(00-23) | 22 |
|
||||
| %I | 小时12h格式(01-12) | 10 |
|
||||
| %j | 一年中的第几天(001-366) | 002 |
|
||||
| %m | 月份为十进制数(01-12) | 01 |
|
||||
| %M | 分钟(00-59) | 33 |
|
||||
| %n | 换行符(\n) | |
|
||||
| %p | 上午(AM)或下午(PM) | PM |
|
||||
| %R | 24小时HH:MM时间,相当于%H:%M | 22:33 |
|
||||
| %S | 秒(00-59) | 44 |
|
||||
| %t | 水平制表符(\t) | |
|
||||
| %T | ISO8601时间格式(HH:MM:SS),相当于%H:%M:%S | 22:33:44 |
|
||||
| %u | ISO8601星期编号,星期一为1(1-7) | 2 |
|
||||
| %V | ISO8601周编号(01-53) | 01 |
|
||||
| %w | 一周中的第几天,星期日为0(0-6) | 2 |
|
||||
| %y | 年份,最后两位数字(00-99) | 18 |
|
||||
| %Y | 年 | 2018 |
|
||||
| %% | %符号 | % |
|
||||
|
||||
**示例**
|
||||
|
||||
查询:
|
||||
|
||||
``` sql
|
||||
SELECT formatDateTime(toDate('2010-01-04'), '%g')
|
||||
```
|
||||
|
||||
结果:
|
||||
|
||||
``` text
|
||||
┌─formatDateTime(toDate('2010-01-04'), '%g')─┐
|
||||
│ 10 │
|
||||
└────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
[Original article](https://clickhouse.tech/docs/en/query_language/functions/date_time_functions/) <!--hide-->
|
||||
|
||||
## FROM_UNIXTIME
|
||||
|
||||
当只有单个整数类型的参数时,它的作用与`toDateTime`相同,并返回[DateTime](../../sql-reference/data-types/datetime.md)类型。
|
||||
|
||||
例如:
|
||||
|
||||
```sql
|
||||
SELECT FROM_UNIXTIME(423543535)
|
||||
```
|
||||
|
||||
```text
|
||||
┌─FROM_UNIXTIME(423543535)─┐
|
||||
│ 1983-06-04 10:58:55 │
|
||||
└──────────────────────────┘
|
||||
```
|
||||
|
||||
当有两个参数时,第一个是整型或DateTime,第二个是常量格式字符串,它的作用与`formatDateTime`相同,并返回`String`类型。
|
||||
|
||||
例如:
|
||||
|
||||
```sql
|
||||
SELECT FROM_UNIXTIME(1234334543, '%Y-%m-%d %R:%S') AS DateTime
|
||||
```
|
||||
|
||||
```text
|
||||
┌─DateTime────────────┐
|
||||
│ 2009-02-11 14:42:23 │
|
||||
└─────────────────────┘
|
||||
```
|
||||
|
@ -18,7 +18,7 @@ toc_title: "\u81EA\u7701"
|
||||
|
||||
- 将 [allow_introspection_functions](../../operations/settings/settings.md#settings-allow_introspection_functions) 设置为1。
|
||||
|
||||
For security reasons introspection functions are disabled by default.
|
||||
出于安全考虑,内省函数默认是关闭的。
|
||||
|
||||
ClickHouse将探查器报告保存到 [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log) 系统表. 确保正确配置了表和探查器。
|
||||
|
||||
@ -36,17 +36,17 @@ addressToLine(address_of_binary_instruction)
|
||||
|
||||
**参数**
|
||||
|
||||
- `address_of_binary_instruction` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Address of instruction in a running process.
|
||||
- `address_of_binary_instruction` ([UInt64](../../sql-reference/data-types/int-uint.md)) — 正在运行进程的指令地址。
|
||||
|
||||
**返回值**
|
||||
|
||||
- 源代码文件名和此文件中用冒号分隔的行号。
|
||||
- 源代码文件名和行号(以冒号分隔)。
|
||||
|
||||
For example, `/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199`, where `199` is a line number.
|
||||
例如 `/build/obj-x86_64-linux-gnu/../src/Common/ThreadPool.cpp:199`,其中 `199` 是行号。
|
||||
|
||||
- 二进制文件的名称,如果函数找不到调试信息。
|
||||
- 如果函数找不到调试信息,返回二进制文件的名称。
|
||||
|
||||
- 空字符串,如果地址无效。
|
||||
- 如果地址无效,返回空字符串。
|
||||
|
||||
类型: [字符串](../../sql-reference/data-types/string.md).
|
||||
|
||||
@ -132,7 +132,7 @@ addressToSymbol(address_of_binary_instruction)
|
||||
**返回值**
|
||||
|
||||
- 来自ClickHouse对象文件的符号。
|
||||
- 空字符串,如果地址无效。
|
||||
- 如果地址无效,返回空字符串。
|
||||
|
||||
类型: [字符串](../../sql-reference/data-types/string.md).
|
||||
|
||||
|
@ -41,25 +41,25 @@ CHECK TABLE [db.]name
|
||||
|
||||
该 `CHECK TABLE` 查询支持下列表引擎:
|
||||
|
||||
- [日志](../../engines/table-engines/log-family/log.md)
|
||||
- [Log](../../engines/table-engines/log-family/log.md)
|
||||
- [TinyLog](../../engines/table-engines/log-family/tinylog.md)
|
||||
- [StripeLog](../../engines/table-engines/log-family/stripelog.md)
|
||||
- [梅树家族](../../engines/table-engines/mergetree-family/mergetree.md)
|
||||
- [MergeTree 家族](../../engines/table-engines/mergetree-family/mergetree.md)
|
||||
|
||||
使用另一个表引擎对表执行会导致异常。
|
||||
对使用其他(不受支持的)表引擎的表执行该查询会导致异常。
|
||||
|
||||
从发动机 `*Log` 家庭不提供故障自动数据恢复。 使用 `CHECK TABLE` 查询以及时跟踪数据丢失。
|
||||
来自 `*Log` 家族的引擎不提供故障自动数据恢复。 使用 `CHECK TABLE` 查询及时跟踪数据丢失。
|
||||
|
||||
为 `MergeTree` 家庭发动机, `CHECK TABLE` 查询显示本地服务器上表的每个单独数据部分的检查状态。
|
||||
对于 `MergeTree` 家族引擎, `CHECK TABLE` 查询显示本地服务器上表的每个单独数据部分的检查状态。
|
||||
|
||||
**如果数据已损坏**
|
||||
|
||||
如果表已损坏,则可以将未损坏的数据复制到另一个表。 要做到这一点:
|
||||
|
||||
1. 创建具有与损坏的表相同结构的新表。 要执行此操作,请执行查询 `CREATE TABLE <new_table_name> AS <damaged_table_name>`.
|
||||
2. 设置 [max_threads](../../operations/settings/settings.md#settings-max_threads) 值为1以在单个线程中处理下一个查询。 要执行此操作,请运行查询 `SET max_threads = 1`.
|
||||
1. 创建一个与损坏的表结构相同的新表。 要做到这一点,请执行查询 `CREATE TABLE <new_table_name> AS <damaged_table_name>`.
|
||||
2. 将 [max_threads](../../operations/settings/settings.md#settings-max_threads) 值设置为1,以在单个线程中处理下一个查询。 要这样做,请运行查询 `SET max_threads = 1`.
|
||||
3. 执行查询 `INSERT INTO <new_table_name> SELECT * FROM <damaged_table_name>`. 此请求将未损坏的数据从损坏的表复制到另一个表。 只有损坏部分之前的数据才会被复制。
|
||||
4. 重新启动 `clickhouse-client` 要重置 `max_threads` 价值。
|
||||
4. 重新启动 `clickhouse-client` 以重置 `max_threads` 值。(这些步骤汇总后的SQL草图见下。)
|
||||
|
||||
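将上述步骤汇总为一个SQL草图(`damaged_table`、`new_table` 均为假设的示例表名):

``` sql
CREATE TABLE new_table AS damaged_table;           -- 1. 创建结构相同的新表
SET max_threads = 1;                               -- 2. 以单线程处理下一个查询
INSERT INTO new_table SELECT * FROM damaged_table; -- 3. 复制损坏部分之前的数据
-- 4. 之后重新启动 clickhouse-client 以重置 max_threads
```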
## DESCRIBE TABLE {#misc-describe-table}
|
||||
|
||||
@ -67,57 +67,65 @@ CHECK TABLE [db.]name
|
||||
DESC|DESCRIBE TABLE [db.]table [INTO OUTFILE filename] [FORMAT format]
|
||||
```
|
||||
|
||||
返回以下内容 `String` 类型列:
|
||||
返回以下 `String` 类型列:
|
||||
|
||||
- `name` — Column name.
|
||||
- `type`— Column type.
|
||||
- `default_type` — Clause that is used in [默认表达式](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` 或 `ALIAS`). 如果未指定默认表达式,则Column包含一个空字符串。
|
||||
- `default_expression` — Value specified in the `DEFAULT` 条款
|
||||
- `comment_expression` — Comment text.
|
||||
- `name` — 列名。
|
||||
- `type`— 列的类型。
|
||||
- `default_type` — [默认表达式](create.md#create-default-values) (`DEFAULT`, `MATERIALIZED` 或 `ALIAS`)中使用的子句。 如果没有指定默认表达式,则列包含一个空字符串。
|
||||
- `default_expression` — `DEFAULT` 子句中指定的值。
|
||||
- `comment_expression` — 注释。
|
||||
|
||||
嵌套的数据结构输出 “expanded” 格式。 每列分别显示,名称后面有一个点。
|
||||
嵌套数据结构以 “expanded” 格式输出。 每列分别显示,列名后加点号。
|
||||
|
||||
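一个示例草图(`system.one` 是任意选取的系统表,它只有一列):

``` sql
DESC TABLE system.one;
-- name: dummy
-- type: UInt8
```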
## DETACH {#detach}
|
||||
|
||||
删除有关 ‘name’ 表从服务器。 服务器停止了解表的存在。
|
||||
从服务器中删除有关 ‘name’ 表的信息。此后服务器将不再知道该表的存在。
|
||||
|
||||
``` sql
|
||||
DETACH TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
|
||||
```
|
||||
|
||||
这不会删除表的数据或元数据。 在下一次服务器启动时,服务器将读取元数据并再次查找有关表的信息。
|
||||
同样,一个 “detached” 表可以使用重新连接 `ATTACH` 查询(系统表除外,它们没有为它们存储元数据)。
|
||||
|
||||
没有 `DETACH DATABASE` 查询。
|
||||
这不会删除表的数据或元数据。 在下一次服务器启动时,服务器将读取元数据并再次查找该表。
|
||||
同样,可以使用 `ATTACH` 查询重新连接一个 “detached” 的表(系统表除外,没有为它们存储元数据)。
|
||||
|
||||
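一个示例草图(`test.visits` 为假设的表名),演示分离后再重新挂载:

``` sql
DETACH TABLE test.visits; -- 服务器不再知道该表,但数据和元数据仍保留在磁盘上
ATTACH TABLE test.visits; -- 重新读取元数据,表再次可用
```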
## DROP {#drop}
|
||||
|
||||
此查询有两种类型: `DROP DATABASE` 和 `DROP TABLE`.
|
||||
删除已经存在的实体。如果指定 `IF EXISTS`,则当实体不存在时不返回错误。
|
||||
|
||||
## DROP DATABASE {#drop-database}
|
||||
|
||||
删除 `db` 数据库中的所有表,然后删除 `db` 数据库本身。
|
||||
|
||||
语法:
|
||||
|
||||
``` sql
|
||||
DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster]
|
||||
```
|
||||
## DROP TABLE {#drop-table}
|
||||
|
||||
删除内部的所有表 ‘db’ 数据库,然后删除 ‘db’ 数据库本身。
|
||||
如果 `IF EXISTS` 如果数据库不存在,则不会返回错误。
|
||||
删除表。
|
||||
|
||||
语法:
|
||||
|
||||
``` sql
|
||||
DROP [TEMPORARY] TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
|
||||
```
|
||||
|
||||
删除表。
|
||||
如果指定 `IF EXISTS`,则当表或数据库不存在时,不会返回错误。
|
||||
|
||||
DROP DICTIONARY [IF EXISTS] [db.]name
|
||||
## DROP DICTIONARY {#drop-dictionary}
|
||||
|
||||
删除字典。
|
||||
如果指定 `IF EXISTS`,则当字典或数据库不存在时,不会返回错误。
|
||||
|
||||
语法:
|
||||
|
||||
``` sql
|
||||
DROP DICTIONARY [IF EXISTS] [db.]name
|
||||
```
|
||||
|
||||
## DROP USER {#drop-user-statement}
|
||||
|
||||
删除用户。
|
||||
|
||||
### 语法 {#drop-user-syntax}
|
||||
语法:
|
||||
|
||||
``` sql
|
||||
DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
|
||||
@ -129,7 +137,7 @@ DROP USER [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
|
||||
|
||||
已删除的角色将从授予该角色的所有实体撤销。
|
||||
|
||||
### 语法 {#drop-role-syntax}
|
||||
语法:
|
||||
|
||||
``` sql
|
||||
DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
|
||||
@ -141,7 +149,7 @@ DROP ROLE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
|
||||
|
||||
已删除的行策略将从分配该策略的所有实体撤销。
|
||||
|
||||
### 语法 {#drop-row-policy-syntax}
|
||||
语法:
|
||||
|
||||
``` sql
|
||||
DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER cluster_name]
|
||||
@ -153,7 +161,7 @@ DROP [ROW] POLICY [IF EXISTS] name [,...] ON [database.]table [,...] [ON CLUSTER
|
||||
|
||||
已删除的配额将从分配该配额的所有实体撤销。
|
||||
|
||||
### 语法 {#drop-quota-syntax}
|
||||
语法:
|
||||
|
||||
``` sql
|
||||
DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
|
||||
@ -165,12 +173,22 @@ DROP QUOTA [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
|
||||
|
||||
已删除的settings配置将从分配该settings配置的所有实体撤销。
|
||||
|
||||
### 语法 {#drop-settings-profile-syntax}
|
||||
语法:
|
||||
|
||||
``` sql
|
||||
DROP [SETTINGS] PROFILE [IF EXISTS] name [,...] [ON CLUSTER cluster_name]
|
||||
```
|
||||
|
||||
## DROP VIEW {#drop-view}
|
||||
|
||||
删除视图。视图也可以通过 `DROP TABLE` 删除,但 `DROP VIEW` 会检查 `[db.]name` 是否为视图。
|
||||
|
||||
语法:
|
||||
|
||||
``` sql
|
||||
DROP VIEW [IF EXISTS] [db.]name [ON CLUSTER cluster]
|
||||
```
|
||||
|
||||
## EXISTS {#exists-statement}
|
||||
|
||||
``` sql
|
||||
@ -189,7 +207,7 @@ KILL QUERY [ON CLUSTER cluster]
|
||||
```
|
||||
|
||||
尝试强制终止当前正在运行的查询。
|
||||
要终止的查询是从系统中选择的。使用在定义的标准进程表 `WHERE` 《公约》条款 `KILL` 查询。
|
||||
要终止的查询是使用 `KILL` 查询的 `WHERE` 子句定义的标准从system.processes表中选择的。
|
||||
|
||||
例:
|
||||
|
||||
@ -206,13 +224,13 @@ KILL QUERY WHERE user='username' SYNC
|
||||
默认情况下,使用异步版本的查询 (`ASYNC`),不等待确认查询已停止。
|
||||
|
||||
同步版本 (`SYNC`)等待所有查询停止,并在停止时显示有关每个进程的信息。
|
||||
响应包含 `kill_status` 列,它可以采用以下值:
|
||||
响应包含 `kill_status` 列,该列可以采用以下值:
|
||||
|
||||
1. ‘finished’ – The query was terminated successfully.
|
||||
2. ‘waiting’ – Waiting for the query to end after sending it a signal to terminate.
|
||||
3. The other values explain why the query can't be stopped.
|
||||
1. ‘finished’ – 查询已成功终止。
|
||||
2. ‘waiting’ – 向查询发送终止信号后,正在等待其结束。
|
||||
3. 其他值用于解释查询无法停止的原因。
|
||||
|
||||
测试查询 (`TEST`)仅检查用户的权限并显示要停止的查询列表。
|
||||
测试查询 (`TEST`)仅检查用户的权限,并显示要停止的查询列表。
|
||||
|
||||
## KILL MUTATION {#kill-mutation}
|
||||
|
||||
@ -223,9 +241,9 @@ KILL MUTATION [ON CLUSTER cluster]
|
||||
[FORMAT format]
|
||||
```
|
||||
|
||||
尝试取消和删除 [突变](alter.md#alter-mutations) 当前正在执行。 要取消的突变选自 [`system.mutations`](../../operations/system-tables/mutations.md#system_tables-mutations) 表使用由指定的过滤器 `WHERE` 《公约》条款 `KILL` 查询。
|
||||
尝试取消和删除当前正在执行的 [mutations](alter.md#alter-mutations) 。 要取消的mutation是使用 `KILL` 查询的WHERE子句指定的过滤器从[`system.mutations`](../../operations/system-tables/mutations.md#system_tables-mutations) 表中选择的。
|
||||
|
||||
测试查询 (`TEST`)仅检查用户的权限并显示要停止的查询列表。
|
||||
测试查询 (`TEST`)仅检查用户的权限并显示要停止的mutations列表。
|
||||
|
||||
例:
|
||||
|
||||
@ -237,9 +255,9 @@ KILL MUTATION WHERE database = 'default' AND table = 'table'
|
||||
KILL MUTATION WHERE database = 'default' AND table = 'table' AND mutation_id = 'mutation_3.txt'
|
||||
```
|
||||
|
||||
The query is useful when a mutation is stuck and cannot finish (e.g. if some function in the mutation query throws an exception when applied to the data contained in the table).
|
||||
当mutation卡住且无法完成时,该查询是有用的(例如,当mutation查询中的某些函数在应用于表中包含的数据时抛出异常)。
|
||||
|
||||
已经由突变所做的更改不会回滚。
|
||||
Mutation已经做出的更改不会回滚。
|
||||
|
||||
## OPTIMIZE {#misc_operations-optimize}
|
||||
|
||||
@ -247,19 +265,19 @@ The query is useful when a mutation is stuck and cannot finish (e.g. if some fu
|
||||
OPTIMIZE TABLE [db.]name [ON CLUSTER cluster] [PARTITION partition | PARTITION ID 'partition_id'] [FINAL] [DEDUPLICATE]
|
||||
```
|
||||
|
||||
此查询尝试使用来自表引擎的表初始化表的数据部分的非计划合并 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 家人
|
||||
此查询尝试为 [MergeTree](../../engines/table-engines/mergetree-family/mergetree.md) 家族表引擎的表初始化一次计划外的数据部分合并。
|
||||
|
||||
该 `OPTMIZE` 查询也支持 [MaterializedView](../../engines/table-engines/special/materializedview.md) 和 [缓冲区](../../engines/table-engines/special/buffer.md) 引擎 不支持其他表引擎。
|
||||
该 `OPTIMIZE` 查询也支持 [MaterializedView](../../engines/table-engines/special/materializedview.md) 和 [Buffer](../../engines/table-engines/special/buffer.md) 引擎。不支持其他表引擎。
|
||||
|
||||
当 `OPTIMIZE` 与使用 [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) 表引擎的家族,ClickHouse创建合并任务,并等待在所有节点上执行(如果 `replication_alter_partitions_sync` 设置已启用)。
|
||||
当 `OPTIMIZE` 与 [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication.md) 家族的表引擎一起使用时,ClickHouse将创建一个合并任务,并等待所有节点上的执行(如果 `replication_alter_partitions_sync` 设置已启用)。
|
||||
|
||||
- 如果 `OPTIMIZE` 出于任何原因没有执行合并,它不会通知客户端。要启用通知,请使用 [optimize_throw_if_noop](../../operations/settings/settings.md#setting-optimize_throw_if_noop) 设置。
|
||||
- 如果您指定 `PARTITION`,仅优化指定的分区。 [如何设置分区表达式](alter.md#alter-how-to-specify-part-expr).
|
||||
- 如果您指定 `FINAL`,即使所有数据已经在一个部分中,也会执行优化。
|
||||
- 如果您指定 `DEDUPLICATE`,然后完全相同的行将被重复数据删除(所有列进行比较),这仅适用于MergeTree引擎。
|
||||
- 如果您指定 `DEDUPLICATE`,则将对完全相同的行进行重复数据删除(所有列进行比较),这仅适用于MergeTree引擎。
|
||||
|
||||
!!! warning "警告"
|
||||
`OPTIMIZE` 无法修复 “Too many parts” 错误
|
||||
`OPTIMIZE` 无法修复 “Too many parts” 错误。
|
||||
|
||||
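一个示例草图(假设 `t` 是一个MergeTree表):

``` sql
OPTIMIZE TABLE t FINAL DEDUPLICATE; -- 即使数据已在单个部分中也执行合并,并删除完全重复的行
```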
## RENAME {#misc_operations-rename}
|
||||
|
||||
@ -270,6 +288,7 @@ RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ...
|
||||
```
|
||||
|
||||
所有表都在全局锁定下重命名。 重命名表是一个轻型操作。 如果您在TO之后指定了另一个数据库,则表将被移动到此数据库。 但是,包含数据库的目录必须位于同一文件系统中(否则,将返回错误)。
|
||||
如果您在一个查询中重命名多个表,这是一个非原子操作,它可能被部分执行,其他会话中的查询可能会接收错误 Table ... doesn't exist ...。
|
||||
|
||||
## SET {#query-set}
|
||||
|
||||
@ -277,9 +296,9 @@ RENAME TABLE [db11.]name11 TO [db12.]name12, [db21.]name21 TO [db22.]name22, ...
|
||||
SET param = value
|
||||
```
|
||||
|
||||
分配 `value` 到 `param` [设置](../../operations/settings/index.md) 对于当前会话。 你不能改变 [服务器设置](../../operations/server-configuration-parameters/index.md) 这边
|
||||
为当前会话的 [设置](../../operations/settings/index.md) `param` 分配值 `value`。 您不能以这种方式更改 [服务器设置](../../operations/server-configuration-parameters/index.md)。
|
||||
|
||||
您还可以在单个查询中设置指定设置配置文件中的所有值。
|
||||
您还可以在单个查询中从指定的设置配置文件中设置所有值。
|
||||
|
||||
``` sql
|
||||
SET profile = 'profile-name-from-the-settings-file'
|
||||
@ -291,8 +310,6 @@ SET profile = 'profile-name-from-the-settings-file'
|
||||
|
||||
激活当前用户的角色。
|
||||
|
||||
### 语法 {#set-role-syntax}
|
||||
|
||||
``` sql
|
||||
SET ROLE {DEFAULT | NONE | role [,...] | ALL | ALL EXCEPT role [,...]}
|
||||
```
|
||||
@ -301,15 +318,13 @@ SET ROLE {DEFAULT | NONE | role [,...] | ALL | ALL EXCEPT role [,...]}
|
||||
|
||||
将默认角色设置为用户。
|
||||
|
||||
默认角色在用户登录时自动激活。 您只能将以前授予的角色设置为默认值。 如果未向用户授予角色,ClickHouse将引发异常。
|
||||
|
||||
### 语法 {#set-default-role-syntax}
|
||||
默认角色在用户登录时自动激活。 您只能将以前授予的角色设置为默认值。 如果角色没有授予用户,ClickHouse会抛出异常。
|
||||
|
||||
``` sql
|
||||
SET DEFAULT ROLE {NONE | role [,...] | ALL | ALL EXCEPT role [,...]} TO {user|CURRENT_USER} [,...]
|
||||
```
|
||||
|
||||
### 例 {#set-default-role-examples}
|
||||
### 示例 {#set-default-role-examples}
|
||||
|
||||
为用户设置多个默认角色:
|
||||
|
||||
@ -317,19 +332,19 @@ SET DEFAULT ROLE {NONE | role [,...] | ALL | ALL EXCEPT role [,...]} TO {user|CU
|
||||
SET DEFAULT ROLE role1, role2, ... TO user
|
||||
```
|
||||
|
||||
将所有授予的角色设置为用户的默认值:
|
||||
将所有授予的角色设置为用户的默认角色:
|
||||
|
||||
``` sql
|
||||
SET DEFAULT ROLE ALL TO user
|
||||
```
|
||||
|
||||
从用户清除默认角色:
|
||||
清除用户的默认角色:
|
||||
|
||||
``` sql
|
||||
SET DEFAULT ROLE NONE TO user
|
||||
```
|
||||
|
||||
将所有授予的角色设置为默认角色,其中一些角色除外:
|
||||
将所有授予的角色设置为默认角色,但其中一些角色除外:
|
||||
|
||||
``` sql
|
||||
SET DEFAULT ROLE ALL EXCEPT role1, role2 TO user
|
||||
@ -341,9 +356,9 @@ SET DEFAULT ROLE ALL EXCEPT role1, role2 TO user
|
||||
TRUNCATE TABLE [IF EXISTS] [db.]name [ON CLUSTER cluster]
|
||||
```
|
||||
|
||||
从表中删除所有数据。 当条款 `IF EXISTS` 如果该表不存在,则查询返回错误。
|
||||
从表中删除所有数据。当省略 `IF EXISTS` 子句时,如果该表不存在,则查询返回错误。
|
||||
|
||||
该 `TRUNCATE` 查询不支持 [查看](../../engines/table-engines/special/view.md), [文件](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md) 和 [Null](../../engines/table-engines/special/null.md) 表引擎.
|
||||
该 `TRUNCATE` 查询不支持 [View](../../engines/table-engines/special/view.md), [File](../../engines/table-engines/special/file.md), [URL](../../engines/table-engines/special/url.md) 和 [Null](../../engines/table-engines/special/null.md) 表引擎.
|
||||
|
||||
## USE {#use}
|
||||
|
||||
|
@ -60,13 +60,13 @@ public:
|
||||
const String & user_, const String & password_, const String & stage,
|
||||
bool randomize_, size_t max_iterations_, double max_time_,
|
||||
const String & json_path_, size_t confidence_,
|
||||
const String & query_id_, bool continue_on_errors_,
|
||||
const String & query_id_, const String & query_to_execute_, bool continue_on_errors_,
|
||||
bool print_stacktrace_, const Settings & settings_)
|
||||
:
|
||||
concurrency(concurrency_), delay(delay_), queue(concurrency), randomize(randomize_),
|
||||
cumulative(cumulative_), max_iterations(max_iterations_), max_time(max_time_),
|
||||
json_path(json_path_), confidence(confidence_), query_id(query_id_),
|
||||
continue_on_errors(continue_on_errors_),
|
||||
query_to_execute(query_to_execute_), continue_on_errors(continue_on_errors_),
|
||||
print_stacktrace(print_stacktrace_), settings(settings_),
|
||||
shared_context(Context::createShared()), global_context(Context::createGlobal(shared_context.get())),
|
||||
pool(concurrency)
|
||||
@ -150,7 +150,8 @@ private:
|
||||
double max_time;
|
||||
String json_path;
|
||||
size_t confidence;
|
||||
std::string query_id;
|
||||
String query_id;
|
||||
String query_to_execute;
|
||||
bool continue_on_errors;
|
||||
bool print_stacktrace;
|
||||
const Settings & settings;
|
||||
@ -213,20 +214,28 @@ private:
|
||||
|
||||
void readQueries()
|
||||
{
|
||||
ReadBufferFromFileDescriptor in(STDIN_FILENO);
|
||||
|
||||
while (!in.eof())
|
||||
if (query_to_execute.empty())
|
||||
{
|
||||
std::string query;
|
||||
readText(query, in);
|
||||
assertChar('\n', in);
|
||||
ReadBufferFromFileDescriptor in(STDIN_FILENO);
|
||||
|
||||
if (!query.empty())
|
||||
queries.emplace_back(query);
|
||||
while (!in.eof())
|
||||
{
|
||||
String query;
|
||||
readText(query, in);
|
||||
assertChar('\n', in);
|
||||
|
||||
if (!query.empty())
|
||||
queries.emplace_back(std::move(query));
|
||||
}
|
||||
|
||||
if (queries.empty())
|
||||
throw Exception("Empty list of queries.", ErrorCodes::EMPTY_DATA_PASSED);
|
||||
}
|
||||
else
|
||||
{
|
||||
queries.emplace_back(query_to_execute);
|
||||
}
|
||||
|
||||
if (queries.empty())
|
||||
throw Exception("Empty list of queries.", ErrorCodes::EMPTY_DATA_PASSED);
|
||||
|
||||
std::cerr << "Loaded " << queries.size() << " queries.\n";
|
||||
}
|
||||
@ -559,6 +568,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
|
||||
boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth());
|
||||
desc.add_options()
|
||||
("help", "produce help message")
|
||||
("query", value<std::string>()->default_value(""), "query to execute")
|
||||
("concurrency,c", value<unsigned>()->default_value(1), "number of parallel queries")
|
||||
("delay,d", value<double>()->default_value(1), "delay between intermediate reports in seconds (set 0 to disable reports)")
|
||||
("stage", value<std::string>()->default_value("complete"), "request query processing up to specified stage: complete,fetch_columns,with_mergeable_state,with_mergeable_state_after_aggregation")
|
||||
@ -625,6 +635,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
|
||||
options["json"].as<std::string>(),
|
||||
options["confidence"].as<size_t>(),
|
||||
options["query_id"].as<std::string>(),
|
||||
options["query"].as<std::string>(),
|
||||
options.count("continue_on_errors") > 0,
|
||||
print_stacktrace,
|
||||
settings);
|
||||
|
@ -45,6 +45,7 @@
|
||||
#include <Core/Types.h>
|
||||
#include <Core/QueryProcessingStage.h>
|
||||
#include <Core/ExternalTable.h>
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/ReadBufferFromFileDescriptor.h>
|
||||
#include <IO/WriteBufferFromFileDescriptor.h>
|
||||
#include <IO/WriteBufferFromFile.h>
|
||||
@ -475,9 +476,16 @@ private:
|
||||
/// The value of the option is used as the text of query (or of multiple queries).
|
||||
/// If stdin is not a terminal, INSERT data for the first query is read from it.
|
||||
/// - stdin is not a terminal. In this case queries are read from it.
|
||||
if (!stdin_is_a_tty || config().has("query"))
|
||||
/// - -qf (--queries-file) command line option is present.
|
||||
/// The value of the option is used as file with query (or of multiple queries) to execute.
|
||||
if (!stdin_is_a_tty || config().has("query") || config().has("queries-file"))
|
||||
is_interactive = false;
|
||||
|
||||
if (config().has("query") && config().has("queries-file"))
|
||||
{
|
||||
throw Exception("Specify either `query` or `queries-file` option", ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
|
||||
std::cout << std::fixed << std::setprecision(3);
|
||||
std::cerr << std::fixed << std::setprecision(3);
|
||||
|
||||
@ -786,8 +794,15 @@ private:
|
||||
{
|
||||
String text;
|
||||
|
||||
if (config().has("query"))
|
||||
text = config().getRawString("query"); /// Poco configuration should not process substitutions in form of ${...} inside query.
|
||||
if (config().has("queries-file"))
|
||||
{
|
||||
ReadBufferFromFile in(config().getString("queries-file"));
|
||||
readStringUntilEOF(text, in);
|
||||
processMultiQuery(text);
|
||||
return;
|
||||
}
|
||||
else if (config().has("query"))
|
||||
text = config().getRawString("query"); /// Poco configuration should not process substitutions in form of ${...} inside query.
|
||||
else
|
||||
{
|
||||
/// If 'query' parameter is not set, read a query from stdin.
|
||||
@ -2320,6 +2335,7 @@ public:
|
||||
"Suggestion limit for how many databases, tables and columns to fetch.")
|
||||
("multiline,m", "multiline")
|
||||
("multiquery,n", "multiquery")
|
||||
("queries-file,qf", po::value<std::string>(), "file path with queries to execute")
|
||||
("format,f", po::value<std::string>(), "default output format")
|
||||
("testmode,T", "enable test hints in comments")
|
||||
("ignore-error", "do not stop processing in multiquery mode")
|
||||
@ -2448,6 +2464,8 @@ public:
|
||||
config().setString("query_id", options["query_id"].as<std::string>());
|
||||
if (options.count("query"))
|
||||
config().setString("query", options["query"].as<std::string>());
|
||||
if (options.count("queries-file"))
|
||||
config().setString("queries-file", options["queries-file"].as<std::string>());
|
||||
if (options.count("database"))
|
||||
config().setString("database", options["database"].as<std::string>());
|
||||
if (options.count("pager"))
|
||||
|
@ -20,9 +20,11 @@
|
||||
#include <Common/ThreadStatus.h>
|
||||
#include <Common/config_version.h>
|
||||
#include <Common/quoteString.h>
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <IO/WriteBufferFromFileDescriptor.h>
|
||||
#include <IO/UseSSL.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <Parsers/parseQuery.h>
|
||||
#include <Parsers/IAST.h>
|
||||
#include <common/ErrorHandlers.h>
|
||||
@ -195,7 +197,7 @@ try
|
||||
ThreadStatus thread_status;
|
||||
UseSSL use_ssl;
|
||||
|
||||
if (!config().has("query") && !config().has("table-structure")) /// Nothing to process
|
||||
if (!config().has("query") && !config().has("table-structure") && !config().has("queries-file")) /// Nothing to process
|
||||
{
|
||||
if (config().hasOption("verbose"))
|
||||
std::cerr << "There are no queries to process." << '\n';
|
||||
@ -203,6 +205,11 @@ try
|
||||
return Application::EXIT_OK;
|
||||
}
|
||||
|
||||
if (config().has("query") && config().has("queries-file"))
|
||||
{
|
||||
throw Exception("Specify either `query` or `queries-file` option", ErrorCodes::BAD_ARGUMENTS);
|
||||
}
|
||||
|
||||
shared_context = Context::createShared();
|
||||
global_context = std::make_unique<Context>(Context::createGlobal(shared_context.get()));
|
||||
global_context->makeGlobalContext();
|
||||
@ -340,7 +347,17 @@ std::string LocalServer::getInitialCreateTableQuery()
|
||||
void LocalServer::processQueries()
|
||||
{
|
||||
String initial_create_query = getInitialCreateTableQuery();
|
||||
String queries_str = initial_create_query + config().getRawString("query");
|
||||
String queries_str = initial_create_query;
|
||||
|
||||
if (config().has("query"))
|
||||
queries_str += config().getRawString("query");
|
||||
else
|
||||
{
|
||||
String queries_from_file;
|
||||
ReadBufferFromFile in(config().getString("queries-file"));
|
||||
readStringUntilEOF(queries_from_file, in);
|
||||
queries_str += queries_from_file;
|
||||
}
|
||||
|
||||
const auto & settings = global_context->getSettingsRef();
|
||||
|
||||
@ -505,6 +522,7 @@ void LocalServer::init(int argc, char ** argv)
|
||||
("help", "produce help message")
|
||||
("config-file,c", po::value<std::string>(), "config-file path")
|
||||
("query,q", po::value<std::string>(), "query")
|
||||
("queries-file, qf", po::value<std::string>(), "file path with queries to execute")
|
||||
("database,d", po::value<std::string>(), "database")
|
||||
|
||||
("table,N", po::value<std::string>(), "name of the initial table")
|
||||
@ -552,6 +570,8 @@ void LocalServer::init(int argc, char ** argv)
|
||||
config().setString("config-file", options["config-file"].as<std::string>());
|
||||
if (options.count("query"))
|
||||
config().setString("query", options["query"].as<std::string>());
|
||||
if (options.count("queries-file"))
|
||||
config().setString("queries-file", options["queries-file"].as<std::string>());
|
||||
if (options.count("database"))
|
||||
config().setString("default_database", options["database"].as<std::string>());
|
||||
|
||||
|
@ -57,6 +57,7 @@
|
||||
#include <Disks/registerDisks.h>
|
||||
#include <Common/Config/ConfigReloader.h>
|
||||
#include <Server/HTTPHandlerFactory.h>
|
||||
#include <Server/TestKeeperTCPHandlerFactory.h>
|
||||
#include "MetricsTransmitter.h"
|
||||
#include <Common/StatusFile.h>
|
||||
#include <Server/TCPHandlerFactory.h>
|
||||
@ -186,6 +187,85 @@ static std::string getUserName(uid_t user_id)
|
||||
return toString(user_id);
|
||||
}
|
||||
|
||||
Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log)
|
||||
{
|
||||
Poco::Net::SocketAddress socket_address;
|
||||
try
|
||||
{
|
||||
socket_address = Poco::Net::SocketAddress(host, port);
|
||||
}
|
||||
catch (const Poco::Net::DNSException & e)
|
||||
{
|
||||
const auto code = e.code();
|
||||
if (code == EAI_FAMILY
|
||||
#if defined(EAI_ADDRFAMILY)
|
||||
|| code == EAI_ADDRFAMILY
|
||||
#endif
|
||||
)
|
||||
{
|
||||
LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. "
|
||||
"If it is an IPv6 address and your host has disabled IPv6, then consider to "
|
||||
"specify IPv4 address to listen in <listen_host> element of configuration "
|
||||
"file. Example: <listen_host>0.0.0.0</listen_host>",
|
||||
host, e.code(), e.message());
|
||||
}
|
||||
|
||||
throw;
|
||||
}
|
||||
return socket_address;
|
||||
}
|
||||
|
||||
Poco::Net::SocketAddress Server::socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure) const
|
||||
{
|
||||
auto address = makeSocketAddress(host, port, &logger());
|
||||
#if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100
|
||||
if (secure)
|
||||
/// Bug in old (<1.9.1) poco, listen() after bind() with reusePort param will fail because have no implementation in SecureServerSocketImpl
|
||||
/// https://github.com/pocoproject/poco/pull/2257
|
||||
socket.bind(address, /* reuseAddress = */ true);
|
||||
else
|
||||
#endif
|
||||
#if POCO_VERSION < 0x01080000
|
||||
socket.bind(address, /* reuseAddress = */ true);
|
||||
#else
|
||||
socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config().getBool("listen_reuse_port", false));
|
||||
#endif
|
||||
|
||||
socket.listen(/* backlog = */ config().getUInt("listen_backlog", 64));
|
||||
|
||||
return address;
|
||||
}
|
||||
|
||||
void Server::createServer(const std::string & listen_host, const char * port_name, bool listen_try, CreateServerFunc && func) const
|
||||
{
|
||||
/// For testing purposes, user may omit tcp_port or http_port or https_port in configuration file.
|
||||
if (!config().has(port_name))
|
||||
return;
|
||||
|
||||
auto port = config().getInt(port_name);
|
||||
try
|
||||
{
|
||||
func(port);
|
||||
}
|
||||
catch (const Poco::Exception &)
|
||||
{
|
||||
std::string message = "Listen [" + listen_host + "]:" + std::to_string(port) + " failed: " + getCurrentExceptionMessage(false);
|
||||
|
||||
if (listen_try)
|
||||
{
|
||||
LOG_WARNING(&logger(), "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to "
|
||||
"specify not disabled IPv4 or IPv6 address to listen in <listen_host> element of configuration "
|
||||
"file. Example for disabled IPv6: <listen_host>0.0.0.0</listen_host> ."
|
||||
" Example for disabled IPv4: <listen_host>::</listen_host>",
|
||||
message);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw Exception{message, ErrorCodes::NETWORK_ERROR};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Server::uninitialize()
|
||||
{
|
||||
logger().information("shutting down");
|
||||
@ -399,27 +479,6 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
|
||||
StatusFile status{path + "status", StatusFile::write_full_info};
|
||||
|
||||
SCOPE_EXIT({
|
||||
/** Ask to cancel background jobs all table engines,
|
||||
* and also query_log.
|
||||
* It is important to do early, not in destructor of Context, because
|
||||
* table engines could use Context on destroy.
|
||||
*/
|
||||
LOG_INFO(log, "Shutting down storages.");
|
||||
|
||||
global_context->shutdown();
|
||||
|
||||
LOG_DEBUG(log, "Shut down storages.");
|
||||
|
||||
/** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
|
||||
* At this moment, no one could own shared part of Context.
|
||||
*/
|
||||
global_context_ptr = nullptr;
|
||||
global_context.reset();
|
||||
shared_context.reset();
|
||||
LOG_DEBUG(log, "Destroyed global context.");
|
||||
});
|
||||
|
||||
/// Try to increase limit on number of open files.
|
||||
{
|
||||
rlimit rlim;
|
||||
@ -675,6 +734,71 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
total_memory_tracker.setDescription("(total)");
|
||||
total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
|
||||
|
||||
Poco::Timespan keep_alive_timeout(config().getUInt("keep_alive_timeout", 10), 0);
|
||||
|
||||
Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024));
|
||||
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
|
||||
http_params->setTimeout(settings.http_receive_timeout);
|
||||
http_params->setKeepAliveTimeout(keep_alive_timeout);
|
||||
|
||||
std::vector<ProtocolServerAdapter> servers_to_start_before_tables;
|
||||
|
||||
std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");
|
||||
|
||||
bool listen_try = config().getBool("listen_try", false);
|
||||
if (listen_hosts.empty())
|
||||
{
|
||||
listen_hosts.emplace_back("::1");
|
||||
listen_hosts.emplace_back("127.0.0.1");
|
||||
listen_try = true;
|
||||
}
|
||||
|
||||
for (const auto & listen_host : listen_hosts)
|
||||
{
|
||||
/// TCP TestKeeper
|
||||
createServer(listen_host, "test_keeper_server.tcp_port", listen_try, [&](UInt16 port)
|
||||
{
|
||||
Poco::Net::ServerSocket socket;
|
||||
auto address = socketBindListen(socket, listen_host, port);
|
||||
socket.setReceiveTimeout(settings.receive_timeout);
|
||||
socket.setSendTimeout(settings.send_timeout);
|
||||
servers_to_start_before_tables.emplace_back(std::make_unique<Poco::Net::TCPServer>(
|
||||
new TestKeeperTCPHandlerFactory(*this),
|
||||
server_pool,
|
||||
socket,
|
||||
new Poco::Net::TCPServerParams));
|
||||
|
||||
LOG_INFO(log, "Listening for connections to fake zookeeper (tcp): {}", address.toString());
|
||||
});
|
||||
}
|
||||
|
||||
for (auto & server : servers_to_start_before_tables)
|
||||
server.start();
|
||||
|
||||
SCOPE_EXIT({
|
||||
/** Ask to cancel background jobs all table engines,
|
||||
* and also query_log.
|
||||
* It is important to do early, not in destructor of Context, because
|
||||
* table engines could use Context on destroy.
|
||||
*/
|
||||
LOG_INFO(log, "Shutting down storages.");
|
||||
|
||||
global_context->shutdown();
|
||||
|
||||
LOG_DEBUG(log, "Shut down storages.");
|
||||
|
||||
for (auto & server : servers_to_start_before_tables)
|
||||
server.stop();
|
||||
|
||||
/** Explicitly destroy Context. It is more convenient than in destructor of Server, because logger is still available.
|
||||
* At this moment, no one could own shared part of Context.
|
||||
*/
|
||||
global_context_ptr = nullptr;
|
||||
global_context.reset();
|
||||
shared_context.reset();
|
||||
LOG_DEBUG(log, "Destroyed global context.");
|
||||
});
|
||||
|
||||
/// Set current database name before loading tables and databases because
|
||||
/// system logs may copy global context.
|
||||
global_context->setCurrentDatabaseNameInGlobalContext(default_database);
|
||||
@ -804,75 +928,8 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
LOG_INFO(log, "TaskStats is not implemented for this OS. IO accounting will be disabled.");
|
||||
#endif
|
||||
|
||||
std::vector<ProtocolServerAdapter> servers;
|
||||
{
|
||||
Poco::Timespan keep_alive_timeout(config().getUInt("keep_alive_timeout", 10), 0);
|
||||
|
||||
Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024));
|
||||
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
|
||||
http_params->setTimeout(settings.http_receive_timeout);
|
||||
http_params->setKeepAliveTimeout(keep_alive_timeout);
|
||||
|
||||
std::vector<ProtocolServerAdapter> servers;
|
||||
|
||||
std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");
|
||||
|
||||
bool listen_try = config().getBool("listen_try", false);
|
||||
if (listen_hosts.empty())
|
||||
{
|
||||
listen_hosts.emplace_back("::1");
|
||||
listen_hosts.emplace_back("127.0.0.1");
|
||||
listen_try = true;
|
||||
}
|
||||
|
||||
auto make_socket_address = [&](const std::string & host, UInt16 port)
|
||||
{
|
||||
Poco::Net::SocketAddress socket_address;
|
||||
try
|
||||
{
|
||||
socket_address = Poco::Net::SocketAddress(host, port);
|
||||
}
|
||||
catch (const Poco::Net::DNSException & e)
|
||||
{
|
||||
const auto code = e.code();
|
||||
if (code == EAI_FAMILY
|
||||
#if defined(EAI_ADDRFAMILY)
|
||||
|| code == EAI_ADDRFAMILY
|
||||
#endif
|
||||
)
|
||||
{
|
||||
LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. "
|
||||
"If it is an IPv6 address and your host has disabled IPv6, then consider to "
|
||||
"specify IPv4 address to listen in <listen_host> element of configuration "
|
||||
"file. Example: <listen_host>0.0.0.0</listen_host>",
|
||||
host, e.code(), e.message());
|
||||
}
|
||||
|
||||
throw;
|
||||
}
|
||||
return socket_address;
|
||||
};
|
||||
|
||||
auto socket_bind_listen = [&](auto & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false)
|
||||
{
|
||||
auto address = make_socket_address(host, port);
|
||||
#if !defined(POCO_CLICKHOUSE_PATCH) || POCO_VERSION < 0x01090100
|
||||
if (secure)
|
||||
/// Bug in old (<1.9.1) poco, listen() after bind() with reusePort param will fail because have no implementation in SecureServerSocketImpl
|
||||
/// https://github.com/pocoproject/poco/pull/2257
|
||||
socket.bind(address, /* reuseAddress = */ true);
|
||||
else
|
||||
#endif
|
||||
#if POCO_VERSION < 0x01080000
|
||||
socket.bind(address, /* reuseAddress = */ true);
|
||||
#else
|
||||
socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ config().getBool("listen_reuse_port", false));
|
||||
#endif
|
||||
|
||||
socket.listen(/* backlog = */ config().getUInt("listen_backlog", 64));
|
||||
|
||||
return address;
|
||||
};
|
||||
|
||||
/// This object will periodically calculate some metrics.
|
||||
AsynchronousMetrics async_metrics(*global_context,
|
||||
config().getUInt("asynchronous_metrics_update_period_s", 60));
|
||||
@ -880,41 +937,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
|
||||
for (const auto & listen_host : listen_hosts)
|
||||
{
|
||||
auto create_server = [&](const char * port_name, auto && func)
|
||||
{
|
||||
/// For testing purposes, user may omit tcp_port or http_port or https_port in configuration file.
|
||||
if (!config().has(port_name))
|
||||
return;
|
||||
|
||||
auto port = config().getInt(port_name);
|
||||
try
|
||||
{
|
||||
func(port);
|
||||
}
|
||||
catch (const Poco::Exception &)
|
||||
{
|
||||
std::string message = "Listen [" + listen_host + "]:" + std::to_string(port) + " failed: " + getCurrentExceptionMessage(false);
|
||||
|
||||
if (listen_try)
|
||||
{
|
||||
LOG_WARNING(log, "{}. If it is an IPv6 or IPv4 address and your host has disabled IPv6 or IPv4, then consider to "
|
||||
"specify not disabled IPv4 or IPv6 address to listen in <listen_host> element of configuration "
|
||||
"file. Example for disabled IPv6: <listen_host>0.0.0.0</listen_host> ."
|
||||
" Example for disabled IPv4: <listen_host>::</listen_host>",
|
||||
message);
|
||||
}
|
||||
else
|
||||
{
|
||||
throw Exception{message, ErrorCodes::NETWORK_ERROR};
|
||||
}
|
||||
}
|
||||
};
/// HTTP
create_server("http_port", [&](UInt16 port)
createServer(listen_host, "http_port", listen_try, [&](UInt16 port)
{
Poco::Net::ServerSocket socket;
auto address = socket_bind_listen(socket, listen_host, port);
auto address = socketBindListen(socket, listen_host, port);
socket.setReceiveTimeout(settings.http_receive_timeout);
socket.setSendTimeout(settings.http_send_timeout);

@ -925,11 +952,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
});

/// HTTPS
create_server("https_port", [&](UInt16 port)
createServer(listen_host, "https_port", listen_try, [&](UInt16 port)
{
#if USE_SSL
Poco::Net::SecureServerSocket socket;
auto address = socket_bind_listen(socket, listen_host, port, /* secure = */ true);
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
socket.setReceiveTimeout(settings.http_receive_timeout);
socket.setSendTimeout(settings.http_send_timeout);
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
@ -944,14 +971,14 @@ int Server::main(const std::vector<std::string> & /*args*/)
});

/// TCP
create_server("tcp_port", [&](UInt16 port)
createServer(listen_host, "tcp_port", listen_try, [&](UInt16 port)
{
Poco::Net::ServerSocket socket;
auto address = socket_bind_listen(socket, listen_host, port);
auto address = socketBindListen(socket, listen_host, port);
socket.setReceiveTimeout(settings.receive_timeout);
socket.setSendTimeout(settings.send_timeout);
servers.emplace_back(std::make_unique<Poco::Net::TCPServer>(
new TCPHandlerFactory(*this),
new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ false),
server_pool,
socket,
new Poco::Net::TCPServerParams));
@ -959,16 +986,32 @@ int Server::main(const std::vector<std::string> & /*args*/)
LOG_INFO(log, "Listening for connections with native protocol (tcp): {}", address.toString());
});

/// TCP with SSL
create_server("tcp_port_secure", [&](UInt16 port)
/// TCP with PROXY protocol, see https://github.com/wolfeidau/proxyv2/blob/master/docs/proxy-protocol.txt
createServer(listen_host, "tcp_with_proxy_port", listen_try, [&](UInt16 port)
{
#if USE_SSL
Poco::Net::SecureServerSocket socket;
auto address = socket_bind_listen(socket, listen_host, port, /* secure = */ true);
Poco::Net::ServerSocket socket;
auto address = socketBindListen(socket, listen_host, port);
socket.setReceiveTimeout(settings.receive_timeout);
socket.setSendTimeout(settings.send_timeout);
servers.emplace_back(std::make_unique<Poco::Net::TCPServer>(
new TCPHandlerFactory(*this, /* secure= */ true),
new TCPHandlerFactory(*this, /* secure */ false, /* proxy protocol */ true),
server_pool,
socket,
new Poco::Net::TCPServerParams));

LOG_INFO(log, "Listening for connections with native protocol (tcp) with PROXY: {}", address.toString());
});
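For reference, PROXY protocol v1 prefixes each connection with a single human-readable line before any native-protocol bytes, so the real client address survives an intermediate load balancer. A hedged sketch of such a header and of extracting the source address (illustrative; the actual handling lives inside TCPHandlerFactory/TCPHandler, which this hunk does not show):

// Example header sent by a proxy before the ClickHouse native handshake:
//   PROXY TCP4 203.0.113.7 10.0.0.1 54321 9010\r\n
#include <sstream>
#include <string>

std::string parseProxySourceAddress(const std::string & header_line)
{
    std::istringstream in(header_line); // "PROXY <TCP4|TCP6> <src> <dst> <sport> <dport>"
    std::string proxy, family, src;
    in >> proxy >> family >> src;
    return (proxy == "PROXY") ? src : std::string{};
}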
/// TCP with SSL
createServer(listen_host, "tcp_port_secure", listen_try, [&](UInt16 port)
{
#if USE_SSL
Poco::Net::SecureServerSocket socket;
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
socket.setReceiveTimeout(settings.receive_timeout);
socket.setSendTimeout(settings.send_timeout);
servers.emplace_back(std::make_unique<Poco::Net::TCPServer>(
new TCPHandlerFactory(*this, /* secure */ true, /* proxy protocol */ false),
server_pool,
socket,
new Poco::Net::TCPServerParams));
@ -981,10 +1024,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
});

/// Interserver IO HTTP
create_server("interserver_http_port", [&](UInt16 port)
createServer(listen_host, "interserver_http_port", listen_try, [&](UInt16 port)
{
Poco::Net::ServerSocket socket;
auto address = socket_bind_listen(socket, listen_host, port);
auto address = socketBindListen(socket, listen_host, port);
socket.setReceiveTimeout(settings.http_receive_timeout);
socket.setSendTimeout(settings.http_send_timeout);
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
@ -993,11 +1036,11 @@ int Server::main(const std::vector<std::string> & /*args*/)
LOG_INFO(log, "Listening for replica communication (interserver): http://{}", address.toString());
});

create_server("interserver_https_port", [&](UInt16 port)
createServer(listen_host, "interserver_https_port", listen_try, [&](UInt16 port)
{
#if USE_SSL
Poco::Net::SecureServerSocket socket;
auto address = socket_bind_listen(socket, listen_host, port, /* secure = */ true);
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
socket.setReceiveTimeout(settings.http_receive_timeout);
socket.setSendTimeout(settings.http_send_timeout);
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
@ -1011,10 +1054,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
#endif
});

create_server("mysql_port", [&](UInt16 port)
createServer(listen_host, "mysql_port", listen_try, [&](UInt16 port)
{
Poco::Net::ServerSocket socket;
auto address = socket_bind_listen(socket, listen_host, port, /* secure = */ true);
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
socket.setReceiveTimeout(Poco::Timespan());
socket.setSendTimeout(settings.send_timeout);
servers.emplace_back(std::make_unique<Poco::Net::TCPServer>(
@ -1026,10 +1069,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
LOG_INFO(log, "Listening for MySQL compatibility protocol: {}", address.toString());
});

create_server("postgresql_port", [&](UInt16 port)
createServer(listen_host, "postgresql_port", listen_try, [&](UInt16 port)
{
Poco::Net::ServerSocket socket;
auto address = socket_bind_listen(socket, listen_host, port, /* secure = */ true);
auto address = socketBindListen(socket, listen_host, port, /* secure = */ true);
socket.setReceiveTimeout(Poco::Timespan());
socket.setSendTimeout(settings.send_timeout);
servers.emplace_back(std::make_unique<Poco::Net::TCPServer>(
@ -1042,19 +1085,19 @@ int Server::main(const std::vector<std::string> & /*args*/)
});

#if USE_GRPC
create_server("grpc_port", [&](UInt16 port)
createServer(listen_host, "grpc_port", listen_try, [&](UInt16 port)
{
Poco::Net::SocketAddress server_address(listen_host, port);
servers.emplace_back(std::make_unique<GRPCServer>(*this, make_socket_address(listen_host, port)));
servers.emplace_back(std::make_unique<GRPCServer>(*this, makeSocketAddress(listen_host, port, log)));
LOG_INFO(log, "Listening for gRPC protocol: " + server_address.toString());
});
#endif

/// Prometheus (if defined and not setup yet with http_port)
create_server("prometheus.port", [&](UInt16 port)
createServer(listen_host, "prometheus.port", listen_try, [&](UInt16 port)
{
Poco::Net::ServerSocket socket;
auto address = socket_bind_listen(socket, listen_host, port);
auto address = socketBindListen(socket, listen_host, port);
socket.setReceiveTimeout(settings.http_receive_timeout);
socket.setSendTimeout(settings.http_send_timeout);
servers.emplace_back(std::make_unique<Poco::Net::HTTPServer>(
@ -1078,6 +1121,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
int level = level_str.empty() ? INT_MAX : Poco::Logger::parseLevel(level_str);
setTextLog(global_context->getTextLog(), level);
}

buildLoggers(config(), logger());

main_config_reloader->start();
@ -1124,7 +1168,10 @@ int Server::main(const std::vector<std::string> & /*args*/)
{
current_connections = 0;
for (auto & server : servers)
{
server.stop();
current_connections += server.currentConnections();
}
if (!current_connections)
break;
sleep_current_ms += sleep_one_ms;

@ -14,6 +14,13 @@
* 3. Interserver HTTP - for replication.
*/

namespace Poco
{
namespace Net
{
class ServerSocket;
}
}

namespace DB
{
@ -57,6 +64,13 @@ protected:

private:
Context * global_context_ptr = nullptr;

private:
Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false) const;

using CreateServerFunc = std::function<void(UInt16)>;
void createServer(const std::string & listen_host, const char * port_name, bool listen_try, CreateServerFunc && func) const;
};

}
programs/server/config.d/tcp_with_proxy.xml (Symbolic link)
@ -0,0 +1 @@
../../../tests/config/config.d/tcp_with_proxy.xml

@ -64,11 +64,18 @@
<http_port>8123</http_port>
<tcp_port>9000</tcp_port>
<mysql_port>9004</mysql_port>

<!-- For HTTPS and SSL over native protocol. -->
<!--
<https_port>8443</https_port>
<tcp_port_secure>9440</tcp_port_secure>
-->

<!-- TCP with PROXY protocol (PROXY header sent for every connection) -->
<!--
<tcp_with_proxy_port>9010</tcp_with_proxy_port>
-->

<!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
<openSSL>
<server> <!-- Used for https server AND secure tcp port -->
@ -137,29 +144,32 @@
<!-- gRPC protocol (see src/Server/grpc_protos/clickhouse_grpc.proto for the API) -->
<!-- <grpc_port>9100</grpc_port> -->
<grpc>
<enable_ssl>true</enable_ssl>
<enable_ssl>false</enable_ssl>

<!-- The following two files are used only if enable_ssl=1
<!-- The following two files are used only if enable_ssl=1 -->
<ssl_cert_file>/path/to/ssl_cert_file</ssl_cert_file>
<ssl_key_file>/path/to/ssl_key_file</ssl_key_file> -->
<ssl_key_file>/path/to/ssl_key_file</ssl_key_file>

<!-- Whether server will request client for a certificate
<ssl_require_client_auth>true</ssl_require_client_auth> -->
<!-- Whether server will request client for a certificate -->
<ssl_require_client_auth>false</ssl_require_client_auth>

<!-- The following file is used only if ssl_require_client_auth=1
<ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file> -->
<!-- The following file is used only if ssl_require_client_auth=1 -->
<ssl_ca_cert_file>/path/to/ssl_ca_cert_file</ssl_ca_cert_file>

<!-- Default compression algorithm (applied if client doesn't specify another algorithm).
Supported algorithms: none, deflate, gzip, stream_gzip
<compression>gzip</compression> -->
Supported algorithms: none, deflate, gzip, stream_gzip -->
<compression>deflate</compression>

<!-- Default compression level (applied if client doesn't specify another level).
Supported levels: none, low, medium, high
<compression_level>high</compression_level> -->
Supported levels: none, low, medium, high -->
<compression_level>medium</compression_level>

<!-- Send/receive message size limits in bytes. -1 means unlimited
<!-- Send/receive message size limits in bytes. -1 means unlimited -->
<max_send_message_size>-1</max_send_message_size>
<max_receive_message_size>4194304</max_receive_message_size> -->
<max_receive_message_size>-1</max_receive_message_size>

<!-- Enable if you want very detailed logs -->
<verbose_logs>false</verbose_logs>
</grpc>

<!-- Maximum number of concurrent queries. -->
@ -15,6 +15,8 @@
#include <common/find_symbols.h>
#include <Poco/ExpireCache.h>
#include <boost/algorithm/string/join.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <filesystem>
#include <mutex>

@ -419,6 +421,18 @@ std::shared_ptr<const ContextAccess> AccessControlManager::getContextAccess(
params.http_method = client_info.http_method;
params.address = client_info.current_address.host();
params.quota_key = client_info.quota_key;

/// Extract the last entry from comma separated list of X-Forwarded-For addresses.
/// Only the last proxy can be trusted (if any).
Strings forwarded_addresses;
boost::split(forwarded_addresses, client_info.forwarded_for, boost::is_any_of(","));
if (!forwarded_addresses.empty())
{
String & last_forwarded_address = forwarded_addresses.back();
boost::trim(last_forwarded_address);
params.forwarded_address = last_forwarded_address;
}

return getContextAccess(params);
}
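Worked example of the extraction above: for a header X-Forwarded-For: 203.0.113.7, 10.0.0.5, 10.0.0.6, only the final entry is kept, since only the last proxy in the chain is under our control. A self-contained illustration with hypothetical values (standard boost string algorithms, mirroring the hunk):

#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <string>
#include <vector>

std::string lastForwardedAddress(const std::string & forwarded_for)
{
    std::vector<std::string> entries;
    boost::split(entries, forwarded_for, boost::is_any_of(","));
    if (entries.empty())
        return {};
    boost::trim(entries.back()); // " 10.0.0.6" -> "10.0.0.6"
    return entries.back();
}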
@ -444,9 +458,14 @@ std::shared_ptr<const EnabledRowPolicies> AccessControlManager::getEnabledRowPol

std::shared_ptr<const EnabledQuota> AccessControlManager::getEnabledQuota(
const UUID & user_id, const String & user_name, const boost::container::flat_set<UUID> & enabled_roles, const Poco::Net::IPAddress & address, const String & custom_quota_key) const
const UUID & user_id,
const String & user_name,
const boost::container::flat_set<UUID> & enabled_roles,
const Poco::Net::IPAddress & address,
const String & forwarded_address,
const String & custom_quota_key) const
{
return quota_cache->getEnabledQuota(user_id, user_name, enabled_roles, address, custom_quota_key);
return quota_cache->getEnabledQuota(user_id, user_name, enabled_roles, address, forwarded_address, custom_quota_key);
}

@ -135,6 +135,7 @@ public:
const String & user_name,
const boost::container::flat_set<UUID> & enabled_roles,
const Poco::Net::IPAddress & address,
const String & forwarded_address,
const String & custom_quota_key) const;

std::vector<QuotaUsage> getAllQuotasUsage() const;

@ -127,6 +127,7 @@ enum class AccessType
M(SYSTEM_DROP_COMPILED_EXPRESSION_CACHE, "SYSTEM DROP COMPILED EXPRESSION, DROP COMPILED EXPRESSION CACHE, DROP COMPILED EXPRESSIONS", GLOBAL, SYSTEM_DROP_CACHE) \
M(SYSTEM_DROP_CACHE, "DROP CACHE", GROUP, SYSTEM) \
M(SYSTEM_RELOAD_CONFIG, "RELOAD CONFIG", GLOBAL, SYSTEM_RELOAD) \
M(SYSTEM_RELOAD_SYMBOLS, "RELOAD SYMBOLS", GLOBAL, SYSTEM_RELOAD) \
M(SYSTEM_RELOAD_DICTIONARY, "SYSTEM RELOAD DICTIONARIES, RELOAD DICTIONARY, RELOAD DICTIONARIES", GLOBAL, SYSTEM_RELOAD) \
M(SYSTEM_RELOAD_EMBEDDED_DICTIONARIES, "RELOAD EMBEDDED DICTIONARIES", GLOBAL, SYSTEM_RELOAD) /* implicitly enabled by the grant SYSTEM_RELOAD_DICTIONARY ON *.* */\
M(SYSTEM_RELOAD, "", GROUP, SYSTEM) \

@ -258,9 +258,12 @@ void ContextAccess::setRolesInfo(const std::shared_ptr<const EnabledRolesInfo> &
{
assert(roles_info_);
roles_info = roles_info_;
enabled_row_policies = manager->getEnabledRowPolicies(*params.user_id, roles_info->enabled_roles);
enabled_quota = manager->getEnabledQuota(*params.user_id, user_name, roles_info->enabled_roles, params.address, params.quota_key);
enabled_settings = manager->getEnabledSettings(*params.user_id, user->settings, roles_info->enabled_roles, roles_info->settings_from_enabled_roles);
enabled_row_policies = manager->getEnabledRowPolicies(
*params.user_id, roles_info->enabled_roles);
enabled_quota = manager->getEnabledQuota(
*params.user_id, user_name, roles_info->enabled_roles, params.address, params.forwarded_address, params.quota_key);
enabled_settings = manager->getEnabledSettings(
*params.user_id, user->settings, roles_info->enabled_roles, roles_info->settings_from_enabled_roles);
calculateAccessRights();
}

@ -41,9 +41,16 @@ struct ContextAccessParams
ClientInfo::Interface interface = ClientInfo::Interface::TCP;
ClientInfo::HTTPMethod http_method = ClientInfo::HTTPMethod::UNKNOWN;
Poco::Net::IPAddress address;
String forwarded_address;
String quota_key;

auto toTuple() const { return std::tie(user_id, current_roles, use_default_roles, readonly, allow_ddl, allow_introspection, current_database, interface, http_method, address, quota_key); }
auto toTuple() const
{
return std::tie(
user_id, current_roles, use_default_roles, readonly, allow_ddl, allow_introspection,
current_database, interface, http_method, address, forwarded_address, quota_key);
}

friend bool operator ==(const ContextAccessParams & lhs, const ContextAccessParams & rhs) { return lhs.toTuple() == rhs.toTuple(); }
friend bool operator !=(const ContextAccessParams & lhs, const ContextAccessParams & rhs) { return !(lhs == rhs); }
friend bool operator <(const ContextAccessParams & lhs, const ContextAccessParams & rhs) { return lhs.toTuple() < rhs.toTuple(); }

@ -25,9 +25,10 @@ public:
String user_name;
boost::container::flat_set<UUID> enabled_roles;
Poco::Net::IPAddress client_address;
String forwarded_address;
String client_key;

auto toTuple() const { return std::tie(user_id, enabled_roles, user_name, client_address, client_key); }
auto toTuple() const { return std::tie(user_id, enabled_roles, user_name, client_address, forwarded_address, client_key); }
friend bool operator ==(const Params & lhs, const Params & rhs) { return lhs.toTuple() == rhs.toTuple(); }
friend bool operator !=(const Params & lhs, const Params & rhs) { return !(lhs == rhs); }
friend bool operator <(const Params & lhs, const Params & rhs) { return lhs.toTuple() < rhs.toTuple(); }

@ -76,6 +76,7 @@ struct Quota : public IAccessEntity
NONE, /// All users share the same quota.
USER_NAME, /// Connections with the same user name share the same quota.
IP_ADDRESS, /// Connections from the same IP share the same quota.
FORWARDED_IP_ADDRESS, /// Use X-Forwarded-For HTTP header instead of IP address.
CLIENT_KEY, /// Client should explicitly supply a key to use.
CLIENT_KEY_OR_USER_NAME, /// Same as CLIENT_KEY, but use USER_NAME if the client doesn't supply a key.
CLIENT_KEY_OR_IP_ADDRESS, /// Same as CLIENT_KEY, but use IP_ADDRESS if the client doesn't supply a key.
@ -205,12 +206,16 @@ inline const Quota::KeyTypeInfo & Quota::KeyTypeInfo::get(KeyType type)
if (tokens.size() > 1)
{
for (const auto & token : tokens)
{
for (auto kt : ext::range(KeyType::MAX))
{
if (KeyTypeInfo::get(kt).name == token)
{
init_base_types.push_back(kt);
break;
}
}
}
}
return KeyTypeInfo{raw_name_, std::move(init_name), std::move(init_base_types)};
};
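A note on composite key types handled by the loop above: make_info presumably lowercases the raw enum name and splits it into tokens on "_or_" before this point (the split itself is outside the hunk), so a composite entry decomposes into its fallback chain:

// Illustration (assumed tokenization, mirroring the loop above):
// raw_name_       = "CLIENT_KEY_OR_IP_ADDRESS"
// tokens          = {"client_key", "ip_address"}
// init_base_types = {KeyType::CLIENT_KEY, KeyType::IP_ADDRESS}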
@ -232,6 +237,11 @@ inline const Quota::KeyTypeInfo & Quota::KeyTypeInfo::get(KeyType type)
static const auto info = make_info("IP_ADDRESS");
return info;
}
case KeyType::FORWARDED_IP_ADDRESS:
{
static const auto info = make_info("FORWARDED_IP_ADDRESS");
return info;
}
case KeyType::CLIENT_KEY:
{
static const auto info = make_info("CLIENT_KEY");

@ -48,11 +48,21 @@ String QuotaCache::QuotaInfo::calculateKey(const EnabledQuota & enabled) const
switch (quota->key_type)
{
case KeyType::NONE:
{
return "";
}
case KeyType::USER_NAME:
{
return params.user_name;
}
case KeyType::IP_ADDRESS:
{
return params.client_address.toString();
}
case KeyType::FORWARDED_IP_ADDRESS:
{
return params.forwarded_address;
}
case KeyType::CLIENT_KEY:
{
if (!params.client_key.empty())
@ -170,7 +180,7 @@ QuotaCache::QuotaCache(const AccessControlManager & access_control_manager_)
QuotaCache::~QuotaCache() = default;

std::shared_ptr<const EnabledQuota> QuotaCache::getEnabledQuota(const UUID & user_id, const String & user_name, const boost::container::flat_set<UUID> & enabled_roles, const Poco::Net::IPAddress & client_address, const String & client_key)
std::shared_ptr<const EnabledQuota> QuotaCache::getEnabledQuota(const UUID & user_id, const String & user_name, const boost::container::flat_set<UUID> & enabled_roles, const Poco::Net::IPAddress & client_address, const String & forwarded_address, const String & client_key)
{
std::lock_guard lock{mutex};
ensureAllQuotasRead();
@ -180,6 +190,7 @@ std::shared_ptr<const EnabledQuota> QuotaCache::getEnabledQuota(const UUID & use
params.user_name = user_name;
params.enabled_roles = enabled_roles;
params.client_address = client_address;
params.forwarded_address = forwarded_address;
params.client_key = client_key;
auto it = enabled_quotas.find(params);
if (it != enabled_quotas.end())

@ -20,7 +20,14 @@ public:
QuotaCache(const AccessControlManager & access_control_manager_);
~QuotaCache();

std::shared_ptr<const EnabledQuota> getEnabledQuota(const UUID & user_id, const String & user_name, const boost::container::flat_set<UUID> & enabled_roles, const Poco::Net::IPAddress & address, const String & client_key);
std::shared_ptr<const EnabledQuota> getEnabledQuota(
const UUID & user_id,
const String & user_name,
const boost::container::flat_set<UUID> & enabled_roles,
const Poco::Net::IPAddress & address,
const String & forwarded_address,
const String & client_key);

std::vector<QuotaUsage> getAllQuotasUsage() const;

private:

@ -215,6 +215,8 @@ namespace
String quota_config = "quotas." + quota_name;
if (config.has(quota_config + ".keyed_by_ip"))
quota->key_type = KeyType::IP_ADDRESS;
else if (config.has(quota_config + ".keyed_by_forwarded_ip"))
quota->key_type = KeyType::FORWARDED_IP_ADDRESS;
else if (config.has(quota_config + ".keyed"))
quota->key_type = KeyType::CLIENT_KEY_OR_USER_NAME;
else

@ -4,6 +4,7 @@
#include <AggregateFunctions/FactoryHelpers.h>
#include <DataTypes/DataTypeDate.h>
#include <DataTypes/DataTypeDateTime.h>
#include <DataTypes/DataTypeDateTime64.h>
#include "registerAggregateFunctions.h"

@ -100,6 +100,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile(const std::string & name, c
if (which.idx == TypeIndex::Decimal32) return std::make_shared<Function<Decimal32, false>>(argument_types, params);
if (which.idx == TypeIndex::Decimal64) return std::make_shared<Function<Decimal64, false>>(argument_types, params);
if (which.idx == TypeIndex::Decimal128) return std::make_shared<Function<Decimal128, false>>(argument_types, params);
if (which.idx == TypeIndex::DateTime64) return std::make_shared<Function<DateTime64, false>>(argument_types, params);
//if (which.idx == TypeIndex::Decimal256) return std::make_shared<Function<Decimal256, false>>(argument_types, params);
}

@ -237,6 +237,8 @@ template <typename T, typename TResult, typename Data, AggregateFunctionSumType
class AggregateFunctionSum final : public IAggregateFunctionDataHelper<Data, AggregateFunctionSum<T, TResult, Data, Type>>
{
public:
static constexpr bool DateTime64Supported = false;

using ResultDataType = std::conditional_t<IsDecimalNumber<T>, DataTypeDecimal<TResult>, DataTypeNumber<TResult>>;
using ColVecType = std::conditional_t<IsDecimalNumber<T>, ColumnDecimal<T>, ColumnVector<T>>;
using ColVecResult = std::conditional_t<IsDecimalNumber<T>, ColumnDecimal<TResult>, ColumnVector<TResult>>;

@ -135,6 +135,8 @@ static IAggregateFunction * createWithDecimalType(const IDataType & argument_typ
if (which.idx == TypeIndex::Decimal64) return new AggregateFunctionTemplate<Decimal64>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Decimal128) return new AggregateFunctionTemplate<Decimal128>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Decimal256) return new AggregateFunctionTemplate<Decimal256>(std::forward<TArgs>(args)...);
if constexpr (AggregateFunctionTemplate<DateTime64>::DateTime64Supported)
if (which.idx == TypeIndex::DateTime64) return new AggregateFunctionTemplate<DateTime64>(std::forward<TArgs>(args)...);
return nullptr;
}

@ -146,6 +148,8 @@ static IAggregateFunction * createWithDecimalType(const IDataType & argument_typ
if (which.idx == TypeIndex::Decimal64) return new AggregateFunctionTemplate<Decimal64, Data>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Decimal128) return new AggregateFunctionTemplate<Decimal128, Data>(std::forward<TArgs>(args)...);
if (which.idx == TypeIndex::Decimal256) return new AggregateFunctionTemplate<Decimal256, Data>(std::forward<TArgs>(args)...);
if constexpr (AggregateFunctionTemplate<DateTime64, Data>::DateTime64Supported)
if (which.idx == TypeIndex::DateTime64) return new AggregateFunctionTemplate<DateTime64, Data>(std::forward<TArgs>(args)...);
return nullptr;
}

@ -310,6 +310,9 @@ protected:
static const Data & data(ConstAggregateDataPtr place) { return *reinterpret_cast<const Data*>(place); }

public:
// Derived class can `override` this to flag that DateTime64 is not supported.
static constexpr bool DateTime64Supported = true;

IAggregateFunctionDataHelper(const DataTypes & argument_types_, const Array & parameters_)
: IAggregateFunctionHelper<Derived>(argument_types_, parameters_) {}
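The opt-out mechanism above is a plain compile-time trait: the base helper publishes DateTime64Supported = true, a derived aggregate shadows it with false, and `if constexpr` then skips the DateTime64 instantiation entirely. A minimal self-contained sketch of the pattern (hypothetical names, not ClickHouse code):

#include <iostream>

struct HelperBase { static constexpr bool DateTime64Supported = true; };
struct SumLike : HelperBase { static constexpr bool DateTime64Supported = false; }; // opts out
struct QuantileLike : HelperBase {};                                                // inherits true

template <typename F>
void tryInstantiate()
{
    if constexpr (F::DateTime64Supported)
        std::cout << "DateTime64 specialization created\n";
    else
        std::cout << "DateTime64 skipped at compile time\n";
}

int main()
{
    tryInstantiate<SumLike>();      // skipped
    tryInstantiate<QuantileLike>(); // created
}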
@ -242,7 +242,7 @@ target_link_libraries (clickhouse_common_io
PUBLIC
common
${DOUBLE_CONVERSION_LIBRARIES}
ryu
dragonbox_to_chars
)

if(RE2_LIBRARY)
@ -403,6 +403,9 @@ if (USE_MSGPACK)
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${MSGPACK_INCLUDE_DIR})
endif()

target_link_libraries (clickhouse_common_io PUBLIC ${FAST_FLOAT_LIBRARY})
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${FAST_FLOAT_INCLUDE_DIR})

if (USE_ORC)
dbms_target_link_libraries(PUBLIC ${ORC_LIBRARIES})
dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR} ${CMAKE_BINARY_DIR}/contrib/orc/c++/include)

@ -370,4 +370,5 @@ template class ColumnDecimal<Decimal32>;
template class ColumnDecimal<Decimal64>;
template class ColumnDecimal<Decimal128>;
template class ColumnDecimal<Decimal256>;
template class ColumnDecimal<DateTime64>;
}

@ -37,12 +37,16 @@ void encodeSHA256(const void * text, size_t size, unsigned char * out)

String getOpenSSLErrors()
{
BIO * mem = BIO_new(BIO_s_mem());
SCOPE_EXIT(BIO_free(mem));
ERR_print_errors(mem);
char * buf = nullptr;
size_t size = BIO_get_mem_data(mem, &buf);
return String(buf, size);
String res;
ERR_print_errors_cb([](const char * str, size_t len, void * ctx)
{
String & out = *reinterpret_cast<String*>(ctx);
if (!out.empty())
out += ", ";
out.append(str, len);
return 1;
}, &res);
return res;
}

}
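The rewritten getOpenSSLErrors drains OpenSSL's thread-local error queue through ERR_print_errors_cb, one callback invocation per queued error string; this avoids allocating a memory BIO and joins the entries with ", " instead of newlines. A hypothetical call site (names here are assumptions, for illustration only):

// Hypothetical usage:
if (SSL_connect(ssl) != 1)
    throw std::runtime_error("TLS handshake failed: " + getOpenSSLErrors());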
@ -70,7 +70,7 @@ LazyPipeFDs::~LazyPipeFDs()
}

void LazyPipeFDs::setNonBlocking()
void LazyPipeFDs::setNonBlockingWrite()
{
int flags = fcntl(fds_rw[1], F_GETFL, 0);
if (-1 == flags)
@ -79,6 +79,21 @@ void LazyPipeFDs::setNonBlocking()
throwFromErrno("Cannot set non-blocking mode of pipe", ErrorCodes::CANNOT_FCNTL);
}

void LazyPipeFDs::setNonBlockingRead()
{
int flags = fcntl(fds_rw[0], F_GETFL, 0);
if (-1 == flags)
throwFromErrno("Cannot get file status flags of pipe", ErrorCodes::CANNOT_FCNTL);
if (-1 == fcntl(fds_rw[0], F_SETFL, flags | O_NONBLOCK))
throwFromErrno("Cannot set non-blocking mode of pipe", ErrorCodes::CANNOT_FCNTL);
}

void LazyPipeFDs::setNonBlockingReadWrite()
{
setNonBlockingRead();
setNonBlockingWrite();
}

void LazyPipeFDs::tryIncreaseSize(int desired_size)
{
#if defined(OS_LINUX)

@ -17,7 +17,12 @@ struct LazyPipeFDs
void open();
void close();

void setNonBlocking();
/// Set O_NONBLOCK on the corresponding end(s) of the pipe, preserving existing flags.
/// Throws an exception if fcntl was not successful.
void setNonBlockingWrite();
void setNonBlockingRead();
void setNonBlockingReadWrite();

void tryIncreaseSize(int desired_size);

~LazyPipeFDs();
@ -195,7 +195,8 @@ void StackTrace::symbolize(const StackTrace::FramePointers & frame_pointers, siz
{
#if defined(__ELF__) && !defined(__FreeBSD__) && !defined(ARCADIA_BUILD)

const DB::SymbolIndex & symbol_index = DB::SymbolIndex::instance();
auto symbol_index_ptr = DB::SymbolIndex::instance();
const DB::SymbolIndex & symbol_index = *symbol_index_ptr;
std::unordered_map<std::string, DB::Dwarf> dwarfs;

for (size_t i = 0; i < offset; ++i)
@ -316,7 +317,8 @@ static void toStringEveryLineImpl(
return callback("<Empty trace>");

#if defined(__ELF__) && !defined(__FreeBSD__)
const DB::SymbolIndex & symbol_index = DB::SymbolIndex::instance();
auto symbol_index_ptr = DB::SymbolIndex::instance();
const DB::SymbolIndex & symbol_index = *symbol_index_ptr;
std::unordered_map<std::string, DB::Dwarf> dwarfs;

std::stringstream out; // STYLE_CHECK_ALLOW_STD_STRING_STREAM

@ -300,13 +300,13 @@ void collectSymbolsFromELF(dl_phdr_info * info,

String our_build_id = getBuildIDFromProgramHeaders(info);

/// If the name is empty - it's the main executable.
/// Find an ELF file for the main executable.

/// If the name is empty and there is a non-empty build-id - it's the main executable.
/// Find an ELF file for the main executable and set the build-id.
if (object_name.empty())
{
object_name = "/proc/self/exe";
build_id = our_build_id;
if (build_id.empty())
build_id = our_build_id;
}

std::error_code ec;
@ -316,9 +316,16 @@ void collectSymbolsFromELF(dl_phdr_info * info,
return;

/// Debug info and symbol table sections may be split into a separate binary.
std::filesystem::path local_debug_info_path = canonical_path.parent_path() / canonical_path.stem();
local_debug_info_path += ".debug";
std::filesystem::path debug_info_path = std::filesystem::path("/usr/lib/debug") / canonical_path.relative_path();

object_name = std::filesystem::exists(debug_info_path) ? debug_info_path : canonical_path;
if (std::filesystem::exists(local_debug_info_path))
object_name = local_debug_info_path;
else if (std::filesystem::exists(debug_info_path))
object_name = debug_info_path;
else
object_name = canonical_path;

/// But we have to compare the Build ID to check that the debug info corresponds to the same executable.

@ -434,10 +441,12 @@ String SymbolIndex::getBuildIDHex() const
return build_id_hex;
}

SymbolIndex & SymbolIndex::instance()
MultiVersion<SymbolIndex>::Version SymbolIndex::instance(bool reload)
{
static SymbolIndex instance;
return instance;
static MultiVersion<SymbolIndex> instance(std::unique_ptr<SymbolIndex>(new SymbolIndex));
if (reload)
instance.set(std::unique_ptr<SymbolIndex>(new SymbolIndex));
return instance.get();
}

}
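The singleton now hands out a MultiVersion snapshot instead of a bare reference, so SYSTEM RELOAD SYMBOLS can swap in a freshly built index while in-flight stack traces keep using the old one. A minimal sketch of the snapshot pattern (a simplified stand-in for ClickHouse's MultiVersion, which is assumed to behave like an atomically swappable shared pointer):

#include <memory>
#include <mutex>

template <typename T>
class MultiVersionLike
{
public:
    using Version = std::shared_ptr<const T>;
    explicit MultiVersionLike(std::unique_ptr<const T> v) : current(std::move(v)) {}
    Version get() const { std::lock_guard lock(m); return current; }   // pin a snapshot
    void set(std::unique_ptr<const T> v) { std::lock_guard lock(m); current = std::move(v); } // new readers see the new version
private:
    mutable std::mutex m;
    Version current;
};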
@ -7,6 +7,7 @@
#include <Common/Elf.h>
#include <boost/noncopyable.hpp>

#include <Common/MultiVersion.h>

namespace DB
{
@ -21,7 +22,7 @@ protected:
SymbolIndex() { update(); }

public:
static SymbolIndex & instance();
static MultiVersion<SymbolIndex>::Version instance(bool reload = false);

struct Symbol
{

@ -36,7 +36,7 @@ TraceCollector::TraceCollector(std::shared_ptr<TraceLog> trace_log_)
/** Turn the write end of the pipe to non-blocking mode to avoid deadlocks
* when QueryProfiler is invoked under locks and TraceCollector cannot pull data from the pipe.
*/
pipe.setNonBlocking();
pipe.setNonBlockingWrite();
pipe.tryIncreaseSize(1 << 20);

thread = ThreadFromGlobalPool(&TraceCollector::run, this);
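Only the write end needs O_NONBLOCK here: the profiler writes trace samples from contexts where blocking is unacceptable, while the collector thread may block on reads. A hedged illustration of the writer-side consequence (not the actual profiler code; names are hypothetical):

#include <cerrno>
#include <unistd.h>

// Hypothetical sample writer: if the pipe is full, drop the sample
// rather than stall a signal handler or a locked section.
void writeSampleNonBlocking(int write_fd, const void * buf, size_t size)
{
    ssize_t n = ::write(write_fd, buf, size);
    if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
    {
        /// Pipe full: the sample is lost, which is acceptable for profiling.
    }
}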
@ -31,7 +31,6 @@ using Undo = std::function<void()>;

struct TestKeeperRequest : virtual Request
{
virtual bool isMutable() const { return false; }
virtual ResponsePtr createResponse() const = 0;
virtual std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const = 0;
virtual void processWatches(TestKeeper::Watches & /*watches*/, TestKeeper::Watches & /*list_watches*/) const {}
@ -85,7 +84,6 @@ struct TestKeeperRemoveRequest final : RemoveRequest, TestKeeperRequest
{
TestKeeperRemoveRequest() = default;
explicit TestKeeperRemoveRequest(const RemoveRequest & base) : RemoveRequest(base) {}
bool isMutable() const override { return true; }
ResponsePtr createResponse() const override;
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;

@ -112,7 +110,6 @@ struct TestKeeperSetRequest final : SetRequest, TestKeeperRequest
{
TestKeeperSetRequest() = default;
explicit TestKeeperSetRequest(const SetRequest & base) : SetRequest(base) {}
bool isMutable() const override { return true; }
ResponsePtr createResponse() const override;
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;

@ -216,7 +213,6 @@ std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Contai
if (is_sequential)
{
auto seq_num = it->second.seq_num;
++it->second.seq_num;

std::stringstream seq_num_str; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
seq_num_str.exceptions(std::ios::failbit);
@ -225,18 +221,19 @@ std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Contai
path_created += seq_num_str.str();
}

/// Increment sequential number even if node is not sequential
++it->second.seq_num;

response.path_created = path_created;
container.emplace(path_created, std::move(created_node));

undo = [&container, path_created, is_sequential = is_sequential, parent_path = it->first]
undo = [&container, path_created, parent_path = it->first]
{
container.erase(path_created);
auto & undo_parent = container.at(parent_path);
--undo_parent.stat.cversion;
--undo_parent.stat.numChildren;

if (is_sequential)
--undo_parent.seq_num;
--undo_parent.seq_num;
};

++it->second.stat.cversion;
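Behavioral note on the hunk above: the parent's counter now advances on every child creation, not only sequential ones, and the undo path always decrements it symmetrically. Worked example with hypothetical paths: creating sequential "/q/n" yields "/q/n0000000000"; after a plain create of "/q/other", the next sequential "/q/n" yields "/q/n0000000002". The zero-padded suffix is produced exactly as in the code:

#include <iomanip>
#include <sstream>
#include <string>

// Zero-padded suffix identical to the formatting above: 42 -> "0000000042".
std::string sequentialSuffix(int seq_num)
{
    std::ostringstream out;
    out << std::setw(10) << std::setfill('0') << seq_num;
    return out.str();
}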
@ -125,8 +125,6 @@ private:
Watches watches;
Watches list_watches; /// Watches for 'list' request (watches on children).

void createWatchCallBack(const String & path);

using RequestsQueue = ConcurrentBoundedQueue<RequestInfo>;
RequestsQueue requests_queue{1};

src/Common/ZooKeeper/TestKeeperStorage.cpp (Normal file, 806 lines)
@ -0,0 +1,806 @@
#include <Common/ZooKeeper/TestKeeperStorage.h>
#include <Common/ZooKeeper/IKeeper.h>
#include <Common/setThreadName.h>
#include <mutex>
#include <functional>
#include <common/logger_useful.h>
#include <Common/StringUtils/StringUtils.h>
#include <sstream>
#include <iomanip>

namespace DB
{

namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int TIMEOUT_EXCEEDED;
extern const int BAD_ARGUMENTS;
}

}

namespace zkutil
{

using namespace DB;

static String parentPath(const String & path)
{
auto rslash_pos = path.rfind('/');
if (rslash_pos > 0)
return path.substr(0, rslash_pos);
return "/";
}

static String baseName(const String & path)
{
auto rslash_pos = path.rfind('/');
return path.substr(rslash_pos + 1);
}
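/// Illustration (not part of the committed file): these helpers behave as
///   parentPath("/a/b") == "/a",  parentPath("/a") == "/",  baseName("/a/b") == "b".
/// The rslash_pos == 0 case maps a top-level node back to the root "/".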
static void processWatchesImpl(const String & path, TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches, Coordination::Event event_type)
{
auto it = watches.find(path);
if (it != watches.end())
{
std::shared_ptr<Coordination::ZooKeeperWatchResponse> watch_response = std::make_shared<Coordination::ZooKeeperWatchResponse>();
watch_response->path = path;
watch_response->xid = -1;
watch_response->zxid = -1;
watch_response->type = event_type;
watch_response->state = Coordination::State::CONNECTED;
for (auto & watcher : it->second)
if (watcher.watch_callback)
watcher.watch_callback(watch_response);

watches.erase(it);
}

auto parent_path = parentPath(path);
it = list_watches.find(parent_path);
if (it != list_watches.end())
{
std::shared_ptr<Coordination::ZooKeeperWatchResponse> watch_list_response = std::make_shared<Coordination::ZooKeeperWatchResponse>();
watch_list_response->path = parent_path;
watch_list_response->xid = -1;
watch_list_response->zxid = -1;
watch_list_response->type = Coordination::Event::CHILD;
watch_list_response->state = Coordination::State::CONNECTED;
for (auto & watcher : it->second)
if (watcher.watch_callback)
watcher.watch_callback(watch_list_response);

list_watches.erase(it);
}
}

TestKeeperStorage::TestKeeperStorage()
{
container.emplace("/", Node());

processing_thread = ThreadFromGlobalPool([this] { processingThread(); });
}

using Undo = std::function<void()>;

struct TestKeeperStorageRequest
{
Coordination::ZooKeeperRequestPtr zk_request;

explicit TestKeeperStorageRequest(const Coordination::ZooKeeperRequestPtr & zk_request_)
: zk_request(zk_request_)
{}
virtual std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & ephemerals, int64_t zxid, int64_t session_id) const = 0;
virtual void processWatches(TestKeeperStorage::Watches & /*watches*/, TestKeeperStorage::Watches & /*list_watches*/) const {}

virtual ~TestKeeperStorageRequest() = default;
};

struct TestKeeperStorageHeartbeatRequest final : public TestKeeperStorageRequest
{
using TestKeeperStorageRequest::TestKeeperStorageRequest;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & /* container */, TestKeeperStorage::Ephemerals & /* ephemerals */, int64_t /* zxid */, int64_t /* session_id */) const override
{
return {zk_request->makeResponse(), {}};
}
};


struct TestKeeperStorageCreateRequest final : public TestKeeperStorageRequest
{
using TestKeeperStorageRequest::TestKeeperStorageRequest;

void processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
{
processWatchesImpl(zk_request->getPath(), watches, list_watches, Coordination::Event::CREATED);
}

std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & ephemerals, int64_t zxid, int64_t session_id) const override
{
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
Undo undo;
Coordination::ZooKeeperCreateResponse & response = dynamic_cast<Coordination::ZooKeeperCreateResponse &>(*response_ptr);
Coordination::ZooKeeperCreateRequest & request = dynamic_cast<Coordination::ZooKeeperCreateRequest &>(*zk_request);

if (container.count(request.path))
{
response.error = Coordination::Error::ZNODEEXISTS;
}
else
{
auto it = container.find(parentPath(request.path));

if (it == container.end())
{
response.error = Coordination::Error::ZNONODE;
}
else if (it->second.is_ephemeral)
{
response.error = Coordination::Error::ZNOCHILDRENFOREPHEMERALS;
}
else
{
TestKeeperStorage::Node created_node;
created_node.seq_num = 0;
created_node.stat.czxid = zxid;
created_node.stat.mzxid = zxid;
created_node.stat.ctime = std::chrono::system_clock::now().time_since_epoch() / std::chrono::milliseconds(1);
created_node.stat.mtime = created_node.stat.ctime;
created_node.stat.numChildren = 0;
created_node.stat.dataLength = request.data.length();
created_node.data = request.data;
created_node.is_ephemeral = request.is_ephemeral;
created_node.is_sequental = request.is_sequential;
std::string path_created = request.path;

if (request.is_sequential)
{
auto seq_num = it->second.seq_num;

std::stringstream seq_num_str; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
seq_num_str.exceptions(std::ios::failbit);
seq_num_str << std::setw(10) << std::setfill('0') << seq_num;

path_created += seq_num_str.str();
}

/// Increment sequential number even if node is not sequential
++it->second.seq_num;

response.path_created = path_created;
container.emplace(path_created, std::move(created_node));

if (request.is_ephemeral)
ephemerals[session_id].emplace(path_created);

undo = [&container, &ephemerals, session_id, path_created, is_ephemeral = request.is_ephemeral, parent_path = it->first]
{
container.erase(path_created);
if (is_ephemeral)
ephemerals[session_id].erase(path_created);
auto & undo_parent = container.at(parent_path);
--undo_parent.stat.cversion;
--undo_parent.stat.numChildren;
--undo_parent.seq_num;
};

++it->second.stat.cversion;
++it->second.stat.numChildren;

response.error = Coordination::Error::ZOK;
}
}

return { response_ptr, undo };
}
};
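Pattern note on the request above: each mutating handler returns its response together with an Undo closure that reverses every side effect (erase the created node, restore the parent's counters), so a failed sub-request of a Multi can roll back already-applied siblings. A minimal generic sketch of the idea (not the committed code):

#include <functional>
#include <map>
#include <string>

using Undo = std::function<void()>;

// Apply a change and return the closure that undoes exactly that change.
Undo applyInsert(std::map<std::string, int> & kv, const std::string & key, int value)
{
    kv.emplace(key, value);
    return [&kv, key] { kv.erase(key); };
}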
struct TestKeeperStorageGetRequest final : public TestKeeperStorageRequest
{
using TestKeeperStorageRequest::TestKeeperStorageRequest;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & /* ephemerals */, int64_t /* zxid */, int64_t /* session_id */) const override
{
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
Coordination::ZooKeeperGetResponse & response = dynamic_cast<Coordination::ZooKeeperGetResponse &>(*response_ptr);
Coordination::ZooKeeperGetRequest & request = dynamic_cast<Coordination::ZooKeeperGetRequest &>(*zk_request);

auto it = container.find(request.path);
if (it == container.end())
{
response.error = Coordination::Error::ZNONODE;
}
else
{
response.stat = it->second.stat;
response.data = it->second.data;
response.error = Coordination::Error::ZOK;
}

return { response_ptr, {} };
}
};

struct TestKeeperStorageRemoveRequest final : public TestKeeperStorageRequest
{
using TestKeeperStorageRequest::TestKeeperStorageRequest;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & ephemerals, int64_t /*zxid*/, int64_t session_id) const override
{
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
Coordination::ZooKeeperRemoveResponse & response = dynamic_cast<Coordination::ZooKeeperRemoveResponse &>(*response_ptr);
Coordination::ZooKeeperRemoveRequest & request = dynamic_cast<Coordination::ZooKeeperRemoveRequest &>(*zk_request);
Undo undo;

auto it = container.find(request.path);
if (it == container.end())
{
response.error = Coordination::Error::ZNONODE;
}
else if (request.version != -1 && request.version != it->second.stat.version)
{
response.error = Coordination::Error::ZBADVERSION;
}
else if (it->second.stat.numChildren)
{
response.error = Coordination::Error::ZNOTEMPTY;
}
else
{
auto prev_node = it->second;
if (prev_node.is_ephemeral)
ephemerals[session_id].erase(request.path);

container.erase(it);
auto & parent = container.at(parentPath(request.path));
--parent.stat.numChildren;
++parent.stat.cversion;
response.error = Coordination::Error::ZOK;

undo = [prev_node, &container, &ephemerals, session_id, path = request.path]
{
if (prev_node.is_ephemeral)
ephemerals[session_id].emplace(path);

container.emplace(path, prev_node);
auto & undo_parent = container.at(parentPath(path));
++undo_parent.stat.numChildren;
--undo_parent.stat.cversion;
};
}

return { response_ptr, undo };
}

void processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
{
processWatchesImpl(zk_request->getPath(), watches, list_watches, Coordination::Event::DELETED);
}
};

struct TestKeeperStorageExistsRequest final : public TestKeeperStorageRequest
{
using TestKeeperStorageRequest::TestKeeperStorageRequest;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & /* ephemerals */, int64_t /*zxid*/, int64_t /* session_id */) const override
{
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
Coordination::ZooKeeperExistsResponse & response = dynamic_cast<Coordination::ZooKeeperExistsResponse &>(*response_ptr);
Coordination::ZooKeeperExistsRequest & request = dynamic_cast<Coordination::ZooKeeperExistsRequest &>(*zk_request);

auto it = container.find(request.path);
if (it != container.end())
{
response.stat = it->second.stat;
response.error = Coordination::Error::ZOK;
}
else
{
response.error = Coordination::Error::ZNONODE;
}

return { response_ptr, {} };
}
};

struct TestKeeperStorageSetRequest final : public TestKeeperStorageRequest
{
using TestKeeperStorageRequest::TestKeeperStorageRequest;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & /* ephemerals */, int64_t zxid, int64_t /* session_id */) const override
{
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
Coordination::ZooKeeperSetResponse & response = dynamic_cast<Coordination::ZooKeeperSetResponse &>(*response_ptr);
Coordination::ZooKeeperSetRequest & request = dynamic_cast<Coordination::ZooKeeperSetRequest &>(*zk_request);
Undo undo;

auto it = container.find(request.path);
if (it == container.end())
{
response.error = Coordination::Error::ZNONODE;
}
else if (request.version == -1 || request.version == it->second.stat.version)
{
auto prev_node = it->second;

it->second.data = request.data;
++it->second.stat.version;
it->second.stat.mzxid = zxid;
it->second.stat.mtime = std::chrono::system_clock::now().time_since_epoch() / std::chrono::milliseconds(1);
it->second.stat.dataLength = request.data.length();
it->second.data = request.data;
++container.at(parentPath(request.path)).stat.cversion;
response.stat = it->second.stat;
response.error = Coordination::Error::ZOK;

undo = [prev_node, &container, path = request.path]
{
container.at(path) = prev_node;
--container.at(parentPath(path)).stat.cversion;
};
}
else
{
response.error = Coordination::Error::ZBADVERSION;
}

return { response_ptr, undo };
}

void processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
{
processWatchesImpl(zk_request->getPath(), watches, list_watches, Coordination::Event::CHANGED);
}

};

struct TestKeeperStorageListRequest final : public TestKeeperStorageRequest
{
using TestKeeperStorageRequest::TestKeeperStorageRequest;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & /* ephemerals */, int64_t /*zxid*/, int64_t /*session_id*/) const override
{
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
Coordination::ZooKeeperListResponse & response = dynamic_cast<Coordination::ZooKeeperListResponse &>(*response_ptr);
Coordination::ZooKeeperListRequest & request = dynamic_cast<Coordination::ZooKeeperListRequest &>(*zk_request);
auto it = container.find(request.path);
if (it == container.end())
{
response.error = Coordination::Error::ZNONODE;
}
else
{
auto path_prefix = request.path;
if (path_prefix.empty())
throw DB::Exception("Logical error: path cannot be empty", ErrorCodes::LOGICAL_ERROR);

if (path_prefix.back() != '/')
path_prefix += '/';

/// Fairly inefficient.
for (auto child_it = container.upper_bound(path_prefix);
child_it != container.end() && startsWith(child_it->first, path_prefix);
++child_it)
{
if (parentPath(child_it->first) == request.path)
response.names.emplace_back(baseName(child_it->first));
}

response.stat = it->second.stat;
response.error = Coordination::Error::ZOK;
}

return { response_ptr, {} };
}
};

struct TestKeeperStorageCheckRequest final : public TestKeeperStorageRequest
{
using TestKeeperStorageRequest::TestKeeperStorageRequest;
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & /* ephemerals */, int64_t /*zxid*/, int64_t /*session_id*/) const override
{
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
Coordination::ZooKeeperCheckResponse & response = dynamic_cast<Coordination::ZooKeeperCheckResponse &>(*response_ptr);
Coordination::ZooKeeperCheckRequest & request = dynamic_cast<Coordination::ZooKeeperCheckRequest &>(*zk_request);
auto it = container.find(request.path);
if (it == container.end())
{
response.error = Coordination::Error::ZNONODE;
}
else if (request.version != -1 && request.version != it->second.stat.version)
{
response.error = Coordination::Error::ZBADVERSION;
}
else
{
response.error = Coordination::Error::ZOK;
}

return { response_ptr, {} };
}
};

struct TestKeeperStorageMultiRequest final : public TestKeeperStorageRequest
{
std::vector<TestKeeperStorageRequestPtr> concrete_requests;
explicit TestKeeperStorageMultiRequest(const Coordination::ZooKeeperRequestPtr & zk_request_)
: TestKeeperStorageRequest(zk_request_)
{
Coordination::ZooKeeperMultiRequest & request = dynamic_cast<Coordination::ZooKeeperMultiRequest &>(*zk_request);
concrete_requests.reserve(request.requests.size());

for (const auto & sub_request : request.requests)
{
auto sub_zk_request = dynamic_pointer_cast<Coordination::ZooKeeperRequest>(sub_request);
if (sub_zk_request->getOpNum() == Coordination::OpNum::Create)
{
concrete_requests.push_back(std::make_shared<TestKeeperStorageCreateRequest>(sub_zk_request));
}
else if (sub_zk_request->getOpNum() == Coordination::OpNum::Remove)
{
concrete_requests.push_back(std::make_shared<TestKeeperStorageRemoveRequest>(sub_zk_request));
}
else if (sub_zk_request->getOpNum() == Coordination::OpNum::Set)
{
concrete_requests.push_back(std::make_shared<TestKeeperStorageSetRequest>(sub_zk_request));
}
else if (sub_zk_request->getOpNum() == Coordination::OpNum::Check)
{
concrete_requests.push_back(std::make_shared<TestKeeperStorageCheckRequest>(sub_zk_request));
}
else
throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Illegal command as part of multi ZooKeeper request {}", sub_zk_request->getOpNum());
}
}

std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container & container, TestKeeperStorage::Ephemerals & ephemerals, int64_t zxid, int64_t session_id) const override
{
Coordination::ZooKeeperResponsePtr response_ptr = zk_request->makeResponse();
Coordination::ZooKeeperMultiResponse & response = dynamic_cast<Coordination::ZooKeeperMultiResponse &>(*response_ptr);
std::vector<Undo> undo_actions;

try
{
size_t i = 0;
for (const auto & concrete_request : concrete_requests)
{
auto [ cur_response, undo_action ] = concrete_request->process(container, ephemerals, zxid, session_id);

response.responses[i] = cur_response;
if (cur_response->error != Coordination::Error::ZOK)
{
for (size_t j = 0; j <= i; ++j)
{
auto response_error = response.responses[j]->error;
response.responses[j] = std::make_shared<Coordination::ZooKeeperErrorResponse>();
response.responses[j]->error = response_error;
}

for (size_t j = i + 1; j < response.responses.size(); ++j)
{
response.responses[j] = std::make_shared<Coordination::ZooKeeperErrorResponse>();
response.responses[j]->error = Coordination::Error::ZRUNTIMEINCONSISTENCY;
}

for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
if (*it)
(*it)();

return { response_ptr, {} };
}
else
undo_actions.emplace_back(std::move(undo_action));

++i;
}

response.error = Coordination::Error::ZOK;
return { response_ptr, {} };
}
catch (...)
{
for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
if (*it)
(*it)();
throw;
}
}

void processWatches(TestKeeperStorage::Watches & watches, TestKeeperStorage::Watches & list_watches) const override
{
for (const auto & generic_request : concrete_requests)
generic_request->processWatches(watches, list_watches);
}
};
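The multi handler above gives ZooKeeper-style all-or-nothing semantics in a single thread: sub-requests are applied in order, their undos are stacked, and on the first failure the undos run in reverse (LIFO) order so later changes are unwound before earlier ones; the failing slot keeps its real error while the untouched tail reports ZRUNTIMEINCONSISTENCY. A tiny sketch of the same rollback discipline in isolation:

#include <functional>
#include <vector>

// LIFO rollback, identical in spirit to the loops above.
void rollback(std::vector<std::function<void()>> & undo_actions)
{
    for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
        if (*it)
            (*it)();
    undo_actions.clear();
}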
struct TestKeeperStorageCloseRequest final : public TestKeeperStorageRequest
|
||||
{
|
||||
using TestKeeperStorageRequest::TestKeeperStorageRequest;
|
||||
std::pair<Coordination::ZooKeeperResponsePtr, Undo> process(TestKeeperStorage::Container &, TestKeeperStorage::Ephemerals &, int64_t, int64_t) const override
|
||||
{
|
||||
throw DB::Exception("Called process on close request", ErrorCodes::LOGICAL_ERROR);
|
||||
}
|
||||
};
|
||||
|
||||
void TestKeeperStorage::processingThread()
|
||||
{
|
||||
setThreadName("TestKeeperSProc");
|
||||
|
||||
try
|
||||
{
|
||||
while (!shutdown)
|
||||
{
|
||||
RequestInfo info;
|
||||
|
||||
UInt64 max_wait = UInt64(operation_timeout.totalMilliseconds());
|
||||
|
||||
if (requests_queue.tryPop(info, max_wait))
|
||||
{
|
||||
if (shutdown)
|
||||
break;
|
||||
|
||||
auto zk_request = info.request->zk_request;
|
||||
if (zk_request->getOpNum() == Coordination::OpNum::Close)
|
||||
{
|
||||
auto it = ephemerals.find(info.session_id);
|
||||
if (it != ephemerals.end())
|
||||
{
|
||||
for (const auto & ephemeral_path : it->second)
|
||||
{
|
||||
container.erase(ephemeral_path);
|
||||
processWatchesImpl(ephemeral_path, watches, list_watches, Coordination::Event::DELETED);
|
||||
}
|
||||
ephemerals.erase(it);
|
||||
}
|
||||
clearDeadWatches(info.session_id);
|
||||
|
||||
/// Finish connection
|
||||
auto response = std::make_shared<Coordination::ZooKeeperCloseResponse>();
|
||||
response->xid = zk_request->xid;
|
||||
response->zxid = getZXID();
|
||||
info.response_callback(response);
|
||||
}
|
||||
else
|
||||
{
|
||||
auto [response, _] = info.request->process(container, ephemerals, zxid, info.session_id);
|
||||
|
||||
if (info.watch_callback)
|
||||
{
|
||||
if (response->error == Coordination::Error::ZOK)
|
||||
{
|
||||
auto & watches_type = zk_request->getOpNum() == Coordination::OpNum::List || zk_request->getOpNum() == Coordination::OpNum::SimpleList
|
||||
? list_watches
|
||||
: watches;
|
||||
|
||||
watches_type[zk_request->getPath()].emplace_back(Watcher{info.session_id, info.watch_callback});
|
||||
sessions_and_watchers[info.session_id].emplace(zk_request->getPath());
|
||||
}
|
||||
else if (response->error == Coordination::Error::ZNONODE && zk_request->getOpNum() == Coordination::OpNum::Exists)
|
||||
{
|
||||
watches[zk_request->getPath()].emplace_back(Watcher{info.session_id, info.watch_callback});
|
||||
sessions_and_watchers[info.session_id].emplace(zk_request->getPath());
|
||||
}
|
||||
else
|
||||
{
|
||||
std::shared_ptr<Coordination::ZooKeeperWatchResponse> watch_response = std::make_shared<Coordination::ZooKeeperWatchResponse>();
|
||||
watch_response->path = zk_request->getPath();
|
||||
watch_response->xid = -1;
|
||||
watch_response->error = response->error;
|
||||
watch_response->type = Coordination::Event::NOTWATCHING;
|
||||
info.watch_callback(watch_response);
|
||||
}
|
||||
}
|
||||
|
||||
if (response->error == Coordination::Error::ZOK)
|
||||
info.request->processWatches(watches, list_watches);
|
||||
|
||||
response->xid = zk_request->xid;
|
||||
response->zxid = getZXID();
|
||||
|
||||
info.response_callback(response);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
finalize();
|
||||
}
|
||||
}
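
All requests flow through a single consumer thread, so container, ephemerals and the watch maps are effectively touched from one thread at a time and need no locking of their own; ordering comes from the queue. A sketch of that single-consumer pattern using only the standard library (SimpleQueue here is a hypothetical stand-in for ConcurrentBoundedQueue):

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

/// Minimal bounded-wait pop: returns false if nothing arrived in time,
/// mirroring requests_queue.tryPop(info, max_wait) above.
template <typename T>
class SimpleQueue
{
public:
    void push(T value)
    {
        {
            std::lock_guard<std::mutex> lock(mutex);
            items.push(std::move(value));
        }
        cv.notify_one();
    }

    bool tryPop(T & value, std::chrono::milliseconds timeout)
    {
        std::unique_lock<std::mutex> lock(mutex);
        if (!cv.wait_for(lock, timeout, [this] { return !items.empty(); }))
            return false;
        value = std::move(items.front());
        items.pop();
        return true;
    }

private:
    std::mutex mutex;
    std::condition_variable cv;
    std::queue<T> items;
};

int main()
{
    SimpleQueue<std::function<void()>> queue;
    std::atomic<bool> shutdown{false};

    /// Single consumer: any state touched only by the tasks needs no extra locking.
    std::thread worker([&]
    {
        while (!shutdown)
        {
            std::function<void()> task;
            if (queue.tryPop(task, std::chrono::milliseconds(100)))
                task();
        }
    });

    queue.push([] { std::cout << "processed\n"; });
    std::this_thread::sleep_for(std::chrono::milliseconds(200));
    shutdown = true;
    worker.join();
}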

void TestKeeperStorage::finalize()
{
    {
        std::lock_guard lock(push_request_mutex);

        if (shutdown)
            return;

        shutdown = true;

        if (processing_thread.joinable())
            processing_thread.join();
    }

    try
    {
        {
            auto finish_watch = [] (const auto & watch_pair)
            {
                Coordination::ZooKeeperWatchResponse response;
                response.type = Coordination::SESSION;
                response.state = Coordination::EXPIRED_SESSION;
                response.error = Coordination::Error::ZSESSIONEXPIRED;

                for (auto & watcher : watch_pair.second)
                {
                    if (watcher.watch_callback)
                    {
                        try
                        {
                            watcher.watch_callback(std::make_shared<Coordination::ZooKeeperWatchResponse>(response));
                        }
                        catch (...)
                        {
                            tryLogCurrentException(__PRETTY_FUNCTION__);
                        }
                    }
                }
            };
            for (auto & path_watch : watches)
                finish_watch(path_watch);
            watches.clear();
            for (auto & path_watch : list_watches)
                finish_watch(path_watch);
            list_watches.clear();
            sessions_and_watchers.clear();
        }
        RequestInfo info;
        while (requests_queue.tryPop(info))
        {
            auto response = info.request->zk_request->makeResponse();
            response->error = Coordination::Error::ZSESSIONEXPIRED;
            try
            {
                info.response_callback(response);
            }
            catch (...)
            {
                tryLogCurrentException(__PRETTY_FUNCTION__);
            }
        }
    }
    catch (...)
    {
        tryLogCurrentException(__PRETTY_FUNCTION__);
    }
}


class TestKeeperWrapperFactory final : private boost::noncopyable
{
public:
    using Creator = std::function<TestKeeperStorageRequestPtr(const Coordination::ZooKeeperRequestPtr &)>;
    using OpNumToRequest = std::unordered_map<Coordination::OpNum, Creator>;

    static TestKeeperWrapperFactory & instance()
    {
        static TestKeeperWrapperFactory factory;
        return factory;
    }

    TestKeeperStorageRequestPtr get(const Coordination::ZooKeeperRequestPtr & zk_request) const
    {
        auto it = op_num_to_request.find(zk_request->getOpNum());
        if (it == op_num_to_request.end())
            throw DB::Exception("Unknown operation type " + toString(zk_request->getOpNum()), ErrorCodes::LOGICAL_ERROR);

        return it->second(zk_request);
    }

    void registerRequest(Coordination::OpNum op_num, Creator creator)
    {
        if (!op_num_to_request.try_emplace(op_num, creator).second)
            throw DB::Exception(ErrorCodes::LOGICAL_ERROR, "Request with op num {} already registered", op_num);
    }

private:
    OpNumToRequest op_num_to_request;
    TestKeeperWrapperFactory();
};

template<Coordination::OpNum num, typename RequestT>
void registerTestKeeperRequestWrapper(TestKeeperWrapperFactory & factory)
{
    factory.registerRequest(num, [] (const Coordination::ZooKeeperRequestPtr & zk_request) { return std::make_shared<RequestT>(zk_request); });
}


TestKeeperWrapperFactory::TestKeeperWrapperFactory()
{
    registerTestKeeperRequestWrapper<Coordination::OpNum::Heartbeat, TestKeeperStorageHeartbeatRequest>(*this);
    //registerTestKeeperRequestWrapper<Coordination::OpNum::Auth, TestKeeperStorageAuthRequest>(*this);
    registerTestKeeperRequestWrapper<Coordination::OpNum::Close, TestKeeperStorageCloseRequest>(*this);
    registerTestKeeperRequestWrapper<Coordination::OpNum::Create, TestKeeperStorageCreateRequest>(*this);
    registerTestKeeperRequestWrapper<Coordination::OpNum::Remove, TestKeeperStorageRemoveRequest>(*this);
    registerTestKeeperRequestWrapper<Coordination::OpNum::Exists, TestKeeperStorageExistsRequest>(*this);
    registerTestKeeperRequestWrapper<Coordination::OpNum::Get, TestKeeperStorageGetRequest>(*this);
    registerTestKeeperRequestWrapper<Coordination::OpNum::Set, TestKeeperStorageSetRequest>(*this);
    registerTestKeeperRequestWrapper<Coordination::OpNum::List, TestKeeperStorageListRequest>(*this);
    registerTestKeeperRequestWrapper<Coordination::OpNum::SimpleList, TestKeeperStorageListRequest>(*this);
    registerTestKeeperRequestWrapper<Coordination::OpNum::Check, TestKeeperStorageCheckRequest>(*this);
    registerTestKeeperRequestWrapper<Coordination::OpNum::Multi, TestKeeperStorageMultiRequest>(*this);
}

void TestKeeperStorage::putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, ResponseCallback callback)
{
    TestKeeperStorageRequestPtr storage_request = TestKeeperWrapperFactory::instance().get(request);
    RequestInfo request_info;
    request_info.time = clock::now();
    request_info.request = storage_request;
    request_info.session_id = session_id;
    request_info.response_callback = callback;

    /// Put close requests without timeouts
    auto timeout = request->getOpNum() == Coordination::OpNum::Close ? 0 : operation_timeout.totalMilliseconds();
    std::lock_guard lock(push_request_mutex);
    if (!requests_queue.tryPush(std::move(request_info), timeout))
        throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED);
}

void TestKeeperStorage::putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, ResponseCallback callback, ResponseCallback watch_callback)
{
    TestKeeperStorageRequestPtr storage_request = TestKeeperWrapperFactory::instance().get(request);
    RequestInfo request_info;
    request_info.time = clock::now();
    request_info.request = storage_request;
    request_info.session_id = session_id;
    request_info.response_callback = callback;
    if (request->has_watch)
        request_info.watch_callback = watch_callback;

    /// Put close requests without timeouts
    auto timeout = request->getOpNum() == Coordination::OpNum::Close ? 0 : operation_timeout.totalMilliseconds();
    std::lock_guard lock(push_request_mutex);
    if (!requests_queue.tryPush(std::move(request_info), timeout))
        throw Exception("Cannot push request to queue within operation timeout", ErrorCodes::TIMEOUT_EXCEEDED);
}

TestKeeperStorage::~TestKeeperStorage()
{
    try
    {
        finalize();
    }
    catch (...)
    {
        tryLogCurrentException(__PRETTY_FUNCTION__);
    }
}

void TestKeeperStorage::clearDeadWatches(int64_t session_id)
{
    auto watches_it = sessions_and_watchers.find(session_id);
    if (watches_it != sessions_and_watchers.end())
    {
        for (const auto & watch_path : watches_it->second)
        {
            auto watch = watches.find(watch_path);
            if (watch != watches.end())
            {
                auto & watches_for_path = watch->second;
                for (auto w_it = watches_for_path.begin(); w_it != watches_for_path.end();)
                {
                    if (w_it->session_id == session_id)
                        w_it = watches_for_path.erase(w_it);
                    else
                        ++w_it;
                }
                if (watches_for_path.empty())
                    watches.erase(watch);
            }
        }
        sessions_and_watchers.erase(watches_it);
    }
}

}

104
src/Common/ZooKeeper/TestKeeperStorage.h
Normal file
@ -0,0 +1,104 @@
#pragma once

#include <Common/ThreadPool.h>
#include <Common/ZooKeeper/IKeeper.h>
#include <Common/ConcurrentBoundedQueue.h>
#include <Common/ZooKeeper/ZooKeeperCommon.h>
#include <future>
#include <unordered_map>
#include <unordered_set>

namespace zkutil
{

using namespace DB;
struct TestKeeperStorageRequest;
using TestKeeperStorageRequestPtr = std::shared_ptr<TestKeeperStorageRequest>;
using ResponseCallback = std::function<void(const Coordination::ZooKeeperResponsePtr &)>;

class TestKeeperStorage
{
public:
    Poco::Timespan operation_timeout{0, Coordination::DEFAULT_OPERATION_TIMEOUT_MS * 1000};
    std::atomic<int64_t> session_id_counter{0};

    struct Node
    {
        String data;
        Coordination::ACLs acls;
        bool is_ephemeral = false;
        bool is_sequental = false;
        Coordination::Stat stat{};
        int32_t seq_num = 0;
    };

    struct Watcher
    {
        int64_t session_id;
        ResponseCallback watch_callback;
    };

    using Container = std::map<std::string, Node>;
    using Ephemerals = std::unordered_map<int64_t, std::unordered_set<String>>;
    using SessionAndWatcher = std::unordered_map<int64_t, std::unordered_set<String>>;

    using WatchCallbacks = std::vector<Watcher>;
    using Watches = std::map<String /* path, relative of root_path */, WatchCallbacks>;

    Container container;
    Ephemerals ephemerals;
    SessionAndWatcher sessions_and_watchers;

    std::atomic<int64_t> zxid{0};
    std::atomic<bool> shutdown{false};

    Watches watches;
    Watches list_watches; /// Watches for 'list' request (watches on children).

    using clock = std::chrono::steady_clock;

    struct RequestInfo
    {
        TestKeeperStorageRequestPtr request;
        ResponseCallback response_callback;
        ResponseCallback watch_callback;
        clock::time_point time;
        int64_t session_id;
    };

    std::mutex push_request_mutex;
    using RequestsQueue = ConcurrentBoundedQueue<RequestInfo>;
    RequestsQueue requests_queue{1};

    void finalize();

    ThreadFromGlobalPool processing_thread;

    void processingThread();
    void clearDeadWatches(int64_t session_id);

public:
    using AsyncResponse = std::future<Coordination::ZooKeeperResponsePtr>;
    TestKeeperStorage();
    ~TestKeeperStorage();
    struct ResponsePair
    {
        AsyncResponse response;
        std::optional<AsyncResponse> watch_response;
    };
    void putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, ResponseCallback callback);
    void putRequest(const Coordination::ZooKeeperRequestPtr & request, int64_t session_id, ResponseCallback callback, ResponseCallback watch_callback);

    int64_t getSessionID()
    {
        return session_id_counter.fetch_add(1);
    }
    int64_t getZXID()
    {
        return zxid.fetch_add(1);
    }
};

}
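
The callback-based putRequest API composes naturally with futures: allocate a session id, wrap a std::promise in the response callback, and block on the future. A hypothetical usage sketch against the interface above (not compiled here; it assumes the constructor starts the processing thread, and the path "/some/node" is illustrative):

#include <Common/ZooKeeper/TestKeeperStorage.h>
#include <Common/ZooKeeper/ZooKeeperCommon.h>
#include <future>

int main()
{
    zkutil::TestKeeperStorage storage;
    int64_t session_id = storage.getSessionID();

    auto request = std::make_shared<Coordination::ZooKeeperGetRequest>();
    request->path = "/some/node";

    std::promise<Coordination::ZooKeeperResponsePtr> promise;
    auto future = promise.get_future();

    /// The callback runs on the storage's processing thread.
    storage.putRequest(request, session_id, [&promise](const Coordination::ZooKeeperResponsePtr & response)
    {
        promise.set_value(response);
    });

    auto response = future.get();  /// ZNONODE here, since nothing was created
    return response->error == Coordination::Error::ZNONODE ? 0 : 1;
}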

@ -129,8 +129,8 @@ struct ZooKeeperArgs

        std::vector<std::string> hosts_strings;

        session_timeout_ms = DEFAULT_SESSION_TIMEOUT;
        operation_timeout_ms = DEFAULT_OPERATION_TIMEOUT;
        session_timeout_ms = Coordination::DEFAULT_SESSION_TIMEOUT_MS;
        operation_timeout_ms = Coordination::DEFAULT_OPERATION_TIMEOUT_MS;
        implementation = "zookeeper";
        for (const auto & key : keys)
        {

@ -11,6 +11,7 @@
#include <Common/ProfileEvents.h>
#include <Common/CurrentMetrics.h>
#include <Common/ZooKeeper/IKeeper.h>
#include <Common/ZooKeeper/ZooKeeperConstants.h>
#include <unistd.h>


@ -28,9 +29,6 @@ namespace CurrentMetrics
namespace zkutil
{

const UInt32 DEFAULT_SESSION_TIMEOUT = 30000;
const UInt32 DEFAULT_OPERATION_TIMEOUT = 10000;

/// Preferred size of multi() command (in number of ops)
constexpr size_t MULTI_BATCH_SIZE = 100;

@ -53,8 +51,8 @@ public:
    using Ptr = std::shared_ptr<ZooKeeper>;

    ZooKeeper(const std::string & hosts_, const std::string & identity_ = "",
              int32_t session_timeout_ms_ = DEFAULT_SESSION_TIMEOUT,
              int32_t operation_timeout_ms_ = DEFAULT_OPERATION_TIMEOUT,
              int32_t session_timeout_ms_ = Coordination::DEFAULT_SESSION_TIMEOUT_MS,
              int32_t operation_timeout_ms_ = Coordination::DEFAULT_OPERATION_TIMEOUT_MS,
              const std::string & chroot_ = "",
              const std::string & implementation_ = "zookeeper");

481
src/Common/ZooKeeper/ZooKeeperCommon.cpp
Normal file
@ -0,0 +1,481 @@
#include <Common/ZooKeeper/ZooKeeperCommon.h>
#include <Common/ZooKeeper/ZooKeeperIO.h>
#include <IO/WriteHelpers.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <IO/ReadHelpers.h>
#include <common/logger_useful.h>
#include <array>


namespace Coordination
{

using namespace DB;

void ZooKeeperResponse::write(WriteBuffer & out) const
{
    /// Excessive copy to calculate length.
    WriteBufferFromOwnString buf;
    Coordination::write(xid, buf);
    Coordination::write(zxid, buf);
    Coordination::write(error, buf);
    if (error == Error::ZOK)
        writeImpl(buf);
    Coordination::write(buf.str(), out);
    out.next();
}

void ZooKeeperRequest::write(WriteBuffer & out) const
{
    /// Excessive copy to calculate length.
    WriteBufferFromOwnString buf;
    Coordination::write(xid, buf);
    Coordination::write(getOpNum(), buf);
    writeImpl(buf);
    Coordination::write(buf.str(), out);
    out.next();
}

void ZooKeeperWatchResponse::readImpl(ReadBuffer & in)
{
    Coordination::read(type, in);
    Coordination::read(state, in);
    Coordination::read(path, in);
}

void ZooKeeperWatchResponse::writeImpl(WriteBuffer & out) const
{
    Coordination::write(type, out);
    Coordination::write(state, out);
    Coordination::write(path, out);
}

void ZooKeeperAuthRequest::writeImpl(WriteBuffer & out) const
{
    Coordination::write(type, out);
    Coordination::write(scheme, out);
    Coordination::write(data, out);
}

void ZooKeeperAuthRequest::readImpl(ReadBuffer & in)
{
    Coordination::read(type, in);
    Coordination::read(scheme, in);
    Coordination::read(data, in);
}

void ZooKeeperCreateRequest::writeImpl(WriteBuffer & out) const
{
    Coordination::write(path, out);
    Coordination::write(data, out);
    Coordination::write(acls, out);

    int32_t flags = 0;

    if (is_ephemeral)
        flags |= 1;
    if (is_sequential)
        flags |= 2;

    Coordination::write(flags, out);
}

void ZooKeeperCreateRequest::readImpl(ReadBuffer & in)
{
    Coordination::read(path, in);
    Coordination::read(data, in);
    Coordination::read(acls, in);

    int32_t flags = 0;
    Coordination::read(flags, in);

    if (flags & 1)
        is_ephemeral = true;
    if (flags & 2)
        is_sequential = true;
}

void ZooKeeperCreateResponse::readImpl(ReadBuffer & in)
{
    Coordination::read(path_created, in);
}

void ZooKeeperCreateResponse::writeImpl(WriteBuffer & out) const
{
    Coordination::write(path_created, out);
}

void ZooKeeperRemoveRequest::writeImpl(WriteBuffer & out) const
{
    Coordination::write(path, out);
    Coordination::write(version, out);
}

void ZooKeeperRemoveRequest::readImpl(ReadBuffer & in)
{
    Coordination::read(path, in);
    Coordination::read(version, in);
}

void ZooKeeperExistsRequest::writeImpl(WriteBuffer & out) const
{
    Coordination::write(path, out);
    Coordination::write(has_watch, out);
}

void ZooKeeperExistsRequest::readImpl(ReadBuffer & in)
{
    Coordination::read(path, in);
    Coordination::read(has_watch, in);
}

void ZooKeeperExistsResponse::readImpl(ReadBuffer & in)
{
    Coordination::read(stat, in);
}

void ZooKeeperExistsResponse::writeImpl(WriteBuffer & out) const
{
    Coordination::write(stat, out);
}

void ZooKeeperGetRequest::writeImpl(WriteBuffer & out) const
{
    Coordination::write(path, out);
    Coordination::write(has_watch, out);
}

void ZooKeeperGetRequest::readImpl(ReadBuffer & in)
{
    Coordination::read(path, in);
    Coordination::read(has_watch, in);
}

void ZooKeeperGetResponse::readImpl(ReadBuffer & in)
{
    Coordination::read(data, in);
    Coordination::read(stat, in);
}

void ZooKeeperGetResponse::writeImpl(WriteBuffer & out) const
{
    Coordination::write(data, out);
    Coordination::write(stat, out);
}

void ZooKeeperSetRequest::writeImpl(WriteBuffer & out) const
{
    Coordination::write(path, out);
    Coordination::write(data, out);
    Coordination::write(version, out);
}

void ZooKeeperSetRequest::readImpl(ReadBuffer & in)
{
    Coordination::read(path, in);
    Coordination::read(data, in);
    Coordination::read(version, in);
}

void ZooKeeperSetResponse::readImpl(ReadBuffer & in)
{
    Coordination::read(stat, in);
}

void ZooKeeperSetResponse::writeImpl(WriteBuffer & out) const
{
    Coordination::write(stat, out);
}

void ZooKeeperListRequest::writeImpl(WriteBuffer & out) const
{
    Coordination::write(path, out);
    Coordination::write(has_watch, out);
}

void ZooKeeperListRequest::readImpl(ReadBuffer & in)
{
    Coordination::read(path, in);
    Coordination::read(has_watch, in);
}

void ZooKeeperListResponse::readImpl(ReadBuffer & in)
{
    Coordination::read(names, in);
    Coordination::read(stat, in);
}

void ZooKeeperListResponse::writeImpl(WriteBuffer & out) const
{
    Coordination::write(names, out);
    Coordination::write(stat, out);
}

void ZooKeeperCheckRequest::writeImpl(WriteBuffer & out) const
{
    Coordination::write(path, out);
    Coordination::write(version, out);
}

void ZooKeeperCheckRequest::readImpl(ReadBuffer & in)
{
    Coordination::read(path, in);
    Coordination::read(version, in);
}

void ZooKeeperErrorResponse::readImpl(ReadBuffer & in)
{
    Coordination::Error read_error;
    Coordination::read(read_error, in);

    if (read_error != error)
        throw Exception(fmt::format("Error code in ErrorResponse ({}) doesn't match error code in header ({})", read_error, error),
            Error::ZMARSHALLINGERROR);
}

void ZooKeeperErrorResponse::writeImpl(WriteBuffer & out) const
{
    Coordination::write(error, out);
}

ZooKeeperMultiRequest::ZooKeeperMultiRequest(const Requests & generic_requests, const ACLs & default_acls)
{
    /// Convert nested Requests to ZooKeeperRequests.
    /// Note that deep copy is required to avoid modifying path in presence of chroot prefix.
    requests.reserve(generic_requests.size());

    for (const auto & generic_request : generic_requests)
    {
        if (const auto * concrete_request_create = dynamic_cast<const CreateRequest *>(generic_request.get()))
        {
            auto create = std::make_shared<ZooKeeperCreateRequest>(*concrete_request_create);
            if (create->acls.empty())
                create->acls = default_acls;
            requests.push_back(create);
        }
        else if (const auto * concrete_request_remove = dynamic_cast<const RemoveRequest *>(generic_request.get()))
        {
            requests.push_back(std::make_shared<ZooKeeperRemoveRequest>(*concrete_request_remove));
        }
        else if (const auto * concrete_request_set = dynamic_cast<const SetRequest *>(generic_request.get()))
        {
            requests.push_back(std::make_shared<ZooKeeperSetRequest>(*concrete_request_set));
        }
        else if (const auto * concrete_request_check = dynamic_cast<const CheckRequest *>(generic_request.get()))
        {
            requests.push_back(std::make_shared<ZooKeeperCheckRequest>(*concrete_request_check));
        }
        else
            throw Exception("Illegal command as part of multi ZooKeeper request", Error::ZBADARGUMENTS);
    }
}

void ZooKeeperMultiRequest::writeImpl(WriteBuffer & out) const
{
    for (const auto & request : requests)
    {
        const auto & zk_request = dynamic_cast<const ZooKeeperRequest &>(*request);

        bool done = false;
        int32_t error = -1;

        Coordination::write(zk_request.getOpNum(), out);
        Coordination::write(done, out);
        Coordination::write(error, out);

        zk_request.writeImpl(out);
    }

    OpNum op_num = OpNum::Error;
    bool done = true;
    int32_t error = -1;

    Coordination::write(op_num, out);
    Coordination::write(done, out);
    Coordination::write(error, out);
}

void ZooKeeperMultiRequest::readImpl(ReadBuffer & in)
{
    while (true)
    {
        OpNum op_num;
        bool done;
        int32_t error;
        Coordination::read(op_num, in);
        Coordination::read(done, in);
        Coordination::read(error, in);

        if (done)
        {
            if (op_num != OpNum::Error)
                throw Exception("Unexpected op_num received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
            if (error != -1)
                throw Exception("Unexpected error value received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
            break;
        }

        ZooKeeperRequestPtr request = ZooKeeperRequestFactory::instance().get(op_num);
        request->readImpl(in);
        requests.push_back(request);

        if (in.eof())
            throw Exception("Not enough results received for multi transaction", Error::ZMARSHALLINGERROR);
    }
}

void ZooKeeperMultiResponse::readImpl(ReadBuffer & in)
{
    for (auto & response : responses)
    {
        OpNum op_num;
        bool done;
        Error op_error;

        Coordination::read(op_num, in);
        Coordination::read(done, in);
        Coordination::read(op_error, in);

        if (done)
            throw Exception("Not enough results received for multi transaction", Error::ZMARSHALLINGERROR);

        /// op_num == -1 is special for multi transaction.
        /// For unknown reason, the error code is duplicated in the header and in the response body.

        if (op_num == OpNum::Error)
            response = std::make_shared<ZooKeeperErrorResponse>();

        if (op_error != Error::ZOK)
        {
            response->error = op_error;

            /// Set the error for the whole transaction.
            /// If some operations fail, ZK sends the global error as zero and then sends details about each operation:
            /// the error code of the first failed operation, and the special "runtime inconsistency" code for the others.
            if (error == Error::ZOK && op_error != Error::ZRUNTIMEINCONSISTENCY)
                error = op_error;
        }

        if (op_error == Error::ZOK || op_num == OpNum::Error)
            dynamic_cast<ZooKeeperResponse &>(*response).readImpl(in);
    }

    /// Footer.
    {
        OpNum op_num;
        bool done;
        int32_t error_read;

        Coordination::read(op_num, in);
        Coordination::read(done, in);
        Coordination::read(error_read, in);

        if (!done)
            throw Exception("Too many results received for multi transaction", Error::ZMARSHALLINGERROR);
        if (op_num != OpNum::Error)
            throw Exception("Unexpected op_num received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
        if (error_read != -1)
            throw Exception("Unexpected error value received at the end of results for multi transaction", Error::ZMARSHALLINGERROR);
    }
}

void ZooKeeperMultiResponse::writeImpl(WriteBuffer & out) const
{
    for (const auto & response : responses)
    {
        const ZooKeeperResponse & zk_response = dynamic_cast<const ZooKeeperResponse &>(*response);
        OpNum op_num = zk_response.getOpNum();
        bool done = false;
        Error op_error = zk_response.error;

        Coordination::write(op_num, out);
        Coordination::write(done, out);
        Coordination::write(op_error, out);
        if (op_error == Error::ZOK || op_num == OpNum::Error)
            zk_response.writeImpl(out);
    }

    /// Footer.
    {
        OpNum op_num = OpNum::Error;
        bool done = true;
        int32_t error_read = -1;

        Coordination::write(op_num, out);
        Coordination::write(done, out);
        Coordination::write(error_read, out);
    }
}
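
Both multi readImpl/writeImpl pairs above speak the same framing: each sub-operation is prefixed by an (op_num, done, error) header, and the stream is terminated by a footer with op_num = -1 (OpNum::Error), done = true, error = -1. A toy, self-contained decoder over an in-memory big-endian buffer (hypothetical demo; per-operation payloads omitted, little-endian host assumed):

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

/// Each entry is framed as: int32 op_num | bool done | int32 error | payload...
struct Cursor
{
    const uint8_t * data;
    size_t pos = 0;

    int32_t readInt32()
    {
        uint32_t raw;
        std::memcpy(&raw, data + pos, 4);
        pos += 4;
        return static_cast<int32_t>(__builtin_bswap32(raw));  /// network order -> host
    }

    bool readBool() { return data[pos++] != 0; }
};

int main()
{
    /// Two empty entries (op 13 = Check, op 5 = Set) followed by the footer.
    std::vector<uint8_t> buf = {
        0, 0, 0, 13, 0, 0, 0, 0, 0,                        /// op=13, done=false, error=0
        0, 0, 0, 5,  0, 0, 0, 0, 0,                        /// op=5,  done=false, error=0
        0xFF, 0xFF, 0xFF, 0xFF, 1, 0xFF, 0xFF, 0xFF, 0xFF  /// footer: -1, true, -1
    };

    Cursor cursor{buf.data()};
    while (true)
    {
        int32_t op_num = cursor.readInt32();
        bool done = cursor.readBool();
        int32_t error = cursor.readInt32();
        if (done)
        {
            std::cout << "footer: op=" << op_num << " error=" << error << "\n";
            break;
        }
        std::cout << "entry: op=" << op_num << " error=" << error << "\n";
        /// Real code would now read the per-operation payload.
    }
}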

ZooKeeperResponsePtr ZooKeeperHeartbeatRequest::makeResponse() const { return std::make_shared<ZooKeeperHeartbeatResponse>(); }
ZooKeeperResponsePtr ZooKeeperAuthRequest::makeResponse() const { return std::make_shared<ZooKeeperAuthResponse>(); }
ZooKeeperResponsePtr ZooKeeperCreateRequest::makeResponse() const { return std::make_shared<ZooKeeperCreateResponse>(); }
ZooKeeperResponsePtr ZooKeeperRemoveRequest::makeResponse() const { return std::make_shared<ZooKeeperRemoveResponse>(); }
ZooKeeperResponsePtr ZooKeeperExistsRequest::makeResponse() const { return std::make_shared<ZooKeeperExistsResponse>(); }
ZooKeeperResponsePtr ZooKeeperGetRequest::makeResponse() const { return std::make_shared<ZooKeeperGetResponse>(); }
ZooKeeperResponsePtr ZooKeeperSetRequest::makeResponse() const { return std::make_shared<ZooKeeperSetResponse>(); }
ZooKeeperResponsePtr ZooKeeperListRequest::makeResponse() const { return std::make_shared<ZooKeeperListResponse>(); }
ZooKeeperResponsePtr ZooKeeperCheckRequest::makeResponse() const { return std::make_shared<ZooKeeperCheckResponse>(); }
ZooKeeperResponsePtr ZooKeeperMultiRequest::makeResponse() const { return std::make_shared<ZooKeeperMultiResponse>(requests); }
ZooKeeperResponsePtr ZooKeeperCloseRequest::makeResponse() const { return std::make_shared<ZooKeeperCloseResponse>(); }

void ZooKeeperRequestFactory::registerRequest(OpNum op_num, Creator creator)
{
    if (!op_num_to_request.try_emplace(op_num, creator).second)
        throw Coordination::Exception("Request type " + toString(op_num) + " already registered", Coordination::Error::ZRUNTIMEINCONSISTENCY);
}

std::shared_ptr<ZooKeeperRequest> ZooKeeperRequest::read(ReadBuffer & in)
{
    XID xid;
    OpNum op_num;

    Coordination::read(xid, in);
    Coordination::read(op_num, in);

    auto request = ZooKeeperRequestFactory::instance().get(op_num);
    request->xid = xid;
    request->readImpl(in);
    return request;
}

ZooKeeperRequestPtr ZooKeeperRequestFactory::get(OpNum op_num) const
{
    auto it = op_num_to_request.find(op_num);
    if (it == op_num_to_request.end())
        throw Exception("Unknown operation type " + toString(op_num), Error::ZBADARGUMENTS);

    return it->second();
}

ZooKeeperRequestFactory & ZooKeeperRequestFactory::instance()
{
    static ZooKeeperRequestFactory factory;
    return factory;
}

template<OpNum num, typename RequestT>
void registerZooKeeperRequest(ZooKeeperRequestFactory & factory)
{
    factory.registerRequest(num, [] { return std::make_shared<RequestT>(); });
}

ZooKeeperRequestFactory::ZooKeeperRequestFactory()
{
    registerZooKeeperRequest<OpNum::Heartbeat, ZooKeeperHeartbeatRequest>(*this);
    registerZooKeeperRequest<OpNum::Auth, ZooKeeperAuthRequest>(*this);
    registerZooKeeperRequest<OpNum::Close, ZooKeeperCloseRequest>(*this);
    registerZooKeeperRequest<OpNum::Create, ZooKeeperCreateRequest>(*this);
    registerZooKeeperRequest<OpNum::Remove, ZooKeeperRemoveRequest>(*this);
    registerZooKeeperRequest<OpNum::Exists, ZooKeeperExistsRequest>(*this);
    registerZooKeeperRequest<OpNum::Get, ZooKeeperGetRequest>(*this);
    registerZooKeeperRequest<OpNum::Set, ZooKeeperSetRequest>(*this);
    registerZooKeeperRequest<OpNum::SimpleList, ZooKeeperSimpleListRequest>(*this);
    registerZooKeeperRequest<OpNum::List, ZooKeeperListRequest>(*this);
    registerZooKeeperRequest<OpNum::Check, ZooKeeperCheckRequest>(*this);
    registerZooKeeperRequest<OpNum::Multi, ZooKeeperMultiRequest>(*this);
}

}
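
Note how both write() methods at the top of this file produce the ZooKeeper length prefix: the packet body is first serialized into a temporary buffer, and the buffer is then emitted as a length-prefixed string. A standalone sketch of that framing (hypothetical helper names; little-endian host assumed):

#include <cstdint>
#include <iostream>
#include <string>

/// Append a big-endian int32 to a byte string.
static void writeInt32(std::string & out, int32_t x)
{
    uint32_t be = __builtin_bswap32(static_cast<uint32_t>(x));
    out.append(reinterpret_cast<const char *>(&be), 4);
}

/// Frame a request the way ZooKeeperRequest::write does:
/// body = xid | op_num | payload, and the wire packet is length | body.
static std::string framePacket(int32_t xid, int32_t op_num, const std::string & payload)
{
    std::string body;
    writeInt32(body, xid);
    writeInt32(body, op_num);
    body += payload;

    std::string packet;
    writeInt32(packet, static_cast<int32_t>(body.size()));  /// length prefix
    packet += body;
    return packet;
}

int main()
{
    std::string packet = framePacket(/*xid=*/1, /*op_num=*/11 /* Heartbeat */, "");
    std::cout << "packet size: " << packet.size() << "\n";  /// 4 (length) + 8 (body) = 12
}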

338
src/Common/ZooKeeper/ZooKeeperCommon.h
Normal file
@ -0,0 +1,338 @@
#pragma once

#include <Common/ZooKeeper/IKeeper.h>
#include <Common/ZooKeeper/ZooKeeperConstants.h>

#include <boost/noncopyable.hpp>
#include <IO/ReadBuffer.h>
#include <IO/WriteBuffer.h>
#include <map>
#include <unordered_map>
#include <mutex>
#include <chrono>
#include <vector>
#include <memory>
#include <thread>
#include <atomic>
#include <cstdint>
#include <optional>
#include <functional>


namespace Coordination
{

struct ZooKeeperResponse : virtual Response
{
    XID xid = 0;
    int64_t zxid;

    virtual ~ZooKeeperResponse() override = default;
    virtual void readImpl(ReadBuffer &) = 0;
    virtual void writeImpl(WriteBuffer &) const = 0;
    void write(WriteBuffer & out) const;
    virtual OpNum getOpNum() const = 0;
};

using ZooKeeperResponsePtr = std::shared_ptr<ZooKeeperResponse>;

/// Exposed in header file for Yandex.Metrica code.
struct ZooKeeperRequest : virtual Request
{
    XID xid = 0;
    bool has_watch = false;
    /// If the request was not sent and an error happened, we are sure that it has not been processed by the server.
    /// If the request was sent and we got no response before an error happened, we cannot be sure whether it was processed or not.
    bool probably_sent = false;

    ZooKeeperRequest() = default;
    ZooKeeperRequest(const ZooKeeperRequest &) = default;
    virtual ~ZooKeeperRequest() override = default;

    virtual OpNum getOpNum() const = 0;

    /// Writes length, xid, op_num, then the rest.
    void write(WriteBuffer & out) const;

    virtual void writeImpl(WriteBuffer &) const = 0;
    virtual void readImpl(ReadBuffer &) = 0;

    static std::shared_ptr<ZooKeeperRequest> read(ReadBuffer & in);

    virtual ZooKeeperResponsePtr makeResponse() const = 0;
};

using ZooKeeperRequestPtr = std::shared_ptr<ZooKeeperRequest>;

struct ZooKeeperHeartbeatRequest final : ZooKeeperRequest
{
    String getPath() const override { return {}; }
    OpNum getOpNum() const override { return OpNum::Heartbeat; }
    void writeImpl(WriteBuffer &) const override {}
    void readImpl(ReadBuffer &) override {}
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperHeartbeatResponse final : ZooKeeperResponse
{
    void readImpl(ReadBuffer &) override {}
    void writeImpl(WriteBuffer &) const override {}
    OpNum getOpNum() const override { return OpNum::Heartbeat; }
};

struct ZooKeeperWatchResponse final : WatchResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override;

    void writeImpl(WriteBuffer & out) const override;

    OpNum getOpNum() const override
    {
        throw Exception("OpNum for watch response doesn't exist", Error::ZRUNTIMEINCONSISTENCY);
    }
};

struct ZooKeeperAuthRequest final : ZooKeeperRequest
{
    int32_t type = 0; /// ignored by the server
    String scheme;
    String data;

    String getPath() const override { return {}; }
    OpNum getOpNum() const override { return OpNum::Auth; }
    void writeImpl(WriteBuffer & out) const override;
    void readImpl(ReadBuffer & in) override;

    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperAuthResponse final : ZooKeeperResponse
{
    void readImpl(ReadBuffer &) override {}
    void writeImpl(WriteBuffer &) const override {}

    OpNum getOpNum() const override { return OpNum::Auth; }
};

struct ZooKeeperCloseRequest final : ZooKeeperRequest
{
    String getPath() const override { return {}; }
    OpNum getOpNum() const override { return OpNum::Close; }
    void writeImpl(WriteBuffer &) const override {}
    void readImpl(ReadBuffer &) override {}

    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperCloseResponse final : ZooKeeperResponse
{
    void readImpl(ReadBuffer &) override
    {
        throw Exception("Received response for close request", Error::ZRUNTIMEINCONSISTENCY);
    }

    void writeImpl(WriteBuffer &) const override {}

    OpNum getOpNum() const override { return OpNum::Close; }
};

struct ZooKeeperCreateRequest final : public CreateRequest, ZooKeeperRequest
{
    ZooKeeperCreateRequest() = default;
    explicit ZooKeeperCreateRequest(const CreateRequest & base) : CreateRequest(base) {}

    OpNum getOpNum() const override { return OpNum::Create; }
    void writeImpl(WriteBuffer & out) const override;
    void readImpl(ReadBuffer & in) override;

    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperCreateResponse final : CreateResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override;

    void writeImpl(WriteBuffer & out) const override;

    OpNum getOpNum() const override { return OpNum::Create; }
};

struct ZooKeeperRemoveRequest final : RemoveRequest, ZooKeeperRequest
{
    ZooKeeperRemoveRequest() = default;
    explicit ZooKeeperRemoveRequest(const RemoveRequest & base) : RemoveRequest(base) {}

    OpNum getOpNum() const override { return OpNum::Remove; }
    void writeImpl(WriteBuffer & out) const override;
    void readImpl(ReadBuffer & in) override;

    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperRemoveResponse final : RemoveResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer &) override {}
    void writeImpl(WriteBuffer &) const override {}
    OpNum getOpNum() const override { return OpNum::Remove; }
};

struct ZooKeeperExistsRequest final : ExistsRequest, ZooKeeperRequest
{
    OpNum getOpNum() const override { return OpNum::Exists; }
    void writeImpl(WriteBuffer & out) const override;
    void readImpl(ReadBuffer & in) override;

    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperExistsResponse final : ExistsResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override;
    void writeImpl(WriteBuffer & out) const override;
    OpNum getOpNum() const override { return OpNum::Exists; }
};

struct ZooKeeperGetRequest final : GetRequest, ZooKeeperRequest
{
    OpNum getOpNum() const override { return OpNum::Get; }
    void writeImpl(WriteBuffer & out) const override;
    void readImpl(ReadBuffer & in) override;

    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperGetResponse final : GetResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override;
    void writeImpl(WriteBuffer & out) const override;
    OpNum getOpNum() const override { return OpNum::Get; }
};

struct ZooKeeperSetRequest final : SetRequest, ZooKeeperRequest
{
    ZooKeeperSetRequest() = default;
    explicit ZooKeeperSetRequest(const SetRequest & base) : SetRequest(base) {}

    OpNum getOpNum() const override { return OpNum::Set; }
    void writeImpl(WriteBuffer & out) const override;
    void readImpl(ReadBuffer & in) override;
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperSetResponse final : SetResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override;
    void writeImpl(WriteBuffer & out) const override;
    OpNum getOpNum() const override { return OpNum::Set; }
};

struct ZooKeeperListRequest : ListRequest, ZooKeeperRequest
{
    OpNum getOpNum() const override { return OpNum::List; }
    void writeImpl(WriteBuffer & out) const override;
    void readImpl(ReadBuffer & in) override;
    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperSimpleListRequest final : ZooKeeperListRequest
{
    OpNum getOpNum() const override { return OpNum::SimpleList; }
};

struct ZooKeeperListResponse : ListResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override;
    void writeImpl(WriteBuffer & out) const override;
    OpNum getOpNum() const override { return OpNum::List; }
};

struct ZooKeeperSimpleListResponse final : ZooKeeperListResponse
{
    OpNum getOpNum() const override { return OpNum::SimpleList; }
};

struct ZooKeeperCheckRequest final : CheckRequest, ZooKeeperRequest
{
    ZooKeeperCheckRequest() = default;
    explicit ZooKeeperCheckRequest(const CheckRequest & base) : CheckRequest(base) {}

    OpNum getOpNum() const override { return OpNum::Check; }
    void writeImpl(WriteBuffer & out) const override;
    void readImpl(ReadBuffer & in) override;

    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperCheckResponse final : CheckResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer &) override {}
    void writeImpl(WriteBuffer &) const override {}
    OpNum getOpNum() const override { return OpNum::Check; }
};

/// This response may be received only as an element of responses in MultiResponse.
struct ZooKeeperErrorResponse final : ErrorResponse, ZooKeeperResponse
{
    void readImpl(ReadBuffer & in) override;
    void writeImpl(WriteBuffer & out) const override;

    OpNum getOpNum() const override { return OpNum::Error; }
};

struct ZooKeeperMultiRequest final : MultiRequest, ZooKeeperRequest
{
    OpNum getOpNum() const override { return OpNum::Multi; }
    ZooKeeperMultiRequest() = default;

    ZooKeeperMultiRequest(const Requests & generic_requests, const ACLs & default_acls);

    void writeImpl(WriteBuffer & out) const override;
    void readImpl(ReadBuffer & in) override;

    ZooKeeperResponsePtr makeResponse() const override;
};

struct ZooKeeperMultiResponse final : MultiResponse, ZooKeeperResponse
{
    OpNum getOpNum() const override { return OpNum::Multi; }

    explicit ZooKeeperMultiResponse(const Requests & requests)
    {
        responses.reserve(requests.size());

        for (const auto & request : requests)
            responses.emplace_back(dynamic_cast<const ZooKeeperRequest &>(*request).makeResponse());
    }

    explicit ZooKeeperMultiResponse(const Responses & responses_)
    {
        responses = responses_;
    }

    void readImpl(ReadBuffer & in) override;

    void writeImpl(WriteBuffer & out) const override;
};

class ZooKeeperRequestFactory final : private boost::noncopyable
{
public:
    using Creator = std::function<ZooKeeperRequestPtr()>;
    using OpNumToRequest = std::unordered_map<OpNum, Creator>;

    static ZooKeeperRequestFactory & instance();

    ZooKeeperRequestPtr get(OpNum op_num) const;

    void registerRequest(OpNum op_num, Creator creator);

private:
    OpNumToRequest op_num_to_request;

    ZooKeeperRequestFactory();
};

}

67
src/Common/ZooKeeper/ZooKeeperConstants.cpp
Normal file
@ -0,0 +1,67 @@
#include <Common/ZooKeeper/ZooKeeperConstants.h>
#include <Common/ZooKeeper/IKeeper.h>
#include <unordered_set>

namespace Coordination
{

static const std::unordered_set<int32_t> VALID_OPERATIONS =
{
    static_cast<int32_t>(OpNum::Close),
    static_cast<int32_t>(OpNum::Error),
    static_cast<int32_t>(OpNum::Create),
    static_cast<int32_t>(OpNum::Remove),
    static_cast<int32_t>(OpNum::Exists),
    static_cast<int32_t>(OpNum::Get),
    static_cast<int32_t>(OpNum::Set),
    static_cast<int32_t>(OpNum::SimpleList),
    static_cast<int32_t>(OpNum::Heartbeat),
    static_cast<int32_t>(OpNum::List),
    static_cast<int32_t>(OpNum::Check),
    static_cast<int32_t>(OpNum::Multi),
    static_cast<int32_t>(OpNum::Auth),
};

std::string toString(OpNum op_num)
{
    switch (op_num)
    {
        case OpNum::Close:
            return "Close";
        case OpNum::Error:
            return "Error";
        case OpNum::Create:
            return "Create";
        case OpNum::Remove:
            return "Remove";
        case OpNum::Exists:
            return "Exists";
        case OpNum::Get:
            return "Get";
        case OpNum::Set:
            return "Set";
        case OpNum::SimpleList:
            return "SimpleList";
        case OpNum::List:
            return "List";
        case OpNum::Check:
            return "Check";
        case OpNum::Multi:
            return "Multi";
        case OpNum::Heartbeat:
            return "Heartbeat";
        case OpNum::Auth:
            return "Auth";
    }
    int32_t raw_op = static_cast<int32_t>(op_num);
    throw Exception("Operation " + std::to_string(raw_op) + " is unknown", Error::ZUNIMPLEMENTED);
}

OpNum getOpNum(int32_t raw_op_num)
{
    if (!VALID_OPERATIONS.count(raw_op_num))
        throw Exception("Operation " + std::to_string(raw_op_num) + " is unknown", Error::ZUNIMPLEMENTED);
    return static_cast<OpNum>(raw_op_num);
}

}

49
src/Common/ZooKeeper/ZooKeeperConstants.h
Normal file
@ -0,0 +1,49 @@
#pragma once

#include <string>
#include <cstdint>


namespace Coordination
{

using XID = int32_t;

static constexpr XID WATCH_XID = -1;
static constexpr XID PING_XID = -2;
static constexpr XID AUTH_XID = -4;
static constexpr XID CLOSE_XID = 0x7FFFFFFF;

enum class OpNum : int32_t
{
    Close = -11,
    Error = -1,
    Create = 1,
    Remove = 2,
    Exists = 3,
    Get = 4,
    Set = 5,
    SimpleList = 8,
    Heartbeat = 11,
    List = 12,
    Check = 13,
    Multi = 14,
    Auth = 100,
};

std::string toString(OpNum op_num);
OpNum getOpNum(int32_t raw_op_num);

static constexpr int32_t ZOOKEEPER_PROTOCOL_VERSION = 0;
static constexpr int32_t CLIENT_HANDSHAKE_LENGTH = 44;
static constexpr int32_t CLIENT_HANDSHAKE_LENGTH_WITH_READONLY = 45;
static constexpr int32_t SERVER_HANDSHAKE_LENGTH = 36;
static constexpr int32_t PASSWORD_LENGTH = 16;

/// ZooKeeper has a 1 MB node size and serialization limit by default,
/// but it can be raised, so we use a somewhat larger limit on our side.
static constexpr int32_t MAX_STRING_OR_ARRAY_SIZE = 1 << 28; /// 256 MiB

static constexpr int32_t DEFAULT_SESSION_TIMEOUT_MS = 30000;
static constexpr int32_t DEFAULT_OPERATION_TIMEOUT_MS = 10000;

}
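
The handshake lengths above are just the sums of the fixed fields of the ZooKeeper connect packets (jute encoding): the client sends protocol_version (4 bytes), last_zxid_seen (8), session_timeout (4), session_id (8) and a length-prefixed 16-byte password (4 + 16), giving 44 bytes, or 45 with the trailing read-only flag; the server replies with the same fields minus last_zxid_seen, giving 36 bytes. The arithmetic as compile-time checks (an illustrative snippet, restating the field layout from the ZooKeeper protocol):

/// Client: version + last_zxid_seen + timeout + session_id + password length prefix + password.
static_assert(4 + 8 + 4 + 8 + 4 + 16 == 44, "CLIENT_HANDSHAKE_LENGTH");
static_assert(4 + 8 + 4 + 8 + 4 + 16 + 1 == 45, "CLIENT_HANDSHAKE_LENGTH_WITH_READONLY");
/// Server: the same fields minus last_zxid_seen.
static_assert(4 + 4 + 8 + 4 + 16 == 36, "SERVER_HANDSHAKE_LENGTH");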

140
src/Common/ZooKeeper/ZooKeeperIO.cpp
Normal file
@ -0,0 +1,140 @@
#include <Common/ZooKeeper/ZooKeeperIO.h>

namespace Coordination
{

void write(int64_t x, WriteBuffer & out)
{
    x = __builtin_bswap64(x);
    writeBinary(x, out);
}

void write(int32_t x, WriteBuffer & out)
{
    x = __builtin_bswap32(x);
    writeBinary(x, out);
}

void write(OpNum x, WriteBuffer & out)
{
    write(static_cast<int32_t>(x), out);
}

void write(bool x, WriteBuffer & out)
{
    writeBinary(x, out);
}

void write(const std::string & s, WriteBuffer & out)
{
    write(int32_t(s.size()), out);
    out.write(s.data(), s.size());
}

void write(const ACL & acl, WriteBuffer & out)
{
    write(acl.permissions, out);
    write(acl.scheme, out);
    write(acl.id, out);
}

void write(const Stat & stat, WriteBuffer & out)
{
    write(stat.czxid, out);
    write(stat.mzxid, out);
    write(stat.ctime, out);
    write(stat.mtime, out);
    write(stat.version, out);
    write(stat.cversion, out);
    write(stat.aversion, out);
    write(stat.ephemeralOwner, out);
    write(stat.dataLength, out);
    write(stat.numChildren, out);
    write(stat.pzxid, out);
}

void write(const Error & x, WriteBuffer & out)
{
    write(static_cast<int32_t>(x), out);
}

void read(int64_t & x, ReadBuffer & in)
{
    readBinary(x, in);
    x = __builtin_bswap64(x);
}

void read(int32_t & x, ReadBuffer & in)
{
    readBinary(x, in);
    x = __builtin_bswap32(x);
}

void read(OpNum & x, ReadBuffer & in)
{
    int32_t raw_op_num;
    read(raw_op_num, in);
    x = getOpNum(raw_op_num);
}

void read(bool & x, ReadBuffer & in)
{
    readBinary(x, in);
}

void read(int8_t & x, ReadBuffer & in)
{
    readBinary(x, in);
}

void read(std::string & s, ReadBuffer & in)
{
    int32_t size = 0;
    read(size, in);

    if (size == -1)
    {
        /// It means that the ZooKeeper node has a NULL value. We treat it as an empty string.
        s.clear();
        return;
    }

    if (size < 0)
        throw Exception("Negative size while reading string from ZooKeeper", Error::ZMARSHALLINGERROR);

    if (size > MAX_STRING_OR_ARRAY_SIZE)
        throw Exception("Too large string size while reading from ZooKeeper", Error::ZMARSHALLINGERROR);

    s.resize(size);
    in.read(s.data(), size);
}

void read(ACL & acl, ReadBuffer & in)
{
    read(acl.permissions, in);
    read(acl.scheme, in);
    read(acl.id, in);
}

void read(Stat & stat, ReadBuffer & in)
{
    read(stat.czxid, in);
    read(stat.mzxid, in);
    read(stat.ctime, in);
    read(stat.mtime, in);
    read(stat.version, in);
    read(stat.cversion, in);
    read(stat.aversion, in);
    read(stat.ephemeralOwner, in);
    read(stat.dataLength, in);
    read(stat.numChildren, in);
    read(stat.pzxid, in);
}

void read(Error & x, ReadBuffer & in)
{
    int32_t code;
    read(code, in);
    x = Coordination::Error(code);
}

}
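
The integer helpers above byte-swap on both write and read because ZooKeeper's jute encoding is big-endian while the supported hosts are little-endian, so the swap is its own inverse and the round trip is an identity. A small self-check sketch:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main()
{
    int64_t original = 0x0102030405060708;

    /// write: host (little-endian) -> network (big-endian)
    int64_t wire = __builtin_bswap64(original);

    /// read: network -> host
    int64_t decoded = __builtin_bswap64(wire);

    assert(decoded == original);
    std::printf("first wire byte is 0x%02x, round trip ok\n",
                static_cast<unsigned>(wire & 0xFF));  /// 0x01: most significant byte first
}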