Mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-12-17 20:02:05 +00:00

Merge branch 'master' into amosbird-fixcrash
Commit 1991f1d827
.gitmodules (vendored): 7 lines changed
@@ -193,3 +193,10 @@
[submodule "contrib/miniselect"]
	path = contrib/miniselect
	url = https://github.com/danlark1/miniselect
[submodule "contrib/rocksdb"]
	path = contrib/rocksdb
	url = https://github.com/facebook/rocksdb
	branch = v6.11.4
[submodule "contrib/xz"]
	path = contrib/xz
	url = https://github.com/xz-mirror/xz
CHANGELOG.md: 277 lines changed
@@ -1,3 +1,12 @@
## ClickHouse release 20.11

### ClickHouse release v20.11.3.3-stable, 2020-11-13

#### Bug Fix

* Fix rare silent crashes when the query profiler is on and ClickHouse is installed on an OS with a glibc version that has (supposedly) broken asynchronous unwind tables for some functions. This fixes [#15301](https://github.com/ClickHouse/ClickHouse/issues/15301). This fixes [#13098](https://github.com/ClickHouse/ClickHouse/issues/13098). [#16846](https://github.com/ClickHouse/ClickHouse/pull/16846) ([alexey-milovidov](https://github.com/alexey-milovidov)).

### ClickHouse release v20.11.2.1, 2020-11-11

#### Backward Incompatible Change
@@ -119,6 +128,24 @@
## ClickHouse release 20.10

### ClickHouse release v20.10.4.1-stable, 2020-11-13

#### Bug Fix

* Fix rare silent crashes when the query profiler is on and ClickHouse is installed on an OS with a glibc version that has (supposedly) broken asynchronous unwind tables for some functions. This fixes [#15301](https://github.com/ClickHouse/ClickHouse/issues/15301). This fixes [#13098](https://github.com/ClickHouse/ClickHouse/issues/13098). [#16846](https://github.com/ClickHouse/ClickHouse/pull/16846) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix the `IN` operator over several columns and tuples with the `transform_null_in` setting enabled. Fixes [#15310](https://github.com/ClickHouse/ClickHouse/issues/15310). [#16722](https://github.com/ClickHouse/ClickHouse/pull/16722) ([Anton Popov](https://github.com/CurtizJ)).
* Fix `optimize_read_in_order`/`optimize_aggregation_in_order` with `max_threads > 0` and an expression in `ORDER BY`. [#16637](https://github.com/ClickHouse/ClickHouse/pull/16637) ([Azat Khuzhin](https://github.com/azat)).
* Now, when parsing AVRO input, `LowCardinality` is removed from the type. Fixes [#16188](https://github.com/ClickHouse/ClickHouse/issues/16188). [#16521](https://github.com/ClickHouse/ClickHouse/pull/16521) ([Mike](https://github.com/myrrc)).
* Fix rapid growth of metadata when using MySQL Master -> MySQL Slave -> ClickHouse MaterializeMySQL Engine with `slave_parallel_worker` enabled on the MySQL Slave, by properly shrinking GTID sets. This fixes [#15951](https://github.com/ClickHouse/ClickHouse/issues/15951). [#16504](https://github.com/ClickHouse/ClickHouse/pull/16504) ([TCeason](https://github.com/TCeason)).
* Fix `DROP TABLE` for `Distributed` tables (racy with `INSERT`). [#16409](https://github.com/ClickHouse/ClickHouse/pull/16409) ([Azat Khuzhin](https://github.com/azat)).
* Fix processing of very large entries in the replication queue. Very large entries may appear in `ALTER` queries if the table structure is extremely large (near 1 MB). This fixes [#16307](https://github.com/ClickHouse/ClickHouse/issues/16307). [#16332](https://github.com/ClickHouse/ClickHouse/pull/16332) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix a bug with the MySQL database engine: when the MySQL server backing the database engine is down, some queries raise an exception because they needlessly try to fetch tables from the unavailable server. For example, the query `SELECT ... FROM system.parts` should work only with MergeTree tables and not touch the MySQL database at all. [#16032](https://github.com/ClickHouse/ClickHouse/pull/16032) ([Kruglov Pavel](https://github.com/Avogar)).

#### Improvement

* Workaround for using S3 with an nginx server as a proxy. Nginx currently does not accept URLs with an empty path like http://domain.com?delete, but vanilla aws-sdk-cpp produces this kind of URL. This commit uses a patched aws-sdk-cpp version which produces URLs with "/" as the path in such cases, like http://domain.com/?delete. [#16813](https://github.com/ClickHouse/ClickHouse/pull/16813) ([ianton-ru](https://github.com/ianton-ru)).

### ClickHouse release v20.10.3.30, 2020-10-28

#### Backward Incompatible Change
@@ -331,6 +358,84 @@
## ClickHouse release 20.9

### ClickHouse release v20.9.5.5-stable, 2020-11-13

#### Bug Fix

* Fix rare silent crashes when the query profiler is on and ClickHouse is installed on an OS with a glibc version that has (supposedly) broken asynchronous unwind tables for some functions. This fixes [#15301](https://github.com/ClickHouse/ClickHouse/issues/15301). This fixes [#13098](https://github.com/ClickHouse/ClickHouse/issues/13098). [#16846](https://github.com/ClickHouse/ClickHouse/pull/16846) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Now, when parsing AVRO input, `LowCardinality` is removed from the type. Fixes [#16188](https://github.com/ClickHouse/ClickHouse/issues/16188). [#16521](https://github.com/ClickHouse/ClickHouse/pull/16521) ([Mike](https://github.com/myrrc)).
* Fix rapid growth of metadata when using MySQL Master -> MySQL Slave -> ClickHouse MaterializeMySQL Engine with `slave_parallel_worker` enabled on the MySQL Slave, by properly shrinking GTID sets. This fixes [#15951](https://github.com/ClickHouse/ClickHouse/issues/15951). [#16504](https://github.com/ClickHouse/ClickHouse/pull/16504) ([TCeason](https://github.com/TCeason)).
* Fix `DROP TABLE` for `Distributed` tables (racy with `INSERT`). [#16409](https://github.com/ClickHouse/ClickHouse/pull/16409) ([Azat Khuzhin](https://github.com/azat)).
* Fix processing of very large entries in the replication queue. Very large entries may appear in `ALTER` queries if the table structure is extremely large (near 1 MB). This fixes [#16307](https://github.com/ClickHouse/ClickHouse/issues/16307). [#16332](https://github.com/ClickHouse/ClickHouse/pull/16332) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed inconsistent behaviour where part of the returned data could be dropped because the set for its filtration wasn't created. [#16308](https://github.com/ClickHouse/ClickHouse/pull/16308) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix a bug with the MySQL database engine: when the MySQL server backing the database engine is down, some queries raise an exception because they needlessly try to fetch tables from the unavailable server. For example, the query `SELECT ... FROM system.parts` should work only with MergeTree tables and not touch the MySQL database at all. [#16032](https://github.com/ClickHouse/ClickHouse/pull/16032) ([Kruglov Pavel](https://github.com/Avogar)).

### ClickHouse release v20.9.4.76-stable (2020-10-29)

#### Bug Fix

* Fix a double free in case of an exception in the function `dictGet`. It could have happened if the dictionary was loaded with an error. [#16429](https://github.com/ClickHouse/ClickHouse/pull/16429) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix `GROUP BY` with totals/rollup/cube modifiers and min/max functions over group by keys. Fixes [#16393](https://github.com/ClickHouse/ClickHouse/issues/16393). [#16397](https://github.com/ClickHouse/ClickHouse/pull/16397) ([Anton Popov](https://github.com/CurtizJ)).
* Fix async Distributed INSERT with `prefer_localhost_replica=0` and `internal_replication`. [#16358](https://github.com/ClickHouse/ClickHouse/pull/16358) ([Azat Khuzhin](https://github.com/azat)).
* Fix very wrong code in the `TwoLevelStringHashTable` implementation, which might lead to a memory leak. I'm surprised this bug could lurk for so long.... [#16264](https://github.com/ClickHouse/ClickHouse/pull/16264) ([Amos Bird](https://github.com/amosbird)).
* Fix the case when memory can be overallocated regardless of the limit. This closes [#14560](https://github.com/ClickHouse/ClickHouse/issues/14560). [#16206](https://github.com/ClickHouse/ClickHouse/pull/16206) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix an `ALTER MODIFY ... ORDER BY` query hang for `ReplicatedVersionedCollapsingMergeTree`. This fixes [#15980](https://github.com/ClickHouse/ClickHouse/issues/15980). [#16011](https://github.com/ClickHouse/ClickHouse/pull/16011) ([alesapin](https://github.com/alesapin)).
* Fix the collate name and charset name parser and support `length = 0` for the string type. [#16008](https://github.com/ClickHouse/ClickHouse/pull/16008) ([Winter Zhang](https://github.com/zhang2014)).
* Allow using the direct layout for dictionaries with complex keys. [#16007](https://github.com/ClickHouse/ClickHouse/pull/16007) ([Anton Popov](https://github.com/CurtizJ)).
* Prevent replicas from hanging for 5-10 minutes when a replication error happens after a period of inactivity. [#15987](https://github.com/ClickHouse/ClickHouse/pull/15987) ([filimonov](https://github.com/filimonov)).
* Fix rare segfaults when inserting into or selecting from a MaterializedView while concurrently dropping the target table (for the Atomic database engine). [#15984](https://github.com/ClickHouse/ClickHouse/pull/15984) ([tavplubix](https://github.com/tavplubix)).
* Fix ambiguity in the parsing of settings profiles: `CREATE USER ... SETTINGS profile readonly` is now considered as using a profile named `readonly`, not a setting named `profile` with the readonly constraint. This fixes [#15628](https://github.com/ClickHouse/ClickHouse/issues/15628). [#15982](https://github.com/ClickHouse/ClickHouse/pull/15982) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix a crash when database creation fails. [#15954](https://github.com/ClickHouse/ClickHouse/pull/15954) ([Winter Zhang](https://github.com/zhang2014)).
* Fixed `DROP TABLE IF EXISTS` failing with a `Table ... doesn't exist` error when the table is concurrently renamed (for the Atomic database engine). Fixed a rare deadlock when concurrently executing some DDL queries with multiple tables (like `DROP DATABASE` and `RENAME TABLE`). Fixed `DROP/DETACH DATABASE` failing with `Table ... doesn't exist` when concurrently executing `DROP/DETACH TABLE`. [#15934](https://github.com/ClickHouse/ClickHouse/pull/15934) ([tavplubix](https://github.com/tavplubix)).
* Fix an incorrect empty result for a query from a `Distributed` table if the query has `WHERE`, `PREWHERE` and `GLOBAL IN`. Fixes [#15792](https://github.com/ClickHouse/ClickHouse/issues/15792). [#15933](https://github.com/ClickHouse/ClickHouse/pull/15933) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible deadlocks in RBAC. [#15875](https://github.com/ClickHouse/ClickHouse/pull/15875) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix the `Block structure mismatch` exception in `SELECT ... ORDER BY DESC` queries which were executed after an `ALTER MODIFY COLUMN` query. Fixes [#15800](https://github.com/ClickHouse/ClickHouse/issues/15800). [#15852](https://github.com/ClickHouse/ClickHouse/pull/15852) ([alesapin](https://github.com/alesapin)).
* Fix `select count()` inaccuracy for MaterializeMySQL. [#15767](https://github.com/ClickHouse/ClickHouse/pull/15767) ([tavplubix](https://github.com/tavplubix)).
* Fix some cases of queries in which only virtual columns are selected. Previously a `Not found column _nothing in block` exception could be thrown. Fixes [#12298](https://github.com/ClickHouse/ClickHouse/issues/12298). [#15756](https://github.com/ClickHouse/ClickHouse/pull/15756) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed the too-low default value of the `max_replicated_logs_to_keep` setting, which might cause replicas to become lost too often. Improve the lost-replica recovery process by choosing the most up-to-date replica to clone. Also do not remove old parts from the lost replica; detach them instead. [#15701](https://github.com/ClickHouse/ClickHouse/pull/15701) ([tavplubix](https://github.com/tavplubix)).
* Fix the error `Cannot add simple transform to empty Pipe` which happened while reading from a `Buffer` table whose structure differs from the destination table. It was possible if the destination table returned an empty result for the query. Fixes [#15529](https://github.com/ClickHouse/ClickHouse/issues/15529). [#15662](https://github.com/ClickHouse/ClickHouse/pull/15662) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed a bug with globs in the S3 table function: the region from the URL was not applied to the S3 client configuration. [#15646](https://github.com/ClickHouse/ClickHouse/pull/15646) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Decrement the `ReadonlyReplica` metric when detaching read-only tables. This fixes [#15598](https://github.com/ClickHouse/ClickHouse/issues/15598). [#15592](https://github.com/ClickHouse/ClickHouse/pull/15592) ([sundyli](https://github.com/sundy-li)).
* Throw an error when a single parameter is passed to ReplicatedMergeTree instead of ignoring it. [#15516](https://github.com/ClickHouse/ClickHouse/pull/15516) ([nvartolomei](https://github.com/nvartolomei)).

#### Improvement

* Now it's allowed to execute `ALTER ... ON CLUSTER` queries regardless of the `<internal_replication>` setting in the cluster config. [#16075](https://github.com/ClickHouse/ClickHouse/pull/16075) ([alesapin](https://github.com/alesapin)).
* Unfold `{database}`, `{table}` and `{uuid}` macros in `ReplicatedMergeTree` arguments on table creation. [#16160](https://github.com/ClickHouse/ClickHouse/pull/16160) ([tavplubix](https://github.com/tavplubix)).

### ClickHouse release v20.9.3.45-stable (2020-10-09)

#### Bug Fix

* Fix the error `Cannot find column` which may happen on insertion into a `MATERIALIZED VIEW` in case the query for the `MV` contains `ARRAY JOIN`. [#15717](https://github.com/ClickHouse/ClickHouse/pull/15717) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a race condition in AMQP-CPP. [#15667](https://github.com/ClickHouse/ClickHouse/pull/15667) ([alesapin](https://github.com/alesapin)).
* Fix the order of destruction of resources in the `ReadFromStorage` step of the query plan. It might cause crashes in rare cases. Possibly connected with [#15610](https://github.com/ClickHouse/ClickHouse/issues/15610). [#15645](https://github.com/ClickHouse/ClickHouse/pull/15645) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed the `Element ... is not a constant expression` error when using a `JSON*` function result in `VALUES`, `LIMIT` or the right side of the `IN` operator. [#15589](https://github.com/ClickHouse/ClickHouse/pull/15589) ([tavplubix](https://github.com/tavplubix)).
* Prevent the possibility of the error message `Could not calculate available disk space (statvfs), errno: 4, strerror: Interrupted system call`. This fixes [#15541](https://github.com/ClickHouse/ClickHouse/issues/15541). [#15557](https://github.com/ClickHouse/ClickHouse/pull/15557) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Significantly reduce memory usage in AggregatingInOrderTransform/optimize_aggregation_in_order. [#15543](https://github.com/ClickHouse/ClickHouse/pull/15543) ([Azat Khuzhin](https://github.com/azat)).
* A mutation might hang waiting for some non-existent part after `MOVE` or `REPLACE PARTITION` or, in rare cases, after `DETACH` or `DROP PARTITION`. It's fixed. [#15537](https://github.com/ClickHouse/ClickHouse/pull/15537) ([tavplubix](https://github.com/tavplubix)).
* Fix a bug where the `ILIKE` operator stopped being case-insensitive if `LIKE` with the same pattern was executed. [#15536](https://github.com/ClickHouse/ClickHouse/pull/15536) ([alesapin](https://github.com/alesapin)).
* Fix `Missing columns` errors when selecting columns which are absent in the data but depend on other columns which are also absent in the data. Fixes [#15530](https://github.com/ClickHouse/ClickHouse/issues/15530). [#15532](https://github.com/ClickHouse/ClickHouse/pull/15532) ([alesapin](https://github.com/alesapin)).
* Fix a bug with event subscription in DDLWorker which may rarely lead to query hangs in `ON CLUSTER`. Introduced in [#13450](https://github.com/ClickHouse/ClickHouse/issues/13450). [#15477](https://github.com/ClickHouse/ClickHouse/pull/15477) ([alesapin](https://github.com/alesapin)).
* Report a proper error when the second argument of the `boundingRatio` aggregate function has a wrong type. [#15407](https://github.com/ClickHouse/ClickHouse/pull/15407) ([detailyang](https://github.com/detailyang)).
* Fix a bug where queries like `SELECT toStartOfDay(today())` fail, complaining about an empty `time_zone` argument. [#15319](https://github.com/ClickHouse/ClickHouse/pull/15319) ([Bharat Nallan](https://github.com/bharatnc)).
* Fix a race condition during MergeTree table rename and background cleanup. [#15304](https://github.com/ClickHouse/ClickHouse/pull/15304) ([alesapin](https://github.com/alesapin)).
* Fix a rare race condition on server startup when system logs are enabled. [#15300](https://github.com/ClickHouse/ClickHouse/pull/15300) ([alesapin](https://github.com/alesapin)).
* Fix an MSan report in QueryLog. Uninitialized memory could be used for the field `memory_usage`. [#15258](https://github.com/ClickHouse/ClickHouse/pull/15258) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix an instance crash when using `joinGet` with `LowCardinality` types. This fixes [#15214](https://github.com/ClickHouse/ClickHouse/issues/15214). [#15220](https://github.com/ClickHouse/ClickHouse/pull/15220) ([Amos Bird](https://github.com/amosbird)).
* Fix a bug in the `Buffer` table engine which didn't allow inserting data with a new structure into the `Buffer` table after an `ALTER` query. Fixes [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117). [#15192](https://github.com/ClickHouse/ClickHouse/pull/15192) ([alesapin](https://github.com/alesapin)).
* Adjust the decimals field size in the MySQL column definition packet. [#15152](https://github.com/ClickHouse/ClickHouse/pull/15152) ([maqroll](https://github.com/maqroll)).
* Fixed the `Cannot rename ... errno: 22, strerror: Invalid argument` error on DDL query execution in an Atomic database when running clickhouse-server in Docker on Mac OS. [#15024](https://github.com/ClickHouse/ClickHouse/pull/15024) ([tavplubix](https://github.com/tavplubix)).
* Make predicate pushdown work when a subquery contains the `finalizeAggregation` function. Fixes [#14847](https://github.com/ClickHouse/ClickHouse/issues/14847). [#14937](https://github.com/ClickHouse/ClickHouse/pull/14937) ([filimonov](https://github.com/filimonov)).
* Fix a problem where the server may get stuck on startup while talking to ZooKeeper if the configuration files have to be fetched from ZK (using the `from_zk` include option). This fixes [#14814](https://github.com/ClickHouse/ClickHouse/issues/14814). [#14843](https://github.com/ClickHouse/ClickHouse/pull/14843) ([Alexander Kuzmenkov](https://github.com/akuzm)).

#### Improvement

* Now it's possible to change the type of the version column for `VersionedCollapsingMergeTree` with an `ALTER` query. [#15442](https://github.com/ClickHouse/ClickHouse/pull/15442) ([alesapin](https://github.com/alesapin)).

### ClickHouse release v20.9.2.20, 2020-09-22

#### New Feature
@@ -405,6 +510,110 @@
## ClickHouse release 20.8

### ClickHouse release v20.8.6.6-lts, 2020-11-13

#### Bug Fix

* Fix rare silent crashes when the query profiler is on and ClickHouse is installed on an OS with a glibc version that has (supposedly) broken asynchronous unwind tables for some functions. This fixes [#15301](https://github.com/ClickHouse/ClickHouse/issues/15301). This fixes [#13098](https://github.com/ClickHouse/ClickHouse/issues/13098). [#16846](https://github.com/ClickHouse/ClickHouse/pull/16846) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Now, when parsing AVRO input, `LowCardinality` is removed from the type. Fixes [#16188](https://github.com/ClickHouse/ClickHouse/issues/16188). [#16521](https://github.com/ClickHouse/ClickHouse/pull/16521) ([Mike](https://github.com/myrrc)).
* Fix rapid growth of metadata when using MySQL Master -> MySQL Slave -> ClickHouse MaterializeMySQL Engine with `slave_parallel_worker` enabled on the MySQL Slave, by properly shrinking GTID sets. This fixes [#15951](https://github.com/ClickHouse/ClickHouse/issues/15951). [#16504](https://github.com/ClickHouse/ClickHouse/pull/16504) ([TCeason](https://github.com/TCeason)).
* Fix `DROP TABLE` for `Distributed` tables (racy with `INSERT`). [#16409](https://github.com/ClickHouse/ClickHouse/pull/16409) ([Azat Khuzhin](https://github.com/azat)).
* Fix processing of very large entries in the replication queue. Very large entries may appear in `ALTER` queries if the table structure is extremely large (near 1 MB). This fixes [#16307](https://github.com/ClickHouse/ClickHouse/issues/16307). [#16332](https://github.com/ClickHouse/ClickHouse/pull/16332) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed inconsistent behaviour where part of the returned data could be dropped because the set for its filtration wasn't created. [#16308](https://github.com/ClickHouse/ClickHouse/pull/16308) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix a bug with the MySQL database engine: when the MySQL server backing the database engine is down, some queries raise an exception because they needlessly try to fetch tables from the unavailable server. For example, the query `SELECT ... FROM system.parts` should work only with MergeTree tables and not touch the MySQL database at all. [#16032](https://github.com/ClickHouse/ClickHouse/pull/16032) ([Kruglov Pavel](https://github.com/Avogar)).

### ClickHouse release v20.8.5.45-lts, 2020-10-29

#### Bug Fix

* Fix a double free in case of an exception in the function `dictGet`. It could have happened if the dictionary was loaded with an error. [#16429](https://github.com/ClickHouse/ClickHouse/pull/16429) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix `GROUP BY` with totals/rollup/cube modifiers and min/max functions over group by keys. Fixes [#16393](https://github.com/ClickHouse/ClickHouse/issues/16393). [#16397](https://github.com/ClickHouse/ClickHouse/pull/16397) ([Anton Popov](https://github.com/CurtizJ)).
* Fix async Distributed INSERT with `prefer_localhost_replica=0` and `internal_replication`. [#16358](https://github.com/ClickHouse/ClickHouse/pull/16358) ([Azat Khuzhin](https://github.com/azat)).
* Fix a possible memory leak during `GROUP BY` with string keys, caused by an error in the `TwoLevelStringHashTable` implementation. [#16264](https://github.com/ClickHouse/ClickHouse/pull/16264) ([Amos Bird](https://github.com/amosbird)).
* Fix the case when memory can be overallocated regardless of the limit. This closes [#14560](https://github.com/ClickHouse/ClickHouse/issues/14560). [#16206](https://github.com/ClickHouse/ClickHouse/pull/16206) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix an `ALTER MODIFY ... ORDER BY` query hang for `ReplicatedVersionedCollapsingMergeTree`. This fixes [#15980](https://github.com/ClickHouse/ClickHouse/issues/15980). [#16011](https://github.com/ClickHouse/ClickHouse/pull/16011) ([alesapin](https://github.com/alesapin)).
* Fix the collate name and charset name parser and support `length = 0` for the string type. [#16008](https://github.com/ClickHouse/ClickHouse/pull/16008) ([Winter Zhang](https://github.com/zhang2014)).
* Allow using the direct layout for dictionaries with complex keys. [#16007](https://github.com/ClickHouse/ClickHouse/pull/16007) ([Anton Popov](https://github.com/CurtizJ)).
* Prevent replicas from hanging for 5-10 minutes when a replication error happens after a period of inactivity. [#15987](https://github.com/ClickHouse/ClickHouse/pull/15987) ([filimonov](https://github.com/filimonov)).
* Fix rare segfaults when inserting into or selecting from a MaterializedView while concurrently dropping the target table (for the Atomic database engine). [#15984](https://github.com/ClickHouse/ClickHouse/pull/15984) ([tavplubix](https://github.com/tavplubix)).
* Fix ambiguity in the parsing of settings profiles: `CREATE USER ... SETTINGS profile readonly` is now considered as using a profile named `readonly`, not a setting named `profile` with the readonly constraint. This fixes [#15628](https://github.com/ClickHouse/ClickHouse/issues/15628). [#15982](https://github.com/ClickHouse/ClickHouse/pull/15982) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix a crash when database creation fails. [#15954](https://github.com/ClickHouse/ClickHouse/pull/15954) ([Winter Zhang](https://github.com/zhang2014)).
* Fixed `DROP TABLE IF EXISTS` failing with a `Table ... doesn't exist` error when the table is concurrently renamed (for the Atomic database engine). Fixed a rare deadlock when concurrently executing some DDL queries with multiple tables (like `DROP DATABASE` and `RENAME TABLE`). Fixed `DROP/DETACH DATABASE` failing with `Table ... doesn't exist` when concurrently executing `DROP/DETACH TABLE`. [#15934](https://github.com/ClickHouse/ClickHouse/pull/15934) ([tavplubix](https://github.com/tavplubix)).
* Fix an incorrect empty result for a query from a `Distributed` table if the query has `WHERE`, `PREWHERE` and `GLOBAL IN`. Fixes [#15792](https://github.com/ClickHouse/ClickHouse/issues/15792). [#15933](https://github.com/ClickHouse/ClickHouse/pull/15933) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix possible deadlocks in RBAC. [#15875](https://github.com/ClickHouse/ClickHouse/pull/15875) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix the `Block structure mismatch` exception in `SELECT ... ORDER BY DESC` queries which were executed after an `ALTER MODIFY COLUMN` query. Fixes [#15800](https://github.com/ClickHouse/ClickHouse/issues/15800). [#15852](https://github.com/ClickHouse/ClickHouse/pull/15852) ([alesapin](https://github.com/alesapin)).
* Fix some cases of queries in which only virtual columns are selected. Previously a `Not found column _nothing in block` exception could be thrown. Fixes [#12298](https://github.com/ClickHouse/ClickHouse/issues/12298). [#15756](https://github.com/ClickHouse/ClickHouse/pull/15756) ([Anton Popov](https://github.com/CurtizJ)).
* Fix the error `Cannot find column` which may happen on insertion into a `MATERIALIZED VIEW` in case the query for the `MV` contains `ARRAY JOIN`. [#15717](https://github.com/ClickHouse/ClickHouse/pull/15717) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed the too-low default value of the `max_replicated_logs_to_keep` setting, which might cause replicas to become lost too often. Improve the lost-replica recovery process by choosing the most up-to-date replica to clone. Also do not remove old parts from the lost replica; detach them instead. [#15701](https://github.com/ClickHouse/ClickHouse/pull/15701) ([tavplubix](https://github.com/tavplubix)).
* Fix the error `Cannot add simple transform to empty Pipe` which happened while reading from a `Buffer` table whose structure differs from the destination table. It was possible if the destination table returned an empty result for the query. Fixes [#15529](https://github.com/ClickHouse/ClickHouse/issues/15529). [#15662](https://github.com/ClickHouse/ClickHouse/pull/15662) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed a bug with globs in the S3 table function: the region from the URL was not applied to the S3 client configuration. [#15646](https://github.com/ClickHouse/ClickHouse/pull/15646) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Decrement the `ReadonlyReplica` metric when detaching read-only tables. This fixes [#15598](https://github.com/ClickHouse/ClickHouse/issues/15598). [#15592](https://github.com/ClickHouse/ClickHouse/pull/15592) ([sundyli](https://github.com/sundy-li)).
* Throw an error when a single parameter is passed to ReplicatedMergeTree instead of ignoring it. [#15516](https://github.com/ClickHouse/ClickHouse/pull/15516) ([nvartolomei](https://github.com/nvartolomei)).

#### Improvement

* Now it's allowed to execute `ALTER ... ON CLUSTER` queries regardless of the `<internal_replication>` setting in the cluster config. [#16075](https://github.com/ClickHouse/ClickHouse/pull/16075) ([alesapin](https://github.com/alesapin)).
* Unfold `{database}`, `{table}` and `{uuid}` macros in `ReplicatedMergeTree` arguments on table creation. [#16159](https://github.com/ClickHouse/ClickHouse/pull/16159) ([tavplubix](https://github.com/tavplubix)).

### ClickHouse release v20.8.4.11-lts, 2020-10-09

#### Bug Fix

* Fix the order of destruction of resources in the `ReadFromStorage` step of the query plan. It might cause crashes in rare cases. Possibly connected with [#15610](https://github.com/ClickHouse/ClickHouse/issues/15610). [#15645](https://github.com/ClickHouse/ClickHouse/pull/15645) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed the `Element ... is not a constant expression` error when using a `JSON*` function result in `VALUES`, `LIMIT` or the right side of the `IN` operator. [#15589](https://github.com/ClickHouse/ClickHouse/pull/15589) ([tavplubix](https://github.com/tavplubix)).
* Prevent the possibility of the error message `Could not calculate available disk space (statvfs), errno: 4, strerror: Interrupted system call`. This fixes [#15541](https://github.com/ClickHouse/ClickHouse/issues/15541). [#15557](https://github.com/ClickHouse/ClickHouse/pull/15557) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Significantly reduce memory usage in AggregatingInOrderTransform/optimize_aggregation_in_order. [#15543](https://github.com/ClickHouse/ClickHouse/pull/15543) ([Azat Khuzhin](https://github.com/azat)).
* A mutation might hang waiting for some non-existent part after `MOVE` or `REPLACE PARTITION` or, in rare cases, after `DETACH` or `DROP PARTITION`. It's fixed. [#15537](https://github.com/ClickHouse/ClickHouse/pull/15537) ([tavplubix](https://github.com/tavplubix)).
* Fix a bug where the `ILIKE` operator stopped being case-insensitive if `LIKE` with the same pattern was executed. [#15536](https://github.com/ClickHouse/ClickHouse/pull/15536) ([alesapin](https://github.com/alesapin)).
* Fix `Missing columns` errors when selecting columns which are absent in the data but depend on other columns which are also absent in the data. Fixes [#15530](https://github.com/ClickHouse/ClickHouse/issues/15530). [#15532](https://github.com/ClickHouse/ClickHouse/pull/15532) ([alesapin](https://github.com/alesapin)).
* Fix a bug with event subscription in DDLWorker which may rarely lead to query hangs in `ON CLUSTER`. Introduced in [#13450](https://github.com/ClickHouse/ClickHouse/issues/13450). [#15477](https://github.com/ClickHouse/ClickHouse/pull/15477) ([alesapin](https://github.com/alesapin)).
* Report a proper error when the second argument of the `boundingRatio` aggregate function has a wrong type. [#15407](https://github.com/ClickHouse/ClickHouse/pull/15407) ([detailyang](https://github.com/detailyang)).
* Fix a race condition during MergeTree table rename and background cleanup. [#15304](https://github.com/ClickHouse/ClickHouse/pull/15304) ([alesapin](https://github.com/alesapin)).
* Fix a rare race condition on server startup when system logs are enabled. [#15300](https://github.com/ClickHouse/ClickHouse/pull/15300) ([alesapin](https://github.com/alesapin)).
* Fix an MSan report in QueryLog. Uninitialized memory could be used for the field `memory_usage`. [#15258](https://github.com/ClickHouse/ClickHouse/pull/15258) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix an instance crash when using `joinGet` with `LowCardinality` types. This fixes [#15214](https://github.com/ClickHouse/ClickHouse/issues/15214). [#15220](https://github.com/ClickHouse/ClickHouse/pull/15220) ([Amos Bird](https://github.com/amosbird)).
* Fix a bug in the `Buffer` table engine which didn't allow inserting data with a new structure into the `Buffer` table after an `ALTER` query. Fixes [#15117](https://github.com/ClickHouse/ClickHouse/issues/15117). [#15192](https://github.com/ClickHouse/ClickHouse/pull/15192) ([alesapin](https://github.com/alesapin)).
* Adjust the decimals field size in the MySQL column definition packet. [#15152](https://github.com/ClickHouse/ClickHouse/pull/15152) ([maqroll](https://github.com/maqroll)).
* We already use padded comparison between `String` and `FixedString` (https://github.com/ClickHouse/ClickHouse/blob/master/src/Functions/FunctionsComparison.h#L333). This PR applies the same logic to field comparison, which corrects the usage of `FixedString` as primary keys. This fixes [#14908](https://github.com/ClickHouse/ClickHouse/issues/14908). [#15033](https://github.com/ClickHouse/ClickHouse/pull/15033) ([Amos Bird](https://github.com/amosbird)).
* If the function `bar` was called with specifically crafted arguments, a buffer overflow was possible. This closes [#13926](https://github.com/ClickHouse/ClickHouse/issues/13926). [#15028](https://github.com/ClickHouse/ClickHouse/pull/15028) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed the `Cannot rename ... errno: 22, strerror: Invalid argument` error on DDL query execution in an Atomic database when running clickhouse-server in Docker on Mac OS. [#15024](https://github.com/ClickHouse/ClickHouse/pull/15024) ([tavplubix](https://github.com/tavplubix)).
* Now the settings `number_of_free_entries_in_pool_to_execute_mutation` and `number_of_free_entries_in_pool_to_lower_max_size_of_merge` can be equal to `background_pool_size`. [#14975](https://github.com/ClickHouse/ClickHouse/pull/14975) ([alesapin](https://github.com/alesapin)).
* Make predicate pushdown work when a subquery contains the `finalizeAggregation` function. Fixes [#14847](https://github.com/ClickHouse/ClickHouse/issues/14847). [#14937](https://github.com/ClickHouse/ClickHouse/pull/14937) ([filimonov](https://github.com/filimonov)).
* Publish CPU frequencies per logical core in `system.asynchronous_metrics`. This fixes [#14923](https://github.com/ClickHouse/ClickHouse/issues/14923). [#14924](https://github.com/ClickHouse/ClickHouse/pull/14924) ([Alexander Kuzmenkov](https://github.com/akuzm)).
* Fixed the `.metadata.tmp File exists` error when using the `MaterializeMySQL` database engine. [#14898](https://github.com/ClickHouse/ClickHouse/pull/14898) ([Winter Zhang](https://github.com/zhang2014)).
* Fix a problem where the server may get stuck on startup while talking to ZooKeeper if the configuration files have to be fetched from ZK (using the `from_zk` include option). This fixes [#14814](https://github.com/ClickHouse/ClickHouse/issues/14814). [#14843](https://github.com/ClickHouse/ClickHouse/pull/14843) ([Alexander Kuzmenkov](https://github.com/akuzm)).
* Fix wrong monotonicity detection for shrinking `Int -> Int` casts of signed types. It might lead to incorrect query results. This bug was unveiled in [#14513](https://github.com/ClickHouse/ClickHouse/issues/14513). [#14783](https://github.com/ClickHouse/ClickHouse/pull/14783) ([Amos Bird](https://github.com/amosbird)).
* Fixed the incorrect sorting order of `Nullable` columns. This fixes [#14344](https://github.com/ClickHouse/ClickHouse/issues/14344). [#14495](https://github.com/ClickHouse/ClickHouse/pull/14495) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).

#### Improvement

* Now it's possible to change the type of the version column for `VersionedCollapsingMergeTree` with an `ALTER` query. [#15442](https://github.com/ClickHouse/ClickHouse/pull/15442) ([alesapin](https://github.com/alesapin)).

### ClickHouse release v20.8.3.18-stable, 2020-09-18

#### Bug Fix

* Fix an issue where some invocations of the `extractAllGroups` function could trigger a "Memory limit exceeded" error. This fixes [#13383](https://github.com/ClickHouse/ClickHouse/issues/13383). [#14889](https://github.com/ClickHouse/ClickHouse/pull/14889) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix SIGSEGV on an attempt to INSERT into StorageFile(fd). [#14887](https://github.com/ClickHouse/ClickHouse/pull/14887) ([Azat Khuzhin](https://github.com/azat)).
* Fix a rare error in `SELECT` queries when the queried column has a `DEFAULT` expression which depends on another column which also has a `DEFAULT` and is neither present in the select query nor on disk. Partially fixes [#14531](https://github.com/ClickHouse/ClickHouse/issues/14531). [#14845](https://github.com/ClickHouse/ClickHouse/pull/14845) ([alesapin](https://github.com/alesapin)).
* Fixed a missing default database name in the metadata of a materialized view when executing `ALTER ... MODIFY QUERY`. [#14664](https://github.com/ClickHouse/ClickHouse/pull/14664) ([tavplubix](https://github.com/tavplubix)).
* Fix a bug where an `ALTER UPDATE` mutation with a Nullable column in the assignment expression and a constant value (like `UPDATE x = 42`) leads to an incorrect value in the column or a segfault. Fixes [#13634](https://github.com/ClickHouse/ClickHouse/issues/13634), [#14045](https://github.com/ClickHouse/ClickHouse/issues/14045). [#14646](https://github.com/ClickHouse/ClickHouse/pull/14646) ([alesapin](https://github.com/alesapin)).
* Fix a wrong Decimal multiplication result caused by a wrong decimal scale of the result column. [#14603](https://github.com/ClickHouse/ClickHouse/pull/14603) ([Artem Zuikov](https://github.com/4ertus2)).
* Added a check, since neither calling `lc->isNullable()` nor calling `lc->getDictionaryPtr()->isNullable()` would return the correct result. [#14591](https://github.com/ClickHouse/ClickHouse/pull/14591) ([myrrc](https://github.com/myrrc)).
* Clean up the data directory after ZooKeeper exceptions during a CREATE query for tables with the StorageReplicatedMergeTree engine. [#14563](https://github.com/ClickHouse/ClickHouse/pull/14563) ([Bharat Nallan](https://github.com/bharatnc)).
* Fix rare segfaults in functions with the -Resample combinator, which could appear as a result of overflow with very large parameters. [#14562](https://github.com/ClickHouse/ClickHouse/pull/14562) ([Anton Popov](https://github.com/CurtizJ)).

#### Improvement

* Speed up the server shutdown process if there are ongoing S3 requests. [#14858](https://github.com/ClickHouse/ClickHouse/pull/14858) ([Pavel Kovalenko](https://github.com/Jokser)).
* Allow using a multi-volume storage configuration in storage Distributed. [#14839](https://github.com/ClickHouse/ClickHouse/pull/14839) ([Pavel Kovalenko](https://github.com/Jokser)).
* Speed up the server shutdown process if there are ongoing S3 requests. [#14496](https://github.com/ClickHouse/ClickHouse/pull/14496) ([Pavel Kovalenko](https://github.com/Jokser)).
* Support custom codecs in compact parts. [#12183](https://github.com/ClickHouse/ClickHouse/pull/12183) ([Anton Popov](https://github.com/CurtizJ)).

### ClickHouse release v20.8.2.3-stable, 2020-09-08

#### Backward Incompatible Change
@@ -1755,6 +1964,74 @@ No changes compared to v20.4.3.16-stable.
## ClickHouse release v20.3

### ClickHouse release v20.3.21.2-lts, 2020-11-02

#### Bug Fix

* Fix `dictGet` in sharding_key (and similar places, i.e. when the function context is stored permanently). [#16205](https://github.com/ClickHouse/ClickHouse/pull/16205) ([Azat Khuzhin](https://github.com/azat)).
* Fix an incorrect empty result for a query from a `Distributed` table if the query has `WHERE`, `PREWHERE` and `GLOBAL IN`. Fixes [#15792](https://github.com/ClickHouse/ClickHouse/issues/15792). [#15933](https://github.com/ClickHouse/ClickHouse/pull/15933) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix missing or excessive headers in `TSV/CSVWithNames` formats. This fixes [#12504](https://github.com/ClickHouse/ClickHouse/issues/12504). [#13343](https://github.com/ClickHouse/ClickHouse/pull/13343) ([Azat Khuzhin](https://github.com/azat)).

### ClickHouse release v20.3.20.6-lts, 2020-10-09

#### Bug Fix

* A mutation might hang waiting for some non-existent part after `MOVE` or `REPLACE PARTITION` or, in rare cases, after `DETACH` or `DROP PARTITION`. It's fixed. [#15724](https://github.com/ClickHouse/ClickHouse/pull/15724), [#15537](https://github.com/ClickHouse/ClickHouse/pull/15537) ([tavplubix](https://github.com/tavplubix)).
* Fix hangs of queries with many subqueries to the same table of `MySQL` engine. Previously, if there were more than 16 subqueries to the same `MySQL` table in a query, it hung forever. [#15299](https://github.com/ClickHouse/ClickHouse/pull/15299) ([Anton Popov](https://github.com/CurtizJ)).
* Fix `Unknown identifier` in `GROUP BY` when the query has a `JOIN` over a Merge table. [#15242](https://github.com/ClickHouse/ClickHouse/pull/15242) ([Artem Zuikov](https://github.com/4ertus2)).
* Make predicate pushdown work when a subquery contains the `finalizeAggregation` function. Fixes [#14847](https://github.com/ClickHouse/ClickHouse/issues/14847). [#14937](https://github.com/ClickHouse/ClickHouse/pull/14937) ([filimonov](https://github.com/filimonov)).
* Concurrent `ALTER ... REPLACE/MOVE PARTITION ...` queries might cause a deadlock. It's fixed. [#13626](https://github.com/ClickHouse/ClickHouse/pull/13626) ([tavplubix](https://github.com/tavplubix)).

### ClickHouse release v20.3.19.4-lts, 2020-09-18

#### Bug Fix

* Fix a rare error in `SELECT` queries when the queried column has a `DEFAULT` expression which depends on another column which also has a `DEFAULT` and is neither present in the select query nor on disk. Partially fixes [#14531](https://github.com/ClickHouse/ClickHouse/issues/14531). [#14845](https://github.com/ClickHouse/ClickHouse/pull/14845) ([alesapin](https://github.com/alesapin)).
* Fix a bug where an `ALTER UPDATE` mutation with a Nullable column in the assignment expression and a constant value (like `UPDATE x = 42`) leads to an incorrect value in the column or a segfault. Fixes [#13634](https://github.com/ClickHouse/ClickHouse/issues/13634), [#14045](https://github.com/ClickHouse/ClickHouse/issues/14045). [#14646](https://github.com/ClickHouse/ClickHouse/pull/14646) ([alesapin](https://github.com/alesapin)).
* Fix a wrong Decimal multiplication result caused by a wrong decimal scale of the result column. [#14603](https://github.com/ClickHouse/ClickHouse/pull/14603) ([Artem Zuikov](https://github.com/4ertus2)).

#### Improvement

* Support custom codecs in compact parts. [#12183](https://github.com/ClickHouse/ClickHouse/pull/12183) ([Anton Popov](https://github.com/CurtizJ)).

### ClickHouse release v20.3.18.10-lts, 2020-09-08

#### Bug Fix

* Stop query execution if an exception happened in `PipelineExecutor` itself. This could prevent rare possible query hangs. Continuation of [#14334](https://github.com/ClickHouse/ClickHouse/issues/14334). [#14402](https://github.com/ClickHouse/ClickHouse/pull/14402) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed behaviour where a cache dictionary sometimes returned the default value instead of the present value from the source. [#13624](https://github.com/ClickHouse/ClickHouse/pull/13624) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix parsing of row policies from users.xml when names of databases or tables contain dots. This fixes [#5779](https://github.com/ClickHouse/ClickHouse/issues/5779), [#12527](https://github.com/ClickHouse/ClickHouse/issues/12527). [#13199](https://github.com/ClickHouse/ClickHouse/pull/13199) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix CAST(Nullable(String), Enum()). [#12745](https://github.com/ClickHouse/ClickHouse/pull/12745) ([Azat Khuzhin](https://github.com/azat)).
* Fixed a data race in `text_log`. It does not correspond to any real bug. [#9726](https://github.com/ClickHouse/ClickHouse/pull/9726) ([alexey-milovidov](https://github.com/alexey-milovidov)).

#### Improvement

* Fix a wrong error for long queries. It was possible to get a syntax error other than `Max query size exceeded` for a correct query. [#13928](https://github.com/ClickHouse/ClickHouse/pull/13928) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Return NULL/zero when a value is not parsed completely in the `parseDateTimeBestEffortOrNull`/`parseDateTimeBestEffortOrZero` functions. This fixes [#7876](https://github.com/ClickHouse/ClickHouse/issues/7876). [#11653](https://github.com/ClickHouse/ClickHouse/pull/11653) ([alexey-milovidov](https://github.com/alexey-milovidov)).

#### Performance Improvement

* Slightly optimize very short queries with `LowCardinality`. [#14129](https://github.com/ClickHouse/ClickHouse/pull/14129) ([Anton Popov](https://github.com/CurtizJ)).

#### Build/Testing/Packaging Improvement

* Fix a UBSan report (adding zero to nullptr) in HashTable that appeared after the migration to clang-10. [#10638](https://github.com/ClickHouse/ClickHouse/pull/10638) ([alexey-milovidov](https://github.com/alexey-milovidov)).

### ClickHouse release v20.3.17.173-lts, 2020-08-15

#### Bug Fix

* Fix a crash in JOIN with StorageMerge and `set enable_optimize_predicate_expression=1`. [#13679](https://github.com/ClickHouse/ClickHouse/pull/13679) ([Artem Zuikov](https://github.com/4ertus2)).
* Fix an invalid return type for comparison of tuples with `NULL` elements. Fixes [#12461](https://github.com/ClickHouse/ClickHouse/issues/12461). [#13420](https://github.com/ClickHouse/ClickHouse/pull/13420) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix queries with constant columns and an `ORDER BY` prefix of the primary key. [#13396](https://github.com/ClickHouse/ClickHouse/pull/13396) ([Anton Popov](https://github.com/CurtizJ)).
* Return the passed number for numbers with the MSB set in `roundUpToPowerOfTwoOrZero()`. [#13234](https://github.com/ClickHouse/ClickHouse/pull/13234) ([Azat Khuzhin](https://github.com/azat)).

### ClickHouse release v20.3.16.165-lts, 2020-08-10

#### Bug Fix
CMakeLists.txt

@@ -456,6 +456,8 @@ include (cmake/find/simdjson.cmake)
include (cmake/find/rapidjson.cmake)
include (cmake/find/fastops.cmake)
include (cmake/find/odbc.cmake)
include (cmake/find/rocksdb.cmake)

if(NOT USE_INTERNAL_PARQUET_LIBRARY)
    set (ENABLE_ORC OFF CACHE INTERNAL "")
base/common/sort.h (new file): 37 lines

@@ -0,0 +1,37 @@
#pragma once

#if !defined(ARCADIA_BUILD)
#    include <miniselect/floyd_rivest_select.h> // Y_IGNORE
#else
#    include <algorithm>
#endif

template <class RandomIt>
void nth_element(RandomIt first, RandomIt nth, RandomIt last)
{
#if !defined(ARCADIA_BUILD)
    ::miniselect::floyd_rivest_select(first, nth, last);
#else
    ::std::nth_element(first, nth, last);
#endif
}

template <class RandomIt>
void partial_sort(RandomIt first, RandomIt middle, RandomIt last)
{
#if !defined(ARCADIA_BUILD)
    ::miniselect::floyd_rivest_partial_sort(first, middle, last);
#else
    ::std::partial_sort(first, middle, last);
#endif
}

template <class RandomIt, class Compare>
void partial_sort(RandomIt first, RandomIt middle, RandomIt last, Compare compare)
{
#if !defined(ARCADIA_BUILD)
    ::miniselect::floyd_rivest_partial_sort(first, middle, last, compare);
#else
    ::std::partial_sort(first, middle, last, compare);
#endif
}
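The header above gives ClickHouse drop-in replacements for `std::nth_element` and `std::partial_sort` that dispatch to miniselect's Floyd-Rivest implementations in regular builds (falling back to `<algorithm>` under `ARCADIA_BUILD`). A minimal usage sketch, assuming the header is reachable as `common/sort.h` on the include path (the include path and data are illustrative, not part of the commit):

```cpp
#include <vector>
#include <common/sort.h> // assumed include path for base/common/sort.h

int main()
{
    std::vector<int> v{9, 4, 7, 1, 8, 2, 6};

    // Place the 3rd smallest element at index 2 without fully sorting the rest.
    ::nth_element(v.begin(), v.begin() + 2, v.end());

    // Order only the first three positions; the tail is left unspecified.
    ::partial_sort(v.begin(), v.begin() + 3, v.end());

    // Comparator overload: the three largest elements first.
    ::partial_sort(v.begin(), v.begin() + 3, v.end(), [](int a, int b) { return a > b; });
    return 0;
}
```

Because the wrappers live in the global namespace, call sites only need to switch from `std::nth_element` to `::nth_element` to pick up the faster selection algorithm.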
base/glibc-compatibility/musl/sync_file_range.c (new file): 21 lines

@@ -0,0 +1,21 @@
#define _GNU_SOURCE
#include <fcntl.h>
#include <errno.h>
#include "syscall.h"

// works the same on x86_64 and aarch64
#define __SYSCALL_LL_E(x) (x)
#define __SYSCALL_LL_O(x) (x)

int sync_file_range(int fd, off_t pos, off_t len, unsigned flags)
{
#if defined(SYS_sync_file_range2)
	return syscall(SYS_sync_file_range2, fd, flags,
		__SYSCALL_LL_E(pos), __SYSCALL_LL_E(len));
#elif defined(SYS_sync_file_range)
	return __syscall(SYS_sync_file_range, fd,
		__SYSCALL_LL_O(pos), __SYSCALL_LL_E(len), flags);
#else
	return __syscall_ret(-ENOSYS);
#endif
}
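This shim backs ClickHouse's glibc-compatibility layer: binaries built against a newer glibc can still run on systems whose libc lacks `sync_file_range`, because the symbol resolves to this raw-syscall implementation (returning `ENOSYS` when the kernel supports neither syscall variant). A hedged sketch of how the emulated glibc API is called (the file path, sizes, and flag combination are illustrative):

```cpp
#define _GNU_SOURCE 1
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main()
{
    int fd = ::open("/tmp/demo.bin", O_RDWR | O_CREAT, 0644); // hypothetical file
    if (fd < 0)
        return 1;

    char page[4096] = {};
    ::write(fd, page, sizeof(page));

    // Start writeback of the first page and wait for it to reach the device,
    // without flushing file metadata the way fsync would.
    if (::sync_file_range(fd, 0, sizeof(page),
            SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER) != 0)
        std::perror("sync_file_range");

    ::close(fd);
    return 0;
}
```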
cmake/find/rocksdb.cmake (new file): 67 lines

@@ -0,0 +1,67 @@
option(ENABLE_ROCKSDB "Enable ROCKSDB" ${ENABLE_LIBRARIES})

if (NOT ENABLE_ROCKSDB)
    if (USE_INTERNAL_ROCKSDB_LIBRARY)
        message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use internal rocksdb library with ENABLE_ROCKSDB=OFF")
    endif()
    return()
endif()

option(USE_INTERNAL_ROCKSDB_LIBRARY "Set to FALSE to use system ROCKSDB library instead of bundled" ${NOT_UNBUNDLED})

if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/rocksdb/CMakeLists.txt")
    if (USE_INTERNAL_ROCKSDB_LIBRARY)
        message (WARNING "submodule contrib is missing. to fix try run: \n git submodule update --init --recursive")
        message(${RECONFIGURE_MESSAGE_LEVEL} "cannot find internal rocksdb")
    endif()
    set (MISSING_INTERNAL_ROCKSDB 1)
endif ()

if (NOT USE_INTERNAL_ROCKSDB_LIBRARY)
    find_library (ROCKSDB_LIBRARY rocksdb)
    find_path (ROCKSDB_INCLUDE_DIR NAMES rocksdb/db.h PATHS ${ROCKSDB_INCLUDE_PATHS})
    if (NOT ROCKSDB_LIBRARY OR NOT ROCKSDB_INCLUDE_DIR)
        message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find system rocksdb library")
    endif()

    if (NOT SNAPPY_LIBRARY)
        include(cmake/find/snappy.cmake)
    endif()
    if (NOT ZLIB_LIBRARY)
        include(cmake/find/zlib.cmake)
    endif()

    find_package(BZip2)
    find_library(ZSTD_LIBRARY zstd)
    find_library(LZ4_LIBRARY lz4)
    find_library(GFLAGS_LIBRARY gflags)

    if(SNAPPY_LIBRARY AND ZLIB_LIBRARY AND LZ4_LIBRARY AND BZIP2_FOUND AND ZSTD_LIBRARY AND GFLAGS_LIBRARY)
        list (APPEND ROCKSDB_LIBRARY ${SNAPPY_LIBRARY})
        list (APPEND ROCKSDB_LIBRARY ${ZLIB_LIBRARY})
        list (APPEND ROCKSDB_LIBRARY ${LZ4_LIBRARY})
        list (APPEND ROCKSDB_LIBRARY ${BZIP2_LIBRARY})
        list (APPEND ROCKSDB_LIBRARY ${ZSTD_LIBRARY})
        list (APPEND ROCKSDB_LIBRARY ${GFLAGS_LIBRARY})
    else()
        message (${RECONFIGURE_MESSAGE_LEVEL}
            "Can't find system rocksdb: snappy=${SNAPPY_LIBRARY} ;"
            " zlib=${ZLIB_LIBRARY} ;"
            " lz4=${LZ4_LIBRARY} ;"
            " bz2=${BZIP2_LIBRARY} ;"
            " zstd=${ZSTD_LIBRARY} ;"
            " gflags=${GFLAGS_LIBRARY} ;")
    endif()
endif ()

if(ROCKSDB_LIBRARY AND ROCKSDB_INCLUDE_DIR)
    set(USE_ROCKSDB 1)
elseif (NOT MISSING_INTERNAL_ROCKSDB)
    set (USE_INTERNAL_ROCKSDB_LIBRARY 1)

    set (ROCKSDB_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb/include")
    set (ROCKSDB_LIBRARY "rocksdb")
    set (USE_ROCKSDB 1)
endif ()

message (STATUS "Using ROCKSDB=${USE_ROCKSDB}: ${ROCKSDB_INCLUDE_DIR} : ${ROCKSDB_LIBRARY}")
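When detection succeeds (either the system library plus its compression dependencies, or the bundled submodule), `USE_ROCKSDB=1` is set and targets can link against RocksDB. For orientation, a hedged sketch of the plain upstream RocksDB C++ API that this dependency makes available (the database path is hypothetical, and this is not ClickHouse-internal code):

```cpp
#include <rocksdb/db.h>
#include <iostream>
#include <string>

int main()
{
    rocksdb::Options options;
    options.create_if_missing = true; // create the database directory on first use

    rocksdb::DB * db = nullptr;
    rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/rocksdb_demo", &db);
    if (!s.ok())
    {
        std::cerr << s.ToString() << '\n';
        return 1;
    }

    // A minimal put/get round trip.
    db->Put(rocksdb::WriteOptions(), "key1", "value1");
    std::string value;
    s = db->Get(rocksdb::ReadOptions(), "key1", &value);
    std::cout << (s.ok() ? value : s.ToString()) << '\n';

    delete db;
    return 0;
}
```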
contrib/CMakeLists.txt (vendored): 5 lines changed

@@ -36,6 +36,7 @@ add_subdirectory (murmurhash)
add_subdirectory (replxx-cmake)
add_subdirectory (ryu-cmake)
add_subdirectory (unixodbc-cmake)
add_subdirectory (xz)

add_subdirectory (poco-cmake)
add_subdirectory (croaring-cmake)

@@ -320,3 +321,7 @@ if (USE_KRB5)
    add_subdirectory (cyrus-sasl-cmake)
endif()
endif()

if (USE_INTERNAL_ROCKSDB_LIBRARY)
    add_subdirectory(rocksdb-cmake)
endif()
contrib/rocksdb (vendored submodule): 1 line changed

@@ -0,0 +1 @@
Subproject commit 963314ffd681596ef2738a95249fe4c1163ef87a
contrib/rocksdb-cmake/CMakeLists.txt (new file): 668 lines

@@ -0,0 +1,668 @@
## This file is extracted from `contrib/rocksdb/CMakeLists.txt`.
set(ROCKSDB_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/rocksdb")
list(APPEND CMAKE_MODULE_PATH "${ROCKSDB_SOURCE_DIR}/cmake/modules/")

find_program(CCACHE_FOUND ccache)
if(CCACHE_FOUND)
  set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
  set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
endif(CCACHE_FOUND)

if (SANITIZE STREQUAL "undefined")
  set(WITH_UBSAN ON)
elseif (SANITIZE STREQUAL "address")
  set(WITH_ASAN ON)
elseif (SANITIZE STREQUAL "thread")
  set(WITH_TSAN ON)
endif()

set(PORTABLE ON)
## Always disable jemalloc for rocksdb by default,
## because it introduces non-standard jemalloc APIs.
option(WITH_JEMALLOC "build with JeMalloc" OFF)
option(WITH_SNAPPY "build with SNAPPY" ${USE_SNAPPY})
## lz4, zlib and zstd are enabled in ClickHouse by default.
option(WITH_LZ4 "build with lz4" ON)
option(WITH_ZLIB "build with zlib" ON)
option(WITH_ZSTD "build with zstd" ON)

# third-party/folly is only validated to work on Linux and Windows for now,
# so only turn it on there by default.
if(CMAKE_SYSTEM_NAME MATCHES "Linux|Windows")
  if(MSVC AND MSVC_VERSION LESS 1910)
    # Folly does not compile with MSVC older than VS2017.
    option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
  else()
    option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" ON)
  endif()
else()
  option(WITH_FOLLY_DISTRIBUTED_MUTEX "build with folly::DistributedMutex" OFF)
endif()

if(NOT DEFINED CMAKE_CXX_STANDARD)
  set(CMAKE_CXX_STANDARD 11)
endif()

if(MSVC)
  option(WITH_XPRESS "build with windows built in compression" OFF)
  include(${ROCKSDB_SOURCE_DIR}/thirdparty.inc)
else()
  if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD")
    # FreeBSD has jemalloc as its default malloc,
    # but it does not have all the jemalloc files in include/...
    set(WITH_JEMALLOC ON)
  else()
    if(WITH_JEMALLOC)
      add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_NO_DEMANGLE)
      list(APPEND THIRDPARTY_LIBS jemalloc)
    endif()
  endif()

  if(WITH_SNAPPY)
    add_definitions(-DSNAPPY)
    list(APPEND THIRDPARTY_LIBS snappy)
  endif()

  if(WITH_ZLIB)
    add_definitions(-DZLIB)
    list(APPEND THIRDPARTY_LIBS zlib)
  endif()

  if(WITH_LZ4)
    add_definitions(-DLZ4)
    list(APPEND THIRDPARTY_LIBS lz4)
  endif()

  if(WITH_ZSTD)
    add_definitions(-DZSTD)
    include_directories(${ZSTD_INCLUDE_DIR})
    include_directories(${ZSTD_INCLUDE_DIR}/common)
    include_directories(${ZSTD_INCLUDE_DIR}/dictBuilder)
    include_directories(${ZSTD_INCLUDE_DIR}/deprecated)

    list(APPEND THIRDPARTY_LIBS zstd)
  endif()
endif()

string(TIMESTAMP TS "%Y/%m/%d %H:%M:%S" UTC)
set(GIT_DATE_TIME "${TS}" CACHE STRING "the time we first built rocksdb")

find_package(Git)

if(GIT_FOUND AND EXISTS "${ROCKSDB_SOURCE_DIR}/.git")
  if(WIN32)
    execute_process(COMMAND $ENV{COMSPEC} /C ${GIT_EXECUTABLE} -C ${ROCKSDB_SOURCE_DIR} rev-parse HEAD OUTPUT_VARIABLE GIT_SHA)
  else()
    execute_process(COMMAND ${GIT_EXECUTABLE} -C ${ROCKSDB_SOURCE_DIR} rev-parse HEAD OUTPUT_VARIABLE GIT_SHA)
  endif()
else()
  set(GIT_SHA 0)
endif()

string(REGEX REPLACE "[^0-9a-f]+" "" GIT_SHA "${GIT_SHA}")

set(BUILD_VERSION_CC ${CMAKE_BINARY_DIR}/rocksdb_build_version.cc)
configure_file(${ROCKSDB_SOURCE_DIR}/util/build_version.cc.in ${BUILD_VERSION_CC} @ONLY)
add_library(rocksdb_build_version OBJECT ${BUILD_VERSION_CC})
target_include_directories(rocksdb_build_version PRIVATE
  ${ROCKSDB_SOURCE_DIR}/util)

if(MSVC)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W4 /wd4127 /wd4800 /wd4996 /wd4351 /wd4100 /wd4204 /wd4324")
else()
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wextra -Wall")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsign-compare -Wshadow -Wno-unused-parameter -Wno-unused-variable -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers -Wno-strict-aliasing")
  if(MINGW)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-format -fno-asynchronous-unwind-tables")
    add_definitions(-D_POSIX_C_SOURCE=1)
  endif()
  if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer")
    include(CheckCXXCompilerFlag)
    CHECK_CXX_COMPILER_FLAG("-momit-leaf-frame-pointer" HAVE_OMIT_LEAF_FRAME_POINTER)
    if(HAVE_OMIT_LEAF_FRAME_POINTER)
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -momit-leaf-frame-pointer")
    endif()
  endif()
endif()

include(CheckCCompilerFlag)
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
  CHECK_C_COMPILER_FLAG("-mcpu=power9" HAS_POWER9)
  if(HAS_POWER9)
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mcpu=power9 -mtune=power9")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=power9 -mtune=power9")
  else()
    CHECK_C_COMPILER_FLAG("-mcpu=power8" HAS_POWER8)
    if(HAS_POWER8)
      set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mcpu=power8 -mtune=power8")
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=power8 -mtune=power8")
    endif(HAS_POWER8)
  endif(HAS_POWER9)
  CHECK_C_COMPILER_FLAG("-maltivec" HAS_ALTIVEC)
  if(HAS_ALTIVEC)
    message(STATUS " HAS_ALTIVEC yes")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maltivec")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -maltivec")
  endif(HAS_ALTIVEC)
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")

if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64")
  CHECK_C_COMPILER_FLAG("-march=armv8-a+crc+crypto" HAS_ARMV8_CRC)
  if(HAS_ARMV8_CRC)
    message(STATUS " HAS_ARMV8_CRC yes")
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -Wno-unused-function")
  endif(HAS_ARMV8_CRC)
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64")

include(CheckCXXSourceCompiles)
if(NOT MSVC)
  set(CMAKE_REQUIRED_FLAGS "-msse4.2 -mpclmul")
endif()

CHECK_CXX_SOURCE_COMPILES("
#include <cstdint>
#include <nmmintrin.h>
#include <wmmintrin.h>
int main() {
  volatile uint32_t x = _mm_crc32_u32(0, 0);
  const auto a = _mm_set_epi64x(0, 0);
  const auto b = _mm_set_epi64x(0, 0);
  const auto c = _mm_clmulepi64_si128(a, b, 0x00);
  auto d = _mm_cvtsi128_si64(c);
|
||||
}
|
||||
" HAVE_SSE42)
|
||||
unset(CMAKE_REQUIRED_FLAGS)
|
||||
if(HAVE_SSE42)
|
||||
add_definitions(-DHAVE_SSE42)
|
||||
add_definitions(-DHAVE_PCLMUL)
|
||||
elseif(FORCE_SSE42)
|
||||
message(FATAL_ERROR "FORCE_SSE42=ON but unable to compile with SSE4.2 enabled")
|
||||
endif()
|
||||
|
||||
CHECK_CXX_SOURCE_COMPILES("
|
||||
#if defined(_MSC_VER) && !defined(__thread)
|
||||
#define __thread __declspec(thread)
|
||||
#endif
|
||||
int main() {
|
||||
static __thread int tls;
|
||||
}
|
||||
" HAVE_THREAD_LOCAL)
|
||||
if(HAVE_THREAD_LOCAL)
|
||||
add_definitions(-DROCKSDB_SUPPORT_THREAD_LOCAL)
|
||||
endif()
|
||||
|
||||
option(FAIL_ON_WARNINGS "Treat compile warnings as errors" ON)
|
||||
if(FAIL_ON_WARNINGS)
|
||||
if(MSVC)
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX")
|
||||
else() # assume GCC
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
option(WITH_ASAN "build with ASAN" OFF)
if(WITH_ASAN)
  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address")
  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address")
  if(WITH_JEMALLOC)
    # FATAL_ERROR (not bare FATAL) is required for message() to actually abort the configure step.
    message(FATAL_ERROR "ASAN does not work well with JeMalloc")
  endif()
endif()

option(WITH_TSAN "build with TSAN" OFF)
if(WITH_TSAN)
  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread -pie")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=thread -fPIC")
  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=thread -fPIC")
  if(WITH_JEMALLOC)
    message(FATAL_ERROR "TSAN does not work well with JeMalloc")
  endif()
endif()

option(WITH_UBSAN "build with UBSAN" OFF)
if(WITH_UBSAN)
  add_definitions(-DROCKSDB_UBSAN_RUN)
  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined")
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined")
  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined")
  if(WITH_JEMALLOC)
    message(FATAL_ERROR "UBSAN does not work well with JeMalloc")
  endif()
endif()
if(CMAKE_SYSTEM_NAME MATCHES "Cygwin")
  add_definitions(-fno-builtin-memcmp -DCYGWIN)
elseif(CMAKE_SYSTEM_NAME MATCHES "Darwin")
  add_definitions(-DOS_MACOSX)
  if(CMAKE_SYSTEM_PROCESSOR MATCHES arm)
    add_definitions(-DIOS_CROSS_COMPILE -DROCKSDB_LITE)
    # no debug info for IOS, that will make our library big
    add_definitions(-DNDEBUG)
  endif()
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
  add_definitions(-DOS_LINUX)
elseif(CMAKE_SYSTEM_NAME MATCHES "SunOS")
  add_definitions(-DOS_SOLARIS)
elseif(CMAKE_SYSTEM_NAME MATCHES "kFreeBSD")
  add_definitions(-DOS_GNU_KFREEBSD)
elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
  add_definitions(-DOS_FREEBSD)
elseif(CMAKE_SYSTEM_NAME MATCHES "NetBSD")
  add_definitions(-DOS_NETBSD)
elseif(CMAKE_SYSTEM_NAME MATCHES "OpenBSD")
  add_definitions(-DOS_OPENBSD)
elseif(CMAKE_SYSTEM_NAME MATCHES "DragonFly")
  add_definitions(-DOS_DRAGONFLYBSD)
elseif(CMAKE_SYSTEM_NAME MATCHES "Android")
  add_definitions(-DOS_ANDROID)
elseif(CMAKE_SYSTEM_NAME MATCHES "Windows")
  add_definitions(-DWIN32 -DOS_WIN -D_MBCS -DWIN64 -DNOMINMAX)
  if(MINGW)
    add_definitions(-D_WIN32_WINNT=_WIN32_WINNT_VISTA)
  endif()
endif()

if(NOT WIN32)
  add_definitions(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX)
endif()

option(WITH_FALLOCATE "build with fallocate" ON)
if(WITH_FALLOCATE)
  CHECK_CXX_SOURCE_COMPILES("
#include <fcntl.h>
#include <linux/falloc.h>
int main() {
  int fd = open(\"/dev/null\", 0);
  fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1024);
}
" HAVE_FALLOCATE)
  if(HAVE_FALLOCATE)
    add_definitions(-DROCKSDB_FALLOCATE_PRESENT)
  endif()
endif()

CHECK_CXX_SOURCE_COMPILES("
#include <fcntl.h>
int main() {
  int fd = open(\"/dev/null\", 0);
  sync_file_range(fd, 0, 1024, SYNC_FILE_RANGE_WRITE);
}
" HAVE_SYNC_FILE_RANGE_WRITE)
if(HAVE_SYNC_FILE_RANGE_WRITE)
  add_definitions(-DROCKSDB_RANGESYNC_PRESENT)
endif()

CHECK_CXX_SOURCE_COMPILES("
#include <pthread.h>
int main() {
  (void) PTHREAD_MUTEX_ADAPTIVE_NP;
}
" HAVE_PTHREAD_MUTEX_ADAPTIVE_NP)
if(HAVE_PTHREAD_MUTEX_ADAPTIVE_NP)
  add_definitions(-DROCKSDB_PTHREAD_ADAPTIVE_MUTEX)
endif()

include(CheckCXXSymbolExists)
if(CMAKE_SYSTEM_NAME MATCHES "^FreeBSD")
  check_cxx_symbol_exists(malloc_usable_size ${ROCKSDB_SOURCE_DIR}/malloc_np.h HAVE_MALLOC_USABLE_SIZE)
else()
  check_cxx_symbol_exists(malloc_usable_size ${ROCKSDB_SOURCE_DIR}/malloc.h HAVE_MALLOC_USABLE_SIZE)
endif()
if(HAVE_MALLOC_USABLE_SIZE)
  add_definitions(-DROCKSDB_MALLOC_USABLE_SIZE)
endif()

check_cxx_symbol_exists(sched_getcpu sched.h HAVE_SCHED_GETCPU)
if(HAVE_SCHED_GETCPU)
  add_definitions(-DROCKSDB_SCHED_GETCPU_PRESENT)
endif()
# Note: the header here was misspelled as "auvx.h" upstream; the glibc header
# that declares getauxval is sys/auxv.h.
check_cxx_symbol_exists(getauxval sys/auxv.h HAVE_AUXV_GETAUXVAL)
if(HAVE_AUXV_GETAUXVAL)
  add_definitions(-DROCKSDB_AUXV_GETAUXVAL_PRESENT)
endif()
include_directories(${ROCKSDB_SOURCE_DIR})
include_directories(${ROCKSDB_SOURCE_DIR}/include)
if(WITH_FOLLY_DISTRIBUTED_MUTEX)
  include_directories(${ROCKSDB_SOURCE_DIR}/third-party/folly)
endif()
find_package(Threads REQUIRED)

# Main library source code

set(SOURCES
  ${ROCKSDB_SOURCE_DIR}/cache/cache.cc
  ${ROCKSDB_SOURCE_DIR}/cache/clock_cache.cc
  ${ROCKSDB_SOURCE_DIR}/cache/lru_cache.cc
  ${ROCKSDB_SOURCE_DIR}/cache/sharded_cache.cc
  ${ROCKSDB_SOURCE_DIR}/db/arena_wrapped_db_iter.cc
  ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_addition.cc
  ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_builder.cc
  ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_garbage.cc
  ${ROCKSDB_SOURCE_DIR}/db/blob/blob_file_meta.cc
  ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_format.cc
  ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_reader.cc
  ${ROCKSDB_SOURCE_DIR}/db/blob/blob_log_writer.cc
  ${ROCKSDB_SOURCE_DIR}/db/builder.cc
  ${ROCKSDB_SOURCE_DIR}/db/c.cc
  ${ROCKSDB_SOURCE_DIR}/db/column_family.cc
  ${ROCKSDB_SOURCE_DIR}/db/compacted_db_impl.cc
  ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction.cc
  ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_iterator.cc
  ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker.cc
  ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_job.cc
  ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_fifo.cc
  ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_level.cc
  ${ROCKSDB_SOURCE_DIR}/db/compaction/compaction_picker_universal.cc
  ${ROCKSDB_SOURCE_DIR}/db/compaction/sst_partitioner.cc
  ${ROCKSDB_SOURCE_DIR}/db/convenience.cc
  ${ROCKSDB_SOURCE_DIR}/db/db_filesnapshot.cc
  ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl.cc
  ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_write.cc
  ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_compaction_flush.cc
  ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_files.cc
  ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_open.cc
  ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_debug.cc
  ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_experimental.cc
  ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_readonly.cc
  ${ROCKSDB_SOURCE_DIR}/db/db_impl/db_impl_secondary.cc
  ${ROCKSDB_SOURCE_DIR}/db/db_info_dumper.cc
  ${ROCKSDB_SOURCE_DIR}/db/db_iter.cc
  ${ROCKSDB_SOURCE_DIR}/db/dbformat.cc
  ${ROCKSDB_SOURCE_DIR}/db/error_handler.cc
  ${ROCKSDB_SOURCE_DIR}/db/event_helpers.cc
  ${ROCKSDB_SOURCE_DIR}/db/experimental.cc
  ${ROCKSDB_SOURCE_DIR}/db/external_sst_file_ingestion_job.cc
  ${ROCKSDB_SOURCE_DIR}/db/file_indexer.cc
  ${ROCKSDB_SOURCE_DIR}/db/flush_job.cc
  ${ROCKSDB_SOURCE_DIR}/db/flush_scheduler.cc
  ${ROCKSDB_SOURCE_DIR}/db/forward_iterator.cc
  ${ROCKSDB_SOURCE_DIR}/db/import_column_family_job.cc
  ${ROCKSDB_SOURCE_DIR}/db/internal_stats.cc
  ${ROCKSDB_SOURCE_DIR}/db/logs_with_prep_tracker.cc
  ${ROCKSDB_SOURCE_DIR}/db/log_reader.cc
  ${ROCKSDB_SOURCE_DIR}/db/log_writer.cc
  ${ROCKSDB_SOURCE_DIR}/db/malloc_stats.cc
  ${ROCKSDB_SOURCE_DIR}/db/memtable.cc
  ${ROCKSDB_SOURCE_DIR}/db/memtable_list.cc
  ${ROCKSDB_SOURCE_DIR}/db/merge_helper.cc
  ${ROCKSDB_SOURCE_DIR}/db/merge_operator.cc
  ${ROCKSDB_SOURCE_DIR}/db/range_del_aggregator.cc
  ${ROCKSDB_SOURCE_DIR}/db/range_tombstone_fragmenter.cc
  ${ROCKSDB_SOURCE_DIR}/db/repair.cc
  ${ROCKSDB_SOURCE_DIR}/db/snapshot_impl.cc
  ${ROCKSDB_SOURCE_DIR}/db/table_cache.cc
  ${ROCKSDB_SOURCE_DIR}/db/table_properties_collector.cc
  ${ROCKSDB_SOURCE_DIR}/db/transaction_log_impl.cc
  ${ROCKSDB_SOURCE_DIR}/db/trim_history_scheduler.cc
  ${ROCKSDB_SOURCE_DIR}/db/version_builder.cc
  ${ROCKSDB_SOURCE_DIR}/db/version_edit.cc
  ${ROCKSDB_SOURCE_DIR}/db/version_edit_handler.cc
  ${ROCKSDB_SOURCE_DIR}/db/version_set.cc
  ${ROCKSDB_SOURCE_DIR}/db/wal_edit.cc
  ${ROCKSDB_SOURCE_DIR}/db/wal_manager.cc
  ${ROCKSDB_SOURCE_DIR}/db/write_batch.cc
  ${ROCKSDB_SOURCE_DIR}/db/write_batch_base.cc
  ${ROCKSDB_SOURCE_DIR}/db/write_controller.cc
  ${ROCKSDB_SOURCE_DIR}/db/write_thread.cc
  ${ROCKSDB_SOURCE_DIR}/env/env.cc
  ${ROCKSDB_SOURCE_DIR}/env/env_chroot.cc
  ${ROCKSDB_SOURCE_DIR}/env/env_encryption.cc
  ${ROCKSDB_SOURCE_DIR}/env/env_hdfs.cc
  ${ROCKSDB_SOURCE_DIR}/env/file_system.cc
  ${ROCKSDB_SOURCE_DIR}/env/file_system_tracer.cc
  ${ROCKSDB_SOURCE_DIR}/env/mock_env.cc
  ${ROCKSDB_SOURCE_DIR}/file/delete_scheduler.cc
  ${ROCKSDB_SOURCE_DIR}/file/file_prefetch_buffer.cc
  ${ROCKSDB_SOURCE_DIR}/file/file_util.cc
  ${ROCKSDB_SOURCE_DIR}/file/filename.cc
  ${ROCKSDB_SOURCE_DIR}/file/random_access_file_reader.cc
  ${ROCKSDB_SOURCE_DIR}/file/read_write_util.cc
  ${ROCKSDB_SOURCE_DIR}/file/readahead_raf.cc
  ${ROCKSDB_SOURCE_DIR}/file/sequence_file_reader.cc
  ${ROCKSDB_SOURCE_DIR}/file/sst_file_manager_impl.cc
  ${ROCKSDB_SOURCE_DIR}/file/writable_file_writer.cc
  ${ROCKSDB_SOURCE_DIR}/logging/auto_roll_logger.cc
  ${ROCKSDB_SOURCE_DIR}/logging/event_logger.cc
  ${ROCKSDB_SOURCE_DIR}/logging/log_buffer.cc
  ${ROCKSDB_SOURCE_DIR}/memory/arena.cc
  ${ROCKSDB_SOURCE_DIR}/memory/concurrent_arena.cc
  ${ROCKSDB_SOURCE_DIR}/memory/jemalloc_nodump_allocator.cc
  ${ROCKSDB_SOURCE_DIR}/memory/memkind_kmem_allocator.cc
  ${ROCKSDB_SOURCE_DIR}/memtable/alloc_tracker.cc
  ${ROCKSDB_SOURCE_DIR}/memtable/hash_linklist_rep.cc
  ${ROCKSDB_SOURCE_DIR}/memtable/hash_skiplist_rep.cc
  ${ROCKSDB_SOURCE_DIR}/memtable/skiplistrep.cc
  ${ROCKSDB_SOURCE_DIR}/memtable/vectorrep.cc
  ${ROCKSDB_SOURCE_DIR}/memtable/write_buffer_manager.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/histogram.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/histogram_windowing.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/in_memory_stats_history.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/instrumented_mutex.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/iostats_context.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/perf_context.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/perf_level.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/persistent_stats_history.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/statistics.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/stats_dump_scheduler.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_impl.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_updater.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util.cc
  ${ROCKSDB_SOURCE_DIR}/monitoring/thread_status_util_debug.cc
  ${ROCKSDB_SOURCE_DIR}/options/cf_options.cc
  ${ROCKSDB_SOURCE_DIR}/options/db_options.cc
  ${ROCKSDB_SOURCE_DIR}/options/options.cc
  ${ROCKSDB_SOURCE_DIR}/options/options_helper.cc
  ${ROCKSDB_SOURCE_DIR}/options/options_parser.cc
  ${ROCKSDB_SOURCE_DIR}/port/stack_trace.cc
  ${ROCKSDB_SOURCE_DIR}/table/adaptive/adaptive_table_factory.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/binary_search_index_reader.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/block.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_filter_block.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_builder.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_factory.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_iterator.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/block_based_table_reader.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/block_builder.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefetcher.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/block_prefix_index.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_hash_index.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/data_block_footer.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/filter_block_reader_common.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/filter_policy.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/flush_block_policy.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/full_filter_block.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/hash_index_reader.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/index_builder.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/index_reader_common.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/parsed_full_filter_block.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_filter_block.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_iterator.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/partitioned_index_reader.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/reader_common.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_based/uncompression_dict_reader.cc
  ${ROCKSDB_SOURCE_DIR}/table/block_fetcher.cc
  ${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_builder.cc
  ${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_factory.cc
  ${ROCKSDB_SOURCE_DIR}/table/cuckoo/cuckoo_table_reader.cc
  ${ROCKSDB_SOURCE_DIR}/table/format.cc
  ${ROCKSDB_SOURCE_DIR}/table/get_context.cc
  ${ROCKSDB_SOURCE_DIR}/table/iterator.cc
  ${ROCKSDB_SOURCE_DIR}/table/merging_iterator.cc
  ${ROCKSDB_SOURCE_DIR}/table/meta_blocks.cc
  ${ROCKSDB_SOURCE_DIR}/table/persistent_cache_helper.cc
  ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_bloom.cc
  ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_builder.cc
  ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_factory.cc
  ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_index.cc
  ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_key_coding.cc
  ${ROCKSDB_SOURCE_DIR}/table/plain/plain_table_reader.cc
  ${ROCKSDB_SOURCE_DIR}/table/sst_file_dumper.cc
  ${ROCKSDB_SOURCE_DIR}/table/sst_file_reader.cc
  ${ROCKSDB_SOURCE_DIR}/table/sst_file_writer.cc
  ${ROCKSDB_SOURCE_DIR}/table/table_properties.cc
  ${ROCKSDB_SOURCE_DIR}/table/two_level_iterator.cc
  ${ROCKSDB_SOURCE_DIR}/test_util/sync_point.cc
  ${ROCKSDB_SOURCE_DIR}/test_util/sync_point_impl.cc
  ${ROCKSDB_SOURCE_DIR}/test_util/testutil.cc
  ${ROCKSDB_SOURCE_DIR}/test_util/transaction_test_util.cc
  ${ROCKSDB_SOURCE_DIR}/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
  ${ROCKSDB_SOURCE_DIR}/tools/dump/db_dump_tool.cc
  ${ROCKSDB_SOURCE_DIR}/tools/ldb_cmd.cc
  ${ROCKSDB_SOURCE_DIR}/tools/ldb_tool.cc
  ${ROCKSDB_SOURCE_DIR}/tools/sst_dump_tool.cc
  ${ROCKSDB_SOURCE_DIR}/tools/trace_analyzer_tool.cc
  ${ROCKSDB_SOURCE_DIR}/trace_replay/trace_replay.cc
  ${ROCKSDB_SOURCE_DIR}/trace_replay/block_cache_tracer.cc
  ${ROCKSDB_SOURCE_DIR}/trace_replay/io_tracer.cc
  ${ROCKSDB_SOURCE_DIR}/util/coding.cc
  ${ROCKSDB_SOURCE_DIR}/util/compaction_job_stats_impl.cc
  ${ROCKSDB_SOURCE_DIR}/util/comparator.cc
  ${ROCKSDB_SOURCE_DIR}/util/compression_context_cache.cc
  ${ROCKSDB_SOURCE_DIR}/util/concurrent_task_limiter_impl.cc
  ${ROCKSDB_SOURCE_DIR}/util/crc32c.cc
  ${ROCKSDB_SOURCE_DIR}/util/dynamic_bloom.cc
  ${ROCKSDB_SOURCE_DIR}/util/hash.cc
  ${ROCKSDB_SOURCE_DIR}/util/murmurhash.cc
  ${ROCKSDB_SOURCE_DIR}/util/random.cc
  ${ROCKSDB_SOURCE_DIR}/util/rate_limiter.cc
  ${ROCKSDB_SOURCE_DIR}/util/slice.cc
  ${ROCKSDB_SOURCE_DIR}/util/file_checksum_helper.cc
  ${ROCKSDB_SOURCE_DIR}/util/status.cc
  ${ROCKSDB_SOURCE_DIR}/util/string_util.cc
  ${ROCKSDB_SOURCE_DIR}/util/thread_local.cc
  ${ROCKSDB_SOURCE_DIR}/util/threadpool_imp.cc
  ${ROCKSDB_SOURCE_DIR}/util/xxhash.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/backupable/backupable_db.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_compaction_filter.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_db_impl_filesnapshot.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_dump_tool.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/blob_db/blob_file.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/cassandra_compaction_filter.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/format.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/cassandra/merge_operator.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/checkpoint/checkpoint_impl.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/debug.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/env_mirror.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/env_timed.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_env.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/fault_injection_fs.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/leveldb_options/leveldb_options.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/memory/memory_util.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/bytesxor.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/max.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/put.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/sortlist.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/string_append/stringappend2.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/merge_operators/uint64add.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/object_registry.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/option_change_migration/option_change_migration.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/options/options_util.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_file.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/block_cache_tier_metadata.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/persistent_cache_tier.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/persistent_cache/volatile_tier_impl.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/cache_simulator.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/simulator_cache/sim_cache.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/table_properties_collectors/compact_on_deletion_collector.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/trace/file_trace_reader_writer.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/lock_tracker.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/lock/point_lock_tracker.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction_db_impl.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/optimistic_transaction.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/pessimistic_transaction_db.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/snapshot_checker.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_base.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_db_mutex_impl.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_lock_mgr.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/transaction_util.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_prepared_txn_db.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/transactions/write_unprepared_txn_db.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/ttl/db_ttl_impl.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index.cc
  ${ROCKSDB_SOURCE_DIR}/utilities/write_batch_with_index/write_batch_with_index_internal.cc
  $<TARGET_OBJECTS:rocksdb_build_version>)

if(HAVE_SSE42 AND NOT MSVC)
  set_source_files_properties(
    ${ROCKSDB_SOURCE_DIR}/util/crc32c.cc
    PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul")
endif()

if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
  list(APPEND SOURCES
    ${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc.c
    ${ROCKSDB_SOURCE_DIR}/util/crc32c_ppc_asm.S)
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")

if(HAS_ARMV8_CRC)
  list(APPEND SOURCES
    ${ROCKSDB_SOURCE_DIR}/util/crc32c_arm64.cc)
endif(HAS_ARMV8_CRC)

if(WIN32)
  list(APPEND SOURCES
    ${ROCKSDB_SOURCE_DIR}/port/win/io_win.cc
    ${ROCKSDB_SOURCE_DIR}/port/win/env_win.cc
    ${ROCKSDB_SOURCE_DIR}/port/win/env_default.cc
    ${ROCKSDB_SOURCE_DIR}/port/win/port_win.cc
    ${ROCKSDB_SOURCE_DIR}/port/win/win_logger.cc)
  if(NOT MINGW)
    # Mingw only supports std::thread when using
    # posix threads.
    list(APPEND SOURCES
      ${ROCKSDB_SOURCE_DIR}/port/win/win_thread.cc)
  endif()
  if(WITH_XPRESS)
    list(APPEND SOURCES
      ${ROCKSDB_SOURCE_DIR}/port/win/xpress_win.cc)
  endif()

  if(WITH_JEMALLOC)
    list(APPEND SOURCES
      ${ROCKSDB_SOURCE_DIR}/port/win/win_jemalloc.cc)
  endif()

else()
  list(APPEND SOURCES
    ${ROCKSDB_SOURCE_DIR}/port/port_posix.cc
    ${ROCKSDB_SOURCE_DIR}/env/env_posix.cc
    ${ROCKSDB_SOURCE_DIR}/env/fs_posix.cc
    ${ROCKSDB_SOURCE_DIR}/env/io_posix.cc)
endif()

if(WITH_FOLLY_DISTRIBUTED_MUTEX)
  list(APPEND SOURCES
    ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/detail/Futex.cpp
    ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/AtomicNotification.cpp
    ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/DistributedMutex.cpp
    ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/ParkingLot.cpp
    ${ROCKSDB_SOURCE_DIR}/third-party/folly/folly/synchronization/WaitOptions.cpp)
endif()

set(ROCKSDB_STATIC_LIB rocksdb)

if(WIN32)
  set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
else()
  set(SYSTEM_LIBS ${CMAKE_THREAD_LIBS_INIT})
endif()

add_library(${ROCKSDB_STATIC_LIB} STATIC ${SOURCES})
target_link_libraries(${ROCKSDB_STATIC_LIB} PRIVATE
  ${THIRDPARTY_LIBS} ${SYSTEM_LIBS})
contrib/xz vendored Submodule
@ -0,0 +1 @@
Subproject commit 869b9d1b4edd6df07f819d360d306251f8147353
@ -64,6 +64,8 @@ RUN apt-get update \
    libbz2-dev \
    libavro-dev \
    libfarmhash-dev \
    librocksdb-dev \
    libgflags-dev \
    libmysqlclient-dev \
    --yes --no-install-recommends
@ -1,5 +1,5 @@
# docker build -t yandex/clickhouse-fasttest .
FROM ubuntu:19.10
FROM ubuntu:20.04

ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=10
@ -127,7 +127,7 @@ function clone_submodules
(
cd "$FASTTEST_SOURCE"

SUBMODULES_TO_UPDATE=(contrib/boost contrib/zlib-ng contrib/libxml2 contrib/poco contrib/libunwind contrib/ryu contrib/fmtlib contrib/base64 contrib/cctz contrib/libcpuid contrib/double-conversion contrib/libcxx contrib/libcxxabi contrib/libc-headers contrib/lz4 contrib/zstd contrib/fastops contrib/rapidjson contrib/re2 contrib/sparsehash-c11 contrib/croaring contrib/miniselect)
SUBMODULES_TO_UPDATE=(contrib/boost contrib/zlib-ng contrib/libxml2 contrib/poco contrib/libunwind contrib/ryu contrib/fmtlib contrib/base64 contrib/cctz contrib/libcpuid contrib/double-conversion contrib/libcxx contrib/libcxxabi contrib/libc-headers contrib/lz4 contrib/zstd contrib/fastops contrib/rapidjson contrib/re2 contrib/sparsehash-c11 contrib/croaring contrib/miniselect contrib/xz)

git submodule sync
git submodule update --init --recursive "${SUBMODULES_TO_UPDATE[@]}"
@ -268,12 +268,16 @@ TESTS_TO_SKIP=(
    protobuf
    secure
    sha256
    xz

    # Not sure why these two fail even in sequential mode. Disabled for now
    # to make some progress.
    00646_url_engine
    00974_query_profiler

    # In fasttest, ENABLE_LIBRARIES=0, so rocksdb engine is not enabled by default
    01504_rocksdb

    # Look at DistributedFilesToInsert, so cannot run in parallel.
    01460_DistributedFilesToInsert
@ -16,7 +16,7 @@
            <max_execution_time>300</max_execution_time>

            <!-- One NUMA node w/o hyperthreading -->
            <max_threads>20</max_threads>
            <max_threads>12</max_threads>
        </default>
    </profiles>
</yandex>
@ -43,6 +43,8 @@ RUN apt-get --allow-unauthenticated update -y \
    libreadline-dev \
    libsasl2-dev \
    libzstd-dev \
    librocksdb-dev \
    libgflags-dev \
    lsof \
    moreutils \
    ncdu \
@ -0,0 +1,45 @@
---
toc_priority: 6
toc_title: EmbeddedRocksDB
---

# EmbeddedRocksDB Engine {#EmbeddedRocksDB-engine}

This engine allows integrating ClickHouse with [rocksdb](http://rocksdb.org/).

`EmbeddedRocksDB` lets you store and retrieve data in an embedded RocksDB key-value store.

## Creating a Table {#table_engine-EmbeddedRocksDB-creating-a-table}

``` sql
CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]
(
    name1 [type1] [DEFAULT|MATERIALIZED|ALIAS expr1],
    name2 [type2] [DEFAULT|MATERIALIZED|ALIAS expr2],
    ...
) ENGINE = EmbeddedRocksDB PRIMARY KEY(primary_key_name)
```

Required parameters:

- `primary_key_name` – any column name in the column list.

Example:

``` sql
CREATE TABLE test
(
    `key` String,
    `v1` UInt32,
    `v2` String,
    `v3` Float32
)
ENGINE = EmbeddedRocksDB
PRIMARY KEY key
```

## Description {#description}

- a `primary key` must be specified, and it supports only one column. The primary key is serialized in binary as the rocksdb key.
- columns other than the primary key are serialized in binary as the rocksdb value, in the corresponding order.
- queries with key `equals` or `in` filtering are optimized into multi-key lookups in rocksdb (see the example below).
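To illustrate the last point, here is a minimal sketch against the `test` table defined above; the key values are made up for the example:

``` sql
-- Both filters below are served by RocksDB key lookups rather than a full scan.
INSERT INTO test VALUES ('first-key', 1, 'value', 3.2);

SELECT * FROM test WHERE key = 'first-key';
SELECT * FROM test WHERE key IN ('first-key', 'second-key');
```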
@ -30,4 +30,4 @@ Instead of inserting data manually, you might consider using one of [client lib
- `input_format_import_nested_json` allows inserting nested JSON objects into columns of [Nested](../../sql-reference/data-types/nested-data-structures/nested.md) type.

!!! note "Note"
    Settings are specified as `GET` parameters for the HTTP interface or as additional command-line arguments prefixed with `--` for the `CLI` interface.
|
@ -26,6 +26,9 @@ toc_title: Client Libraries
|
||||
- [go-clickhouse](https://github.com/roistat/go-clickhouse)
|
||||
- [mailrugo-clickhouse](https://github.com/mailru/go-clickhouse)
|
||||
- [golang-clickhouse](https://github.com/leprosus/golang-clickhouse)
|
||||
- Swift
|
||||
- [ClickHouseNIO](https://github.com/patrick-zippenfenig/ClickHouseNIO)
|
||||
- [ClickHouseVapor ORM](https://github.com/patrick-zippenfenig/ClickHouseVapor)
|
||||
- NodeJs
|
||||
- [clickhouse (NodeJs)](https://github.com/TimonKK/clickhouse)
|
||||
- [node-clickhouse](https://github.com/apla/node-clickhouse)
|
||||
|
@ -11,6 +11,7 @@ toc_title: Adopters

| Company | Industry | Usecase | Cluster Size | (Un)Compressed Data Size<abbr title="of single replica"><sup>\*</sup></abbr> | Reference |
|------------------------------------------------------------------------------------------------|---------------------------------|-----------------------|------------------------------------------------------------|------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| <a href="https://2gis.ru" class="favicon">2gis</a> | Maps | Monitoring | — | — | [Talk in Russian, July 2019](https://youtu.be/58sPkXfq6nw) |
| <a href="https://getadmiral.com/" class="favicon">Admiral</a> | Martech | Engagement Management | — | — | [Webinar Slides, June 2020](https://altinity.com/presentations/2020/06/16/big-data-in-real-time-how-clickhouse-powers-admirals-visitor-relationships-for-publishers) |
| <a href="https://cn.aliyun.com/" class="favicon">Alibaba Cloud</a> | Cloud | Managed Service | — | — | [Official Website](https://help.aliyun.com/product/144466.html) |
| <a href="https://alohabrowser.com/" class="favicon">Aloha Browser</a> | Mobile App | Browser backend | — | — | [Slides in Russian, May 2019](https://presentations.clickhouse.tech/meetup22/aloha.pdf) |
| <a href="https://amadeus.com/" class="favicon">Amadeus</a> | Travel | Analytics | — | — | [Press Release, April 2018](https://www.altinity.com/blog/2018/4/5/amadeus-technologies-launches-investment-and-insights-tool-based-on-machine-learning-and-strategy-algorithms) |
@ -29,6 +30,7 @@ toc_title: Adopters
| <a href="https://www.citadelsecurities.com/" class="favicon">Citadel Securities</a> | Finance | — | — | — | [Contribution, March 2019](https://github.com/ClickHouse/ClickHouse/pull/4774) |
| <a href="https://city-mobil.ru" class="favicon">Citymobil</a> | Taxi | Analytics | — | — | [Blog Post in Russian, March 2020](https://habr.com/en/company/citymobil/blog/490660/) |
| <a href="https://cloudflare.com" class="favicon">Cloudflare</a> | CDN | Traffic analysis | 36 servers | — | [Blog post, May 2017](https://blog.cloudflare.com/how-cloudflare-analyzes-1m-dns-queries-per-second/), [Blog post, March 2018](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/) |
| <a href="https://corporate.comcast.com/" class="favicon">Comcast</a> | Media | CDN Traffic Analysis | — | — | [ApacheCon 2019 Talk](https://www.youtube.com/watch?v=e9TZ6gFDjNg) |
| <a href="https://contentsquare.com" class="favicon">ContentSquare</a> | Web analytics | Main product | — | — | [Blog post in French, November 2018](http://souslecapot.net/2018/11/21/patrick-chatain-vp-engineering-chez-contentsquare-penser-davantage-amelioration-continue-que-revolution-constante/) |
| <a href="https://coru.net/" class="favicon">Corunet</a> | Analytics | Main product | — | — | [Slides in English, April 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup21/predictive_models.pdf) |
| <a href="https://www.creditx.com" class="favicon">CraiditX 氪信</a> | Finance AI | Analysis | — | — | [Slides in English, November 2019](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup33/udf.pptx) |
@ -64,7 +66,8 @@ toc_title: Adopters
| <a href="https://tech.mymarilyn.ru" class="favicon">Marilyn</a> | Advertising | Statistics | — | — | [Talk in Russian, June 2017](https://www.youtube.com/watch?v=iXlIgx2khwc) |
| <a href="https://mellodesign.ru/" class="favicon">Mello</a> | Marketing | Analytics | 1 server | — | [Article, Oct 2020](https://vc.ru/marketing/166180-razrabotka-tipovogo-otcheta-skvoznoy-analitiki) |
| <a href="https://www.messagebird.com" class="favicon">MessageBird</a> | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
| <a href="https://www.mindsdb.com/" class="favicon">MindsDB</a> | Machine Learning | Main Product | — | — | [Official Website](https://www.mindsdb.com/blog/machine-learning-models-as-tables-in-ch) |
| <a href="https://mux.com/" class="favicon">MUX</a> | Online Video | Video Analytics | — | — | [Talk in English, August 2019](https://altinity.com/presentations/2019/8/13/how-clickhouse-became-the-default-analytics-database-for-mux/) |
| <a href="https://www.mgid.com/" class="favicon">MGID</a> | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) |
| <a href="https://getnoc.com/" class="favicon">NOC Project</a> | Network Monitoring | Analytics | Main Product | — | [Official Website](https://getnoc.com/features/big-data/) |
| <a href="https://www.nuna.com/" class="favicon">Nuna Inc.</a> | Health Data Analytics | — | — | — | [Talk in English, July 2020](https://youtu.be/GMiXCMFDMow?t=170) |
@ -1081,4 +1081,45 @@ Default value: `/var/lib/clickhouse/access/`.

- [Access Control and Account Management](../../operations/access-rights.md#access-control)

## user_directories {#user_directories}

Section of the configuration file that contains settings:
- Path to the configuration file with predefined users.
- Path to the folder where users created by SQL commands are stored.

If this section is specified, the paths from [users_config](../../operations/server-configuration-parameters/settings.md#users-config) and [access_control_path](../../operations/server-configuration-parameters/settings.md#access_control_path) won't be used.

The `user_directories` section can contain any number of items; the order of the items defines their precedence (the higher the item, the higher the precedence).

**Example**

``` xml
<user_directories>
    <users_xml>
        <path>/etc/clickhouse-server/users.xml</path>
    </users_xml>
    <local_directory>
        <path>/var/lib/clickhouse/access/</path>
    </local_directory>
</user_directories>
```

You can also specify a `memory` setting, which means storing information only in memory without writing to disk, and an `ldap` setting, which means storing information on an LDAP server.

To add an LDAP server as a remote user directory for users that are not defined locally, define a single `ldap` section with the following parameters:
- `server` — one of the LDAP server names defined in the `ldap_servers` config section. This parameter is mandatory and cannot be empty.
- `roles` — section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. If no roles are specified, the user will not be able to perform any actions after authentication. If any of the listed roles is not defined locally at the time of authentication, the authentication attempt will fail as if the provided password were incorrect.

**Example**

``` xml
<ldap>
    <server>my_ldap_server</server>
    <roles>
        <my_local_role1 />
        <my_local_role2 />
    </roles>
</ldap>
```

[Original article](https://clickhouse.tech/docs/en/operations/server_configuration_parameters/settings/) <!--hide-->
@ -307,7 +307,51 @@ Disabled by default.

## input_format_tsv_enum_as_number {#settings-input_format_tsv_enum_as_number}

For TSV input format switches to parsing enum values as enum ids.
Enables or disables parsing enum values as enum ids for TSV input format.

Possible values:

- 0 — Enum values are parsed as values.
- 1 — Enum values are parsed as enum IDs.

Default value: 0.

**Example**

Consider the table:

```sql
CREATE TABLE table_with_enum_column_for_tsv_insert (Id Int32, Value Enum('first' = 1, 'second' = 2)) ENGINE = Memory();
```

When the `input_format_tsv_enum_as_number` setting is enabled:

```sql
SET input_format_tsv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 103 1;
SELECT * FROM table_with_enum_column_for_tsv_insert;
```

Result:

```text
┌──Id─┬─Value──┐
│ 102 │ second │
└─────┴────────┘
┌──Id─┬─Value──┐
│ 103 │ first │
└─────┴────────┘
```

When the `input_format_tsv_enum_as_number` setting is disabled, the `INSERT` query:

```sql
SET input_format_tsv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2;
```

throws an exception.

## input_format_null_as_default {#settings-input-format-null-as-default}
@ -1182,7 +1226,47 @@ For CSV input format enables or disables parsing of unquoted `NULL` as literal (

## input_format_csv_enum_as_number {#settings-input_format_csv_enum_as_number}

For CSV input format switches to parsing enum values as enum ids.
Enables or disables parsing enum values as enum ids for CSV input format.

Possible values:

- 0 — Enum values are parsed as values.
- 1 — Enum values are parsed as enum IDs.

Default value: 0.

**Examples**

Consider the table:

```sql
CREATE TABLE table_with_enum_column_for_csv_insert (Id Int32, Value Enum('first' = 1, 'second' = 2)) ENGINE = Memory();
```

When the `input_format_csv_enum_as_number` setting is enabled:

```sql
SET input_format_csv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2;
SELECT * FROM table_with_enum_column_for_csv_insert;
```

Result:

```text
┌──Id─┬─Value─────┐
│ 102 │ second │
└─────┴───────────┘
```

When the `input_format_csv_enum_as_number` setting is disabled, the `INSERT` query:

```sql
SET input_format_csv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2;
```

throws an exception.

## output_format_csv_crlf_end_of_line {#settings-output-format-csv-crlf-end-of-line}
@ -50,8 +50,6 @@ ClickHouse-specific aggregate functions:

- [skewPop](../../../sql-reference/aggregate-functions/reference/skewpop.md)
- [kurtSamp](../../../sql-reference/aggregate-functions/reference/kurtsamp.md)
- [kurtPop](../../../sql-reference/aggregate-functions/reference/kurtpop.md)
- [timeSeriesGroupSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupsum.md)
- [timeSeriesGroupRateSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupratesum.md)
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md)

@ -1,16 +0,0 @@
---
toc_priority: 171
---

# timeSeriesGroupRateSum {#agg-function-timeseriesgroupratesum}

Syntax: `timeSeriesGroupRateSum(uid, ts, val)`

Similarly to [timeSeriesGroupSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupsum.md), `timeSeriesGroupRateSum` calculates the rate of the time series and then sums the rates together.
Also, the timestamp should be in ascending order before using this function.

Applying this function to the data from the `timeSeriesGroupSum` example, you get the following result:

``` text
[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
```
@ -1,57 +0,0 @@
---
toc_priority: 170
---

# timeSeriesGroupSum {#agg-function-timeseriesgroupsum}

Syntax: `timeSeriesGroupSum(uid, timestamp, value)`

`timeSeriesGroupSum` can aggregate different time series whose sample timestamps are not aligned.
It uses linear interpolation between two sample timestamps and then sums the time series together.

- `uid` is the time series unique id, `UInt64`.
- `timestamp` is of Int64 type in order to support millisecond or microsecond resolution.
- `value` is the metric.

The function returns an array of tuples with `(timestamp, aggregated_value)` pairs.

Before using this function, make sure `timestamp` is in ascending order.

Example:

``` text
┌─uid─┬─timestamp─┬─value─┐
│ 1 │ 2 │ 0.2 │
│ 1 │ 7 │ 0.7 │
│ 1 │ 12 │ 1.2 │
│ 1 │ 17 │ 1.7 │
│ 1 │ 25 │ 2.5 │
│ 2 │ 3 │ 0.6 │
│ 2 │ 8 │ 1.6 │
│ 2 │ 12 │ 2.4 │
│ 2 │ 18 │ 3.6 │
│ 2 │ 24 │ 4.8 │
└─────┴───────────┴───────┘
```

``` sql
CREATE TABLE time_series(
    uid UInt64,
    timestamp Int64,
    value Float64
) ENGINE = Memory;
INSERT INTO time_series VALUES
    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);

SELECT timeSeriesGroupSum(uid, timestamp, value)
FROM (
    SELECT * FROM time_series ORDER BY timestamp ASC
);
```

And the result will be:

``` text
[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
```
@ -337,26 +337,124 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
└────────────┴───────────┴───────────┴───────────┘
```

## date_trunc(datepart, time_or_data\[, time_zone\]), dateTrunc(datepart, time_or_data\[, time_zone\]) {#date_trunc}
## date_trunc {#date_trunc}

Truncates a date or date with time based on the specified datepart, such as
- `second`
- `minute`
- `hour`
- `day`
- `week`
- `month`
- `quarter`
- `year`
Truncates date and time data to the specified part of date.

```sql
SELECT date_trunc('hour', now())
```

**Syntax**

``` sql
date_trunc(unit, value[, timezone])
```

## now {#now}
Alias: `dateTrunc`.

Accepts zero or one argument (timezone) and returns the current time at one of the moments of request execution, or the current time in the specified timezone at one of the moments of request execution if the `timezone` argument is provided.
This function returns a constant, even if the request took a long time to complete.
**Parameters**

- `unit` — Part of date. [String](../syntax.md#syntax-string-literal).
    Possible values:

    - `second`
    - `minute`
    - `hour`
    - `day`
    - `week`
    - `month`
    - `quarter`
    - `year`

- `value` — Date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not specified, the function uses the timezone of the `value` parameter. [String](../../sql-reference/data-types/string.md).

**Returned value**

- Value, truncated to the specified part of date.

Type: [Datetime](../../sql-reference/data-types/datetime.md).

**Example**

Query without timezone:

``` sql
SELECT now(), date_trunc('hour', now());
```

Result:

``` text
┌───────────────now()─┬─date_trunc('hour', now())─┐
│ 2020-09-28 10:40:45 │ 2020-09-28 10:00:00 │
└─────────────────────┴───────────────────────────┘
```

Query with the specified timezone:

```sql
SELECT now(), date_trunc('hour', now(), 'Europe/Moscow');
```

Result:

```text
┌───────────────now()─┬─date_trunc('hour', now(), 'Europe/Moscow')─┐
│ 2020-09-28 10:46:26 │ 2020-09-28 13:00:00 │
└─────────────────────┴────────────────────────────────────────────┘
```

**See also**

- [toStartOfInterval](#tostartofintervaltime-or-data-interval-x-unit-time-zone)

## now {#now}

Returns the current date and time.

**Syntax**

``` sql
now([timezone])
```

**Parameters**

- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). [String](../../sql-reference/data-types/string.md).

**Returned value**

- Current date and time.

Type: [Datetime](../../sql-reference/data-types/datetime.md).

**Example**

Query without timezone:

``` sql
SELECT now();
```

Result:

``` text
┌───────────────now()─┐
│ 2020-10-17 07:42:09 │
└─────────────────────┘
```

Query with the specified timezone:

``` sql
SELECT now('Europe/Moscow');
```

Result:

``` text
┌─now('Europe/Moscow')─┐
│ 2020-10-17 10:42:23 │
└──────────────────────┘
```

## today {#today}
@ -325,7 +325,59 @@ This function accepts a number or date or date with time, and returns a FixedStr

## reinterpretAsUUID {#reinterpretasuuid}

This function accepts a FixedString and returns a UUID. It takes a 16-byte string. If the string isn't long enough, the function works as if the string were padded with the necessary number of null bytes at the end. If the string is longer than 16 bytes, the extra bytes at the end are ignored.
This function accepts a 16-byte string and returns a UUID containing bytes representing the corresponding value in network byte order (big-endian). If the string isn't long enough, the function works as if the string were padded with the necessary number of null bytes at the end. If the string is longer than 16 bytes, the extra bytes at the end are ignored.

**Syntax**

``` sql
reinterpretAsUUID(fixed_string)
```

**Parameters**

- `fixed_string` — Big-endian byte string. [FixedString](../../sql-reference/data-types/fixedstring.md#fixedstring).

**Returned value**

- The UUID type value. [UUID](../../sql-reference/data-types/uuid.md#uuid-data-type).

**Examples**

String to UUID.

Query:

``` sql
SELECT reinterpretAsUUID(reverse(unhex('000102030405060708090a0b0c0d0e0f')))
```

Result:

``` text
┌─reinterpretAsUUID(reverse(unhex('000102030405060708090a0b0c0d0e0f')))─┐
│ 08090a0b-0c0d-0e0f-0001-020304050607 │
└───────────────────────────────────────────────────────────────────────┘
```

Going back and forth from String to UUID.

Query:

``` sql
WITH
    generateUUIDv4() AS uuid,
    identity(lower(hex(reverse(reinterpretAsString(uuid))))) AS str,
    reinterpretAsUUID(reverse(unhex(str))) AS uuid2
SELECT uuid = uuid2;
```

Result:

``` text
┌─equals(uuid, uuid2)─┐
│ 1 │
└─────────────────────┘
```

## CAST(x, T) {#type_conversion_function-cast}
@ -204,7 +204,7 @@ SYSTEM STOP MOVES [[db.]merge_tree_family_table_name]

## Managing ReplicatedMergeTree Tables {#query-language-system-replicated}

ClickHouse can manage background replication related processes in [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replacingmergetree.md) tables.
ClickHouse can manage background replication related processes in [ReplicatedMergeTree](../../engines/table-engines/mergetree-family/replication/#table_engines-replication) tables.

### STOP FETCHES {#query_language-system-stop-fetches}
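As a usage sketch, `STOP FETCHES` follows the same pattern as the `SYSTEM STOP MOVES` statement quoted above; the table name here is hypothetical:

``` sql
-- Stop background fetches of inserted parts for one replicated table.
SYSTEM STOP FETCHES db.replicated_table_name;

-- Re-enable background fetches afterwards.
SYSTEM START FETCHES db.replicated_table_name;
```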
@ -57,7 +57,7 @@ Identifiers are:

Identifiers can be quoted or non-quoted. The latter is preferred.

Non-quoted identifiers must match the regex `^[a-zA-Z_][0-9a-zA-Z_]*$` and cannot be equal to [keywords](#syntax-keywords). Examples: `x, _1, X_y__Z123_`.
Non-quoted identifiers must match the regex `^[0-9a-zA-Z_]*[a-zA-Z_]$` and cannot be equal to [keywords](#syntax-keywords). Examples: `x, _1, X_y__Z123_`.

If you want to use identifiers that are the same as keywords, or to use other symbols in identifiers, quote them using double quotes or backticks, for example, `"id"`, `` `id` `` (see the example below).
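A minimal sketch of quoted identifiers; the table and column names are made up for the example:

``` sql
-- Backticks (or double quotes) let an identifier coincide with a keyword.
CREATE TABLE t (`table` String) ENGINE = Memory;
SELECT `table` FROM t;
```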
|
@ -464,69 +464,6 @@ The kurtosis of the given distribution. Type — [Float64](../../sql-reference/d
|
||||
SELECT kurtSamp(value) FROM series_with_value_column
|
||||
```
|
||||
|
||||
## Para obtener más información, consulta nuestra Política de privacidad y nuestras Condiciones de uso) {#agg-function-timeseriesgroupsum}
|
||||
|
||||
`timeSeriesGroupSum` puede agregar diferentes series de tiempo que muestran la marca de tiempo no la alineación.
|
||||
Utilizará la interpolación lineal entre dos marcas de tiempo de muestra y luego sumará series temporales juntas.
|
||||
|
||||
- `uid` es la identificación única de la serie temporal, `UInt64`.
|
||||
- `timestamp` es el tipo Int64 para admitir milisegundos o microsegundos.
|
||||
- `value` es la métrica.
|
||||
|
||||
La función devuelve una matriz de tuplas con `(timestamp, aggregated_value)` par.
|
||||
|
||||
Antes de utilizar esta función, asegúrese de `timestamp` está en orden ascendente.
|
||||
|
||||
Ejemplo:
|
||||
|
||||
``` text
|
||||
┌─uid─┬─timestamp─┬─value─┐
|
||||
│ 1 │ 2 │ 0.2 │
|
||||
│ 1 │ 7 │ 0.7 │
|
||||
│ 1 │ 12 │ 1.2 │
|
||||
│ 1 │ 17 │ 1.7 │
|
||||
│ 1 │ 25 │ 2.5 │
|
||||
│ 2 │ 3 │ 0.6 │
|
||||
│ 2 │ 8 │ 1.6 │
|
||||
│ 2 │ 12 │ 2.4 │
|
||||
│ 2 │ 18 │ 3.6 │
|
||||
│ 2 │ 24 │ 4.8 │
|
||||
└─────┴───────────┴───────┘
|
||||
```
|
||||
|
||||
``` sql
|
||||
CREATE TABLE time_series(
|
||||
uid UInt64,
|
||||
timestamp Int64,
|
||||
value Float64
|
||||
) ENGINE = Memory;
|
||||
INSERT INTO time_series VALUES
|
||||
(1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
|
||||
(2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);
|
||||
|
||||
SELECT timeSeriesGroupSum(uid, timestamp, value)
|
||||
FROM (
|
||||
SELECT * FROM time_series order by timestamp ASC
|
||||
);
|
||||
```
|
||||
|
||||
Y el resultado será:
|
||||
|
||||
``` text
|
||||
[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
|
||||
```
|
||||
|
||||
## También puede utilizar el siguiente ejemplo:) {#agg-function-timeseriesgroupratesum}
|
||||
|
||||
De manera similar a `timeSeriesGroupSum`, `timeSeriesGroupRateSum` calcula la tasa de series temporales y luego suma las tasas juntas.
|
||||
Además, la marca de tiempo debe estar en orden ascendente antes de usar esta función.
|
||||
|
||||
Aplicando esta función a los datos del `timeSeriesGroupSum` ejemplo, se obtiene el siguiente resultado:
|
||||
|
||||
``` text
|
||||
[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
|
||||
```
|
||||
|
||||
## Acerca de) {#agg_function-avg}
|
||||
|
||||
Calcula el promedio.
|
||||
|
@ -464,69 +464,6 @@ The kurtosis of the given distribution. Type — [Float64](../../sql

SELECT kurtSamp(value) FROM series_with_value_column
```

## timeSeriesGroupSum(uid, timestamp, value) {#agg-function-timeseriesgroupsum}

`timeSeriesGroupSum` can aggregate different time series whose sample timestamps are not aligned.
It uses linear interpolation between two sample timestamps and then sums the time series together.

- `uid` is the unique id of the time series, `UInt64`.
- `timestamp` is of Int64 type in order to support milliseconds or microseconds.
- `value` is the metric.

The function returns an array of tuples with `(timestamp, aggregated_value)` pairs.

Before using this function make sure `timestamp` is in ascending order.

Example:

``` text
┌─uid─┬─timestamp─┬─value─┐
│   1 │         2 │   0.2 │
│   1 │         7 │   0.7 │
│   1 │        12 │   1.2 │
│   1 │        17 │   1.7 │
│   1 │        25 │   2.5 │
│   2 │         3 │   0.6 │
│   2 │         8 │   1.6 │
│   2 │        12 │   2.4 │
│   2 │        18 │   3.6 │
│   2 │        24 │   4.8 │
└─────┴───────────┴───────┘
```

``` sql
CREATE TABLE time_series(
    uid       UInt64,
    timestamp Int64,
    value     Float64
) ENGINE = Memory;
INSERT INTO time_series VALUES
    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);

SELECT timeSeriesGroupSum(uid, timestamp, value)
FROM (
    SELECT * FROM time_series ORDER BY timestamp ASC
);
```

And the result will be:

``` text
[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
```

## timeSeriesGroupRateSum(uid, ts, val) {#agg-function-timeseriesgroupratesum}

Similarly to `timeSeriesGroupSum`, `timeSeriesGroupRateSum` calculates the rate of the time series and then sums the rates together.
Also, the timestamps should be in ascending order before this function is used.

Applying this function to the data from the `timeSeriesGroupSum` example yields the following result:

``` text
[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
```

## avg(x) {#agg_function-avg}

Calculates the average.
@ -464,69 +464,6 @@ The kurtosis of the given distribution. Type — [Float64](../../sql-reference/d

SELECT kurtSamp(value) FROM series_with_value_column
```

## timeSeriesGroupSum(uid, timestamp, value) {#agg-function-timeseriesgroupsum}

`timeSeriesGroupSum` can aggregate different time series whose sample timestamps are not aligned.
It uses linear interpolation between two sample timestamps and then sums the time series together.

- `uid` is the unique id of the time series, `UInt64`.
- `timestamp` is of Int64 type in order to support milliseconds or microseconds.
- `value` is the metric.

The function returns an array of tuples with `(timestamp, aggregated_value)` pairs.

Before using this function make sure `timestamp` is in ascending order.

Example:

``` text
┌─uid─┬─timestamp─┬─value─┐
│   1 │         2 │   0.2 │
│   1 │         7 │   0.7 │
│   1 │        12 │   1.2 │
│   1 │        17 │   1.7 │
│   1 │        25 │   2.5 │
│   2 │         3 │   0.6 │
│   2 │         8 │   1.6 │
│   2 │        12 │   2.4 │
│   2 │        18 │   3.6 │
│   2 │        24 │   4.8 │
└─────┴───────────┴───────┘
```

``` sql
CREATE TABLE time_series(
    uid       UInt64,
    timestamp Int64,
    value     Float64
) ENGINE = Memory;
INSERT INTO time_series VALUES
    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);

SELECT timeSeriesGroupSum(uid, timestamp, value)
FROM (
    SELECT * FROM time_series ORDER BY timestamp ASC
);
```

And the result will be:

``` text
[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
```

## timeSeriesGroupRateSum(uid, ts, val) {#agg-function-timeseriesgroupratesum}

Similarly to `timeSeriesGroupSum`, `timeSeriesGroupRateSum` calculates the rate of the time series and then sums the rates together.
Also, the timestamps should be in ascending order before this function is used.

Applying this function to the data from the `timeSeriesGroupSum` example yields the following result:

``` text
[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
```

## avg(x) {#agg_function-avg}

Calculates the average.
@ -464,69 +464,6 @@ The kurtosis of the given distribution. Type — [Float64](../../sql-reference/d

SELECT kurtSamp(value) FROM series_with_value_column
```

## timeSeriesGroupSum(uid, timestamp, value) {#agg-function-timeseriesgroupsum}

`timeSeriesGroupSum` can aggregate different time series whose sample timestamps are not aligned.
It uses linear interpolation between two sample timestamps and then sums the time series together.

- `uid` is the unique id of the time series, `UInt64`.
- `timestamp` is of Int64 type in order to support milliseconds or microseconds.
- `value` is the metric.

The function returns an array of tuples with `(timestamp, aggregated_value)` pairs.

Before using this function make sure `timestamp` is in ascending order.

Example:

``` text
┌─uid─┬─timestamp─┬─value─┐
│   1 │         2 │   0.2 │
│   1 │         7 │   0.7 │
│   1 │        12 │   1.2 │
│   1 │        17 │   1.7 │
│   1 │        25 │   2.5 │
│   2 │         3 │   0.6 │
│   2 │         8 │   1.6 │
│   2 │        12 │   2.4 │
│   2 │        18 │   3.6 │
│   2 │        24 │   4.8 │
└─────┴───────────┴───────┘
```

``` sql
CREATE TABLE time_series(
    uid       UInt64,
    timestamp Int64,
    value     Float64
) ENGINE = Memory;
INSERT INTO time_series VALUES
    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);

SELECT timeSeriesGroupSum(uid, timestamp, value)
FROM (
    SELECT * FROM time_series ORDER BY timestamp ASC
);
```

And the result will be:

``` text
[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
```

## timeSeriesGroupRateSum(uid, ts, val) {#agg-function-timeseriesgroupratesum}

Similarly to `timeSeriesGroupSum`, `timeSeriesGroupRateSum` calculates the rate of the time series and then sums the rates together.
Also, the timestamps should be in ascending order before this function is used.

Applying this function to the data from the `timeSeriesGroupSum` example yields the following result:

``` text
[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
```

## avg(x) {#agg_function-avg}

Calculates the average.
@ -1068,4 +1068,45 @@ ClickHouse uses ZooKeeper to store metadata

- [Access control](../access-rights.md#access-control)

## user_directories {#user_directories}

Section of the configuration file that contains the following settings:
- The path to the configuration file with predefined users.
- The path to the file that stores users created by SQL commands.

If this section is defined, the paths from [users_config](../../operations/server-configuration-parameters/settings.md#users-config) and [access_control_path](../../operations/server-configuration-parameters/settings.md#access_control_path) are not used.

The `user_directories` section can contain any number of items; the order of the items defines their precedence (the higher the item, the higher the precedence).

**Example**

``` xml
<user_directories>
    <users_xml>
        <path>/etc/clickhouse-server/users.xml</path>
    </users_xml>
    <local_directory>
        <path>/var/lib/clickhouse/access/</path>
    </local_directory>
</user_directories>
```

You can also specify the `memory` setting, which means storing information only in memory, without writing to disk, and the `ldap` setting, which means storing information on an [LDAP server](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol).

To add an LDAP server as a remote user directory for users that are not defined locally, define a single `ldap` section with the following parameters:
- `server`: the name of one of the LDAP servers defined in the `ldap_servers` section of the configuration file. This parameter is optional and can be empty.
- `roles`: a section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server. If no roles are specified, the user will not be able to perform any actions after authentication. If any of the listed roles is not defined locally at the moment of authentication, the authentication attempt fails as if the provided password were incorrect.

**Example**

``` xml
<ldap>
    <server>my_ldap_server</server>
    <roles>
        <my_local_role1 />
        <my_local_role2 />
    </roles>
</ldap>
```

[Original article](https://clickhouse.tech/docs/ru/operations/server_configuration_parameters/settings/) <!--hide-->
@ -289,6 +289,54 @@ INSERT INTO test VALUES (lower('Hello')), (lower('world')), (lower('INSERT')), (

Disabled by default.

## input_format_tsv_enum_as_number {#settings-input_format_tsv_enum_as_number}

Enables or disables parsing enum values as enum ids for the TSV input format.

Possible values:

- 0: parse enum values as values.
- 1: parse enum values as enum ids.

Default value: 0.

**Example**

Consider the table:

```sql
CREATE TABLE table_with_enum_column_for_tsv_insert (Id Int32,Value Enum('first' = 1, 'second' = 2)) ENGINE=Memory();
```

With the `input_format_tsv_enum_as_number` setting enabled:

```sql
SET input_format_tsv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 103 1;
SELECT * FROM table_with_enum_column_for_tsv_insert;
```

Result:

```text
┌──Id─┬─Value──┐
│ 102 │ second │
└─────┴────────┘
┌──Id─┬─Value──┐
│ 103 │ first  │
└─────┴────────┘
```

With the `input_format_tsv_enum_as_number` setting disabled, the `INSERT` query:

```sql
SET input_format_tsv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2;
```

throws an exception.

## input_format_null_as_default {#settings-input-format-null-as-default}

Enables or disables the use of default values in cases when the input data contains `NULL` but the type of the corresponding column is not `Nullable(T)` (for text input formats).
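A minimal sketch of the behavior when the setting is enabled (the table name `t_null_default` is hypothetical; the `\N` line below is TSV input data):

```sql
CREATE TABLE t_null_default (x UInt32) ENGINE = Memory;
SET input_format_null_as_default = 1;
INSERT INTO t_null_default FORMAT TSV
\N
SELECT * FROM t_null_default; -- returns 0, the default value for UInt32
```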
@ -1127,6 +1175,50 @@ SELECT area/period FROM account_orders FORMAT JSON;

For the CSV format, enables or disables parsing of an unquoted `NULL` string as a literal (synonym for `\N`).

## input_format_csv_enum_as_number {#settings-input_format_csv_enum_as_number}

Enables or disables parsing enum values as enum ids for the CSV input format.

Possible values:

- 0: parse enum values as values.
- 1: parse enum values as enum ids.

Default value: 0.

**Example**

Consider the table:

```sql
CREATE TABLE table_with_enum_column_for_csv_insert (Id Int32,Value Enum('first' = 1, 'second' = 2)) ENGINE=Memory();
```

With the `input_format_csv_enum_as_number` setting enabled:

```sql
SET input_format_csv_enum_as_number = 1;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2;
SELECT * FROM table_with_enum_column_for_csv_insert;
```

Result:

```text
┌──Id─┬─Value──┐
│ 102 │ second │
└─────┴────────┘
```

With the `input_format_csv_enum_as_number` setting disabled, the `INSERT` query:

```sql
SET input_format_csv_enum_as_number = 0;
INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2;
```

throws an exception.

## output_format_csv_crlf_end_of_line {#settings-output-format-csv-crlf-end-of-line}

Use CRLF (DOS/Windows style) instead of LF (Unix style) as the line separator for the CSV format.
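A quick illustration (the difference is only in the invisible line terminator of each output row):

```sql
SET output_format_csv_crlf_end_of_line = 1;
SELECT 1 AS x, 2 AS y FORMAT CSV; -- each row now ends with \r\n instead of \n
```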
@ -45,8 +45,6 @@ toc_hidden: true

- [skewPop](../../../sql-reference/aggregate-functions/reference/skewpop.md)
- [kurtSamp](../../../sql-reference/aggregate-functions/reference/kurtsamp.md)
- [kurtPop](../../../sql-reference/aggregate-functions/reference/kurtpop.md)
- [timeSeriesGroupSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupsum.md)
- [timeSeriesGroupRateSum](../../../sql-reference/aggregate-functions/reference/timeseriesgroupratesum.md)
- [uniq](../../../sql-reference/aggregate-functions/reference/uniq.md)
- [uniqExact](../../../sql-reference/aggregate-functions/reference/uniqexact.md)
- [uniqCombined](../../../sql-reference/aggregate-functions/reference/uniqcombined.md)
@ -1,18 +0,0 @@

---
toc_priority: 171
---

# timeSeriesGroupRateSum {#agg-function-timeseriesgroupratesum}

Syntax: `timeSeriesGroupRateSum(uid, ts, val)`

Similarly to timeSeriesGroupSum, timeSeriesGroupRateSum computes the derivative over timestamp for each series and then sums the resulting derivatives over all series for each timestamp value.
The series must also be sorted in ascending order of timestamp.

For the example from the timeSeriesGroupSum description, the result will be the following:

``` text
[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
```
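To see where these numbers come from: between timestamps 2 and 7 series 1 grows from 0.2 to 0.7, a rate of (0.7 - 0.2) / (7 - 2) = 0.1, and between timestamps 3 and 8 series 2 grows from 0.6 to 1.6, a rate of 0.2; where both series are defined, the summed rate is 0.1 + 0.2 = 0.3, which matches the entries above.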
[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/timeseriesgroupratesum/) <!--hide-->
@ -1,59 +0,0 @@

---
toc_priority: 170
---

# timeSeriesGroupSum {#agg-function-timeseriesgroupsum}

Syntax: `timeSeriesGroupSum(uid, timestamp, value)`

`timeSeriesGroupSum` aggregates time series in which the sample moments do not coincide.
The function uses linear interpolation between two time values and then sums the values for each moment (both measured and interpolated) across all series.

- `uid` is the unique identifier of the time series, `UInt64`.
- `timestamp` has the `Int64` type so that milliseconds and microseconds can be represented.
- `value` is the value of the metric.

The function returns an array of tuples with `(timestamp, aggregated_value)` pairs.

The time series must be sorted in ascending order of `timestamp`.

Example:

``` text
┌─uid─┬─timestamp─┬─value─┐
│   1 │         2 │   0.2 │
│   1 │         7 │   0.7 │
│   1 │        12 │   1.2 │
│   1 │        17 │   1.7 │
│   1 │        25 │   2.5 │
│   2 │         3 │   0.6 │
│   2 │         8 │   1.6 │
│   2 │        12 │   2.4 │
│   2 │        18 │   3.6 │
│   2 │        24 │   4.8 │
└─────┴───────────┴───────┘
```

``` sql
CREATE TABLE time_series(
    uid       UInt64,
    timestamp Int64,
    value     Float64
) ENGINE = Memory;
INSERT INTO time_series VALUES
    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);

SELECT timeSeriesGroupSum(uid, timestamp, value)
FROM (
    SELECT * FROM time_series ORDER BY timestamp ASC
);
```

And the result will be:

``` text
[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
```

[Original article](https://clickhouse.tech/docs/en/sql-reference/aggregate-functions/reference/timeseriesgroupsum/) <!--hide-->
@ -234,10 +234,124 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d

Converts a date-with-time to the number of the second, counted from a certain fixed point in the past.

## date_trunc {#date_trunc}

Truncates a date and time, discarding the parts smaller than the specified part.

**Syntax**

``` sql
date_trunc(unit, value[, timezone])
```

Alias: `dateTrunc`.

**Parameters**

- `unit`: the name of the part of the date or time. [String](../syntax.md#syntax-string-literal).
    Possible values:

    - `second`
    - `minute`
    - `hour`
    - `day`
    - `week`
    - `month`
    - `quarter`
    - `year`

- `value`: the date and time. [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone`: the [time zone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). If not set, the time zone of the `value` parameter is used. [String](../../sql-reference/data-types/string.md)

**Returned value**

- The date and time truncated to the specified part.

Type: [Datetime](../../sql-reference/data-types/datetime.md).

**Examples**

Query without a time zone:

``` sql
SELECT now(), date_trunc('hour', now());
```

Result:

``` text
┌───────────────now()─┬─date_trunc('hour', now())─┐
│ 2020-09-28 10:40:45 │       2020-09-28 10:00:00 │
└─────────────────────┴───────────────────────────┘
```

Query with the time zone specified:

```sql
SELECT now(), date_trunc('hour', now(), 'Europe/Moscow');
```

Result:

```text
┌───────────────now()─┬─date_trunc('hour', now(), 'Europe/Moscow')─┐
│ 2020-09-28 10:46:26 │                        2020-09-28 13:00:00 │
└─────────────────────┴────────────────────────────────────────────┘
```

**See also**

- [toStartOfInterval](#tostartofintervaltime-or-data-interval-x-unit-time-zone)

## now {#now}

Takes zero arguments and returns the current time at one of the moments of query execution.
The function returns a constant even if the query took a long time to execute.
Returns the current date and time.

**Syntax**

``` sql
now([timezone])
```

**Parameters**

- `timezone`: the [time zone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) for the returned value (optional). [String](../../sql-reference/data-types/string.md)

**Returned value**

- The current date and time.

Type: [Datetime](../../sql-reference/data-types/datetime.md).

**Example**

Query without a time zone:

``` sql
SELECT now();
```

Result:

``` text
┌───────────────now()─┐
│ 2020-10-17 07:42:09 │
└─────────────────────┘
```

Query with the time zone specified:

``` sql
SELECT now('Europe/Moscow');
```

Result:

``` text
┌─now('Europe/Moscow')─┐
│  2020-10-17 10:42:23 │
└──────────────────────┘
```

## today {#today}
@ -319,6 +319,62 @@ SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut

The function takes a number, a date, or a date-with-time and returns a string containing the bytes that represent the corresponding value in host byte order (little endian). Trailing zero bytes are dropped. For example, the value 255 of type UInt32 is a string one byte long.

## reinterpretAsUUID {#reinterpretasuuid}

The function takes a sixteen-byte string and interprets its bytes in network byte order (big endian). If the string is not long enough, the function works as if the string were padded with the necessary number of zero bytes at the end. If the string is longer than sixteen bytes, the extra bytes at the end are ignored.

**Syntax**

``` sql
reinterpretAsUUID(fixed_string)
```

**Parameters**

- `fixed_string`: a string with big-endian byte order. [FixedString](../../sql-reference/data-types/fixedstring.md#fixedstring).

**Returned value**

- A value of the [UUID](../../sql-reference/data-types/uuid.md#uuid-data-type) type.

**Examples**

Interpreting a string as a UUID.

Query:

``` sql
SELECT reinterpretAsUUID(reverse(unhex('000102030405060708090a0b0c0d0e0f')))
```

Result:

``` text
┌─reinterpretAsUUID(reverse(unhex('000102030405060708090a0b0c0d0e0f')))─┐
│ 08090a0b-0c0d-0e0f-0001-020304050607                                  │
└───────────────────────────────────────────────────────────────────────┘
```

Going to a UUID and back.

Query:

``` sql
WITH
    generateUUIDv4() AS uuid,
    identity(lower(hex(reverse(reinterpretAsString(uuid))))) AS str,
    reinterpretAsUUID(reverse(unhex(str))) AS uuid2
SELECT uuid = uuid2;
```

Result:

``` text
┌─equals(uuid, uuid2)─┐
│                   1 │
└─────────────────────┘
```

## CAST(x, T) {#type_conversion_function-cast}

Converts x to the t data type.
@ -464,69 +464,6 @@ The kurtosis of the given distribution. Type — [Float64](../../sql-reference/d

SELECT kurtSamp(value) FROM series_with_value_column
```

## timeSeriesGroupSum(uid, timestamp, value) {#agg-function-timeseriesgroupsum}

`timeSeriesGroupSum` can aggregate different time series whose sample timestamps are not aligned.
It uses linear interpolation between two sample timestamps and then sums the time series together.

- `uid` is the unique id of the time series, `UInt64`.
- `timestamp` is of Int64 type in order to support milliseconds or microseconds.
- `value` is the metric.

The function returns an array of tuples with `(timestamp, aggregated_value)` pairs.

Before using this function make sure `timestamp` is in ascending order.

Example:

``` text
┌─uid─┬─timestamp─┬─value─┐
│   1 │         2 │   0.2 │
│   1 │         7 │   0.7 │
│   1 │        12 │   1.2 │
│   1 │        17 │   1.7 │
│   1 │        25 │   2.5 │
│   2 │         3 │   0.6 │
│   2 │         8 │   1.6 │
│   2 │        12 │   2.4 │
│   2 │        18 │   3.6 │
│   2 │        24 │   4.8 │
└─────┴───────────┴───────┘
```

``` sql
CREATE TABLE time_series(
    uid       UInt64,
    timestamp Int64,
    value     Float64
) ENGINE = Memory;
INSERT INTO time_series VALUES
    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);

SELECT timeSeriesGroupSum(uid, timestamp, value)
FROM (
    SELECT * FROM time_series ORDER BY timestamp ASC
);
```

And the result will be:

``` text
[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
```

## timeSeriesGroupRateSum(uid, ts, val) {#agg-function-timeseriesgroupratesum}

Similarly to `timeSeriesGroupSum`, `timeSeriesGroupRateSum` calculates the rate of the time series and then sums the rates together.
Also, the timestamps should be in ascending order before this function is used.

Applying this function to the data from the `timeSeriesGroupSum` example yields the following result:

``` text
[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
```

## avg(x) {#agg_function-avg}

Calculates the average.
@ -2,17 +2,17 @@
machine_translated: true
machine_translated_rev: 5decc73b5dc60054f19087d3690c4eb99446a6c3
toc_priority: 3
toc_title: "\u788C\u83BD\u7984Support:"
toc_title: "\u5546\u4e1a\u652f\u6301"
---

# ClickHouse Commercial Support Service Providers {#clickhouse-commercial-support-service-providers}

!!! info "Info"
    If you have launched a ClickHouse commercial support service, feel free to [open a pull request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/support.md) to add it to the following list.
    If you have launched a ClickHouse commercial support service, feel free to [submit a pull request](https://github.com/ClickHouse/ClickHouse/edit/master/docs/en/commercial/support.md) to add it to the following list.

## Acuity {#altinity}
## Altinity {#altinity}

Visit [www.altinity.com](https://www.altinity.com/) for more information.
Altinity has offered enterprise ClickHouse support services since 2017. Altinity's customers range from Fortune 100 enterprises to startups. Visit [www.altinity.com](https://www.altinity.com/) for more information.

## Mafiree {#mafiree}
@ -43,7 +43,7 @@ $ cd ..

To do this, create the following file:

/Library/LaunchDaemons/limit.maxfilesplist:
/Library/LaunchDaemons/limit.maxfiles.plist:

``` xml
<?xml version="1.0" encoding="UTF-8"?>
@ -7,35 +7,37 @@ toc_title: "\u6570\u636E\u5907\u4EFD"

# Data Backup {#data-backup}

While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes; for example, by default [you cannot use a MergeTree-like engine to drop a table containing more than 50 GB of data](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). However, these safeguards do not cover all possible cases and can be circumvented.
Although [replication](../engines/table-engines/mergetree-family/replication.md) protects against data loss from hardware failures, it does not protect against human errors: accidentally deleting data, dropping the wrong table or a table on the wrong cluster, or bugs that lead to incorrect data processing or data corruption. Such mistakes may affect all replicas. ClickHouse has built-in safeguards against some kinds of errors; for example, by default [you cannot use a MergeTree-like engine to drop a table containing more than 50 GB of data](https://github.com/ClickHouse/ClickHouse/blob/v18.14.18-stable/programs/server/config.xml#L322-L330). However, these safeguards do not cover every possible case and can be circumvented.

In order to effectively mitigate possible human errors, you should carefully prepare a backup and restore strategy for your data **in advance**.
In order to effectively mitigate possible human errors, you should prepare your backup and restore strategy **in advance**.

Each company has different resources available and different business requirements, so there is no universal ClickHouse backup and restore solution that fits every situation. What works for one gigabyte of data may not work for tens of petabytes. There are a variety of possible approaches with their own pros and cons, which will be discussed below. It is a good idea to use several approaches instead of just one in order to compensate for their various shortcomings.
Different companies have different resources available and different business requirements, so there is no universal ClickHouse backup and restore solution that fits every situation. A scheme that works for 1 GB of data probably will not work for tens of petabytes. There are a variety of possible approaches, each with its own pros and cons, discussed below. It is a good idea to combine several approaches rather than rely on just one, so that their respective shortcomings are compensated.

!!! note "Note"
    Keep in mind that if you backed something up and never tried to restore it, chances are that the restore will not work properly when you actually need it (or at least it will take longer than the business can tolerate). So whatever backup approach you choose, make sure to automate the restore process as well, and practice it regularly on a spare ClickHouse cluster.

## Duplicating Source Data Somewhere Else {#duplicating-source-data-somewhere-else}

Often the data ingested into ClickHouse is delivered through some sort of persistent queue, such as [Apache Kafka](https://kafka.apache.org). In this case it is possible to configure an additional set of subscribers that will read the same data stream while it is being written to ClickHouse and store it in cold storage. Most companies already have some default recommended cold storage, which could be an object store or a distributed filesystem like [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html).
Often the data collected into ClickHouse is delivered through some sort of persistent queue, such as [Apache Kafka](https://kafka.apache.org). In this case it is possible to configure an additional set of subscribers that will read the same data stream while it is being written to ClickHouse and store it in cold storage. Most companies already have some default recommended cold storage, which could be an object store or a distributed filesystem like [HDFS](https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HdfsDesign.html).

## Filesystem Snapshots {#filesystem-snapshots}

Some local filesystems provide snapshot functionality (for example, [ZFS](https://en.wikipedia.org/wiki/ZFS)), but they may not be the best choice for serving live queries. A possible solution is to create additional replicas with this kind of filesystem and exclude them from the [Distributed](../engines/table-engines/special/distributed.md) tables that are used for `SELECT` queries. Snapshots on such replicas will be out of reach of any queries that modify data. As a bonus, these replicas may have special hardware configurations with more disks attached per server, which would be cost-effective.

## clickhouse-copier {#clickhouse-copier}

[clickhouse-copier](utilities/clickhouse-copier.md) is a versatile tool that was initially created to re-shard petabyte-sized tables. It can also be used for backup and restore purposes because it reliably copies data between ClickHouse tables and clusters.
[clickhouse-copier](utilities/clickhouse-copier.md) is a versatile tool initially created to re-shard petabyte-sized tables. Because it can reliably copy data between ClickHouse tables and clusters, it can also be used for backing up and restoring data.

For smaller volumes of data, a simple `INSERT INTO ... SELECT ...` to a remote table may also work.

## Manipulations with Parts {#manipulations-with-parts}

ClickHouse allows using the `ALTER TABLE ... FREEZE PARTITION ...` query to create a local copy of table partitions. This is implemented using hard links into the `/var/lib/clickhouse/shadow/` folder, so it usually does not consume extra disk space for old data. The created file copies are not handled by the ClickHouse server, so you can just leave them there: you will have a simple backup that does not require any additional external system, but it will still be prone to hardware issues. For this reason, it is better to remotely copy them to another location and then remove the local copies. Distributed filesystems and object stores are still good options for this, but normal attached file servers with a large enough capacity may work as well (in this case the transfer will occur via the network filesystem, [rsync](https://en.wikipedia.org/wiki/Rsync)).
ClickHouse allows using the `ALTER TABLE ... FREEZE PARTITION ...` query to create a local copy of table partitions. This is implemented with hard links into the `/var/lib/clickhouse/shadow/` folder, so it usually does not consume extra disk space for old data. The created file copies are not handled by the ClickHouse server, so you can just leave them there: you will have a simple backup that does not require any additional external system, but it will still be prone to hardware issues. For this reason, it is better to remotely copy them to another location and then remove the local copies. Distributed filesystems and object stores are still good options for this, but normal attached file servers with a large enough capacity may work as well (in this case the transfer will occur via the network filesystem, [rsync](https://en.wikipedia.org/wiki/Rsync)).

Data can be restored from a backup using `ALTER TABLE ... ATTACH PARTITION ...`.

For more information about queries related to partition manipulations, see the [ALTER documentation](../sql-reference/statements/alter.md#alter_manipulations-with-partitions).
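For instance, a minimal freeze-and-restore round trip might look like this (the table name `my_table` and the partition value are hypothetical):

``` sql
-- hard-links the partition's parts under /var/lib/clickhouse/shadow/
ALTER TABLE my_table FREEZE PARTITION '2020-11';
-- re-attaches parts previously copied back into the table's detached/ directory
ALTER TABLE my_table ATTACH PARTITION '2020-11';
```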
A third-party tool is available to automate this approach: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup).
A third-party tool is available to automate this approach: [clickhouse-backup](https://github.com/AlexAkulov/clickhouse-backup).

[Original article](https://clickhouse.tech/docs/en/operations/backup/) <!--hide-->
@ -462,69 +462,6 @@ kurtSamp(expr)

SELECT kurtSamp(value) FROM series_with_value_column
```

## timeSeriesGroupSum(uid, timestamp, value) {#agg-function-timeseriesgroupsum}

`timeSeriesGroupSum` can aggregate different time series whose sample timestamps are not aligned.
It uses linear interpolation between two sample timestamps and then sums the time series together.

- `uid` is the unique id of the time series, `UInt64`.
- `timestamp` is of Int64 type in order to support milliseconds or microseconds.
- `value` is the metric.

The function returns an array of tuples with `(timestamp, aggregated_value)` pairs.

Before using this function make sure `timestamp` is in ascending order.

Example:

``` text
┌─uid─┬─timestamp─┬─value─┐
│   1 │         2 │   0.2 │
│   1 │         7 │   0.7 │
│   1 │        12 │   1.2 │
│   1 │        17 │   1.7 │
│   1 │        25 │   2.5 │
│   2 │         3 │   0.6 │
│   2 │         8 │   1.6 │
│   2 │        12 │   2.4 │
│   2 │        18 │   3.6 │
│   2 │        24 │   4.8 │
└─────┴───────────┴───────┘
```

``` sql
CREATE TABLE time_series(
    uid       UInt64,
    timestamp Int64,
    value     Float64
) ENGINE = Memory;
INSERT INTO time_series VALUES
    (1,2,0.2),(1,7,0.7),(1,12,1.2),(1,17,1.7),(1,25,2.5),
    (2,3,0.6),(2,8,1.6),(2,12,2.4),(2,18,3.6),(2,24,4.8);

SELECT timeSeriesGroupSum(uid, timestamp, value)
FROM (
    SELECT * FROM time_series ORDER BY timestamp ASC
);
```

And the result will be:

``` text
[(2,0.2),(3,0.9),(7,2.1),(8,2.4),(12,3.6),(17,5.1),(18,5.4),(24,7.2),(25,2.5)]
```

## timeSeriesGroupRateSum(uid, ts, val) {#agg-function-timeseriesgroupratesum}

Similarly to `timeSeriesGroupSum`, `timeSeriesGroupRateSum` calculates the rate of the time series and then sums the rates together.
Also, the timestamps should be in ascending order before this function is used.

Applying this function to the data from the `timeSeriesGroupSum` example yields the following result:

``` text
[(2,0),(3,0.1),(7,0.3),(8,0.3),(12,0.3),(17,0.3),(18,0.3),(24,0.3),(25,0.1)]
```

## avg(x) {#agg_function-avg}

Calculates the average.
@ -49,7 +49,6 @@ using Ports = std::vector<UInt16>;

namespace ErrorCodes
{
    extern const int CANNOT_BLOCK_SIGNAL;
    extern const int BAD_ARGUMENTS;
    extern const int EMPTY_DATA_PASSED;
}
@ -103,17 +102,7 @@ public:
        /// (example: when using stage = 'with_mergeable_state')
        registerAggregateFunctions();

        if (stage == "complete")
            query_processing_stage = QueryProcessingStage::Complete;
        else if (stage == "fetch_columns")
            query_processing_stage = QueryProcessingStage::FetchColumns;
        else if (stage == "with_mergeable_state")
            query_processing_stage = QueryProcessingStage::WithMergeableState;
        else if (stage == "with_mergeable_state_after_aggregation")
            query_processing_stage = QueryProcessingStage::WithMergeableStateAfterAggregation;
        else
            throw Exception("Unknown query processing stage: " + stage, ErrorCodes::BAD_ARGUMENTS);

        query_processing_stage = QueryProcessingStage::fromString(stage);
    }

    void initialize(Poco::Util::Application & self [[maybe_unused]]) override
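The refactoring above replaces per-caller if/else chains with a shared parser. A minimal sketch of what such a helper could look like (the actual implementation in ClickHouse may differ; this is only an illustration):

``` cpp
#include <stdexcept>
#include <string>

namespace QueryProcessingStage
{
    enum Enum
    {
        FetchColumns,
        WithMergeableState,
        Complete,
        WithMergeableStateAfterAggregation,
    };

    /// Parse a stage name as accepted by the --stage option; throw on unknown input.
    inline Enum fromString(const std::string & stage_string)
    {
        if (stage_string == "complete")
            return Complete;
        if (stage_string == "fetch_columns")
            return FetchColumns;
        if (stage_string == "with_mergeable_state")
            return WithMergeableState;
        if (stage_string == "with_mergeable_state_after_aggregation")
            return WithMergeableStateAfterAggregation;
        throw std::invalid_argument("Unknown query processing stage: " + stage_string);
    }
}
```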
@ -224,6 +224,7 @@ private:

    /// We will format query_id in interactive mode in various ways, the default is just to print Query id: ...
    std::vector<std::pair<String, String>> query_id_formats;
    QueryProcessingStage::Enum query_processing_stage;

    void initialize(Poco::Util::Application & self) override
    {
@ -1444,7 +1445,7 @@ private:
            connection_parameters.timeouts,
            query_to_send,
            context.getCurrentQueryId(),
            QueryProcessingStage::Complete,
            query_processing_stage,
            &context.getSettingsRef(),
            &context.getClientInfo(),
            true);
@ -1485,7 +1486,7 @@ private:
            connection_parameters.timeouts,
            query_to_send,
            context.getCurrentQueryId(),
            QueryProcessingStage::Complete,
            query_processing_stage,
            &context.getSettingsRef(),
            &context.getClientInfo(),
            true);
@ -2309,6 +2310,7 @@ public:
            ("password", po::value<std::string>()->implicit_value("\n", ""), "password")
            ("ask-password", "ask-password")
            ("quota_key", po::value<std::string>(), "A string to differentiate quotas when the user have keyed quotas configured on server")
            ("stage", po::value<std::string>()->default_value("complete"), "Request query processing up to specified stage: complete,fetch_columns,with_mergeable_state,with_mergeable_state_after_aggregation")
            ("query_id", po::value<std::string>(), "query_id")
            ("query,q", po::value<std::string>(), "query")
            ("database,d", po::value<std::string>(), "database")
@ -2433,6 +2435,8 @@ public:
        if (options.count("config-file") && options.count("config"))
            throw Exception("Two or more configuration files referenced in arguments", ErrorCodes::BAD_ARGUMENTS);

        query_processing_stage = QueryProcessingStage::fromString(options["stage"].as<std::string>());

        /// Save received data into the internal config.
        if (options.count("config-file"))
            config().setString("config-file", options["config-file"].as<std::string>());
@ -680,7 +680,7 @@ void updateSnapshot(Snapshot & snapshot, const Commit & commit, CommitDiff & fil
    for (auto & elem : file_changes)
    {
        auto & file = elem.second.file_change;
        if (file.path != file.old_path)
        if (!file.old_path.empty() && file.path != file.old_path)
            snapshot[file.path] = snapshot[file.old_path];
    }
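A plausible reading of this fix: for newly added files `old_path` is empty, so the unguarded lookup `snapshot[file.old_path]` would create and copy a spurious entry keyed by the empty string; the added `!file.old_path.empty()` check restricts the copy to genuine renames.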
@ -329,14 +329,20 @@ int mainEntryClickHouseInstall(int argc, char ** argv)

    bool has_password_for_default_user = false;

    if (!fs::exists(main_config_file))
    if (!fs::exists(config_d))
    {
        fmt::print("Creating config directory {} that is used for tweaks of main server configuration.\n", config_d.string());
        fs::create_directory(config_d);
    }

    if (!fs::exists(users_d))
    {
        fmt::print("Creating config directory {} that is used for tweaks of users configuration.\n", users_d.string());
        fs::create_directory(users_d);
    }

    if (!fs::exists(main_config_file))
    {
        std::string_view main_config_content = getResource("config.xml");
        if (main_config_content.empty())
        {
@ -349,7 +355,30 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
            out.sync();
            out.finalize();
        }
    }
    else
    {
        fmt::print("Config file {} already exists, will keep it and extract path info from it.\n", main_config_file.string());

        ConfigProcessor processor(main_config_file.string(), /* throw_on_bad_incl = */ false, /* log_to_console = */ false);
        ConfigurationPtr configuration(new Poco::Util::XMLConfiguration(processor.processConfig()));

        if (configuration->has("path"))
        {
            data_path = configuration->getString("path");
            fmt::print("{} has {} as data path.\n", main_config_file.string(), data_path);
        }

        if (configuration->has("logger.log"))
        {
            log_path = fs::path(configuration->getString("logger.log")).remove_filename();
            fmt::print("{} has {} as log path.\n", main_config_file.string(), log_path);
        }
    }


    if (!fs::exists(users_config_file))
    {
        std::string_view users_config_content = getResource("users.xml");
        if (users_config_content.empty())
        {
@ -365,38 +394,17 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
    }
    else
    {
    {
        fmt::print("Config file {} already exists, will keep it and extract path info from it.\n", main_config_file.string());

        ConfigProcessor processor(main_config_file.string(), /* throw_on_bad_incl = */ false, /* log_to_console = */ false);
        ConfigurationPtr configuration(new Poco::Util::XMLConfiguration(processor.processConfig()));

        if (configuration->has("path"))
        {
            data_path = configuration->getString("path");
            fmt::print("{} has {} as data path.\n", main_config_file.string(), data_path);
        }

        if (configuration->has("logger.log"))
        {
            log_path = fs::path(configuration->getString("logger.log")).remove_filename();
            fmt::print("{} has {} as log path.\n", main_config_file.string(), log_path);
        }
    }
        fmt::print("Users config file {} already exists, will keep it and extract users info from it.\n", users_config_file.string());

        /// Check if password for default user already specified.
        ConfigProcessor processor(users_config_file.string(), /* throw_on_bad_incl = */ false, /* log_to_console = */ false);
        ConfigurationPtr configuration(new Poco::Util::XMLConfiguration(processor.processConfig()));

    if (fs::exists(users_config_file))
        if (!configuration->getString("users.default.password", "").empty()
            || configuration->getString("users.default.password_sha256_hex", "").empty()
            || configuration->getString("users.default.password_double_sha1_hex", "").empty())
    {
        ConfigProcessor processor(users_config_file.string(), /* throw_on_bad_incl = */ false, /* log_to_console = */ false);
        ConfigurationPtr configuration(new Poco::Util::XMLConfiguration(processor.processConfig()));

        if (!configuration->getString("users.default.password", "").empty()
            || configuration->getString("users.default.password_sha256_hex", "").empty()
            || configuration->getString("users.default.password_double_sha1_hex", "").empty())
        {
            has_password_for_default_user = true;
        }
            has_password_for_default_user = true;
        }
    }
@ -56,11 +56,7 @@ static DataTypes convertLowCardinalityTypesToNested(const DataTypes & types)
}

AggregateFunctionPtr AggregateFunctionFactory::get(
    const String & name,
    const DataTypes & argument_types,
    const Array & parameters,
    AggregateFunctionProperties & out_properties,
    int recursion_level) const
    const String & name, const DataTypes & argument_types, const Array & parameters, AggregateFunctionProperties & out_properties) const
{
    auto type_without_low_cardinality = convertLowCardinalityTypesToNested(argument_types);
@ -81,11 +77,11 @@ AggregateFunctionPtr AggregateFunctionFactory::get(
            [](const auto & type) { return type->onlyNull(); });

        AggregateFunctionPtr nested_function = getImpl(
            name, nested_types, nested_parameters, out_properties, has_null_arguments, recursion_level);
            name, nested_types, nested_parameters, out_properties, has_null_arguments);
        return combinator->transformAggregateFunction(nested_function, out_properties, type_without_low_cardinality, parameters);
    }

    auto res = getImpl(name, type_without_low_cardinality, parameters, out_properties, false, recursion_level);
    auto res = getImpl(name, type_without_low_cardinality, parameters, out_properties, false);
    if (!res)
        throw Exception("Logical error: AggregateFunctionFactory returned nullptr", ErrorCodes::LOGICAL_ERROR);
    return res;
@ -97,8 +93,7 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(
    const DataTypes & argument_types,
    const Array & parameters,
    AggregateFunctionProperties & out_properties,
    bool has_null_arguments,
    int recursion_level) const
    bool has_null_arguments) const
{
    String name = getAliasToOrName(name_param);
    Value found;
@ -108,13 +103,9 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(
    {
        found = it->second;
    }
    /// Find by case-insensitive name.
    /// Combinators cannot apply for case insensitive (SQL-style) aggregate function names. Only for native names.
    else if (recursion_level == 0)
    {
        if (auto jt = case_insensitive_aggregate_functions.find(Poco::toLower(name)); jt != case_insensitive_aggregate_functions.end())
            found = jt->second;
    }

    if (auto jt = case_insensitive_aggregate_functions.find(Poco::toLower(name)); jt != case_insensitive_aggregate_functions.end())
        found = jt->second;

    if (found.creator)
    {
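Note that with `recursion_level` removed, the case-insensitive lookup is no longer restricted to the outermost call; it now also applies while combinator suffixes are peeled off recursively.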
@ -140,7 +131,7 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(
        DataTypes nested_types = combinator->transformArguments(argument_types);
        Array nested_parameters = combinator->transformParameters(parameters);

        AggregateFunctionPtr nested_function = get(nested_name, nested_types, nested_parameters, out_properties, recursion_level + 1);
        AggregateFunctionPtr nested_function = get(nested_name, nested_types, nested_parameters, out_properties);
        return combinator->transformAggregateFunction(nested_function, out_properties, argument_types, parameters);
    }
@ -162,7 +153,7 @@ AggregateFunctionPtr AggregateFunctionFactory::tryGet(
}


std::optional<AggregateFunctionProperties> AggregateFunctionFactory::tryGetPropertiesImpl(const String & name_param, int recursion_level) const
std::optional<AggregateFunctionProperties> AggregateFunctionFactory::tryGetPropertiesImpl(const String & name_param) const
{
    String name = getAliasToOrName(name_param);
    Value found;
@ -172,13 +163,9 @@ std::optional<AggregateFunctionProperties> AggregateFunctionFactory::tryGetPrope
    {
        found = it->second;
    }
    /// Find by case-insensitive name.
    /// Combinators cannot apply for case insensitive (SQL-style) aggregate function names. Only for native names.
    else if (recursion_level == 0)
    {
        if (auto jt = case_insensitive_aggregate_functions.find(Poco::toLower(name)); jt != case_insensitive_aggregate_functions.end())
            found = jt->second;
    }

    if (auto jt = case_insensitive_aggregate_functions.find(Poco::toLower(name)); jt != case_insensitive_aggregate_functions.end())
        found = jt->second;

    if (found.creator)
        return found.properties;
@ -195,7 +182,7 @@ std::optional<AggregateFunctionProperties> AggregateFunctionFactory::tryGetPrope
        String nested_name = name.substr(0, name.size() - combinator->getName().size());

        /// NOTE: It's reasonable to also allow to transform properties by combinator.
        return tryGetPropertiesImpl(nested_name, recursion_level + 1);
        return tryGetPropertiesImpl(nested_name);
    }

    return {};
@ -204,21 +191,21 @@ std::optional<AggregateFunctionProperties> AggregateFunctionFactory::tryGetPrope

std::optional<AggregateFunctionProperties> AggregateFunctionFactory::tryGetProperties(const String & name) const
{
    return tryGetPropertiesImpl(name, 0);
    return tryGetPropertiesImpl(name);
}


bool AggregateFunctionFactory::isAggregateFunctionName(const String & name, int recursion_level) const
bool AggregateFunctionFactory::isAggregateFunctionName(const String & name) const
{
    if (aggregate_functions.count(name) || isAlias(name))
        return true;

    String name_lowercase = Poco::toLower(name);
    if (recursion_level == 0 && (case_insensitive_aggregate_functions.count(name_lowercase) || isAlias(name_lowercase)))
    if (case_insensitive_aggregate_functions.count(name_lowercase) || isAlias(name_lowercase))
        return true;

    if (AggregateFunctionCombinatorPtr combinator = AggregateFunctionCombinatorFactory::instance().tryFindSuffix(name))
        return isAggregateFunctionName(name.substr(0, name.size() - combinator->getName().size()), recursion_level + 1);
        return isAggregateFunctionName(name.substr(0, name.size() - combinator->getName().size()));

    return false;
}
@ -59,12 +59,11 @@ public:
        CaseSensitiveness case_sensitiveness = CaseSensitive);

    /// Throws an exception if not found.
    AggregateFunctionPtr get(
        const String & name,
    AggregateFunctionPtr
    get(const String & name,
        const DataTypes & argument_types,
        const Array & parameters,
        AggregateFunctionProperties & out_properties,
        int recursion_level = 0) const;
        AggregateFunctionProperties & out_properties) const;

    /// Returns nullptr if not found.
    AggregateFunctionPtr tryGet(

@ -76,7 +75,7 @@ public:
    /// Get properties if the aggregate function exists.
    std::optional<AggregateFunctionProperties> tryGetProperties(const String & name) const;

    bool isAggregateFunctionName(const String & name, int recursion_level = 0) const;
    bool isAggregateFunctionName(const String & name) const;

private:
    AggregateFunctionPtr getImpl(

@ -84,10 +83,9 @@ private:
        const DataTypes & argument_types,
        const Array & parameters,
        AggregateFunctionProperties & out_properties,
        bool has_null_arguments,
        int recursion_level) const;
        bool has_null_arguments) const;

    std::optional<AggregateFunctionProperties> tryGetPropertiesImpl(const String & name, int recursion_level) const;
    std::optional<AggregateFunctionProperties> tryGetPropertiesImpl(const String & name) const;

private:
    using AggregateFunctions = std::unordered_map<String, Value>;
@ -187,7 +187,10 @@ struct AggregateFunctionTimeSeriesGroupSumData
    {
        size_t size = result.size();
        writeVarUInt(size, buf);
        buf.write(reinterpret_cast<const char *>(result.data()), sizeof(result[0]));
        if (size > 0)
        {
            buf.write(reinterpret_cast<const char *>(result.data()), size * sizeof(result[0]));
        }
    }

    void deserialize(ReadBuffer & buf)

@ -195,7 +198,10 @@ struct AggregateFunctionTimeSeriesGroupSumData
        size_t size = 0;
        readVarUInt(size, buf);
        result.resize(size);
        buf.read(reinterpret_cast<char *>(result.data()), size * sizeof(result[0]));
        if (size > 0)
        {
            buf.read(reinterpret_cast<char *>(result.data()), size * sizeof(result[0]));
        }
    }
};
template <bool rate>
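Two things change here: the original `serialize` passed `sizeof(result[0])`, the size of a single element, as the byte count, so at most one element was written no matter how large `size` was; and both paths now skip the `write`/`read` call entirely for an empty array, so they never touch the data pointer of an empty buffer.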
@ -1,19 +1,17 @@
#pragma once

#include <algorithm>
#include <common/types.h>
#include <IO/ReadBuffer.h>
#include <IO/VarInt.h>
#include <IO/WriteBuffer.h>
#include <Common/NaNUtils.h>
#include <Common/PODArray.h>
#include <common/sort.h>
#include <common/types.h>

#if !defined(ARCADIA_BUILD)
#include <miniselect/floyd_rivest_select.h> // Y_IGNORE
#endif

namespace DB
{

namespace ErrorCodes
{
    extern const int NOT_IMPLEMENTED;
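These hunks replace the repeated `#if !defined(ARCADIA_BUILD)` blocks with a single `nth_element` helper pulled in from `common/sort.h`. A plausible sketch of such a wrapper (the actual header may differ; shown only for illustration):

``` cpp
#pragma once

#include <algorithm>

#if !defined(ARCADIA_BUILD)
#    include <miniselect/floyd_rivest_select.h>
#endif

/// Dispatch to the faster Floyd-Rivest selection when miniselect is available,
/// falling back to the standard library otherwise.
template <class RandomIt>
void nth_element(RandomIt first, RandomIt nth, RandomIt last)
{
#if !defined(ARCADIA_BUILD)
    miniselect::floyd_rivest_select(first, nth, last);
#else
    std::nth_element(first, nth, last);
#endif
}
```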
@ -89,12 +87,7 @@ struct QuantileExact : QuantileExactBase<Value, QuantileExact<Value>>
        if (!array.empty())
        {
            size_t n = level < 1 ? level * array.size() : (array.size() - 1);

#if !defined(ARCADIA_BUILD)
            miniselect::floyd_rivest_select(array.begin(), array.begin() + n, array.end()); /// NOTE You can think of the radix-select algorithm.
#else
            std::nth_element(array.begin(), array.begin() + n, array.end()); /// NOTE You can think of the radix-select algorithm.
#endif
            nth_element(array.begin(), array.begin() + n, array.end()); /// NOTE: You can think of the radix-select algorithm.
            return array[n];
        }
@ -113,12 +106,7 @@ struct QuantileExact : QuantileExactBase<Value, QuantileExact<Value>>
            auto level = levels[indices[i]];

            size_t n = level < 1 ? level * array.size() : (array.size() - 1);

#if !defined(ARCADIA_BUILD)
            miniselect::floyd_rivest_select(array.begin() + prev_n, array.begin() + n, array.end());
#else
            std::nth_element(array.begin() + prev_n, array.begin() + n, array.end());
#endif
            nth_element(array.begin() + prev_n, array.begin() + n, array.end());
            result[indices[i]] = array[n];
            prev_n = n;
        }
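In the hunks below the local iterator is also renamed from `nth_element` to `nth_elem`: keeping the old name would shadow the newly introduced `nth_element` helper and break the unqualified call to it.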
@ -154,14 +142,10 @@ struct QuantileExactExclusive : public QuantileExact<Value>
            else if (n < 1)
                return static_cast<Float64>(array[0]);

#if !defined(ARCADIA_BUILD)
            miniselect::floyd_rivest_select(array.begin(), array.begin() + n - 1, array.end());
#else
            std::nth_element(array.begin(), array.begin() + n - 1, array.end());
#endif
            auto nth_element = std::min_element(array.begin() + n, array.end());
            nth_element(array.begin(), array.begin() + n - 1, array.end());
            auto nth_elem = std::min_element(array.begin() + n, array.end());

            return static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_element - array[n - 1]);
            return static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_elem - array[n - 1]);
        }

        return std::numeric_limits<Float64>::quiet_NaN();
|
||||
result[indices[i]] = static_cast<Float64>(array[0]);
|
||||
else
|
||||
{
|
||||
#if !defined(ARCADIA_BUILD)
|
||||
miniselect::floyd_rivest_select(array.begin() + prev_n, array.begin() + n - 1, array.end());
|
||||
#else
|
||||
std::nth_element(array.begin() + prev_n, array.begin() + n - 1, array.end());
|
||||
#endif
|
||||
auto nth_element = std::min_element(array.begin() + n, array.end());
|
||||
nth_element(array.begin() + prev_n, array.begin() + n - 1, array.end());
|
||||
auto nth_elem = std::min_element(array.begin() + n, array.end());
|
||||
|
||||
result[indices[i]] = static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_element - array[n - 1]);
|
||||
result[indices[i]] = static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_elem - array[n - 1]);
|
||||
prev_n = n - 1;
|
||||
}
|
||||
}
|
||||
@ -226,14 +206,10 @@ struct QuantileExactInclusive : public QuantileExact<Value>
                return static_cast<Float64>(array[array.size() - 1]);
            else if (n < 1)
                return static_cast<Float64>(array[0]);
#if !defined(ARCADIA_BUILD)
            miniselect::floyd_rivest_select(array.begin(), array.begin() + n - 1, array.end());
#else
            std::nth_element(array.begin(), array.begin() + n - 1, array.end());
#endif
            auto nth_element = std::min_element(array.begin() + n, array.end());
            nth_element(array.begin(), array.begin() + n - 1, array.end());
            auto nth_elem = std::min_element(array.begin() + n, array.end());

            return static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_element - array[n - 1]);
            return static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_elem - array[n - 1]);
        }

        return std::numeric_limits<Float64>::quiet_NaN();
@ -257,14 +233,10 @@ struct QuantileExactInclusive : public QuantileExact<Value>
                result[indices[i]] = static_cast<Float64>(array[0]);
            else
            {
#if !defined(ARCADIA_BUILD)
                miniselect::floyd_rivest_select(array.begin() + prev_n, array.begin() + n - 1, array.end());
#else
                std::nth_element(array.begin() + prev_n, array.begin() + n - 1, array.end());
#endif
                auto nth_element = std::min_element(array.begin() + n, array.end());
                nth_element(array.begin() + prev_n, array.begin() + n - 1, array.end());
                auto nth_elem = std::min_element(array.begin() + n, array.end());

                result[indices[i]] = static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_element - array[n - 1]);
                result[indices[i]] = static_cast<Float64>(array[n - 1]) + (h - n) * static_cast<Float64>(*nth_elem - array[n - 1]);
                prev_n = n - 1;
            }
        }
@ -1,15 +1,13 @@
#pragma once

#include <IO/ReadBuffer.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBuffer.h>
#include <IO/WriteHelpers.h>
#include <Common/HashTable/Hash.h>
#include <Common/PODArray.h>
#include <IO/ReadBuffer.h>
#include <IO/WriteBuffer.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <common/sort.h>

#if !defined(ARCADIA_BUILD)
#include <miniselect/floyd_rivest_select.h> // Y_IGNORE
#endif

namespace DB
{
@ -140,7 +138,7 @@ namespace detail
        using Array = PODArray<UInt16, 128>;
        mutable Array elems; /// mutable because array sorting is not considered a state change.

        QuantileTimingMedium() {}
        QuantileTimingMedium() = default;
        QuantileTimingMedium(const UInt16 * begin, const UInt16 * end) : elems(begin, end) {}

        void insert(UInt64 x)
@ -182,11 +180,7 @@ namespace detail

            /// Sorting an array will not be considered a violation of constancy.
            auto & array = elems;
#if !defined(ARCADIA_BUILD)
            miniselect::floyd_rivest_select(array.begin(), array.begin() + n, array.end());
#else
            std::nth_element(array.begin(), array.begin() + n, array.end());
#endif
            nth_element(array.begin(), array.begin() + n, array.end());
            quantile = array[n];
        }
@ -207,11 +201,7 @@ namespace detail
                    ? level * elems.size()
                    : (elems.size() - 1);

#if !defined(ARCADIA_BUILD)
                miniselect::floyd_rivest_select(array.begin() + prev_n, array.begin() + n, array.end());
#else
                std::nth_element(array.begin() + prev_n, array.begin() + n, array.end());
#endif
                nth_element(array.begin() + prev_n, array.begin() + n, array.end());

                result[level_index] = array[n];
                prev_n = n;
@ -282,7 +272,7 @@ namespace detail
|
||||
}
|
||||
|
||||
public:
|
||||
Iterator(const QuantileTimingLarge & parent)
|
||||
explicit Iterator(const QuantileTimingLarge & parent)
|
||||
: begin(parent.count_small), pos(begin), end(&parent.count_big[BIG_SIZE])
|
||||
{
|
||||
adjust();
|
||||
@ -429,8 +419,8 @@ namespace detail
|
||||
template <typename ResultType>
|
||||
void getMany(const double * levels, const size_t * indices, size_t size, ResultType * result) const
|
||||
{
|
||||
const auto indices_end = indices + size;
|
||||
auto index = indices;
|
||||
const auto * indices_end = indices + size;
|
||||
const auto * index = indices;
|
||||
|
||||
UInt64 pos = std::ceil(count * levels[*index]);
|
||||
|
||||
|
@@ -78,6 +78,10 @@ if (USE_AMQPCPP)
add_headers_and_sources(dbms Storages/RabbitMQ)
endif()

if (USE_ROCKSDB)
add_headers_and_sources(dbms Storages/RocksDB)
endif()

if (USE_AWS_S3)
add_headers_and_sources(dbms Common/S3)
add_headers_and_sources(dbms Disks/S3)
@@ -294,6 +298,7 @@ if (USE_KRB5)
dbms_target_link_libraries(PRIVATE ${KRB5_LIBRARY})
endif()


if(RE2_INCLUDE_DIR)
target_include_directories(clickhouse_common_io SYSTEM BEFORE PUBLIC ${RE2_INCLUDE_DIR})
endif()
@@ -330,6 +335,13 @@ if (ZSTD_LIBRARY)
endif ()
endif()

set (LZMA_LIBRARY liblzma)
set (LZMA_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/xz/src/liblzma/api)
if (LZMA_LIBRARY)
target_link_libraries (clickhouse_common_io PUBLIC ${LZMA_LIBRARY})
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${LZMA_INCLUDE_DIR})
endif()

if (USE_ICU)
dbms_target_link_libraries (PRIVATE ${ICU_LIBRARIES})
dbms_target_include_directories (SYSTEM PRIVATE ${ICU_INCLUDE_DIRS})
@@ -402,6 +414,11 @@ if (USE_ORC)
dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${ORC_INCLUDE_DIR} ${CMAKE_BINARY_DIR}/contrib/orc/c++/include)
endif ()

if (USE_ROCKSDB)
dbms_target_link_libraries(PUBLIC ${ROCKSDB_LIBRARY})
dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${ROCKSDB_INCLUDE_DIR})
endif()

if (ENABLE_TESTS AND USE_GTEST)
macro (grep_gtest_sources BASE_DIR DST_VAR)
# Cold match files that are not in tests/ directories

@@ -73,6 +73,11 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
{
#if USE_SSL
socket = std::make_unique<Poco::Net::SecureStreamSocket>();

/// we resolve the ip when we open SecureStreamSocket, so to make Server Name Indication (SNI)
/// work we need to pass host name separately. It will be send into TLS Hello packet to let
/// the server know which host we want to talk with (single IP can process requests for multiple hosts using SNI).
static_cast<Poco::Net::SecureStreamSocket*>(socket.get())->setPeerHostName(host);
#else
throw Exception{"tcp_secure protocol is disabled because poco library was built without NetSSL support.", ErrorCodes::SUPPORT_IS_DISABLED};
#endif

@@ -9,6 +9,7 @@
#include <Columns/ColumnsCommon.h>

#include <common/unaligned.h>
#include <common/sort.h>

#include <DataStreams/ColumnGathererStream.h>

@@ -20,10 +21,6 @@
#include <Common/WeakHash.h>
#include <Common/HashTable/Hash.h>

#if !defined(ARCADIA_BUILD)
#include <miniselect/floyd_rivest_select.h> // Y_IGNORE
#endif


namespace DB
{
@@ -786,11 +783,7 @@ void ColumnArray::getPermutationImpl(size_t limit, Permutation & res, Comparator
auto less = [&cmp](size_t lhs, size_t rhs){ return cmp(lhs, rhs) < 0; };

if (limit)
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), less);
#else
std::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
#endif
partial_sort(res.begin(), res.begin() + limit, res.end(), less);
else
std::sort(res.begin(), res.end(), less);
}
@@ -842,11 +835,7 @@ void ColumnArray::updatePermutationImpl(size_t limit, Permutation & res, EqualRa
return;

/// Since then we are working inside the interval.
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
#else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
#endif
partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
auto new_first = first;
for (auto j = first + 1; j < limit; ++j)
{

@@ -7,10 +7,8 @@
#include <Core/BigInt.h>

#include <common/unaligned.h>
#include <common/sort.h>
#include <ext/scope_guard.h>
#if !defined(ARCADIA_BUILD)
#include <miniselect/floyd_rivest_select.h> // Y_IGNORE
#endif


#include <IO/WriteHelpers.h>
@@ -197,21 +195,11 @@ void ColumnDecimal<T>::updatePermutation(bool reverse, size_t limit, int, IColum
/// Since then we are working inside the interval.

if (reverse)
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
[this](size_t a, size_t b) { return data[a] > data[b]; });
#else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
[this](size_t a, size_t b) { return data[a] > data[b]; });
#endif
else
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
[this](size_t a, size_t b) { return data[a] < data[b]; });
#else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last,
[this](size_t a, size_t b) { return data[a] > data[b]; });
#endif
auto new_first = first;
for (auto j = first + 1; j < limit; ++j)
{

@@ -1,15 +1,13 @@
#pragma once

#include <cmath>

#include <Common/typeid_cast.h>
#include <Columns/ColumnVectorHelper.h>
#include <Columns/IColumn.h>
#include <Columns/IColumnImpl.h>
#include <Columns/ColumnVectorHelper.h>
#include <Core/Field.h>
#if !defined(ARCADIA_BUILD)
#include <miniselect/floyd_rivest_select.h> // Y_IGNORE
#endif
#include <Common/typeid_cast.h>
#include <common/sort.h>

#include <cmath>


namespace DB
@@ -256,17 +254,9 @@ protected:
sort_end = res.begin() + limit;

if (reverse)
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] > data[b]; });
#else
std::partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] > data[b]; });
#endif
partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] > data[b]; });
else
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] < data[b]; });
#else
std::partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] < data[b]; });
#endif
partial_sort(res.begin(), sort_end, res.end(), [this](size_t a, size_t b) { return data[a] < data[b]; });
}
};


@@ -1,25 +1,20 @@
#include <Columns/ColumnFixedString.h>

#include <Columns/ColumnsCommon.h>

#include <Common/Arena.h>
#include <Common/SipHash.h>
#include <Common/memcpySmall.h>
#include <Common/memcmpSmall.h>
#include <Common/assert_cast.h>
#include <Common/WeakHash.h>
#include <Common/HashTable/Hash.h>

#include <ext/scope_guard.h>
#if !defined(ARCADIA_BUILD)
#include <miniselect/floyd_rivest_select.h> // Y_IGNORE
#endif

#include <DataStreams/ColumnGathererStream.h>

#include <IO/WriteHelpers.h>
#include <Common/Arena.h>
#include <Common/HashTable/Hash.h>
#include <Common/SipHash.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include <Common/memcmpSmall.h>
#include <Common/memcpySmall.h>
#include <common/sort.h>
#include <ext/scope_guard.h>

#ifdef __SSE2__
#include <emmintrin.h>
#if defined(__SSE2__)
# include <emmintrin.h>
#endif


@@ -160,17 +155,9 @@ void ColumnFixedString::getPermutation(bool reverse, size_t limit, int /*nan_dir
if (limit)
{
if (reverse)
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), less<false>(*this));
#else
std::partial_sort(res.begin(), res.begin() + limit, res.end(), less<false>(*this));
#endif
partial_sort(res.begin(), res.begin() + limit, res.end(), less<false>(*this));
else
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), less<true>(*this));
#else
std::partial_sort(res.begin(), res.begin() + limit, res.end(), less<true>(*this));
#endif
partial_sort(res.begin(), res.begin() + limit, res.end(), less<true>(*this));
}
else
{
@@ -228,17 +215,9 @@ void ColumnFixedString::updatePermutation(bool reverse, size_t limit, int, Permu
/// Since then we are working inside the interval.

if (reverse)
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<false>(*this));
#else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<false>(*this));
#endif
partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<false>(*this));
else
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<true>(*this));
#else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<true>(*this));
#endif
partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less<true>(*this));

auto new_first = first;
for (auto j = first + 1; j < limit; ++j)

@@ -1,20 +1,19 @@
#include <Columns/ColumnLowCardinality.h>
#include <Columns/ColumnsNumber.h>

#include <Columns/ColumnString.h>
#include <Columns/ColumnsNumber.h>
#include <DataStreams/ColumnGathererStream.h>
#include <DataTypes/NumberTraits.h>
#include <Common/HashTable/HashMap.h>
#include <Common/assert_cast.h>
#include <Common/WeakHash.h>

#include <Common/assert_cast.h>
#include <common/sort.h>
#include <ext/scope_guard.h>

#if !defined(ARCADIA_BUILD)
#include <miniselect/floyd_rivest_select.h> // Y_IGNORE
#endif

namespace DB
{

namespace ErrorCodes
{
extern const int ILLEGAL_COLUMN;
@@ -397,11 +396,7 @@ void ColumnLowCardinality::updatePermutationImpl(size_t limit, Permutation & res

/// Since then we are working inside the interval.

#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
#else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
#endif
partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
auto new_first = first;

for (auto j = first + 1; j < limit; ++j)

@@ -1,18 +1,16 @@
#include <Common/Arena.h>
#include <Common/memcmpSmall.h>
#include <Common/assert_cast.h>
#include <Common/WeakHash.h>
#include <Common/HashTable/Hash.h>
#include <Columns/Collator.h>
#include <Columns/ColumnString.h>

#include <Columns/Collator.h>
#include <Columns/ColumnsCommon.h>
#include <DataStreams/ColumnGathererStream.h>

#include <Common/Arena.h>
#include <Common/HashTable/Hash.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include <Common/memcmpSmall.h>
#include <common/sort.h>
#include <common/unaligned.h>
#include <ext/scope_guard.h>
#if !defined(ARCADIA_BUILD)
#include <miniselect/floyd_rivest_select.h> // Y_IGNORE
#endif


namespace DB
@@ -317,11 +315,7 @@ void ColumnString::getPermutationImpl(size_t limit, Permutation & res, Comparato
auto less = [&cmp](size_t lhs, size_t rhs){ return cmp(lhs, rhs) < 0; };

if (limit)
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), less);
#else
std::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
#endif
partial_sort(res.begin(), res.begin() + limit, res.end(), less);
else
std::sort(res.begin(), res.end(), less);
}
@@ -372,11 +366,7 @@ void ColumnString::updatePermutationImpl(size_t limit, Permutation & res, EqualR
return;

/// Since then we are working inside the interval.
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
#else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);
#endif
partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less);

size_t new_first = first;
for (size_t j = first + 1; j < limit; ++j)

@@ -1,17 +1,16 @@
#include <Columns/ColumnTuple.h>

#include <Columns/IColumnImpl.h>
#include <Core/Field.h>
#include <DataStreams/ColumnGathererStream.h>
#include <IO/WriteBufferFromString.h>
#include <IO/Operators.h>
#include <IO/WriteBufferFromString.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include <Common/typeid_cast.h>
#include <common/sort.h>
#include <ext/map.h>
#include <ext/range.h>
#include <Common/typeid_cast.h>
#include <Common/assert_cast.h>
#include <Common/WeakHash.h>
#include <Core/Field.h>
#if !defined(ARCADIA_BUILD)
#include <miniselect/floyd_rivest_select.h> // Y_IGNORE
#endif


namespace DB
@@ -354,17 +353,9 @@ void ColumnTuple::getPermutationImpl(size_t limit, Permutation & res, LessOperat
limit = 0;

if (limit)
{
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), less);
#else
std::partial_sort(res.begin(), res.begin() + limit, res.end(), less);
#endif
}
partial_sort(res.begin(), res.begin() + limit, res.end(), less);
else
{
std::sort(res.begin(), res.end(), less);
}
}

void ColumnTuple::updatePermutationImpl(bool reverse, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges, const Collator * collator) const

@@ -1,28 +1,27 @@
#include "ColumnVector.h"

#include <cstring>
#include <cmath>
#include <common/unaligned.h>
#include <Common/Exception.h>
#include <Common/Arena.h>
#include <Common/SipHash.h>
#include <Common/NaNUtils.h>
#include <Common/RadixSort.h>
#include <Common/assert_cast.h>
#include <Common/WeakHash.h>
#include <Common/HashTable/Hash.h>
#include <IO/WriteHelpers.h>
#include <pdqsort.h>
#include <Columns/ColumnsCommon.h>
#include <DataStreams/ColumnGathererStream.h>
#include <IO/WriteHelpers.h>
#include <Common/Arena.h>
#include <Common/Exception.h>
#include <Common/HashTable/Hash.h>
#include <Common/NaNUtils.h>
#include <Common/RadixSort.h>
#include <Common/SipHash.h>
#include <Common/WeakHash.h>
#include <Common/assert_cast.h>
#include <common/sort.h>
#include <common/unaligned.h>
#include <ext/bit_cast.h>
#include <ext/scope_guard.h>
#include <pdqsort.h>
#if !defined(ARCADIA_BUILD)
#include <miniselect/floyd_rivest_select.h> // Y_IGNORE
#endif

#ifdef __SSE2__
#include <emmintrin.h>
#include <cmath>
#include <cstring>

#if defined(__SSE2__)
# include <emmintrin.h>
#endif

namespace DB
@@ -158,17 +157,9 @@ void ColumnVector<T>::getPermutation(bool reverse, size_t limit, int nan_directi
res[i] = i;

if (reverse)
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), greater(*this, nan_direction_hint));
#else
std::partial_sort(res.begin(), res.begin() + limit, res.end(), greater(*this, nan_direction_hint));
#endif
partial_sort(res.begin(), res.begin() + limit, res.end(), greater(*this, nan_direction_hint));
else
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin(), res.begin() + limit, res.end(), less(*this, nan_direction_hint));
#else
std::partial_sort(res.begin(), res.begin() + limit, res.end(), less(*this, nan_direction_hint));
#endif
partial_sort(res.begin(), res.begin() + limit, res.end(), less(*this, nan_direction_hint));
}
else
{
@@ -264,17 +255,9 @@ void ColumnVector<T>::updatePermutation(bool reverse, size_t limit, int nan_dire
/// Since then, we are working inside the interval.

if (reverse)
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, greater(*this, nan_direction_hint));
#else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, greater(*this, nan_direction_hint));
#endif
partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, greater(*this, nan_direction_hint));
else
#if !defined(ARCADIA_BUILD)
miniselect::floyd_rivest_partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less(*this, nan_direction_hint));
#else
std::partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less(*this, nan_direction_hint));
#endif
partial_sort(res.begin() + first, res.begin() + limit, res.begin() + last, less(*this, nan_direction_hint));

size_t new_first = first;
for (size_t j = first + 1; j < limit; ++j)

@@ -1,6 +1,7 @@
#pragma once

#include <Columns/IColumn.h>
#include <Common/PODArray.h>


namespace DB

@@ -14,6 +14,8 @@
* In debug build, use small mmap threshold to reproduce more memory
* stomping bugs. Along with ASLR it will hopefully detect more issues than
* ASan. The program may fail due to the limit on number of memory mappings.
*
* Not too small to avoid too quick exhaust of memory mappings.
*/
__attribute__((__weak__)) extern const size_t MMAP_THRESHOLD = 4096;
__attribute__((__weak__)) extern const size_t MMAP_THRESHOLD = 16384;
#endif

@@ -9,7 +9,8 @@
M(ReplicatedFetch, "Number of data parts being fetched from replica") \
M(ReplicatedSend, "Number of data parts being sent to replicas") \
M(ReplicatedChecks, "Number of data parts checking for consistency") \
M(BackgroundPoolTask, "Number of active tasks in BackgroundProcessingPool (merges, mutations, fetches, or replication queue bookkeeping)") \
M(BackgroundPoolTask, "Number of active tasks in BackgroundProcessingPool (merges, mutations, or replication queue bookkeeping)") \
M(BackgroundFetchesPoolTask, "Number of active tasks in BackgroundFetchesPool") \
M(BackgroundMovePoolTask, "Number of active tasks in BackgroundProcessingPool for moves") \
M(BackgroundSchedulePoolTask, "Number of active tasks in BackgroundSchedulePool. This pool is used for periodic ReplicatedMergeTree tasks, like cleaning old data parts, altering data parts, replica re-initialization, etc.") \
M(BackgroundBufferFlushSchedulePoolTask, "Number of active tasks in BackgroundBufferFlushSchedulePool. This pool is used for periodic Buffer flushes") \

@@ -519,34 +519,35 @@
M(550, CONDITIONAL_TREE_PARENT_NOT_FOUND) \
M(551, ILLEGAL_PROJECTION_MANIPULATOR) \
M(552, UNRECOGNIZED_ARGUMENTS) \
\
M(553, ROCKSDB_ERROR) \
M(553, LZMA_STREAM_ENCODER_FAILED) \
M(554, LZMA_STREAM_DECODER_FAILED) \
M(999, KEEPER_EXCEPTION) \
M(1000, POCO_EXCEPTION) \
M(1001, STD_EXCEPTION) \
M(1002, UNKNOWN_EXCEPTION) \
M(1002, UNKNOWN_EXCEPTION)

/* See END */

namespace DB
{

namespace ErrorCodes
{
#define M(VALUE, NAME) extern const Value NAME = VALUE;
APPLY_FOR_ERROR_CODES(M)
#undef M
#define M(VALUE, NAME) extern const Value NAME = VALUE;
APPLY_FOR_ERROR_CODES(M)
#undef M

constexpr Value END = 3000;
std::atomic<Value> values[END + 1] {};
std::atomic<Value> values[END + 1]{};

struct ErrorCodesNames
{
std::string_view names[END + 1];
ErrorCodesNames()
{
#define M(VALUE, NAME) names[VALUE] = std::string_view(#NAME);
APPLY_FOR_ERROR_CODES(M)
#undef M
#define M(VALUE, NAME) names[VALUE] = std::string_view(#NAME);
APPLY_FOR_ERROR_CODES(M)
#undef M
}
} error_codes_names;

@@ -557,7 +558,7 @@ namespace ErrorCodes
return error_codes_names.names[error_code];
}

ErrorCode end() { return END+1; }
ErrorCode end() { return END + 1; }
}

}

@@ -11,6 +11,7 @@
#include <Common/Exception.h>
#include <Common/PipeFDs.h>
#include <Common/StackTrace.h>
#include <Common/setThreadName.h>
#include <common/logger_useful.h>


@@ -115,6 +116,8 @@ void TraceCollector::stop()

void TraceCollector::run()
{
setThreadName("TraceCollector");

ReadBufferFromFileDescriptor in(pipe.fds_rw[0]);

while (true)

@@ -40,7 +40,7 @@ Block::Block(const ColumnsWithTypeAndName & data_) : data{data_}
void Block::initializeIndexByName()
{
for (size_t i = 0, size = data.size(); i < size; ++i)
index_by_name[data[i].name] = i;
index_by_name.emplace(data[i].name, i);
}


@@ -295,6 +295,20 @@ std::string Block::dumpStructure() const
return out.str();
}

std::string Block::dumpIndex() const
{
WriteBufferFromOwnString out;
bool first = true;
for (const auto & [name, pos] : index_by_name)
{
if (!first)
out << ", ";
first = false;

out << name << ' ' << pos;
}
return out.str();
}

Block Block::cloneEmpty() const
{

@@ -119,6 +119,9 @@ public:
/** List of names, types and lengths of columns. Designed for debugging. */
std::string dumpStructure() const;

/** List of column names and positions from index */
std::string dumpIndex() const;

/** Get the same block, but empty. */
Block cloneEmpty() const;

@@ -156,7 +159,7 @@ private:
/// This is needed to allow function execution over data.
/// It is safe because functions does not change column names, so index is unaffected.
/// It is temporary.
friend struct ExpressionAction;
friend class ExpressionActions;
friend class ActionsDAG;
};


@@ -70,7 +70,7 @@
/// Minimum revision supporting OpenTelemetry
#define DBMS_MIN_REVISION_WITH_OPENTELEMETRY 54442

/// Mininum revision supporting interserver secret.
/// Minimum revision supporting interserver secret.
#define DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET 54441

/// Version of ClickHouse TCP protocol. Increment it manually when you change the protocol.

35
src/Core/QueryProcessingStage.cpp
Normal file
@@ -0,0 +1,35 @@
#include <Core/QueryProcessingStage.h>
#include <Common/Exception.h>

namespace DB
{

namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}

namespace QueryProcessingStage
{

Enum fromString(const std::string & stage_string)
{
Enum stage;

if (stage_string == "complete")
stage = Complete;
else if (stage_string == "fetch_columns")
stage = FetchColumns;
else if (stage_string == "with_mergeable_state")
stage = WithMergeableState;
else if (stage_string == "with_mergeable_state_after_aggregation")
stage = WithMergeableStateAfterAggregation;
else
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown query processing stage: {}", stage_string);

return stage;
}

}

}

@@ -43,6 +43,14 @@ namespace QueryProcessingStage
? data[stage]
: "Unknown stage";
}

/// This method is used for the program options,
/// hence it accepts under_score notation for stage:
/// - complete
/// - fetch_columns
/// - with_mergeable_state
/// - with_mergeable_state_after_aggregation
Enum fromString(const std::string & stage_string);
}

}

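Hedged usage sketch (not part of the commit): a round trip through the new fromString() and the pre-existing toString() mapping whose body appears in the hunk above.

#include <Core/QueryProcessingStage.h>
#include <iostream>

int main()
{
    /// Accepts the under_score spellings listed in the comment above;
    /// anything else throws BAD_ARGUMENTS.
    auto stage = DB::QueryProcessingStage::fromString("with_mergeable_state");
    std::cout << DB::QueryProcessingStage::toString(stage) << '\n'; /// prints "WithMergeableState"
}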
@@ -71,6 +71,7 @@ class IColumn;
M(UInt64, background_buffer_flush_schedule_pool_size, 16, "Number of threads performing background flush for tables with Buffer engine. Only has meaning at server startup.", 0) \
M(UInt64, background_pool_size, 16, "Number of threads performing background work for tables (for example, merging in merge tree). Only has meaning at server startup.", 0) \
M(UInt64, background_move_pool_size, 8, "Number of threads performing background moves for tables. Only has meaning at server startup.", 0) \
M(UInt64, background_fetches_pool_size, 3, "Number of threads performing background fetches for replicated tables. Only has meaning at server startup.", 0) \
M(UInt64, background_schedule_pool_size, 16, "Number of threads performing background tasks for replicated tables, dns cache updates. Only has meaning at server startup.", 0) \
M(UInt64, background_message_broker_schedule_pool_size, 16, "Number of threads performing background tasks for message streaming. Only has meaning at server startup.", 0) \
M(UInt64, background_distributed_schedule_pool_size, 16, "Number of threads performing background tasks for distributed sends. Only has meaning at server startup.", 0) \

@@ -11,3 +11,5 @@
#cmakedefine01 USE_SSL
#cmakedefine01 USE_OPENCL
#cmakedefine01 USE_LDAP
#cmakedefine01 USE_ROCKSDB


@@ -106,12 +106,6 @@ std::ostream & operator<<(std::ostream & stream, const Packet & what)
return stream;
}

std::ostream & operator<<(std::ostream & stream, const ExpressionAction & what)
{
stream << "ExpressionAction(" << what.toString() << ")";
return stream;
}

std::ostream & operator<<(std::ostream & stream, const ExpressionActions & what)
{
stream << "ExpressionActions(" << what.dumpActions() << ")";

@@ -40,9 +40,6 @@ std::ostream & operator<<(std::ostream & stream, const IColumn & what);
struct Packet;
std::ostream & operator<<(std::ostream & stream, const Packet & what);

struct ExpressionAction;
std::ostream & operator<<(std::ostream & stream, const ExpressionAction & what);

class ExpressionActions;
std::ostream & operator<<(std::ostream & stream, const ExpressionActions & what);


@@ -30,6 +30,7 @@ SRCS(
MySQL/PacketsReplication.cpp
NamesAndTypes.cpp
PostgreSQLProtocol.cpp
QueryProcessingStage.cpp
Settings.cpp
SettingsEnums.cpp
SettingsFields.cpp

@@ -46,7 +46,7 @@ void CheckConstraintsBlockOutputStream::write(const Block & block)

auto * constraint_ptr = constraints.constraints[i]->as<ASTConstraintDeclaration>();

ColumnWithTypeAndName res_column = block_to_calculate.getByPosition(block_to_calculate.columns() - 1);
ColumnWithTypeAndName res_column = block_to_calculate.getByName(constraint_ptr->expr->getColumnName());

if (!isUInt8(res_column.type))
throw Exception(ErrorCodes::LOGICAL_ERROR, "Constraint {} does not return a value of type UInt8",

@@ -103,6 +103,15 @@ bool TTLBlockInputStream::isTTLExpired(time_t ttl) const
return (ttl && (ttl <= current_time));
}

Block reorderColumns(Block block, const Block & header)
{
Block res;
for (const auto & col : header)
res.insert(block.getByName(col.name));

return res;
}

Block TTLBlockInputStream::readImpl()
{
/// Skip all data if table ttl is expired for part
@@ -136,7 +145,7 @@ Block TTLBlockInputStream::readImpl()
updateMovesTTL(block);
updateRecompressionTTL(block);

return block;
return reorderColumns(std::move(block), header);
}

void TTLBlockInputStream::readSuffixImpl()

@@ -155,7 +155,7 @@ void DataTypeEnum<Type>::deserializeTextEscaped(IColumn & column, ReadBuffer & i
/// NOTE It would be nice to do without creating a temporary object - at least extract std::string out.
std::string field_name;
readEscapedString(field_name, istr);
assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name), true));
}
}

@@ -182,7 +182,7 @@ void DataTypeEnum<Type>::deserializeWholeText(IColumn & column, ReadBuffer & ist
{
std::string field_name;
readString(field_name, istr);
assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name), true));
}
}

@@ -226,7 +226,7 @@ void DataTypeEnum<Type>::deserializeTextCSV(IColumn & column, ReadBuffer & istr,
{
std::string field_name;
readCSVString(field_name, istr, settings.csv);
assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name)));
assert_cast<ColumnType &>(column).getData().push_back(getValue(StringRef(field_name), true));
}
}


@@ -80,12 +80,25 @@ public:
return findByValue(value)->second;
}

FieldType getValue(StringRef field_name) const
FieldType getValue(StringRef field_name, bool try_treat_as_id = false) const
{
const auto it = name_to_value_map.find(field_name);
if (!it)
{
/// It is used in CSV and TSV input formats. If we fail to find given string in
/// enum names, we will try to treat it as enum id.
if (try_treat_as_id)
{
FieldType x;
ReadBufferFromMemory tmp_buf(field_name.data, field_name.size);
readText(x, tmp_buf);
/// Check if we reached end of the tmp_buf (otherwise field_name is not a number)
/// and try to find it in enum ids
if (tmp_buf.eof() && value_to_name_map.find(x) != value_to_name_map.end())
return x;
}
throw Exception{"Unknown element '" + field_name.toString() + "' for type " + getName(), ErrorCodes::BAD_ARGUMENTS};

}
return it->getMapped();
}


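The effect of the new try_treat_as_id path: in CSV/TSV-style input, a field that is not an enum name (e.g. "1") is now parsed as a number and accepted when it matches a known enum id. A standalone sketch of that semantics, using simplified stand-in maps instead of the DataTypeEnum internals (names here are illustrative, not from the commit):

// Illustrative sketch only: mirrors the name-lookup-then-id-fallback logic above.
#include <cstdint>
#include <map>
#include <optional>
#include <string>

std::optional<int16_t> parseEnumField(
    const std::map<std::string, int16_t> & name_to_value,
    const std::map<int16_t, std::string> & value_to_name,
    const std::string & field)
{
    /// 1. Exact name match, as before.
    if (auto it = name_to_value.find(field); it != name_to_value.end())
        return it->second;

    /// 2. New fallback: parse the whole field as a number and accept it
    ///    only if it is a known enum id (mirrors the tmp_buf.eof() check).
    try
    {
        size_t consumed = 0;
        int parsed = std::stoi(field, &consumed);
        if (consumed == field.size()
            && parsed >= INT16_MIN && parsed <= INT16_MAX
            && value_to_name.count(static_cast<int16_t>(parsed)))
            return static_cast<int16_t>(parsed);
    }
    catch (...) {} /// not a number at all

    return std::nullopt; /// caller reports "Unknown element", as in getValue()
}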
@@ -201,7 +201,7 @@ public:
{
/// Check that expression does not contain unusual actions that will break columns structure.
for (const auto & action : expression_actions->getActions())
if (action.type == ExpressionAction::Type::ARRAY_JOIN)
if (action.node->type == ActionsDAG::ActionType::ARRAY_JOIN)
throw Exception("Expression with arrayJoin or other unusual action cannot be captured", ErrorCodes::BAD_ARGUMENTS);

std::unordered_map<std::string, DataTypePtr> arguments_map;

@@ -32,6 +32,8 @@ public:
return false;
}

bool isSuitableForConstantFolding() const override { return false; }

size_t getNumberOfArguments() const override
{
return 0;

@@ -26,6 +26,10 @@ public:
return name;
}

bool isDeterministic() const override { return false; }
bool isDeterministicInScopeOfQuery() const override { return false; }
bool isSuitableForConstantFolding() const override { return false; }

size_t getNumberOfArguments() const override
{
return 0;

@@ -1,11 +1,13 @@
#include <IO/CompressionMethod.h>

#include <IO/ReadBuffer.h>
#include <IO/WriteBuffer.h>
#include <IO/ZlibInflatingReadBuffer.h>
#include <IO/ZlibDeflatingWriteBuffer.h>
#include <IO/BrotliReadBuffer.h>
#include <IO/BrotliWriteBuffer.h>
#include <IO/LZMADeflatingWriteBuffer.h>
#include <IO/LZMAInflatingReadBuffer.h>
#include <IO/ReadBuffer.h>
#include <IO/WriteBuffer.h>
#include <IO/ZlibDeflatingWriteBuffer.h>
#include <IO/ZlibInflatingReadBuffer.h>

#if !defined(ARCADIA_BUILD)
# include <Common/config.h>
@@ -14,7 +16,6 @@

namespace DB
{

namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
@@ -25,10 +26,16 @@ std::string toContentEncodingName(CompressionMethod method)
{
switch (method)
{
case CompressionMethod::Gzip: return "gzip";
case CompressionMethod::Zlib: return "deflate";
case CompressionMethod::Brotli: return "br";
case CompressionMethod::None: return "";
case CompressionMethod::Gzip:
return "gzip";
case CompressionMethod::Zlib:
return "deflate";
case CompressionMethod::Brotli:
return "br";
case CompressionMethod::Xz:
return "xz";
case CompressionMethod::None:
return "";
}
__builtin_unreachable();
}
@@ -52,20 +59,19 @@ CompressionMethod chooseCompressionMethod(const std::string & path, const std::s
return CompressionMethod::Zlib;
if (*method_str == "brotli" || *method_str == "br")
return CompressionMethod::Brotli;
if (*method_str == "LZMA" || *method_str == "xz")
return CompressionMethod::Xz;
if (hint.empty() || hint == "auto" || hint == "none")
return CompressionMethod::None;

throw Exception("Unknown compression method " + hint + ". Only 'auto', 'none', 'gzip', 'br' are supported as compression methods",
throw Exception(
"Unknown compression method " + hint + ". Only 'auto', 'none', 'gzip', 'br', 'xz' are supported as compression methods",
ErrorCodes::NOT_IMPLEMENTED);
}


std::unique_ptr<ReadBuffer> wrapReadBufferWithCompressionMethod(
std::unique_ptr<ReadBuffer> nested,
CompressionMethod method,
size_t buf_size,
char * existing_memory,
size_t alignment)
std::unique_ptr<ReadBuffer> nested, CompressionMethod method, size_t buf_size, char * existing_memory, size_t alignment)
{
if (method == CompressionMethod::Gzip || method == CompressionMethod::Zlib)
return std::make_unique<ZlibInflatingReadBuffer>(std::move(nested), method, buf_size, existing_memory, alignment);
@@ -73,6 +79,8 @@ std::unique_ptr<ReadBuffer> wrapReadBufferWithCompressionMethod(
if (method == CompressionMethod::Brotli)
return std::make_unique<BrotliReadBuffer>(std::move(nested), buf_size, existing_memory, alignment);
#endif
if (method == CompressionMethod::Xz)
return std::make_unique<LZMAInflatingReadBuffer>(std::move(nested), buf_size, existing_memory, alignment);

if (method == CompressionMethod::None)
return nested;
@@ -82,12 +90,7 @@ std::unique_ptr<ReadBuffer> wrapReadBufferWithCompressionMethod(


std::unique_ptr<WriteBuffer> wrapWriteBufferWithCompressionMethod(
std::unique_ptr<WriteBuffer> nested,
CompressionMethod method,
int level,
size_t buf_size,
char * existing_memory,
size_t alignment)
std::unique_ptr<WriteBuffer> nested, CompressionMethod method, int level, size_t buf_size, char * existing_memory, size_t alignment)
{
if (method == DB::CompressionMethod::Gzip || method == CompressionMethod::Zlib)
return std::make_unique<ZlibDeflatingWriteBuffer>(std::move(nested), method, level, buf_size, existing_memory, alignment);
@@ -96,6 +99,8 @@ std::unique_ptr<WriteBuffer> wrapWriteBufferWithCompressionMethod(
if (method == DB::CompressionMethod::Brotli)
return std::make_unique<BrotliWriteBuffer>(std::move(nested), level, buf_size, existing_memory, alignment);
#endif
if (method == CompressionMethod::Xz)
return std::make_unique<LZMADeflatingWriteBuffer>(std::move(nested), level, buf_size, existing_memory, alignment);

if (method == CompressionMethod::None)
return nested;

@@ -1,14 +1,13 @@
#pragma once

#include <string>
#include <memory>
#include <string>

#include <Core/Defines.h>


namespace DB
{

class ReadBuffer;
class WriteBuffer;

@@ -26,6 +25,9 @@ enum class CompressionMethod
/// DEFLATE compression with zlib header and Adler32 checksum.
/// This option corresponds to HTTP Content-Encoding: deflate.
Zlib,
/// LZMA2-based content compression
/// This option corresponds to HTTP Content-Encoding: xz
Xz,
Brotli
};


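Hedged read-side sketch (not part of the commit): transparently decompressing an .xz file through the helpers above. It assumes the trailing buffer arguments of wrapReadBufferWithCompressionMethod carry defaults in the header, and the file name is illustrative.

#include <IO/CompressionMethod.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadHelpers.h>
#include <memory>
#include <string>

std::string readAll(const std::string & path) /// e.g. "data.tsv.xz" (illustrative)
{
    /// "auto" lets the file extension select the method; passing "xz"
    /// (or "LZMA") explicitly would pick CompressionMethod::Xz directly.
    auto in = DB::wrapReadBufferWithCompressionMethod(
        std::make_unique<DB::ReadBufferFromFile>(path),
        DB::chooseCompressionMethod(path, /* hint = */ "auto"));

    std::string data;
    DB::readStringUntilEOF(data, *in); /// whole decompressed payload
    return data;
}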
128
src/IO/LZMADeflatingWriteBuffer.cpp
Normal file
@@ -0,0 +1,128 @@
#include <IO/LZMADeflatingWriteBuffer.h>

#if !defined(ARCADIA_BUILD)

namespace DB
{
namespace ErrorCodes
{
extern const int LZMA_STREAM_ENCODER_FAILED;
}

LZMADeflatingWriteBuffer::LZMADeflatingWriteBuffer(
std::unique_ptr<WriteBuffer> out_, int compression_level, size_t buf_size, char * existing_memory, size_t alignment)
: BufferWithOwnMemory<WriteBuffer>(buf_size, existing_memory, alignment), out(std::move(out_))
{

lstr = LZMA_STREAM_INIT;
lstr.allocator = nullptr;
lstr.next_in = nullptr;
lstr.avail_in = 0;
lstr.next_out = nullptr;
lstr.avail_out = 0;

// options for further compression
lzma_options_lzma opt_lzma2;
if (lzma_lzma_preset(&opt_lzma2, compression_level))
throw Exception(ErrorCodes::LZMA_STREAM_ENCODER_FAILED, "lzma preset failed: lzma version: {}", LZMA_VERSION_STRING);


// LZMA_FILTER_X86 -
// LZMA2 - codec for *.xz files compression; LZMA is not suitable for this purpose
// VLI - variable length integer (in *.xz most integers encoded as VLI)
// LZMA_VLI_UNKNOWN (UINT64_MAX) - VLI value to denote that the value is unknown
lzma_filter filters[] = {
{.id = LZMA_FILTER_X86, .options = nullptr},
{.id = LZMA_FILTER_LZMA2, .options = &opt_lzma2},
{.id = LZMA_VLI_UNKNOWN, .options = nullptr},
};
lzma_ret ret = lzma_stream_encoder(&lstr, filters, LZMA_CHECK_CRC64);

if (ret != LZMA_OK)
throw Exception(
ErrorCodes::LZMA_STREAM_ENCODER_FAILED,
"lzma stream encoder init failed: error code: {} lzma version: {}",
ret,
LZMA_VERSION_STRING);
}

LZMADeflatingWriteBuffer::~LZMADeflatingWriteBuffer()
{
try
{
finish();

lzma_end(&lstr);
}
catch (...)
{
tryLogCurrentException(__PRETTY_FUNCTION__);
}
}

void LZMADeflatingWriteBuffer::nextImpl()
{
if (!offset())
return;

lstr.next_in = reinterpret_cast<unsigned char *>(working_buffer.begin());
lstr.avail_in = offset();

lzma_action action = LZMA_RUN;
do
{
out->nextIfAtEnd();
lstr.next_out = reinterpret_cast<unsigned char *>(out->position());
lstr.avail_out = out->buffer().end() - out->position();

lzma_ret ret = lzma_code(&lstr, action);
out->position() = out->buffer().end() - lstr.avail_out;

if (ret == LZMA_STREAM_END)
return;

if (ret != LZMA_OK)
throw Exception(
ErrorCodes::LZMA_STREAM_ENCODER_FAILED,
"lzma stream encoding failed: error code: {}; lzma_version: {}",
ret,
LZMA_VERSION_STRING);

} while (lstr.avail_in > 0 || lstr.avail_out == 0);
}


void LZMADeflatingWriteBuffer::finish()
{
if (finished)
return;

next();

do
{
out->nextIfAtEnd();
lstr.next_out = reinterpret_cast<unsigned char *>(out->position());
lstr.avail_out = out->buffer().end() - out->position();

lzma_ret ret = lzma_code(&lstr, LZMA_FINISH);
out->position() = out->buffer().end() - lstr.avail_out;

if (ret == LZMA_STREAM_END)
{
finished = true;
return;
}

if (ret != LZMA_OK)
throw Exception(
ErrorCodes::LZMA_STREAM_ENCODER_FAILED,
"lzma stream encoding failed: error code: {}; lzma version: {}",
ret,
LZMA_VERSION_STRING);

} while (lstr.avail_out == 0);
}
}

#endif

61
src/IO/LZMADeflatingWriteBuffer.h
Normal file
@@ -0,0 +1,61 @@
#pragma once

#include <IO/BufferWithOwnMemory.h>
#include <IO/WriteBuffer.h>

#if !defined(ARCADIA_BUILD)
#include <lzma.h> // Y_IGNORE
#endif


namespace DB
{

#if !defined(ARCADIA_BUILD)

/// Performs compression using lzma library and writes compressed data to out_ WriteBuffer.
class LZMADeflatingWriteBuffer : public BufferWithOwnMemory<WriteBuffer>
{
public:
LZMADeflatingWriteBuffer(
std::unique_ptr<WriteBuffer> out_,
int compression_level,
size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE,
char * existing_memory = nullptr,
size_t alignment = 0);

void finish();

~LZMADeflatingWriteBuffer() override;

private:
void nextImpl() override;

std::unique_ptr<WriteBuffer> out;
lzma_stream lstr;
bool finished = false;
};

#else

namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
}

class LZMADeflatingWriteBuffer : public BufferWithOwnMemory<WriteBuffer>
{
public:
LZMADeflatingWriteBuffer(
std::unique_ptr<WriteBuffer> out_ [[maybe_unused]],
int compression_level [[maybe_unused]],
size_t buf_size [[maybe_unused]] = DBMS_DEFAULT_BUFFER_SIZE,
char * existing_memory [[maybe_unused]] = nullptr,
size_t alignment [[maybe_unused]] = 0)
{
throw Exception("LZMADeflatingWriteBuffer is not implemented for arcadia build", ErrorCodes::NOT_IMPLEMENTED);
}
};

#endif
}

91
src/IO/LZMAInflatingReadBuffer.cpp
Normal file
@@ -0,0 +1,91 @@
#include <IO/LZMAInflatingReadBuffer.h>

#if !defined(ARCADIA_BUILD)
namespace DB
{
namespace ErrorCodes
{
extern const int LZMA_STREAM_DECODER_FAILED;
}
LZMAInflatingReadBuffer::LZMAInflatingReadBuffer(std::unique_ptr<ReadBuffer> in_, size_t buf_size, char * existing_memory, size_t alignment)
: BufferWithOwnMemory<ReadBuffer>(buf_size, existing_memory, alignment), in(std::move(in_)), eof(false)
{
lstr = LZMA_STREAM_INIT;
lstr.allocator = nullptr;
lstr.next_in = nullptr;
lstr.avail_in = 0;
lstr.next_out = nullptr;
lstr.avail_out = 0;

// 500 mb
uint64_t memlimit = 500ULL << 20;

lzma_ret ret = lzma_stream_decoder(&lstr, memlimit, LZMA_CONCATENATED);
// lzma does not provide api for converting error code to string unlike zlib
if (ret != LZMA_OK)
throw Exception(
ErrorCodes::LZMA_STREAM_DECODER_FAILED,
"lzma_stream_decoder initialization failed: error code: {}; lzma version: {}",
ret,
LZMA_VERSION_STRING);
}

LZMAInflatingReadBuffer::~LZMAInflatingReadBuffer()
{
lzma_end(&lstr);
}

bool LZMAInflatingReadBuffer::nextImpl()
{
if (eof)
return false;

lzma_action action = LZMA_RUN;

if (!lstr.avail_in)
{
in->nextIfAtEnd();
lstr.next_in = reinterpret_cast<unsigned char *>(in->position());
lstr.avail_in = in->buffer().end() - in->position();
}

if (in->eof())
{
action = LZMA_FINISH;
}

lstr.next_out = reinterpret_cast<unsigned char *>(internal_buffer.begin());
lstr.avail_out = internal_buffer.size();

lzma_ret ret = lzma_code(&lstr, action);
in->position() = in->buffer().end() - lstr.avail_in;
working_buffer.resize(internal_buffer.size() - lstr.avail_out);

if (ret == LZMA_STREAM_END)
{
if (in->eof())
{
eof = true;
return working_buffer.size() != 0;
}
else
{
throw Exception(
ErrorCodes::LZMA_STREAM_DECODER_FAILED,
"lzma decoder finished, but input stream has not exceeded: error code: {}; lzma version: {}",
ret,
LZMA_VERSION_STRING);
}
}

if (ret != LZMA_OK)
throw Exception(
ErrorCodes::LZMA_STREAM_DECODER_FAILED,
"lzma_stream_decoder failed: error code: {}; lzma version: {}",
ret,
LZMA_VERSION_STRING);

return true;
}
}
#endif

55
src/IO/LZMAInflatingReadBuffer.h
Normal file
@@ -0,0 +1,55 @@
#pragma once

#include <IO/BufferWithOwnMemory.h>
#include <IO/ReadBuffer.h>

#if !defined(ARCADIA_BUILD)
#include <lzma.h> // Y_IGNORE
#endif

namespace DB
{

#if !defined(ARCADIA_BUILD)
class LZMAInflatingReadBuffer : public BufferWithOwnMemory<ReadBuffer>
{
public:
LZMAInflatingReadBuffer(
std::unique_ptr<ReadBuffer> in_,
size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE,
char * existing_memory = nullptr,
size_t alignment = 0);

~LZMAInflatingReadBuffer() override;

private:
bool nextImpl() override;

std::unique_ptr<ReadBuffer> in;
lzma_stream lstr;

bool eof;
};

#else

namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
}

class LZMAInflatingReadBuffer : public BufferWithOwnMemory<ReadBuffer>
{
public:
LZMAInflatingReadBuffer(
std::unique_ptr<ReadBuffer> in_ [[maybe_unused]],
size_t buf_size [[maybe_unused]] = DBMS_DEFAULT_BUFFER_SIZE,
char * existing_memory [[maybe_unused]] = nullptr,
size_t alignment [[maybe_unused]] = 0)
{
throw Exception("LZMADeflatingWriteBuffer is not implemented for arcadia build", ErrorCodes::NOT_IMPLEMENTED);
}
};

#endif
}

@@ -65,6 +65,9 @@ endif ()
add_executable (zlib_buffers zlib_buffers.cpp)
target_link_libraries (zlib_buffers PRIVATE clickhouse_common_io)

add_executable (lzma_buffers lzma_buffers.cpp)
target_link_libraries (lzma_buffers PRIVATE clickhouse_common_io)

add_executable (limit_read_buffer limit_read_buffer.cpp)
target_link_libraries (limit_read_buffer PRIVATE clickhouse_common_io)


64
src/IO/tests/lzma_buffers.cpp
Normal file
@@ -0,0 +1,64 @@
#include <iomanip>
#include <iostream>

#include <IO/LZMADeflatingWriteBuffer.h>
#include <IO/LZMAInflatingReadBuffer.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/WriteHelpers.h>
#include <Common/Stopwatch.h>

int main(int, char **)
try
{
std::cout << std::fixed << std::setprecision(2);

size_t n = 10000000;
Stopwatch stopwatch;

{
auto buf
= std::make_unique<DB::WriteBufferFromFile>("test_lzma_buffers.xz", DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_CREAT | O_TRUNC);
DB::LZMADeflatingWriteBuffer lzma_buf(std::move(buf), /*compression level*/ 3);

stopwatch.restart();
for (size_t i = 0; i < n; ++i)
{
DB::writeIntText(i, lzma_buf);
DB::writeChar('\t', lzma_buf);
}
lzma_buf.finish();

stopwatch.stop();

std::cout << "Writing done. Elapsed: " << stopwatch.elapsedSeconds() << " s."
<< ", " << (lzma_buf.count() / stopwatch.elapsedSeconds() / 1000000) << " MB/s" << std::endl;
}

{
auto buf = std::make_unique<DB::ReadBufferFromFile>("test_lzma_buffers.xz");
DB::LZMAInflatingReadBuffer lzma_buf(std::move(buf));

stopwatch.restart();
for (size_t i = 0; i < n; ++i)
{
size_t x;
DB::readIntText(x, lzma_buf);
lzma_buf.ignore();

if (x != i)
throw DB::Exception("Failed!, read: " + std::to_string(x) + ", expected: " + std::to_string(i), 0);
}
stopwatch.stop();
std::cout << "Reading done. Elapsed: " << stopwatch.elapsedSeconds() << " s."
<< ", " << (lzma_buf.count() / stopwatch.elapsedSeconds() / 1000000) << " MB/s" << std::endl;
}

return 0;
}
catch (const DB::Exception & e)
{
std::cerr << e.what() << ", " << e.displayText() << std::endl;
return 1;
}

@@ -20,6 +20,8 @@ SRCS(
HTTPCommon.cpp
HashingWriteBuffer.cpp
HexWriteBuffer.cpp
LZMADeflatingWriteBuffer.cpp
LZMAInflatingReadBuffer.cpp
LimitReadBuffer.cpp
MMapReadBufferFromFile.cpp
MMapReadBufferFromFileDescriptor.cpp

@ -350,7 +350,7 @@ SetPtr makeExplicitSet(
|
||||
auto it = index.find(left_arg->getColumnName());
|
||||
if (it == index.end())
|
||||
throw Exception("Unknown identifier: '" + left_arg->getColumnName() + "'", ErrorCodes::UNKNOWN_IDENTIFIER);
|
||||
const DataTypePtr & left_arg_type = it->second->result_type;
|
||||
const DataTypePtr & left_arg_type = (*it)->result_type;
|
||||
|
||||
DataTypes set_element_types = {left_arg_type};
|
||||
const auto * left_tuple_type = typeid_cast<const DataTypeTuple *>(left_arg_type.get());
|
||||
@ -383,7 +383,7 @@ SetPtr makeExplicitSet(
|
||||
|
||||
ActionsMatcher::Data::Data(
|
||||
const Context & context_, SizeLimits set_size_limit_, size_t subquery_depth_,
|
||||
const NamesAndTypesList & source_columns_, ActionsDAGPtr actions,
|
||||
const NamesAndTypesList & source_columns_, ActionsDAGPtr actions_dag,
|
||||
PreparedSets & prepared_sets_, SubqueriesForSets & subqueries_for_sets_,
|
||||
bool no_subqueries_, bool no_makeset_, bool only_consts_, bool create_source_for_in_)
|
||||
: context(context_)
|
||||
@ -397,45 +397,45 @@ ActionsMatcher::Data::Data(
|
||||
, only_consts(only_consts_)
|
||||
, create_source_for_in(create_source_for_in_)
|
||||
, visit_depth(0)
|
||||
, actions_stack(std::move(actions), context)
|
||||
, actions_stack(std::move(actions_dag), context)
|
||||
, next_unique_suffix(actions_stack.getLastActions().getIndex().size() + 1)
|
||||
{
|
||||
}
|
||||
|
||||
bool ActionsMatcher::Data::hasColumn(const String & column_name) const
|
||||
{
|
||||
return actions_stack.getLastActions().getIndex().count(column_name) != 0;
|
||||
return actions_stack.getLastActions().getIndex().contains(column_name);
|
||||
}
|
||||
|
||||
ScopeStack::ScopeStack(ActionsDAGPtr actions, const Context & context_)
|
||||
ScopeStack::ScopeStack(ActionsDAGPtr actions_dag, const Context & context_)
|
||||
: context(context_)
|
||||
{
|
||||
auto & level = stack.emplace_back();
|
||||
level.actions = std::move(actions);
|
||||
level.actions_dag = std::move(actions_dag);
|
||||
|
||||
for (const auto & [name, node] : level.actions->getIndex())
|
||||
if (node->type == ActionsDAG::Type::INPUT)
|
||||
level.inputs.emplace(name);
|
||||
for (const auto & node : level.actions_dag->getIndex())
|
||||
if (node->type == ActionsDAG::ActionType::INPUT)
|
||||
level.inputs.emplace(node->result_name);
|
||||
}
|
||||
|
||||
void ScopeStack::pushLevel(const NamesAndTypesList & input_columns)
|
||||
{
|
||||
auto & level = stack.emplace_back();
|
||||
level.actions = std::make_shared<ActionsDAG>();
|
||||
level.actions_dag = std::make_shared<ActionsDAG>();
|
||||
const auto & prev = stack[stack.size() - 2];
|
||||
|
||||
for (const auto & input_column : input_columns)
|
||||
{
|
||||
level.actions->addInput(input_column.name, input_column.type);
|
||||
level.actions_dag->addInput(input_column.name, input_column.type);
|
||||
level.inputs.emplace(input_column.name);
|
||||
}
|
||||
|
||||
const auto & index = level.actions->getIndex();
|
||||
const auto & index = level.actions_dag->getIndex();
|
||||
|
||||
for (const auto & [name, node] : prev.actions->getIndex())
|
||||
for (const auto & node : prev.actions_dag->getIndex())
|
||||
{
|
||||
if (index.count(name) == 0)
|
||||
level.actions->addInput({node->column, node->result_type, node->result_name});
|
||||
if (!index.contains(node->result_name))
|
||||
level.actions_dag->addInput({node->column, node->result_type, node->result_name});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -448,10 +448,10 @@ size_t ScopeStack::getColumnLevel(const std::string & name)
         if (stack[i].inputs.count(name))
             return i;
 
-        const auto & index = stack[i].actions->getIndex();
+        const auto & index = stack[i].actions_dag->getIndex();
         auto it = index.find(name);
 
-        if (it != index.end() && it->second->type != ActionsDAG::Type::INPUT)
+        if (it != index.end() && (*it)->type != ActionsDAG::ActionType::INPUT)
             return i;
     }
 
@@ -460,66 +460,65 @@ size_t ScopeStack::getColumnLevel(const std::string & name)
 
 void ScopeStack::addColumn(ColumnWithTypeAndName column)
 {
-    const auto & node = stack[0].actions->addColumn(std::move(column));
+    const auto & node = stack[0].actions_dag->addColumn(std::move(column));
 
     for (size_t j = 1; j < stack.size(); ++j)
-        stack[j].actions->addInput({node.column, node.result_type, node.result_name});
+        stack[j].actions_dag->addInput({node.column, node.result_type, node.result_name});
 }
 
 void ScopeStack::addAlias(const std::string & name, std::string alias)
 {
     auto level = getColumnLevel(name);
-    const auto & node = stack[level].actions->addAlias(name, std::move(alias));
+    const auto & node = stack[level].actions_dag->addAlias(name, std::move(alias));
 
     for (size_t j = level + 1; j < stack.size(); ++j)
-        stack[j].actions->addInput({node.column, node.result_type, node.result_name});
+        stack[j].actions_dag->addInput({node.column, node.result_type, node.result_name});
 }
 
-void ScopeStack::addArrayJoin(const std::string & source_name, std::string result_name, std::string unique_column_name)
+void ScopeStack::addArrayJoin(const std::string & source_name, std::string result_name)
 {
     getColumnLevel(source_name);
 
-    if (stack.front().actions->getIndex().count(source_name) == 0)
+    if (!stack.front().actions_dag->getIndex().contains(source_name))
         throw Exception("Expression with arrayJoin cannot depend on lambda argument: " + source_name,
                         ErrorCodes::BAD_ARGUMENTS);
 
-    const auto & node = stack.front().actions->addArrayJoin(source_name, std::move(result_name), std::move(unique_column_name));
+    const auto & node = stack.front().actions_dag->addArrayJoin(source_name, std::move(result_name));
 
     for (size_t j = 1; j < stack.size(); ++j)
-        stack[j].actions->addInput({node.column, node.result_type, node.result_name});
+        stack[j].actions_dag->addInput({node.column, node.result_type, node.result_name});
 }
 
 void ScopeStack::addFunction(
     const FunctionOverloadResolverPtr & function,
     const Names & argument_names,
-    std::string result_name,
-    bool compile_expressions)
+    std::string result_name)
 {
     size_t level = 0;
     for (const auto & argument : argument_names)
         level = std::max(level, getColumnLevel(argument));
 
-    const auto & node = stack[level].actions->addFunction(function, argument_names, std::move(result_name), compile_expressions);
+    const auto & node = stack[level].actions_dag->addFunction(function, argument_names, std::move(result_name), context);
 
     for (size_t j = level + 1; j < stack.size(); ++j)
-        stack[j].actions->addInput({node.column, node.result_type, node.result_name});
+        stack[j].actions_dag->addInput({node.column, node.result_type, node.result_name});
 }
 
 ActionsDAGPtr ScopeStack::popLevel()
 {
     auto res = std::move(stack.back());
     stack.pop_back();
-    return res.actions;
+    return res.actions_dag;
 }
 
 std::string ScopeStack::dumpNames() const
 {
-    return stack.back().actions->dumpNames();
+    return stack.back().actions_dag->dumpNames();
 }
 
 const ActionsDAG & ScopeStack::getLastActions() const
 {
-    return *stack.back().actions;
+    return *stack.back().actions_dag;
 }
 
 bool ActionsMatcher::needChildVisit(const ASTPtr & node, const ASTPtr & child)
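The addFunction hunk above encodes the core placement rule: a function node is added at the deepest level where any of its arguments is defined, and its result is then exported as an input to every level below it, so inner lambda bodies can reference it. Here is a simplified runnable model; representing levels as name maps is an assumption for illustration, not the real structure.

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    // Each level maps a column name to the level that defined it.
    std::vector<std::map<std::string, size_t>> stack;

    size_t getColumnLevel(const std::string & name)
    {
        for (size_t i = stack.size(); i-- > 0;)
            if (stack[i].count(name))
                return i;
        return 0;
    }

    void addFunction(const std::string & result, const std::vector<std::string> & args)
    {
        // The node lands on the deepest level any argument lives at ...
        size_t level = 0;
        for (const auto & arg : args)
            level = std::max(level, getColumnLevel(arg));

        // ... and deeper levels see its result as an input.
        for (size_t j = level; j < stack.size(); ++j)
            stack[j][result] = level;
    }

    int main()
    {
        stack.assign(2, {});
        stack[0]["a"] = 0;  // outer column
        stack[1]["x"] = 1;  // lambda argument
        addFunction("plus(a, x)", {"a", "x"});
        std::cout << getColumnLevel("plus(a, x)") << '\n';  // 1: it depends on x
    }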
@@ -572,7 +571,7 @@ std::optional<NameAndTypePair> ActionsMatcher::getNameAndTypeFromAST(const ASTPt
     const auto & index = data.actions_stack.getLastActions().getIndex();
     auto it = index.find(child_column_name);
     if (it != index.end())
-        return NameAndTypePair(child_column_name, it->second->result_type);
+        return NameAndTypePair(child_column_name, (*it)->result_type);
 
     if (!data.only_consts)
         throw Exception("Unknown identifier: " + child_column_name + " there are columns: " + data.actions_stack.dumpNames(),
@@ -892,10 +891,12 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
                 data.actions_stack.pushLevel(lambda_arguments);
                 visit(lambda->arguments->children.at(1), data);
                 auto lambda_dag = data.actions_stack.popLevel();
-                auto lambda_actions = lambda_dag->buildExpressions(data.context);
 
                 String result_name = lambda->arguments->children.at(1)->getColumnName();
-                lambda_actions->finalize(Names(1, result_name));
+                lambda_dag->removeUnusedActions(Names(1, result_name));
+
+                auto lambda_actions = std::make_shared<ExpressionActions>(lambda_dag);
 
                 DataTypePtr result_type = lambda_actions->getSampleBlock().getByName(result_name).type;
 
                 Names captured;
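This hunk changes how a lambda body becomes executable: rather than building ExpressionActions from the DAG and finalizing them, the DAG itself is pruned down to the nodes feeding the lambda's result (removeUnusedActions) and ExpressionActions is constructed from the pruned DAG. The pruning step is essentially a reachability walk; below is a toy model, with the dependency graph written out by hand rather than taken from a real ActionsDAG.

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    // Hand-written dependency edges: node -> the nodes it reads from.
    std::map<std::string, std::vector<std::string>> deps = {
        {"plus(a, b)", {"a", "b"}},
        {"unused(c)", {"c"}},
        {"a", {}}, {"b", {}}, {"c", {}},
    };

    // Mark everything reachable from a required output.
    void collect(const std::string & name, std::set<std::string> & used)
    {
        if (!used.insert(name).second)
            return;
        for (const auto & dep : deps.at(name))
            collect(dep, used);
    }

    int main()
    {
        std::set<std::string> used;
        collect("plus(a, b)", used);                   // the lambda's result
        std::cout << used.count("unused(c)") << '\n';  // 0: pruned away
    }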
@@ -954,7 +955,7 @@ void ActionsMatcher::visit(const ASTLiteral & literal, const ASTPtr & /* ast */,
 
     auto it = index.find(default_name);
     if (it != index.end())
-        existing_column = it->second;
+        existing_column = *it;
 
     /*
      * To approximate CSE, bind all identical literals to a single temporary
@@ -1051,7 +1052,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su
      * - this function shows the expression IN_data1.
      *
      * In case that we have HAVING with IN subquery, we have to force creating set for it.
-     * Also it doesn't make sence if it is GLOBAL IN or ordinary IN.
+     * Also it doesn't make sense if it is GLOBAL IN or ordinary IN.
      */
     if (!subquery_for_set.source && data.create_source_for_in)
     {
@@ -1068,7 +1069,7 @@ SetPtr ActionsMatcher::makeSet(const ASTFunction & node, Data & data, bool no_su
     {
         const auto & last_actions = data.actions_stack.getLastActions();
         const auto & index = last_actions.getIndex();
-        if (index.count(left_in_operand->getColumnName()) != 0)
+        if (index.contains(left_in_operand->getColumnName()))
             /// An explicit enumeration of values in parentheses.
             return makeExplicitSet(&node, last_actions, false, data.context, data.set_size_limit, data.prepared_sets);
         else
 
@@ -12,7 +12,6 @@ namespace DB
 class Context;
 class ASTFunction;
 
-struct ExpressionAction;
 class ExpressionActions;
 using ExpressionActionsPtr = std::shared_ptr<ExpressionActions>;
 
@@ -65,7 +64,7 @@ struct ScopeStack
 {
     struct Level
     {
-        ActionsDAGPtr actions;
+        ActionsDAGPtr actions_dag;
         NameSet inputs;
     };
 
@@ -75,7 +74,7 @@ struct ScopeStack
 
     const Context & context;
 
-    ScopeStack(ActionsDAGPtr actions, const Context & context_);
+    ScopeStack(ActionsDAGPtr actions_dag, const Context & context_);
 
     void pushLevel(const NamesAndTypesList & input_columns);
 
@@ -83,12 +82,11 @@ struct ScopeStack
 
     void addColumn(ColumnWithTypeAndName column);
     void addAlias(const std::string & name, std::string alias);
-    void addArrayJoin(const std::string & source_name, std::string result_name, std::string unique_column_name);
+    void addArrayJoin(const std::string & source_name, std::string result_name);
     void addFunction(
         const FunctionOverloadResolverPtr & function,
         const Names & argument_names,
-        std::string result_name,
-        bool compile_expressions);
+        std::string result_name);
 
     ActionsDAGPtr popLevel();
 
@@ -129,7 +127,7 @@ public:
     int next_unique_suffix;
 
     Data(const Context & context_, SizeLimits set_size_limit_, size_t subquery_depth_,
-         const NamesAndTypesList & source_columns_, ActionsDAGPtr actions,
+         const NamesAndTypesList & source_columns_, ActionsDAGPtr actions_dag,
          PreparedSets & prepared_sets_, SubqueriesForSets & subqueries_for_sets_,
          bool no_subqueries_, bool no_makeset_, bool only_consts_, bool create_source_for_in_);
 
@@ -147,15 +145,14 @@ public:
 
     void addArrayJoin(const std::string & source_name, std::string result_name)
     {
-        actions_stack.addArrayJoin(source_name, std::move(result_name), getUniqueName("_array_join_" + source_name));
+        actions_stack.addArrayJoin(source_name, std::move(result_name));
    }
 
     void addFunction(const FunctionOverloadResolverPtr & function,
                      const Names & argument_names,
                      std::string result_name)
     {
-        actions_stack.addFunction(function, argument_names, std::move(result_name),
-                                  context.getSettingsRef().compile_expressions);
+        actions_stack.addFunction(function, argument_names, std::move(result_name));
     }
 
     ActionsDAGPtr getActions()
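Taken together with the ScopeStack changes in the .cpp hunks, this moves the compile_expressions setting out of the call chain: ScopeStack already holds a Context reference, so addFunction can read settings itself instead of every caller forwarding a flag. A small sketch of that pattern, with stand-in types rather than the real interfaces:

    #include <iostream>
    #include <string>

    struct Context { bool compile_expressions = true; };

    struct ScopeStack
    {
        const Context & context;  // captured once at construction

        // New-style signature: no per-call compile_expressions parameter; the
        // setting is read from the stored context instead.
        void addFunction(const std::string & result_name)
        {
            std::cout << result_name << " (compile="
                      << context.compile_expressions << ")\n";
        }
    };

    int main()
    {
        Context ctx;
        ScopeStack stack{ctx};
        stack.addFunction("plus(a, b)");
    }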
@@ -112,7 +112,22 @@ Block Aggregator::Params::getHeader(
 {
     Block res;
 
-    if (src_header)
+    if (intermediate_header)
+    {
+        res = intermediate_header.cloneEmpty();
+
+        if (final)
+        {
+            for (const auto & aggregate : aggregates)
+            {
+                auto & elem = res.getByName(aggregate.column_name);
+
+                elem.type = aggregate.function->getReturnType();
+                elem.column = elem.type->createColumn();
+            }
+        }
+    }
+    else
     {
         for (const auto & key : keys)
             res.insert(src_header.safeGetByPosition(key).cloneEmpty());
@@ -133,21 +148,6 @@ Block Aggregator::Params::getHeader(
             res.insert({ type, aggregate.column_name });
         }
     }
-    else if (intermediate_header)
-    {
-        res = intermediate_header.cloneEmpty();
-
-        if (final)
-        {
-            for (const auto & aggregate : aggregates)
-            {
-                auto & elem = res.getByName(aggregate.column_name);
-
-                elem.type = aggregate.function->getReturnType();
-                elem.column = elem.type->createColumn();
-            }
-        }
-    }
 
     return materializeBlock(res);
 }
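These two hunks are a single move: the intermediate_header branch is hoisted ahead of the src_header branch in Aggregator::Params::getHeader, so when both headers are set the intermediate header now takes precedence. A condensed model of the resulting control flow (the real function builds a Block; this just reports which source would be used):

    #include <iostream>
    #include <string>

    // Condensed model of the reordered branches in Aggregator::Params::getHeader.
    std::string headerSource(bool src_header, bool intermediate_header)
    {
        if (intermediate_header)  // checked first after this change
            return "intermediate_header";
        // the else branch uses src_header, as before
        return src_header ? "src_header" : "empty";
    }

    int main()
    {
        std::cout << headerSource(true, true) << '\n';  // intermediate_header wins now
    }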
@@ -451,6 +451,8 @@ struct ContextShared
 
     void initializeTraceCollector(std::shared_ptr<TraceLog> trace_log)
     {
         if (!trace_log)
             return;
+        if (hasTraceCollector())
+            return;
 
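The added hasTraceCollector() check makes initializeTraceCollector idempotent: a second call while a collector is already live becomes a no-op instead of reinitializing it. A toy model of the guard, with stand-in types:

    #include <iostream>
    #include <memory>
    #include <utility>

    struct TraceLog {};

    struct ContextShared
    {
        std::shared_ptr<TraceLog> collector;

        bool hasTraceCollector() const { return collector != nullptr; }

        void initializeTraceCollector(std::shared_ptr<TraceLog> trace_log)
        {
            if (!trace_log)
                return;
            if (hasTraceCollector())  // the newly added guard
                return;
            collector = std::move(trace_log);
            std::cout << "initialized\n";
        }
    };

    int main()
    {
        ContextShared shared;
        auto log = std::make_shared<TraceLog>();
        shared.initializeTraceCollector(log);  // prints "initialized"
        shared.initializeTraceCollector(log);  // no-op
    }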
@@ -167,7 +167,7 @@ void DatabaseCatalog::shutdownImpl()
     std::lock_guard lock(databases_mutex);
     assert(std::find_if(uuid_map.begin(), uuid_map.end(), [](const auto & elem)
     {
-        /// Ensure that all UUID mappings are emtpy (i.e. all mappings contain nullptr instead of a pointer to storage)
+        /// Ensure that all UUID mappings are empty (i.e. all mappings contain nullptr instead of a pointer to storage)
         const auto & not_empty_mapping = [] (const auto & mapping)
         {
             auto & table = mapping.second.second;
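Besides the comment typo fix (emtpy to empty), the hunk shows the shutdown invariant being asserted: every entry in uuid_map must already hold nullptr, checked with a find_if over the mappings. A toy model of that assertion, flattened for brevity (the real map nests the storage pointer one level deeper):

    #include <algorithm>
    #include <cassert>
    #include <map>
    #include <memory>
    #include <string>

    struct Storage {};

    int main()
    {
        std::map<std::string, std::shared_ptr<Storage>> uuid_map;
        uuid_map["123e4567"] = nullptr;  // storage already detached

        // Shutdown invariant: no mapping may still point at a storage.
        assert(std::find_if(uuid_map.begin(), uuid_map.end(),
            [](const auto & mapping) { return mapping.second != nullptr; })
            == uuid_map.end());
    }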
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff