mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-27 01:51:59 +00:00

fix conflict

This commit is contained in commit aaedf02d2b.
.gitignore (vendored): 1 addition

@@ -79,6 +79,7 @@ configure-stamp
 *.bin
 *.mrk
 *.mrk2
+*.mrk3

 .dupload.conf
.gitmodules (vendored): 6 changes

@@ -91,10 +91,10 @@
  url = https://github.com/ClickHouse-Extras/libunwind.git
 [submodule "contrib/simdjson"]
  path = contrib/simdjson
- url = https://github.com/lemire/simdjson.git
+ url = https://github.com/ClickHouse-Extras/simdjson.git
 [submodule "contrib/rapidjson"]
  path = contrib/rapidjson
- url = https://github.com/Tencent/rapidjson
+ url = https://github.com/ClickHouse-Extras/rapidjson
 [submodule "contrib/fastops"]
  path = contrib/fastops
  url = https://github.com/ClickHouse-Extras/fastops
@@ -173,7 +173,7 @@
  url = https://github.com/fmtlib/fmt.git
 [submodule "contrib/sentry-native"]
  path = contrib/sentry-native
- url = https://github.com/getsentry/sentry-native.git
+ url = https://github.com/ClickHouse-Extras/sentry-native.git
 [submodule "contrib/gcem"]
  path = contrib/gcem
  url = https://github.com/kthohr/gcem.git
CHANGELOG.md: 317 additions

@@ -1,5 +1,184 @@
## ClickHouse release 20.6

### ClickHouse release v20.6.3.28-stable

#### New Feature

* Added an initial implementation of `EXPLAIN` query. Syntax: `EXPLAIN SELECT ...`. This fixes [#1118](https://github.com/ClickHouse/ClickHouse/issues/1118). [#11873](https://github.com/ClickHouse/ClickHouse/pull/11873) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
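  A minimal illustration of the new syntax (not from the PR; the plan output format varies by version):

  ```sql
  -- Print the query plan instead of executing the query.
  EXPLAIN SELECT sum(number) FROM numbers(10) GROUP BY number % 2;
  ```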
* Added storage `RabbitMQ`. [#11069](https://github.com/ClickHouse/ClickHouse/pull/11069) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Implemented PostgreSQL-like `ILIKE` operator for [#11710](https://github.com/ClickHouse/ClickHouse/issues/11710). [#12125](https://github.com/ClickHouse/ClickHouse/pull/12125) ([Mike](https://github.com/myrrc)).
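  For illustration, `ILIKE` behaves as a case-insensitive `LIKE`:

  ```sql
  SELECT 'ClickHouse' LIKE '%house%';   -- 0: LIKE is case-sensitive
  SELECT 'ClickHouse' ILIKE '%house%';  -- 1: ILIKE ignores case
  ```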
* Supported `RIGHT` and `FULL JOIN` with `SET join_algorithm = 'partial_merge'`. Only `ALL` strictness is allowed (`ANY`, `SEMI`, `ANTI`, `ASOF` are not). [#12118](https://github.com/ClickHouse/ClickHouse/pull/12118) ([Artem Zuikov](https://github.com/4ertus2)).
* Added a function `initializeAggregation` to initialize an aggregation based on a single value. [#12109](https://github.com/ClickHouse/ClickHouse/pull/12109) ([Guillaume Tassery](https://github.com/YiuRULE)).
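  A hedged sketch of one way to use it, combined with the pre-existing `finalizeAggregation` (the produced state is mainly intended for `AggregateFunction` columns):

  ```sql
  -- Build a uniqState from a single value, then turn the state back into a number.
  SELECT finalizeAggregation(initializeAggregation('uniqState', 42));  -- returns 1
  ```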
* Supported `ALTER TABLE ... [ADD|MODIFY] COLUMN ... FIRST` [#4006](https://github.com/ClickHouse/ClickHouse/issues/4006). [#12073](https://github.com/ClickHouse/ClickHouse/pull/12073) ([Winter Zhang](https://github.com/zhang2014)).
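  A sketch with a hypothetical table `t` and column `val`:

  ```sql
  -- `t` and `val` are placeholder names, not from the PR.
  ALTER TABLE t ADD COLUMN id UInt64 FIRST;      -- add as the first column
  ALTER TABLE t MODIFY COLUMN val String FIRST;  -- move an existing column to the front
  ```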
* Added function `parseDateTimeBestEffortUS`. [#12028](https://github.com/ClickHouse/ClickHouse/pull/12028) ([flynn](https://github.com/ucasFL)).
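  Illustrative only: the US variant resolves ambiguous dates as month-first.

  ```sql
  SELECT parseDateTimeBestEffortUS('09/08/2020 10:00:00');
  -- parsed as 2020-09-08 10:00:00 (MM/DD/YYYY), not August 9
  ```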
* Supported format `ORC` for output (previously it was supported only for input). [#11662](https://github.com/ClickHouse/ClickHouse/pull/11662) ([Kruglov Pavel](https://github.com/Avogar)).

#### Bug Fix

* Fixed `aggregate function any(x) is found inside another aggregate function in query` error with `SET optimize_move_functions_out_of_any = 1` and aliases inside `any()`. [#13419](https://github.com/ClickHouse/ClickHouse/pull/13419) ([Artem Zuikov](https://github.com/4ertus2)).
* Fixed `PrettyCompactMonoBlock` for clickhouse-local. Fixed extremes/totals with `PrettyCompactMonoBlock`. This fixes [#7746](https://github.com/ClickHouse/ClickHouse/issues/7746). [#13394](https://github.com/ClickHouse/ClickHouse/pull/13394) ([Azat Khuzhin](https://github.com/azat)).
* Fixed possible error `Totals having transform was already added to pipeline` for queries from a delayed replica. [#13290](https://github.com/ClickHouse/ClickHouse/pull/13290) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed a possible server crash when specially crafted arguments were passed to the function `h3ToChildren`. This fixes [#13275](https://github.com/ClickHouse/ClickHouse/issues/13275). [#13277](https://github.com/ClickHouse/ClickHouse/pull/13277) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed potentially low performance and slightly incorrect results for `uniqExact`, `topK`, `sumDistinct` and similar aggregate functions called on Float types with NaN values. It also triggered an assert in debug builds. This fixes [#12491](https://github.com/ClickHouse/ClickHouse/issues/12491). [#13254](https://github.com/ClickHouse/ClickHouse/pull/13254) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed function `if` with a nullable constexpr condition that is not literal `NULL`. Fixes [#12463](https://github.com/ClickHouse/ClickHouse/issues/12463). [#13226](https://github.com/ClickHouse/ClickHouse/pull/13226) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed an assert in the `arrayElement` function when the array elements are `Nullable` and the array subscript is also `Nullable`. This fixes [#12172](https://github.com/ClickHouse/ClickHouse/issues/12172). [#13224](https://github.com/ClickHouse/ClickHouse/pull/13224) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed `DateTime64` conversion functions with a constant argument. [#13205](https://github.com/ClickHouse/ClickHouse/pull/13205) ([Azat Khuzhin](https://github.com/azat)).
* Fixed wrong index analysis with functions. It could lead to pruning wrong parts while reading from `MergeTree` tables. Fixes [#13060](https://github.com/ClickHouse/ClickHouse/issues/13060). Fixes [#12406](https://github.com/ClickHouse/ClickHouse/issues/12406). [#13081](https://github.com/ClickHouse/ClickHouse/pull/13081) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed error `Cannot convert column because it is constant but values of constants are different in source and result` for remote queries which use functions that are deterministic in the scope of one query but not between queries, like `now()`, `now64()`, `randConstant()`. Fixes [#11327](https://github.com/ClickHouse/ClickHouse/issues/11327). [#13075](https://github.com/ClickHouse/ClickHouse/pull/13075) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed unnecessary limiting of the number of threads for selects from a local replica. [#12840](https://github.com/ClickHouse/ClickHouse/pull/12840) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed a rare bug when `ALTER DELETE` and `ALTER MODIFY COLUMN` queries were executed simultaneously as a single mutation. The bug led to an incorrect amount of rows in `count.txt` and, as a consequence, incorrect data in the part. Also fixed a small bug with simultaneous `ALTER RENAME COLUMN` and `ALTER ADD COLUMN`. [#12760](https://github.com/ClickHouse/ClickHouse/pull/12760) ([alesapin](https://github.com/alesapin)).
* Fixed `CAST(Nullable(String), Enum())`. [#12745](https://github.com/ClickHouse/ClickHouse/pull/12745) ([Azat Khuzhin](https://github.com/azat)).
* Fixed a performance issue with large tuples, which are interpreted as functions in the `IN` section, i.e. the case when a user writes `WHERE x IN tuple(1, 2, ...)` instead of `WHERE x IN (1, 2, ...)` for some obscure reason. [#12700](https://github.com/ClickHouse/ClickHouse/pull/12700) ([Anton Popov](https://github.com/CurtizJ)).
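  Both spellings are now handled efficiently (illustrative):

  ```sql
  SELECT count() FROM numbers(1000000) WHERE number IN (1, 2, 3);
  SELECT count() FROM numbers(1000000) WHERE number IN tuple(1, 2, 3);  -- previously slow
  ```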
* Fixed memory tracking for `input_format_parallel_parsing` (by attaching the thread to a group). [#12672](https://github.com/ClickHouse/ClickHouse/pull/12672) ([Azat Khuzhin](https://github.com/azat)).
* Fixed bloom filter index with const expression. This fixes [#10572](https://github.com/ClickHouse/ClickHouse/issues/10572). [#12659](https://github.com/ClickHouse/ClickHouse/pull/12659) ([Winter Zhang](https://github.com/zhang2014)).
* Fixed `SIGSEGV` in `StorageKafka` when the broker is unavailable (and not only). [#12658](https://github.com/ClickHouse/ClickHouse/pull/12658) ([Azat Khuzhin](https://github.com/azat)).
* Added support for function `if` with `Array(UUID)` arguments. This fixes [#11066](https://github.com/ClickHouse/ClickHouse/issues/11066). [#12648](https://github.com/ClickHouse/ClickHouse/pull/12648) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* `CREATE USER IF NOT EXISTS` now doesn't throw an exception if the user exists. This fixes [#12507](https://github.com/ClickHouse/ClickHouse/issues/12507). [#12646](https://github.com/ClickHouse/ClickHouse/pull/12646) ([Vitaly Baranov](https://github.com/vitlibar)).
* Better exception message in disk access storage. [#12625](https://github.com/ClickHouse/ClickHouse/pull/12625) ([alesapin](https://github.com/alesapin)).
* The function `groupArrayMoving*` was not working for distributed queries. Its result was calculated with an incorrect data type (without promotion to the largest type). The function `groupArrayMovingAvg` was returning an integer number that was inconsistent with the `avg` function. This fixes [#12568](https://github.com/ClickHouse/ClickHouse/issues/12568). [#12622](https://github.com/ClickHouse/ClickHouse/pull/12622) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed lack of aliases with function `any`. [#12593](https://github.com/ClickHouse/ClickHouse/pull/12593) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed a race condition in external dictionaries with cache layout which could lead to a server crash. [#12566](https://github.com/ClickHouse/ClickHouse/pull/12566) ([alesapin](https://github.com/alesapin)).
* Removed data for `Distributed` tables (blocks from async INSERTs) on `DROP TABLE`. [#12556](https://github.com/ClickHouse/ClickHouse/pull/12556) ([Azat Khuzhin](https://github.com/azat)).
* Fixed a bug which led to broken old parts after an `ALTER DELETE` query when `enable_mixed_granularity_parts=1`. Fixes [#12536](https://github.com/ClickHouse/ClickHouse/issues/12536). [#12543](https://github.com/ClickHouse/ClickHouse/pull/12543) ([alesapin](https://github.com/alesapin)).
* Better exception for function `in` with an invalid number of arguments. [#12529](https://github.com/ClickHouse/ClickHouse/pull/12529) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed a race condition in live view tables which could cause data duplication. [#12519](https://github.com/ClickHouse/ClickHouse/pull/12519) ([vzakaznikov](https://github.com/vzakaznikov)).
* Fixed a performance issue while reading from compact parts. [#12492](https://github.com/ClickHouse/ClickHouse/pull/12492) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed backwards compatibility in the binary format of `AggregateFunction(avg, ...)` values. This fixes [#12342](https://github.com/ClickHouse/ClickHouse/issues/12342). [#12486](https://github.com/ClickHouse/ClickHouse/pull/12486) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed parsing of `SETTINGS` after `FORMAT`. [#12480](https://github.com/ClickHouse/ClickHouse/pull/12480) ([Azat Khuzhin](https://github.com/azat)).
* Fixed a deadlock if `text_log` is enabled. [#12452](https://github.com/ClickHouse/ClickHouse/pull/12452) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed overflow when a very large `LIMIT` or `OFFSET` is specified. This fixes [#10470](https://github.com/ClickHouse/ClickHouse/issues/10470). This fixes [#11372](https://github.com/ClickHouse/ClickHouse/issues/11372). [#12427](https://github.com/ClickHouse/ClickHouse/pull/12427) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed possible segfault in `StorageMerge`. This fixes [#12054](https://github.com/ClickHouse/ClickHouse/issues/12054). [#12401](https://github.com/ClickHouse/ClickHouse/pull/12401) ([tavplubix](https://github.com/tavplubix)).
* Reverted the change introduced in [#11079](https://github.com/ClickHouse/ClickHouse/issues/11079) to resolve [#12098](https://github.com/ClickHouse/ClickHouse/issues/12098). [#12397](https://github.com/ClickHouse/ClickHouse/pull/12397) ([Mike](https://github.com/myrrc)).
* Added an additional check for arguments of bloom filter index. This fixes [#11408](https://github.com/ClickHouse/ClickHouse/issues/11408). [#12388](https://github.com/ClickHouse/ClickHouse/pull/12388) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Avoid an exception when a negative or floating-point constant is used in a WHERE condition for indexed tables. This fixes [#11905](https://github.com/ClickHouse/ClickHouse/issues/11905). [#12384](https://github.com/ClickHouse/ClickHouse/pull/12384) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Allowed to `CLEAR` a column even if there are dependent `DEFAULT` expressions. This fixes [#12333](https://github.com/ClickHouse/ClickHouse/issues/12333). [#12378](https://github.com/ClickHouse/ClickHouse/pull/12378) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed `TOTALS/ROLLUP/CUBE` for aggregate functions with `-State` and `Nullable` arguments. This fixes [#12163](https://github.com/ClickHouse/ClickHouse/issues/12163). [#12376](https://github.com/ClickHouse/ClickHouse/pull/12376) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed error messages and exit codes for `ALTER RENAME COLUMN` queries when `RENAME` is not allowed. Fixes [#12301](https://github.com/ClickHouse/ClickHouse/issues/12301) and [#12303](https://github.com/ClickHouse/ClickHouse/issues/12303). [#12335](https://github.com/ClickHouse/ClickHouse/pull/12335) ([alesapin](https://github.com/alesapin)).
* Fixed a very rare race condition in `ReplicatedMergeTreeQueue`. [#12315](https://github.com/ClickHouse/ClickHouse/pull/12315) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* When using codec `Delta` or `DoubleDelta` with non-fixed-width types, an exception with code `LOGICAL_ERROR` was returned instead of an exception with code `BAD_ARGUMENTS` (we ensure that exceptions with code logical error never happen). This fixes [#12110](https://github.com/ClickHouse/ClickHouse/issues/12110). [#12308](https://github.com/ClickHouse/ClickHouse/pull/12308) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed the order of columns in the `WITH FILL` modifier. Previously the order of columns of the `ORDER BY` statement wasn't respected. [#12306](https://github.com/ClickHouse/ClickHouse/pull/12306) ([Anton Popov](https://github.com/CurtizJ)).
* Avoid a "bad cast" exception when there is an expression that filters data by virtual columns (like `_table` in `Merge` tables) or by "index" columns in system tables (such as filtering by database name when querying from `system.tables`), and this expression returns a `Nullable` type. This fixes [#12166](https://github.com/ClickHouse/ClickHouse/issues/12166). [#12305](https://github.com/ClickHouse/ClickHouse/pull/12305) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed `TTL` after renaming a column on which the TTL expression depends. [#12304](https://github.com/ClickHouse/ClickHouse/pull/12304) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed SIGSEGV in the `Kafka` engine if there is a message with an error in the middle of the batch. [#12302](https://github.com/ClickHouse/ClickHouse/pull/12302) ([Azat Khuzhin](https://github.com/azat)).
* Fixed the situation when some threads might randomly hang for a few seconds during `DNS` cache updating. [#12296](https://github.com/ClickHouse/ClickHouse/pull/12296) ([tavplubix](https://github.com/tavplubix)).
* Fixed a typo in a setting name. [#12292](https://github.com/ClickHouse/ClickHouse/pull/12292) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Show an error after `TrieDictionary` failed to load. [#12290](https://github.com/ClickHouse/ClickHouse/pull/12290) ([Vitaly Baranov](https://github.com/vitlibar)).
* The function `arrayFill` worked incorrectly for empty arrays, which could lead to a crash. This fixes [#12263](https://github.com/ClickHouse/ClickHouse/issues/12263). [#12279](https://github.com/ClickHouse/ClickHouse/pull/12279) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Implemented conversions to the common type for `LowCardinality` types. This allows executing `UNION ALL` of tables with columns of `LowCardinality` and other columns. This fixes [#8212](https://github.com/ClickHouse/ClickHouse/issues/8212). This fixes [#4342](https://github.com/ClickHouse/ClickHouse/issues/4342). [#12275](https://github.com/ClickHouse/ClickHouse/pull/12275) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed the behaviour on reaching the redirect limit in a request to `S3` storage. [#12256](https://github.com/ClickHouse/ClickHouse/pull/12256) ([ianton-ru](https://github.com/ianton-ru)).
* Fixed the behaviour when, during multiple sequential inserts in `StorageFile`, the header for some special types was written more than once. This fixes [#6155](https://github.com/ClickHouse/ClickHouse/issues/6155). [#12197](https://github.com/ClickHouse/ClickHouse/pull/12197) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fixed logical functions for UInt8 values when they are not equal to 0 or 1. [#12196](https://github.com/ClickHouse/ClickHouse/pull/12196) ([Alexander Kazakov](https://github.com/Akazz)).
* Capped `max_memory_usage*` limits to the process resident memory. [#12182](https://github.com/ClickHouse/ClickHouse/pull/12182) ([Azat Khuzhin](https://github.com/azat)).
* Fixed the `dictGet` arguments check during `GROUP BY` injective functions elimination. [#12179](https://github.com/ClickHouse/ClickHouse/pull/12179) ([Azat Khuzhin](https://github.com/azat)).
* Fixed the behaviour when the `SummingMergeTree` engine sums up columns from the partition key. Added an exception in case of explicit definition of columns to sum which intersects with partition key columns. This fixes [#7867](https://github.com/ClickHouse/ClickHouse/issues/7867). [#12173](https://github.com/ClickHouse/ClickHouse/pull/12173) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Don't split the dictionary source's table name into schema and table name if the ODBC connection doesn't support schemas. [#12165](https://github.com/ClickHouse/ClickHouse/pull/12165) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fixed wrong logic in `ALTER DELETE` that led to deleting records when the condition evaluates to NULL. This fixes [#9088](https://github.com/ClickHouse/ClickHouse/issues/9088). This closes [#12106](https://github.com/ClickHouse/ClickHouse/issues/12106). [#12153](https://github.com/ClickHouse/ClickHouse/pull/12153) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed the transform of queries sent to external DBMSs (e.g. MySQL, ODBC) in the presence of aliases. This fixes [#12032](https://github.com/ClickHouse/ClickHouse/issues/12032). [#12151](https://github.com/ClickHouse/ClickHouse/pull/12151) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed bad code in the redundant ORDER BY optimization. The bug was introduced in [#10067](https://github.com/ClickHouse/ClickHouse/issues/10067). [#12148](https://github.com/ClickHouse/ClickHouse/pull/12148) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed potential overflow in integer division. This fixes [#12119](https://github.com/ClickHouse/ClickHouse/issues/12119). [#12140](https://github.com/ClickHouse/ClickHouse/pull/12140) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed a potential infinite loop in `greatCircleDistance`, `geoDistance`. This fixes [#12117](https://github.com/ClickHouse/ClickHouse/issues/12117). [#12137](https://github.com/ClickHouse/ClickHouse/pull/12137) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Normalized "pid" file handling. In previous versions the server might refuse to start if it was killed without proper shutdown and there was another process with the same pid as the previously run server. Also, the pid file could be removed on unsuccessful server startup even if another server was running. This fixes [#3501](https://github.com/ClickHouse/ClickHouse/issues/3501). [#12133](https://github.com/ClickHouse/ClickHouse/pull/12133) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed a bug which led to incorrect table metadata in ZooKeeper for `ReplicatedVersionedCollapsingMergeTree` tables. Fixes [#12093](https://github.com/ClickHouse/ClickHouse/issues/12093). [#12121](https://github.com/ClickHouse/ClickHouse/pull/12121) ([alesapin](https://github.com/alesapin)).
* Avoid the "There is no query" exception for materialized views with joins or with subqueries attached to system logs (system.query_log, metric_log, etc.) or to an engine=Buffer underlying table. [#12120](https://github.com/ClickHouse/ClickHouse/pull/12120) ([filimonov](https://github.com/filimonov)).
* Fixed handling of the dependency of a table with ENGINE=Dictionary on a dictionary. This fixes [#10994](https://github.com/ClickHouse/ClickHouse/issues/10994). This fixes [#10397](https://github.com/ClickHouse/ClickHouse/issues/10397). [#12116](https://github.com/ClickHouse/ClickHouse/pull/12116) ([Vitaly Baranov](https://github.com/vitlibar)).
* Format `Parquet` now properly works with `LowCardinality` and `LowCardinality(Nullable)` types. Fixes [#12086](https://github.com/ClickHouse/ClickHouse/issues/12086), [#8406](https://github.com/ClickHouse/ClickHouse/issues/8406). [#12108](https://github.com/ClickHouse/ClickHouse/pull/12108) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed performance for selects with `UNION` caused by a wrong limit for the total number of threads. Fixes [#12030](https://github.com/ClickHouse/ClickHouse/issues/12030). [#12103](https://github.com/ClickHouse/ClickHouse/pull/12103) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed segfault with `-StateResample` combinators. [#12092](https://github.com/ClickHouse/ClickHouse/pull/12092) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed empty `result_rows` and `result_bytes` metrics in `system.query_log` for selects. Fixes [#11595](https://github.com/ClickHouse/ClickHouse/issues/11595). [#12089](https://github.com/ClickHouse/ClickHouse/pull/12089) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed unnecessary limiting of the number of threads for selects from `VIEW`. Fixes [#11937](https://github.com/ClickHouse/ClickHouse/issues/11937). [#12085](https://github.com/ClickHouse/ClickHouse/pull/12085) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed SIGSEGV in StorageKafka on DROP TABLE. [#12075](https://github.com/ClickHouse/ClickHouse/pull/12075) ([Azat Khuzhin](https://github.com/azat)).
* Fixed a possible crash while using a wrong type for `PREWHERE`. Fixes [#12053](https://github.com/ClickHouse/ClickHouse/issues/12053), [#12060](https://github.com/ClickHouse/ClickHouse/issues/12060). [#12060](https://github.com/ClickHouse/ClickHouse/pull/12060) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed error `Cannot capture column` for higher-order functions with a `Tuple(LowCardinality)` argument. Fixes [#9766](https://github.com/ClickHouse/ClickHouse/issues/9766). [#12055](https://github.com/ClickHouse/ClickHouse/pull/12055) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed the constraints check if a constraint is a constant expression. This fixes [#11360](https://github.com/ClickHouse/ClickHouse/issues/11360). [#12042](https://github.com/ClickHouse/ClickHouse/pull/12042) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed wrong results and a potential crash when invoking function `if` with arguments of type `FixedString` with different sizes. This fixes [#11362](https://github.com/ClickHouse/ClickHouse/issues/11362). [#12021](https://github.com/ClickHouse/ClickHouse/pull/12021) ([alexey-milovidov](https://github.com/alexey-milovidov)).

#### Improvement

* Allowed setting the `JOIN` kind and type in a more standard way: `LEFT SEMI JOIN` instead of `SEMI LEFT JOIN`. For now both are correct. [#12520](https://github.com/ClickHouse/ClickHouse/pull/12520) ([Artem Zuikov](https://github.com/4ertus2)).
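  A sketch with hypothetical tables `t1` and `t2`:

  ```sql
  SELECT t1.id FROM t1 LEFT SEMI JOIN t2 ON t1.id = t2.id;  -- standard-style order
  SELECT t1.id FROM t1 SEMI LEFT JOIN t2 ON t1.id = t2.id;  -- older order, still accepted
  ```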
* Added `lifetime_rows`/`lifetime_bytes` for the `Buffer` engine. [#12421](https://github.com/ClickHouse/ClickHouse/pull/12421) ([Azat Khuzhin](https://github.com/azat)).
* Write the detailed exception message to the client instead of 'MySQL server has gone away'. [#12383](https://github.com/ClickHouse/ClickHouse/pull/12383) ([BohuTANG](https://github.com/BohuTANG)).
* Allowed changing the charset used for printing grid borders. Available charsets are UTF-8 and ASCII. The setting `output_format_pretty_grid_charset` enables this feature. [#12372](https://github.com/ClickHouse/ClickHouse/pull/12372) ([Sabyanin Maxim](https://github.com/s-mx)).
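  For illustration (assuming UTF-8 is the default value):

  ```sql
  SET output_format_pretty_grid_charset = 'ASCII';
  SELECT 1 AS x FORMAT PrettyCompact;  -- grid now drawn with ASCII characters
  ```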
* Supported MySQL `SELECT DATABASE()` [#9336](https://github.com/ClickHouse/ClickHouse/issues/9336). Also added a MySQL replacement query integration test. [#12314](https://github.com/ClickHouse/ClickHouse/pull/12314) ([BohuTANG](https://github.com/BohuTANG)).
* Added `KILL QUERY [connection_id]` for the MySQL client/driver to cancel a long query. Issue [#12038](https://github.com/ClickHouse/ClickHouse/issues/12038). [#12152](https://github.com/ClickHouse/ClickHouse/pull/12152) ([BohuTANG](https://github.com/BohuTANG)).
* Added support for `%g` (two-digit ISO year) and `%G` (four-digit ISO year) substitutions in the `formatDateTime` function. [#12136](https://github.com/ClickHouse/ClickHouse/pull/12136) ([vivarum](https://github.com/vivarum)).
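  Illustrative: the ISO year can differ from the calendar year at year boundaries.

  ```sql
  SELECT formatDateTime(toDate('2021-01-01'), '%G-%g');
  -- '2020-20': 2021-01-01 belongs to ISO week 53 of 2020
  ```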
* Added a `type` column to `system.disks`. [#12115](https://github.com/ClickHouse/ClickHouse/pull/12115) ([ianton-ru](https://github.com/ianton-ru)).
* Improved the `REVOKE` command: it now requires the grant/admin option only for the access being revoked. For example, executing `REVOKE ALL ON *.* FROM user1` no longer requires full access rights granted with the grant option. Added command `REVOKE ALL FROM user1`: it revokes all granted roles from `user1`. [#12083](https://github.com/ClickHouse/ClickHouse/pull/12083) ([Vitaly Baranov](https://github.com/vitlibar)).
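  A sketch with a hypothetical user `user1` and database `db1`:

  ```sql
  REVOKE SELECT ON db1.* FROM user1;  -- needs the grant option only for this right
  REVOKE ALL FROM user1;              -- new form: revokes all granted roles
  ```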
* Added replica priority for `load_balancing` (for manual prioritization of load balancing). [#11995](https://github.com/ClickHouse/ClickHouse/pull/11995) ([Azat Khuzhin](https://github.com/azat)).
* Switched paths in S3 metadata to relative, which allows handling S3 blobs more easily. [#11892](https://github.com/ClickHouse/ClickHouse/pull/11892) ([Vladimir Chebotarev](https://github.com/excitoon)).

#### Performance Improvement

* Improved performance of `ORDER BY` and `GROUP BY` by a prefix of the sorting key (enabled with the `optimize_aggregation_in_order` setting, disabled by default). [#11696](https://github.com/ClickHouse/ClickHouse/pull/11696) ([Anton Popov](https://github.com/CurtizJ)).
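  A hedged sketch, assuming a hypothetical table `events` whose sorting key starts with `user_id`:

  ```sql
  SET optimize_aggregation_in_order = 1;
  -- GROUP BY on a sorting-key prefix can now stream in order:
  SELECT user_id, count() FROM events GROUP BY user_id;
  ```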
* Removed injective functions inside `uniq*()` if `SET optimize_injective_functions_inside_uniq = 1`. [#12337](https://github.com/ClickHouse/ClickHouse/pull/12337) ([Ruslan Kamalov](https://github.com/kamalov-ruslan)).
* Fixed "Index not used for IN operator with literals", a performance regression introduced around v19.3. This fixes [#10574](https://github.com/ClickHouse/ClickHouse/issues/10574). [#12062](https://github.com/ClickHouse/ClickHouse/pull/12062) ([nvartolomei](https://github.com/nvartolomei)).
* Implemented single part uploads for DiskS3 (experimental feature). [#12026](https://github.com/ClickHouse/ClickHouse/pull/12026) ([Vladimir Chebotarev](https://github.com/excitoon)).

#### Experimental Feature

* Added a new in-memory format of parts in `MergeTree`-family tables, which stores data in memory. Parts are written to disk at the first merge. A part will be created in the in-memory format if its size in rows or bytes is below the thresholds `min_rows_for_compact_part` and `min_bytes_for_compact_part`. Optional write-ahead log support is also available; it is enabled by default and controlled by the setting `in_memory_parts_enable_wal`. [#10697](https://github.com/ClickHouse/ClickHouse/pull/10697) ([Anton Popov](https://github.com/CurtizJ)).

#### Build/Testing/Packaging Improvement

* Implemented an AST-based query fuzzing mode for clickhouse-client. See [this label](https://github.com/ClickHouse/ClickHouse/issues?q=label%3Afuzz+is%3Aissue) for the list of issues we recently found by fuzzing. Most of them were found by this tool, and a couple by SQLancer and `00746_sql_fuzzy.pl`. [#12111](https://github.com/ClickHouse/ClickHouse/pull/12111) ([Alexander Kuzmenkov](https://github.com/akuzm)).
* Added a new type of tests based on the TestFlows framework. [#12090](https://github.com/ClickHouse/ClickHouse/pull/12090) ([vzakaznikov](https://github.com/vzakaznikov)).
* Added an S3 HTTPS integration test. [#12412](https://github.com/ClickHouse/ClickHouse/pull/12412) ([Pavel Kovalenko](https://github.com/Jokser)).
* Log sanitizer trap messages from a separate thread. This will prevent a possible deadlock under thread sanitizer. [#12313](https://github.com/ClickHouse/ClickHouse/pull/12313) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Now functional and stress tests can run with an old version of the `clickhouse-test` script. [#12287](https://github.com/ClickHouse/ClickHouse/pull/12287) ([alesapin](https://github.com/alesapin)).
* Removed strange file creation during build in `orc`. [#12258](https://github.com/ClickHouse/ClickHouse/pull/12258) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Placed common docker-compose files into the integration docker container. [#12168](https://github.com/ClickHouse/ClickHouse/pull/12168) ([Ilya Yatsishin](https://github.com/qoega)).
* Fixed warnings from CodeQL. `CodeQL` is another static analyzer that we will use along with `clang-tidy` and `PVS-Studio`, which we use already. [#12138](https://github.com/ClickHouse/ClickHouse/pull/12138) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Minor CMake fixes for the UNBUNDLED build. [#12131](https://github.com/ClickHouse/ClickHouse/pull/12131) ([Matwey V. Kornilov](https://github.com/matwey)).
* Added a showcase of a minimal Docker image that does not use any Linux distribution. [#12126](https://github.com/ClickHouse/ClickHouse/pull/12126) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Perform an upgrade of system packages in the `clickhouse-server` docker image. [#12124](https://github.com/ClickHouse/ClickHouse/pull/12124) ([Ivan Blinkov](https://github.com/blinkov)).
* Added an `UNBUNDLED` flag to the `system.build_options` table. Moved skip lists for `clickhouse-test` to the ClickHouse repo. [#12107](https://github.com/ClickHouse/ClickHouse/pull/12107) ([alesapin](https://github.com/alesapin)).
* Regular check by the [Anchore Container Analysis](https://docs.anchore.com) security analysis tool that looks for [CVE](https://cve.mitre.org/) in the `clickhouse-server` Docker image. Also confirms that the `Dockerfile` is buildable. Runs daily on `master` and on pull requests to `Dockerfile`. [#12102](https://github.com/ClickHouse/ClickHouse/pull/12102) ([Ivan Blinkov](https://github.com/blinkov)).
* Daily check by the [GitHub CodeQL](https://securitylab.github.com/tools/codeql) security analysis tool that looks for [CWE](https://cwe.mitre.org/). [#12101](https://github.com/ClickHouse/ClickHouse/pull/12101) ([Ivan Blinkov](https://github.com/blinkov)).
* Install `ca-certificates` before the first `apt-get update` in Dockerfile. [#12095](https://github.com/ClickHouse/ClickHouse/pull/12095) ([Ivan Blinkov](https://github.com/blinkov)).
## ClickHouse release 20.5

### ClickHouse release v20.5.4.40-stable 2020-08-10

#### Bug Fix

* Fixed wrong index analysis with functions. It could lead to pruning wrong parts while reading from `MergeTree` tables. Fixes [#13060](https://github.com/ClickHouse/ClickHouse/issues/13060). Fixes [#12406](https://github.com/ClickHouse/ClickHouse/issues/12406). [#13081](https://github.com/ClickHouse/ClickHouse/pull/13081) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed unnecessary limiting of the number of threads for selects from a local replica. [#12840](https://github.com/ClickHouse/ClickHouse/pull/12840) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed a performance issue with large tuples, which are interpreted as functions in the `IN` section, i.e. the case when a user writes `WHERE x IN tuple(1, 2, ...)` instead of `WHERE x IN (1, 2, ...)` for some obscure reason. [#12700](https://github.com/ClickHouse/ClickHouse/pull/12700) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed memory tracking for `input_format_parallel_parsing` (by attaching the thread to a group). [#12672](https://github.com/ClickHouse/ClickHouse/pull/12672) ([Azat Khuzhin](https://github.com/azat)).
* Fixed bloom filter index with const expression. This fixes [#10572](https://github.com/ClickHouse/ClickHouse/issues/10572). [#12659](https://github.com/ClickHouse/ClickHouse/pull/12659) ([Winter Zhang](https://github.com/zhang2014)).
* Fixed `SIGSEGV` in `StorageKafka` when the broker is unavailable (and not only). [#12658](https://github.com/ClickHouse/ClickHouse/pull/12658) ([Azat Khuzhin](https://github.com/azat)).
* Added support for function `if` with `Array(UUID)` arguments. This fixes [#11066](https://github.com/ClickHouse/ClickHouse/issues/11066). [#12648](https://github.com/ClickHouse/ClickHouse/pull/12648) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed lack of aliases with function `any`. [#12593](https://github.com/ClickHouse/ClickHouse/pull/12593) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed a race condition in external dictionaries with cache layout which could lead to a server crash. [#12566](https://github.com/ClickHouse/ClickHouse/pull/12566) ([alesapin](https://github.com/alesapin)).
* Removed data for `Distributed` tables (blocks from async INSERTs) on `DROP TABLE`. [#12556](https://github.com/ClickHouse/ClickHouse/pull/12556) ([Azat Khuzhin](https://github.com/azat)).
* Fixed a bug which led to broken old parts after an `ALTER DELETE` query when `enable_mixed_granularity_parts=1`. Fixes [#12536](https://github.com/ClickHouse/ClickHouse/issues/12536). [#12543](https://github.com/ClickHouse/ClickHouse/pull/12543) ([alesapin](https://github.com/alesapin)).
* Better exception for function `in` with an invalid number of arguments. [#12529](https://github.com/ClickHouse/ClickHouse/pull/12529) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed a race condition in live view tables which could cause data duplication. [#12519](https://github.com/ClickHouse/ClickHouse/pull/12519) ([vzakaznikov](https://github.com/vzakaznikov)).
* Fixed a performance issue while reading from compact parts. [#12492](https://github.com/ClickHouse/ClickHouse/pull/12492) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed backwards compatibility in the binary format of `AggregateFunction(avg, ...)` values. This fixes [#12342](https://github.com/ClickHouse/ClickHouse/issues/12342). [#12486](https://github.com/ClickHouse/ClickHouse/pull/12486) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed a deadlock if `text_log` is enabled. [#12452](https://github.com/ClickHouse/ClickHouse/pull/12452) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed overflow when a very large LIMIT or OFFSET is specified. This fixes [#10470](https://github.com/ClickHouse/ClickHouse/issues/10470). This fixes [#11372](https://github.com/ClickHouse/ClickHouse/issues/11372). [#12427](https://github.com/ClickHouse/ClickHouse/pull/12427) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed possible segfault in StorageMerge. Closes [#12054](https://github.com/ClickHouse/ClickHouse/issues/12054). [#12401](https://github.com/ClickHouse/ClickHouse/pull/12401) ([tavplubix](https://github.com/tavplubix)).
* Reverted the change introduced in [#11079](https://github.com/ClickHouse/ClickHouse/issues/11079) to resolve [#12098](https://github.com/ClickHouse/ClickHouse/issues/12098). [#12397](https://github.com/ClickHouse/ClickHouse/pull/12397) ([Mike](https://github.com/myrrc)).
* Avoid an exception when a negative or floating-point constant is used in a WHERE condition for indexed tables. This fixes [#11905](https://github.com/ClickHouse/ClickHouse/issues/11905). [#12384](https://github.com/ClickHouse/ClickHouse/pull/12384) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Allowed to CLEAR a column even if there are dependent DEFAULT expressions. This fixes [#12333](https://github.com/ClickHouse/ClickHouse/issues/12333). [#12378](https://github.com/ClickHouse/ClickHouse/pull/12378) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed TOTALS/ROLLUP/CUBE for aggregate functions with `-State` and `Nullable` arguments. This fixes [#12163](https://github.com/ClickHouse/ClickHouse/issues/12163). [#12376](https://github.com/ClickHouse/ClickHouse/pull/12376) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed SIGSEGV in the `Kafka` engine if there is a message with an error in the middle of the batch. [#12302](https://github.com/ClickHouse/ClickHouse/pull/12302) ([Azat Khuzhin](https://github.com/azat)).
* Fixed the behaviour when the `SummingMergeTree` engine sums up columns from the partition key. Added an exception in case of explicit definition of columns to sum which intersects with partition key columns. This fixes [#7867](https://github.com/ClickHouse/ClickHouse/issues/7867). [#12173](https://github.com/ClickHouse/ClickHouse/pull/12173) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fixed the transform of queries sent to external DBMSs (e.g. MySQL, ODBC) in the presence of aliases. This fixes [#12032](https://github.com/ClickHouse/ClickHouse/issues/12032). [#12151](https://github.com/ClickHouse/ClickHouse/pull/12151) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed a bug which led to incorrect table metadata in ZooKeeper for `ReplicatedVersionedCollapsingMergeTree` tables. Fixes [#12093](https://github.com/ClickHouse/ClickHouse/issues/12093). [#12121](https://github.com/ClickHouse/ClickHouse/pull/12121) ([alesapin](https://github.com/alesapin)).
* Fixed unnecessary limiting of the number of threads for selects from `VIEW`. Fixes [#11937](https://github.com/ClickHouse/ClickHouse/issues/11937). [#12085](https://github.com/ClickHouse/ClickHouse/pull/12085) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed crash in JOIN with a LowCardinality type with `join_algorithm=partial_merge`. [#12035](https://github.com/ClickHouse/ClickHouse/pull/12035) ([Artem Zuikov](https://github.com/4ertus2)).
* Fixed wrong result for `if()` with NULLs in the condition. [#11807](https://github.com/ClickHouse/ClickHouse/pull/11807) ([Artem Zuikov](https://github.com/4ertus2)).

#### Performance Improvement

* Fixed "Index not used for IN operator with literals", a performance regression introduced around v19.3. This fixes [#10574](https://github.com/ClickHouse/ClickHouse/issues/10574). [#12062](https://github.com/ClickHouse/ClickHouse/pull/12062) ([nvartolomei](https://github.com/nvartolomei)).

#### Build/Testing/Packaging Improvement

* Install `ca-certificates` before the first `apt-get update` in Dockerfile. [#12095](https://github.com/ClickHouse/ClickHouse/pull/12095) ([Ivan Blinkov](https://github.com/blinkov)).
### ClickHouse release v20.5.2.7-stable 2020-07-02

#### Backward Incompatible Change

@@ -355,6 +534,80 @@
|
|||||||
|
|
||||||
## ClickHouse release v20.4
|
## ClickHouse release v20.4
|
||||||
|
|
||||||
|
### ClickHouse release v20.4.8.99-stable 2020-08-10
|
||||||
|
|
||||||
|
#### Bug Fix
|
||||||
|
|
||||||
|
* Fixed error in `parseDateTimeBestEffort` function when unix timestamp was passed as an argument. This fixes [#13362](https://github.com/ClickHouse/ClickHouse/issues/13362). [#13441](https://github.com/ClickHouse/ClickHouse/pull/13441) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed potentially low performance and slightly incorrect result for `uniqExact`, `topK`, `sumDistinct` and similar aggregate functions called on Float types with NaN values. It also triggered assert in debug build. This fixes [#12491](https://github.com/ClickHouse/ClickHouse/issues/12491). [#13254](https://github.com/ClickHouse/ClickHouse/pull/13254) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed function if with nullable constexpr as cond that is not literal NULL. Fixes [#12463](https://github.com/ClickHouse/ClickHouse/issues/12463). [#13226](https://github.com/ClickHouse/ClickHouse/pull/13226) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed assert in `arrayElement` function in case of array elements are Nullable and array subscript is also Nullable. This fixes [#12172](https://github.com/ClickHouse/ClickHouse/issues/12172). [#13224](https://github.com/ClickHouse/ClickHouse/pull/13224) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed wrong index analysis with functions. It could lead to pruning wrong parts, while reading from `MergeTree` tables. Fixes [#13060](https://github.com/ClickHouse/ClickHouse/issues/13060). Fixes [#12406](https://github.com/ClickHouse/ClickHouse/issues/12406). [#13081](https://github.com/ClickHouse/ClickHouse/pull/13081) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fixed unnecessary limiting for the number of threads for selects from local replica. [#12840](https://github.com/ClickHouse/ClickHouse/pull/12840) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fixed possible extra overflow row in data which could appear for queries `WITH TOTALS`. [#12747](https://github.com/ClickHouse/ClickHouse/pull/12747) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fixed performance with large tuples, which are interpreted as functions in `IN` section. The case when user write `WHERE x IN tuple(1, 2, ...)` instead of `WHERE x IN (1, 2, ...)` for some obscure reason. [#12700](https://github.com/ClickHouse/ClickHouse/pull/12700) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fixed memory tracking for `input_format_parallel_parsing` (by attaching thread to group). [#12672](https://github.com/ClickHouse/ClickHouse/pull/12672) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fixed [#12293](https://github.com/ClickHouse/ClickHouse/issues/12293) allow push predicate when subquery contains with clause. [#12663](https://github.com/ClickHouse/ClickHouse/pull/12663) ([Winter Zhang](https://github.com/zhang2014)).
|
||||||
|
* Fixed [#10572](https://github.com/ClickHouse/ClickHouse/issues/10572) fix bloom filter index with const expression. [#12659](https://github.com/ClickHouse/ClickHouse/pull/12659) ([Winter Zhang](https://github.com/zhang2014)).
|
||||||
|
* Fixed `SIGSEGV` in `StorageKafka` when broker is unavailable (and not only). [#12658](https://github.com/ClickHouse/ClickHouse/pull/12658) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Added support for function `if` with `Array(UUID)` arguments. This fixes [#11066](https://github.com/ClickHouse/ClickHouse/issues/11066). [#12648](https://github.com/ClickHouse/ClickHouse/pull/12648) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed race condition in external dictionaries with cache layout which can lead server crash. [#12566](https://github.com/ClickHouse/ClickHouse/pull/12566) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Removed data for Distributed tables (blocks from async INSERTs) on DROP TABLE. [#12556](https://github.com/ClickHouse/ClickHouse/pull/12556) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fixed bug which lead to broken old parts after `ALTER DELETE` query when `enable_mixed_granularity_parts=1`. Fixes [#12536](https://github.com/ClickHouse/ClickHouse/issues/12536). [#12543](https://github.com/ClickHouse/ClickHouse/pull/12543) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Better exception for function `in` with invalid number of arguments. [#12529](https://github.com/ClickHouse/ClickHouse/pull/12529) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fixed performance issue, while reading from compact parts. [#12492](https://github.com/ClickHouse/ClickHouse/pull/12492) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fixed crash in JOIN with dictionary when we are joining over expression of dictionary key: `t JOIN dict ON expr(dict.id) = t.id`. Disable dictionary join optimisation for this case. [#12458](https://github.com/ClickHouse/ClickHouse/pull/12458) ([Artem Zuikov](https://github.com/4ertus2)).
|
||||||
|
* Fixed possible segfault if StorageMerge. Closes [#12054](https://github.com/ClickHouse/ClickHouse/issues/12054). [#12401](https://github.com/ClickHouse/ClickHouse/pull/12401) ([tavplubix](https://github.com/tavplubix)).
|
||||||
|
* Fixed order of columns in `WITH FILL` modifier. Previously order of columns of `ORDER BY` statement wasn't respected. [#12306](https://github.com/ClickHouse/ClickHouse/pull/12306) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Avoid "bad cast" exception when there is an expression that filters data by virtual columns (like `_table` in `Merge` tables) or by "index" columns in system tables such as filtering by database name when querying from `system.tables`, and this expression returns `Nullable` type. This fixes [#12166](https://github.com/ClickHouse/ClickHouse/issues/12166). [#12305](https://github.com/ClickHouse/ClickHouse/pull/12305) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Show error after TrieDictionary failed to load. [#12290](https://github.com/ClickHouse/ClickHouse/pull/12290) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* The function `arrayFill` worked incorrectly for empty arrays that may lead to crash. This fixes [#12263](https://github.com/ClickHouse/ClickHouse/issues/12263). [#12279](https://github.com/ClickHouse/ClickHouse/pull/12279) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Implemented conversions to the common type for `LowCardinality` types. This allows to execute UNION ALL of tables with columns of LowCardinality and other columns. This fixes [#8212](https://github.com/ClickHouse/ClickHouse/issues/8212). This fixes [#4342](https://github.com/ClickHouse/ClickHouse/issues/4342). [#12275](https://github.com/ClickHouse/ClickHouse/pull/12275) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed the behaviour when during multiple sequential inserts in `StorageFile` header for some special types was written more than once. This fixed [#6155](https://github.com/ClickHouse/ClickHouse/issues/6155). [#12197](https://github.com/ClickHouse/ClickHouse/pull/12197) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
|
||||||
|
* Fixed logical functions for UInt8 values when they are not equal to 0 or 1. [#12196](https://github.com/ClickHouse/ClickHouse/pull/12196) ([Alexander Kazakov](https://github.com/Akazz)).
|
||||||
|
* Cap max_memory_usage* limits to the process resident memory. [#12182](https://github.com/ClickHouse/ClickHouse/pull/12182) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Fixed `dictGet` arguments check during GROUP BY injective functions elimination. [#12179](https://github.com/ClickHouse/ClickHouse/pull/12179) ([Azat Khuzhin](https://github.com/azat)).
|
||||||
|
* Don't split the dictionary source's table name into schema and table name itself if ODBC connection doesn't support schema. [#12165](https://github.com/ClickHouse/ClickHouse/pull/12165) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Fixed wrong logic in `ALTER DELETE` that leads to deleting of records when condition evaluates to NULL. This fixes [#9088](https://github.com/ClickHouse/ClickHouse/issues/9088). This closes [#12106](https://github.com/ClickHouse/ClickHouse/issues/12106). [#12153](https://github.com/ClickHouse/ClickHouse/pull/12153) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed transform of query to send to external DBMS (e.g. MySQL, ODBC) in presense of aliases. This fixes [#12032](https://github.com/ClickHouse/ClickHouse/issues/12032). [#12151](https://github.com/ClickHouse/ClickHouse/pull/12151) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed potential overflow in integer division. This fixes [#12119](https://github.com/ClickHouse/ClickHouse/issues/12119). [#12140](https://github.com/ClickHouse/ClickHouse/pull/12140) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed potential infinite loop in `greatCircleDistance`, `geoDistance`. This fixes [#12117](https://github.com/ClickHouse/ClickHouse/issues/12117). [#12137](https://github.com/ClickHouse/ClickHouse/pull/12137) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Normalize "pid" file handling. In previous versions the server may refuse to start if it was killed without proper shutdown and if there is another process that has the same pid as previously runned server. Also pid file may be removed in unsuccessful server startup even if there is another server running. This fixes [#3501](https://github.com/ClickHouse/ClickHouse/issues/3501). [#12133](https://github.com/ClickHouse/ClickHouse/pull/12133) ([alexey-milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Fixed handling dependency of table with ENGINE=Dictionary on dictionary. This fixes [#10994](https://github.com/ClickHouse/ClickHouse/issues/10994). This fixes [#10397](https://github.com/ClickHouse/ClickHouse/issues/10397). [#12116](https://github.com/ClickHouse/ClickHouse/pull/12116) ([Vitaly Baranov](https://github.com/vitlibar)).
|
||||||
|
* Fixed performance for selects with `UNION` caused by wrong limit for the total number of threads. Fixes [#12030](https://github.com/ClickHouse/ClickHouse/issues/12030). [#12103](https://github.com/ClickHouse/ClickHouse/pull/12103) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fixed segfault with `-StateResample` combinators. [#12092](https://github.com/ClickHouse/ClickHouse/pull/12092) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Fixed empty `result_rows` and `result_bytes` metrics in `system.quey_log` for selects. Fixes [#11595](https://github.com/ClickHouse/ClickHouse/issues/11595). [#12089](https://github.com/ClickHouse/ClickHouse/pull/12089) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fixed unnecessary limiting the number of threads for selects from `VIEW`. Fixes [#11937](https://github.com/ClickHouse/ClickHouse/issues/11937). [#12085](https://github.com/ClickHouse/ClickHouse/pull/12085) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Fixed possible crash while using wrong type for `PREWHERE`. Fixes [#12053](https://github.com/ClickHouse/ClickHouse/issues/12053), [#12060](https://github.com/ClickHouse/ClickHouse/issues/12060). [#12060](https://github.com/ClickHouse/ClickHouse/pull/12060) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed error `Expected single dictionary argument for function` for function `defaultValueOfArgumentType` with `LowCardinality` type. Fixes [#11808](https://github.com/ClickHouse/ClickHouse/issues/11808). [#12056](https://github.com/ClickHouse/ClickHouse/pull/12056) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed error `Cannot capture column` for higher-order functions with `Tuple(LowCardinality)` argument. Fixes [#9766](https://github.com/ClickHouse/ClickHouse/issues/9766). [#12055](https://github.com/ClickHouse/ClickHouse/pull/12055) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Parse tables metadata in parallel when loading database. This fixes slow server startup when there is a large number of tables. [#12045](https://github.com/ClickHouse/ClickHouse/pull/12045) ([tavplubix](https://github.com/tavplubix)).
* Make `topK` aggregate function return Enum for Enum types. This fixes [#3740](https://github.com/ClickHouse/ClickHouse/issues/3740). [#12043](https://github.com/ClickHouse/ClickHouse/pull/12043) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed constraints check if constraint is a constant expression. This fixes [#11360](https://github.com/ClickHouse/ClickHouse/issues/11360). [#12042](https://github.com/ClickHouse/ClickHouse/pull/12042) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed incorrect comparison of tuples with `Nullable` columns. Fixes [#11985](https://github.com/ClickHouse/ClickHouse/issues/11985). [#12039](https://github.com/ClickHouse/ClickHouse/pull/12039) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed calculation of access rights when `allow_introspection_functions=0`. [#12031](https://github.com/ClickHouse/ClickHouse/pull/12031) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fixed wrong result and potential crash when invoking function `if` with arguments of type `FixedString` with different sizes. This fixes [#11362](https://github.com/ClickHouse/ClickHouse/issues/11362). [#12021](https://github.com/ClickHouse/ClickHouse/pull/12021) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* A query with function `neighbor` as the only returned expression may return empty result if the function is called with offset `-9223372036854775808`. This fixes [#11367](https://github.com/ClickHouse/ClickHouse/issues/11367). [#12019](https://github.com/ClickHouse/ClickHouse/pull/12019) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed calculation of access rights when `allow_ddl=0`. [#12015](https://github.com/ClickHouse/ClickHouse/pull/12015) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fixed potential array size overflow in `generateRandom` that may lead to crash. This fixes [#11371](https://github.com/ClickHouse/ClickHouse/issues/11371). [#12013](https://github.com/ClickHouse/ClickHouse/pull/12013) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed potential floating point exception. This closes [#11378](https://github.com/ClickHouse/ClickHouse/issues/11378). [#12005](https://github.com/ClickHouse/ClickHouse/pull/12005) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed wrong setting name in log message at server startup. [#11997](https://github.com/ClickHouse/ClickHouse/pull/11997) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed `Query parameter was not set` in `Values` format. Fixes [#11918](https://github.com/ClickHouse/ClickHouse/issues/11918). [#11936](https://github.com/ClickHouse/ClickHouse/pull/11936) ([tavplubix](https://github.com/tavplubix)).
* Keep aliases for substitutions in query (parametrized queries). This fixes [#11914](https://github.com/ClickHouse/ClickHouse/issues/11914). [#11916](https://github.com/ClickHouse/ClickHouse/pull/11916) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed bug with no moves when changing storage policy from default one. [#11893](https://github.com/ClickHouse/ClickHouse/pull/11893) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Fixed potential floating point exception when parsing `DateTime64`. This fixes [#11374](https://github.com/ClickHouse/ClickHouse/issues/11374). [#11875](https://github.com/ClickHouse/ClickHouse/pull/11875) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed memory accounting via HTTP interface (can be significant with `wait_end_of_query=1`). [#11840](https://github.com/ClickHouse/ClickHouse/pull/11840) ([Azat Khuzhin](https://github.com/azat)).
* Parse metadata stored in ZooKeeper before checking for equality. [#11739](https://github.com/ClickHouse/ClickHouse/pull/11739) ([Azat Khuzhin](https://github.com/azat)).

#### Performance Improvement

* Index not used for IN operator with literals, performance regression introduced around v19.3. This fixes [#10574](https://github.com/ClickHouse/ClickHouse/issues/10574). [#12062](https://github.com/ClickHouse/ClickHouse/pull/12062) ([nvartolomei](https://github.com/nvartolomei)).

#### Build/Testing/Packaging Improvement

* Install `ca-certificates` before the first `apt-get update` in Dockerfile. [#12095](https://github.com/ClickHouse/ClickHouse/pull/12095) ([Ivan Blinkov](https://github.com/blinkov)).

### ClickHouse release v20.4.6.53-stable 2020-06-25

#### Bug Fix

@ -762,6 +1015,70 @@ No changes compared to v20.4.3.16-stable.

## ClickHouse release v20.3

### ClickHouse release v20.3.16.165-lts 2020-08-10

#### Bug Fix

* Fixed error in `parseDateTimeBestEffort` function when unix timestamp was passed as an argument. This fixes [#13362](https://github.com/ClickHouse/ClickHouse/issues/13362). [#13441](https://github.com/ClickHouse/ClickHouse/pull/13441) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed potentially low performance and slightly incorrect result for `uniqExact`, `topK`, `sumDistinct` and similar aggregate functions called on Float types with `NaN` values. It also triggered assert in debug build. This fixes [#12491](https://github.com/ClickHouse/ClickHouse/issues/12491). [#13254](https://github.com/ClickHouse/ClickHouse/pull/13254) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed function `if` with a nullable constexpr as its condition that is not a literal `NULL`. Fixes [#12463](https://github.com/ClickHouse/ClickHouse/issues/12463). [#13226](https://github.com/ClickHouse/ClickHouse/pull/13226) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed assert in `arrayElement` function when array elements are `Nullable` and the array subscript is also `Nullable`. This fixes [#12172](https://github.com/ClickHouse/ClickHouse/issues/12172). [#13224](https://github.com/ClickHouse/ClickHouse/pull/13224) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed unnecessary limiting of the number of threads for selects from local replica. [#12840](https://github.com/ClickHouse/ClickHouse/pull/12840) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed possible extra overflow row in data which could appear for queries `WITH TOTALS`. [#12747](https://github.com/ClickHouse/ClickHouse/pull/12747) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed performance with large tuples, which are interpreted as functions in the `IN` section: the case when a user writes `WHERE x IN tuple(1, 2, ...)` instead of `WHERE x IN (1, 2, ...)` for some obscure reason. [#12700](https://github.com/ClickHouse/ClickHouse/pull/12700) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed memory tracking for `input_format_parallel_parsing` (by attaching the thread to the group). [#12672](https://github.com/ClickHouse/ClickHouse/pull/12672) ([Azat Khuzhin](https://github.com/azat)).
* Fixed [#12293](https://github.com/ClickHouse/ClickHouse/issues/12293): allow pushing the predicate when a subquery contains a `WITH` clause. [#12663](https://github.com/ClickHouse/ClickHouse/pull/12663) ([Winter Zhang](https://github.com/zhang2014)).
* Fixed [#10572](https://github.com/ClickHouse/ClickHouse/issues/10572): bloom filter index with const expressions. [#12659](https://github.com/ClickHouse/ClickHouse/pull/12659) ([Winter Zhang](https://github.com/zhang2014)).
* Fixed SIGSEGV in `StorageKafka` when the broker is unavailable (and not only then). [#12658](https://github.com/ClickHouse/ClickHouse/pull/12658) ([Azat Khuzhin](https://github.com/azat)).
* Fixed race condition in external dictionaries with cache layout which can lead to a server crash. [#12566](https://github.com/ClickHouse/ClickHouse/pull/12566) ([alesapin](https://github.com/alesapin)).
* Fixed bug which led to broken old parts after `ALTER DELETE` query when `enable_mixed_granularity_parts=1`. Fixes [#12536](https://github.com/ClickHouse/ClickHouse/issues/12536). [#12543](https://github.com/ClickHouse/ClickHouse/pull/12543) ([alesapin](https://github.com/alesapin)).
* Better exception for function `in` with an invalid number of arguments. [#12529](https://github.com/ClickHouse/ClickHouse/pull/12529) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed performance issue while reading from compact parts. [#12492](https://github.com/ClickHouse/ClickHouse/pull/12492) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed the deadlock if `text_log` is enabled. [#12452](https://github.com/ClickHouse/ClickHouse/pull/12452) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed possible segfault in `StorageMerge`. Closes [#12054](https://github.com/ClickHouse/ClickHouse/issues/12054). [#12401](https://github.com/ClickHouse/ClickHouse/pull/12401) ([tavplubix](https://github.com/tavplubix)).
* Fixed `TOTALS/ROLLUP/CUBE` for aggregate functions with `-State` and `Nullable` arguments. This fixes [#12163](https://github.com/ClickHouse/ClickHouse/issues/12163). [#12376](https://github.com/ClickHouse/ClickHouse/pull/12376) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed order of columns in `WITH FILL` modifier. Previously the order of columns of the `ORDER BY` statement wasn't respected. [#12306](https://github.com/ClickHouse/ClickHouse/pull/12306) ([Anton Popov](https://github.com/CurtizJ)).
* Avoid "bad cast" exception when there is an expression that filters data by virtual columns (like `_table` in `Merge` tables) or by "index" columns in system tables (such as filtering by database name when querying from `system.tables`), and this expression returns `Nullable` type. This fixes [#12166](https://github.com/ClickHouse/ClickHouse/issues/12166). [#12305](https://github.com/ClickHouse/ClickHouse/pull/12305) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Show error after `TrieDictionary` failed to load. [#12290](https://github.com/ClickHouse/ClickHouse/pull/12290) ([Vitaly Baranov](https://github.com/vitlibar)).
* The function `arrayFill` worked incorrectly for empty arrays, which could lead to a crash. This fixes [#12263](https://github.com/ClickHouse/ClickHouse/issues/12263). [#12279](https://github.com/ClickHouse/ClickHouse/pull/12279) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Implement conversions to the common type for `LowCardinality` types. This allows executing `UNION ALL` of tables with columns of `LowCardinality` and other columns. This fixes [#8212](https://github.com/ClickHouse/ClickHouse/issues/8212). This fixes [#4342](https://github.com/ClickHouse/ClickHouse/issues/4342). [#12275](https://github.com/ClickHouse/ClickHouse/pull/12275) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed the behaviour when, during multiple sequential inserts in `StorageFile`, the header for some special types was written more than once. This fixes [#6155](https://github.com/ClickHouse/ClickHouse/issues/6155). [#12197](https://github.com/ClickHouse/ClickHouse/pull/12197) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fixed logical functions for UInt8 values when they are not equal to 0 or 1. [#12196](https://github.com/ClickHouse/ClickHouse/pull/12196) ([Alexander Kazakov](https://github.com/Akazz)).
* Fixed `dictGet` arguments check during `GROUP BY` injective functions elimination. [#12179](https://github.com/ClickHouse/ClickHouse/pull/12179) ([Azat Khuzhin](https://github.com/azat)).
* Fixed wrong logic in `ALTER DELETE` that led to deleting records when the condition evaluates to `NULL`. This fixes [#9088](https://github.com/ClickHouse/ClickHouse/issues/9088). This closes [#12106](https://github.com/ClickHouse/ClickHouse/issues/12106). [#12153](https://github.com/ClickHouse/ClickHouse/pull/12153) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed transform of query to send to external DBMS (e.g. MySQL, ODBC) in the presence of aliases. This fixes [#12032](https://github.com/ClickHouse/ClickHouse/issues/12032). [#12151](https://github.com/ClickHouse/ClickHouse/pull/12151) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed potential overflow in integer division. This fixes [#12119](https://github.com/ClickHouse/ClickHouse/issues/12119). [#12140](https://github.com/ClickHouse/ClickHouse/pull/12140) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed potential infinite loop in `greatCircleDistance`, `geoDistance`. This fixes [#12117](https://github.com/ClickHouse/ClickHouse/issues/12117). [#12137](https://github.com/ClickHouse/ClickHouse/pull/12137) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Avoid `There is no query` exception for materialized views with joins or with subqueries attached to system logs (`system.query_log`, `metric_log`, etc.) or to engine=Buffer underlying table. [#12120](https://github.com/ClickHouse/ClickHouse/pull/12120) ([filimonov](https://github.com/filimonov)).
* Fixed performance for selects with `UNION` caused by wrong limit for the total number of threads. Fixes [#12030](https://github.com/ClickHouse/ClickHouse/issues/12030). [#12103](https://github.com/ClickHouse/ClickHouse/pull/12103) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed segfault with `-StateResample` combinators. [#12092](https://github.com/ClickHouse/ClickHouse/pull/12092) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed unnecessary limiting of the number of threads for selects from `VIEW`. Fixes [#11937](https://github.com/ClickHouse/ClickHouse/issues/11937). [#12085](https://github.com/ClickHouse/ClickHouse/pull/12085) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed possible crash while using wrong type for `PREWHERE`. Fixes [#12053](https://github.com/ClickHouse/ClickHouse/issues/12053), [#12060](https://github.com/ClickHouse/ClickHouse/issues/12060). [#12060](https://github.com/ClickHouse/ClickHouse/pull/12060) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed error `Expected single dictionary argument for function` for function `defaultValueOfArgumentType` with `LowCardinality` type. Fixes [#11808](https://github.com/ClickHouse/ClickHouse/issues/11808). [#12056](https://github.com/ClickHouse/ClickHouse/pull/12056) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed error `Cannot capture column` for higher-order functions with `Tuple(LowCardinality)` argument. Fixes [#9766](https://github.com/ClickHouse/ClickHouse/issues/9766). [#12055](https://github.com/ClickHouse/ClickHouse/pull/12055) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Parse tables metadata in parallel when loading database. This fixes slow server startup when there is a large number of tables. [#12045](https://github.com/ClickHouse/ClickHouse/pull/12045) ([tavplubix](https://github.com/tavplubix)).
* Make `topK` aggregate function return Enum for Enum types. This fixes [#3740](https://github.com/ClickHouse/ClickHouse/issues/3740). [#12043](https://github.com/ClickHouse/ClickHouse/pull/12043) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed constraints check if constraint is a constant expression. This fixes [#11360](https://github.com/ClickHouse/ClickHouse/issues/11360). [#12042](https://github.com/ClickHouse/ClickHouse/pull/12042) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed incorrect comparison of tuples with `Nullable` columns. Fixes [#11985](https://github.com/ClickHouse/ClickHouse/issues/11985). [#12039](https://github.com/ClickHouse/ClickHouse/pull/12039) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed wrong result and potential crash when invoking function `if` with arguments of type `FixedString` with different sizes. This fixes [#11362](https://github.com/ClickHouse/ClickHouse/issues/11362). [#12021](https://github.com/ClickHouse/ClickHouse/pull/12021) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* A query with function `neighbor` as the only returned expression may return empty result if the function is called with offset `-9223372036854775808`. This fixes [#11367](https://github.com/ClickHouse/ClickHouse/issues/11367). [#12019](https://github.com/ClickHouse/ClickHouse/pull/12019) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed potential array size overflow in `generateRandom` that may lead to crash. This fixes [#11371](https://github.com/ClickHouse/ClickHouse/issues/11371). [#12013](https://github.com/ClickHouse/ClickHouse/pull/12013) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed potential floating point exception. This closes [#11378](https://github.com/ClickHouse/ClickHouse/issues/11378). [#12005](https://github.com/ClickHouse/ClickHouse/pull/12005) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed wrong setting name in log message at server startup. [#11997](https://github.com/ClickHouse/ClickHouse/pull/11997) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed `Query parameter was not set` in `Values` format. Fixes [#11918](https://github.com/ClickHouse/ClickHouse/issues/11918). [#11936](https://github.com/ClickHouse/ClickHouse/pull/11936) ([tavplubix](https://github.com/tavplubix)).
* Keep aliases for substitutions in query (parametrized queries). This fixes [#11914](https://github.com/ClickHouse/ClickHouse/issues/11914). [#11916](https://github.com/ClickHouse/ClickHouse/pull/11916) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed potential floating point exception when parsing `DateTime64`. This fixes [#11374](https://github.com/ClickHouse/ClickHouse/issues/11374). [#11875](https://github.com/ClickHouse/ClickHouse/pull/11875) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed memory accounting via `HTTP` interface (can be significant with `wait_end_of_query=1`). [#11840](https://github.com/ClickHouse/ClickHouse/pull/11840) ([Azat Khuzhin](https://github.com/azat)).
* Fixed wrong result for `if()` with NULLs in condition. [#11807](https://github.com/ClickHouse/ClickHouse/pull/11807) ([Artem Zuikov](https://github.com/4ertus2)).
* Parse metadata stored in ZooKeeper before checking for equality. [#11739](https://github.com/ClickHouse/ClickHouse/pull/11739) ([Azat Khuzhin](https://github.com/azat)).
* Fixed `LIMIT n WITH TIES` usage together with `ORDER BY` statement, which contains aliases. [#11689](https://github.com/ClickHouse/ClickHouse/pull/11689) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed potential read of uninitialized memory in cache dictionary. [#10834](https://github.com/ClickHouse/ClickHouse/pull/10834) ([alexey-milovidov](https://github.com/alexey-milovidov)).

#### Performance Improvement

* Index not used for IN operator with literals, performance regression introduced around v19.3. This fixes [#10574](https://github.com/ClickHouse/ClickHouse/issues/10574). [#12062](https://github.com/ClickHouse/ClickHouse/pull/12062) ([nvartolomei](https://github.com/nvartolomei)).

### ClickHouse release v20.3.12.112-lts 2020-06-25

#### Bug Fix

@ -142,6 +142,27 @@ endif ()
# Make sure the final executable has symbols exported
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic")

find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy")
if (OBJCOPY_PATH)
message(STATUS "Using objcopy: ${OBJCOPY_PATH}.")

if (ARCH_AMD64)
set(OBJCOPY_ARCH_OPTIONS -O elf64-x86-64 -B i386)
elseif (ARCH_AARCH64)
set(OBJCOPY_ARCH_OPTIONS -O elf64-aarch64 -B aarch64)
endif ()
else ()
message(FATAL_ERROR "Cannot find objcopy.")
endif ()

if (OS_DARWIN)
set(WHOLE_ARCHIVE -all_load)
set(NO_WHOLE_ARCHIVE -noall_load)
else ()
set(WHOLE_ARCHIVE --whole-archive)
set(NO_WHOLE_ARCHIVE --no-whole-archive)
endif ()

option (ADD_GDB_INDEX_FOR_GOLD "Set to add .gdb-index to resulting binaries for gold linker. NOOP if lld is used." 0)
if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
if (LINKER_NAME STREQUAL "lld")
@ -176,10 +197,7 @@ if(NOT DISABLE_CPU_OPTIMIZE)
include(cmake/cpu_features.cmake)
endif()

if(NOT COMPILER_CLANG) # clang: error: the clang compiler does not support '-march=native'
option(ARCH_NATIVE "Enable -march=native compiler flag" 0)
option(ARCH_NATIVE "Enable -march=native compiler flag" ${ARCH_ARM})
endif()

if (ARCH_NATIVE)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
endif ()
@ -350,7 +368,6 @@ include (cmake/find/icu.cmake)
include (cmake/find/zlib.cmake)
include (cmake/find/zstd.cmake)
include (cmake/find/ltdl.cmake) # for odbc
include (cmake/find/termcap.cmake)
# openssl, zlib before poco
include (cmake/find/sparsehash.cmake)
include (cmake/find/re2.cmake)
@ -10,10 +10,11 @@ ClickHouse is an open-source column-oriented database management system that all
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-d2zxkf9e-XyxDa_ucfPxzuH4SJIm~Ng) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
* [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
* [Yandex.Messenger channel](https://yandex.ru/chat/#/join/20e380d9-c7be-4123-ab06-e95fb946975e) shares announcements and useful links in Russian.
* [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
* You can also [fill this form](https://clickhouse.tech/#meet) to meet Yandex ClickHouse team in person.

## Upcoming Events

* [ClickHouse at ByteDance (in Chinese)](https://mp.weixin.qq.com/s/Em-HjPylO8D7WPui4RREAQ) on July 31, 2020.
* [ClickHouse at ByteDance (in Chinese)](https://mp.weixin.qq.com/s/Em-HjPylO8D7WPui4RREAQ) on August 14, 2020.
@ -7,6 +7,7 @@ add_subdirectory (daemon)
add_subdirectory (loggers)
add_subdirectory (pcg-random)
add_subdirectory (widechar_width)
add_subdirectory (readpassphrase)

if (USE_MYSQL)
add_subdirectory (mysqlxx)
@ -17,6 +17,7 @@ set (SRCS
sleep.cpp
terminalColors.cpp
errnoToString.cpp
getResource.cpp
)

if (ENABLE_REPLXX)
@ -3,11 +3,9 @@
#include <cctz/civil_time.h>
#include <cctz/time_zone.h>
#include <cctz/zone_info_source.h>
#include <common/unaligned.h>
#include <common/getResource.h>
#include <Poco/Exception.h>

#include <dlfcn.h>

#include <algorithm>
#include <cassert>
#include <chrono>
@ -213,19 +211,9 @@ namespace cctz_extension
const std::string & name,
const std::function<std::unique_ptr<cctz::ZoneInfoSource>(const std::string & name)> & fallback)
{
std::string name_replaced = name;
std::replace(name_replaced.begin(), name_replaced.end(), '/', '_');
std::replace(name_replaced.begin(), name_replaced.end(), '-', '_');

/// These are the names that are generated by "ld -r -b binary"
std::string symbol_name_data = "_binary_" + name_replaced + "_start";
std::string symbol_name_size = "_binary_" + name_replaced + "_size";

const void * sym_data = dlsym(RTLD_DEFAULT, symbol_name_data.c_str());
const void * sym_size = dlsym(RTLD_DEFAULT, symbol_name_size.c_str());

if (sym_data && sym_size)
    return std::make_unique<Source>(static_cast<const char *>(sym_data), unalignedLoad<size_t>(&sym_size));

std::string_view resource = getResource(name);
if (!resource.empty())
    return std::make_unique<Source>(resource.data(), resource.size());

return fallback(name);
}
@ -404,7 +404,7 @@ public:
a date at start of january) In this case one can get 53 for the
first week of next year. This flag ensures that the week is
relevant for the given year. Note that this flag is only
releveant if WeekModeFlag::JANUARY is not set.
relevant if WeekModeFlag::JANUARY is not set.

If set Week is in range 1-53.

@ -1,5 +1,6 @@
#pragma once

#include <cassert>
#include <string>
#include <vector>
#include <functional>
@ -27,7 +28,11 @@ struct StringRef
size_t size = 0;

template <typename CharT, typename = std::enable_if_t<sizeof(CharT) == 1>>
constexpr StringRef(const CharT * data_, size_t size_) : data(reinterpret_cast<const char *>(data_)), size(size_) {}
constexpr StringRef(const CharT * data_, size_t size_) : data(reinterpret_cast<const char *>(data_)), size(size_)
{
    /// Sanity check for overflowed values.
    assert(size < 0x8000000000000000ULL);
}

StringRef(const std::string & s) : data(s.data()), size(s.size()) {}
constexpr explicit StringRef(const std::string_view & s) : data(s.data()), size(s.size()) {}
@ -308,7 +308,7 @@ inline void splitInto(To & to, const std::string & what, bool token_compress = f
const char * delimiter_or_end = find_first_symbols<symbols...>(pos, end);

if (!token_compress || pos < delimiter_or_end)
to.emplace_back(pos, delimiter_or_end);
to.emplace_back(pos, delimiter_or_end - pos);

if (delimiter_or_end < end)
pos = delimiter_or_end + 1;
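The second argument of `emplace_back` above is now a length rather than an end pointer. A `(begin, end)` pair only works for element types with an iterator-pair constructor such as `std::string`; a `(pointer, length)` type like the `StringRef` above (whose constructor gains a size sanity check in this same commit) needs the subtraction. A minimal sketch, assuming a hypothetical comma splitter rather than the real `splitInto`:

```cpp
#include <string>
#include <string_view>
#include <vector>

/// Hypothetical helper for illustration only (not the library's splitInto):
/// emplacing with (pointer, length) works for std::string, std::string_view
/// and StringRef-like types alike.
template <typename To>
void split_on_comma(To & to, const std::string & what)
{
    const char * pos = what.data();
    const char * end = pos + what.size();
    while (pos <= end)
    {
        const char * delimiter_or_end = pos;
        while (delimiter_or_end < end && *delimiter_or_end != ',')
            ++delimiter_or_end;
        to.emplace_back(pos, delimiter_or_end - pos); /// length, not the end pointer
        pos = delimiter_or_end + 1;
    }
}

int main()
{
    std::string s = "a,bc,d";
    std::vector<std::string_view> parts;
    split_on_comma(parts, s);
    return parts.size() == 3 ? 0 : 1; /// yields {"a", "bc", "d"}
}
```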
24
base/common/getResource.cpp
Normal file
@ -0,0 +1,24 @@
#include "getResource.h"
#include "unaligned.h"
#include <dlfcn.h>
#include <string>


std::string_view getResource(std::string_view name)
{
    std::string name_replaced(name);
    std::replace(name_replaced.begin(), name_replaced.end(), '/', '_');
    std::replace(name_replaced.begin(), name_replaced.end(), '-', '_');
    std::replace(name_replaced.begin(), name_replaced.end(), '.', '_');

    /// These are the names that are generated by "ld -r -b binary"
    std::string symbol_name_data = "_binary_" + name_replaced + "_start";
    std::string symbol_name_size = "_binary_" + name_replaced + "_size";

    const void * sym_data = dlsym(RTLD_DEFAULT, symbol_name_data.c_str());
    const void * sym_size = dlsym(RTLD_DEFAULT, symbol_name_size.c_str());

    if (sym_data && sym_size)
        return { static_cast<const char *>(sym_data), unalignedLoad<size_t>(&sym_size) };
    return {};
}
7
base/common/getResource.h
Normal file
@ -0,0 +1,7 @@
#pragma once

#include <string_view>

/// Get resource from binary if exists. Otherwise return empty string view.
/// Resources are data that is embedded into executable at link time.
std::string_view getResource(std::string_view name);
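For context, a sketch of where such `_binary_*` symbols come from; the file name and build commands below are illustrative assumptions, not part of this commit. `ld -r -b binary` derives the symbol names from the input file name, which is why `getResource` normalizes `/`, `-` and `.` to `_`:

```cpp
// Assumed build steps (illustrative only):
//   ld -r -b binary -o tzdata.o Europe_Moscow
//   c++ -o demo demo.cpp tzdata.o
#include <cstddef>
#include <iostream>

extern "C" const char _binary_Europe_Moscow_start[]; /// hypothetical: name derived from the input file
extern "C" const char _binary_Europe_Moscow_size[];  /// absolute symbol; its address encodes the size

int main()
{
    size_t size = reinterpret_cast<size_t>(_binary_Europe_Moscow_size);
    std::cout << "embedded " << size << " bytes at "
              << static_cast<const void *>(_binary_Europe_Moscow_start) << '\n';
}
```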
@ -1,3 +1,4 @@
# This file is generated automatically, do not edit. See 'ya.make.in' and use 'utils/generate-ya-make' to regenerate it.
LIBRARY()

ADDINCL(
@ -10,6 +11,7 @@ CFLAGS (GLOBAL -DARCADIA_BUILD)
CFLAGS (GLOBAL -DUSE_CPUID=1)
CFLAGS (GLOBAL -DUSE_JEMALLOC=0)
CFLAGS (GLOBAL -DUSE_RAPIDJSON=1)
CFLAGS (GLOBAL -DUSE_SSL=1)

IF (OS_DARWIN)
CFLAGS (GLOBAL -DOS_DARWIN)
@ -23,6 +25,7 @@ PEERDIR(
contrib/libs/cctz/src
contrib/libs/cxxsupp/libcxx-filesystem
contrib/libs/poco/Net
contrib/libs/poco/NetSSL_OpenSSL
contrib/libs/poco/Util
contrib/libs/fmt
contrib/restricted/boost
@ -35,8 +38,10 @@ SRCS(
DateLUT.cpp
DateLUTImpl.cpp
demangle.cpp
errnoToString.cpp
getFQDNOrHostName.cpp
getMemoryAmount.cpp
getResource.cpp
getThreadId.cpp
JSON.cpp
LineReader.cpp
@ -47,7 +52,6 @@ SRCS(
shift10.cpp
sleep.cpp
terminalColors.cpp
errnoToString.cpp
)

END()
36
base/common/ya.make.in
Normal file
@ -0,0 +1,36 @@
LIBRARY()

ADDINCL(
    GLOBAL clickhouse/base
    GLOBAL contrib/libs/cctz/include
)

CFLAGS (GLOBAL -DARCADIA_BUILD)

CFLAGS (GLOBAL -DUSE_CPUID=1)
CFLAGS (GLOBAL -DUSE_JEMALLOC=0)
CFLAGS (GLOBAL -DUSE_RAPIDJSON=1)

IF (OS_DARWIN)
    CFLAGS (GLOBAL -DOS_DARWIN)
ELSEIF (OS_FREEBSD)
    CFLAGS (GLOBAL -DOS_FREEBSD)
ELSEIF (OS_LINUX)
    CFLAGS (GLOBAL -DOS_LINUX)
ENDIF ()

PEERDIR(
    contrib/libs/cctz/src
    contrib/libs/cxxsupp/libcxx-filesystem
    contrib/libs/poco/Net
    contrib/libs/poco/Util
    contrib/libs/fmt
    contrib/restricted/boost
    contrib/restricted/cityhash-1.0.2
)

SRCS(
    <? find . -name '*.cpp' | grep -v -F tests/ | grep -v -F Replxx | grep -v -F Readline | sed 's/^\.\// /' | sort ?>
)

END()
@ -51,6 +51,7 @@
#include <Common/getMultipleKeysFromConfig.h>
#include <Common/ClickHouseRevision.h>
#include <Common/Config/ConfigProcessor.h>
#include <Common/MemorySanitizer.h>
#include <Common/SymbolIndex.h>

#if !defined(ARCADIA_BUILD)
@ -76,6 +77,15 @@ static void call_default_signal_handler(int sig)
raise(sig);
}

const char * msan_strsignal(int sig)
{
    // Apparently strsignal is not instrumented by MemorySanitizer, so we
    // have to unpoison it to avoid msan reports inside fmt library when we
    // print it.
    const char * signal_name = strsignal(sig);
    __msan_unpoison_string(signal_name);
    return signal_name;
}

static constexpr size_t max_query_id_size = 127;
@ -280,12 +290,14 @@ private:
if (query_id.empty())
{
    LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (no query) Received signal {} ({})",
        VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, strsignal(sig), sig);
    LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (no query) Received signal {} ({})",
        VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info,
        thread_num, msan_strsignal(sig), sig);
}
else
{
    LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (query_id: {}) Received signal {} ({})",
        VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info, thread_num, query_id, strsignal(sig), sig);
    LOG_FATAL(log, "(version {}{}, {}) (from thread {}) (query_id: {}) Received signal {} ({})",
        VERSION_STRING, VERSION_OFFICIAL, daemon.build_id_info,
        thread_num, query_id, msan_strsignal(sig), sig);
}

String error_message;
@ -447,6 +459,11 @@ BaseDaemon::~BaseDaemon()
{
    writeSignalIDtoSignalPipe(SignalListener::StopThread);
    signal_listener_thread.join();
    /// Reset signals to SIG_DFL to avoid trying to write to the signal_pipe that will be closed after.
    for (int sig : handled_signals)
    {
        signal(sig, SIG_DFL);
    }
    signal_pipe.close();
}
@ -701,7 +718,7 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
/// Setup signal handlers.
auto add_signal_handler =
[](const std::vector<int> & signals, signal_function handler)
[this](const std::vector<int> & signals, signal_function handler)
{
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
@ -725,6 +742,8 @@ void BaseDaemon::initializeTerminationAndSignalProcessing()
for (auto signal : signals)
    if (sigaction(signal, &sa, nullptr))
        throw Poco::Exception("Cannot set signal handler.");

std::copy(signals.begin(), signals.end(), std::back_inserter(handled_signals));
}
};
@ -833,13 +852,13 @@ void BaseDaemon::handleSignal(int signal_id)
onInterruptSignals(signal_id);
}
else
throw DB::Exception(std::string("Unsupported signal: ") + strsignal(signal_id), 0);
throw DB::Exception(std::string("Unsupported signal: ") + msan_strsignal(signal_id), 0);
}

void BaseDaemon::onInterruptSignals(int signal_id)
{
is_cancelled = true;
LOG_INFO(&logger(), "Received termination signal ({})", strsignal(signal_id));
LOG_INFO(&logger(), "Received termination signal ({})", msan_strsignal(signal_id));

if (sigint_signals_counter >= 2)
{
@ -29,7 +29,7 @@
namespace Poco { class TaskManager; }


/// \brief Base class for applications that can run as deamons.
/// \brief Base class for applications that can run as daemons.
///
/// \code
/// # Some possible command line options:
@ -192,6 +192,8 @@ protected:
Poco::Util::AbstractConfiguration * last_configuration = nullptr;

String build_id_info;

std::vector<int> handled_signals;
};
@ -46,7 +46,7 @@ void setExtras()
sentry_set_extra("version_patch", sentry_value_new_int32(VERSION_PATCH));
}

void sentry_logger(sentry_level_t level, const char * message, va_list args)
void sentry_logger(sentry_level_e level, const char * message, va_list args, void *)
{
auto * logger = &Poco::Logger::get("SentryWriter");
size_t size = 1024;
@ -107,7 +107,7 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)

sentry_options_t * options = sentry_options_new(); /// will be freed by sentry_init or sentry_shutdown
sentry_options_set_release(options, VERSION_STRING_SHORT);
sentry_options_set_logger(options, &sentry_logger);
sentry_options_set_logger(options, &sentry_logger, nullptr);
if (debug)
{
sentry_options_set_debug(options, 1);
@ -26,12 +26,12 @@ namespace ext
}

template <typename Rep, typename Period = std::ratio<1>>
std::string to_string(const std::chrono::duration<Rep, Period> & dur)
std::string to_string(const std::chrono::duration<Rep, Period> & duration)
{
auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(dur);
auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(duration);
if (seconds_as_int == dur)
if (seconds_as_int == duration)
    return std::to_string(seconds_as_int.count()) + "s";
auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(dur);
auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(duration);
return std::to_string(seconds_as_double.count()) + "s";
}

@ -42,8 +42,8 @@ namespace ext
}

template <typename Rep, typename Period = std::ratio<1>>
std::ostream & operator<<(std::ostream & o, const std::chrono::duration<Rep, Period> & dur)
std::ostream & operator<<(std::ostream & o, const std::chrono::duration<Rep, Period> & duration)
{
return o << to_string(dur);
return o << to_string(duration);
}
}
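A standalone restatement of the renamed `to_string` (same logic as above; the `main` is illustrative only): whole-second durations print via the integer path, everything else falls through to the double path.

```cpp
#include <chrono>
#include <iostream>
#include <ratio>
#include <string>

template <typename Rep, typename Period = std::ratio<1>>
std::string to_string(const std::chrono::duration<Rep, Period> & duration)
{
    auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(duration);
    if (seconds_as_int == duration)
        return std::to_string(seconds_as_int.count()) + "s";
    auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(duration);
    return std::to_string(seconds_as_double.count()) + "s";
}

int main()
{
    std::cout << to_string(std::chrono::seconds(5)) << '\n';         /// prints "5s"
    std::cout << to_string(std::chrono::milliseconds(1500)) << '\n'; /// prints "1.500000s"
}
```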
@ -24,7 +24,7 @@
* = log(6.3*5.3) + lgamma(5.3)
* = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3)
* 2. Polynomial approximation of lgamma around its
* minimun ymin=1.461632144968362245 to maintain monotonicity.
* minimum ymin=1.461632144968362245 to maintain monotonicity.
* On [ymin-0.23, ymin+0.27] (i.e., [1.23164,1.73163]), use
* Let z = x-ymin;
* lgamma(x) = -1.214862905358496078218 + z^2*poly(z)
@ -21,7 +21,7 @@ public:
std::optional<size_t> getLayer() const
{
return layer; /// layer setted in inheritor class BaseDaemonApplication.
return layer; /// layer set in inheritor class BaseDaemonApplication.
}

void setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority);
@ -91,10 +91,13 @@ void OwnSplitChannel::logSplit(const Poco::Message & msg)
elem.source_file = msg.getSourceFile();

elem.source_line = msg.getSourceLine();

std::lock_guard<std::mutex> lock(text_log_mutex);
if (auto log = text_log.lock())
    log->add(elem);

std::shared_ptr<TextLog> text_log_locked{};
{
    std::lock_guard<std::mutex> lock(text_log_mutex);
    text_log_locked = text_log.lock();
}
if (text_log_locked)
    text_log_locked->add(elem);
}
}
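The rewrite above narrows the critical section: the mutex is now held only to pin the `weak_ptr`, and `add()` runs outside the lock, which matches the changelog entry "Fixed the deadlock if `text_log` is enabled". A minimal sketch of the pattern, with simplified stand-in types:

```cpp
#include <memory>
#include <mutex>

struct TextLogSketch { void add(int /*elem*/) {} }; /// stand-in for DB::TextLog

std::mutex text_log_mutex;
std::weak_ptr<TextLogSketch> text_log;

void logSplitSketch(int elem)
{
    std::shared_ptr<TextLogSketch> text_log_locked;
    {
        std::lock_guard<std::mutex> lock(text_log_mutex); /// held only to copy the weak_ptr
        text_log_locked = text_log.lock();
    }
    if (text_log_locked)
        text_log_locked->add(elem); /// may itself log or lock; now runs outside the mutex
}

int main()
{
    auto log = std::make_shared<TextLogSketch>();
    text_log = log;
    logSplitSketch(42);
}
```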
@ -53,8 +53,6 @@
* These assumptions are specific for Yandex.Metrica. Your mileage may vary.
*
* mysqlxx could not be considered as separate full-featured library,
* because it is developed from the principle - "everything that we don't need is not implemented",
* because it is developed from the principle - "everything that we don't need is not implemented".
* and also the library depends on some other libraries from Yandex.Metrica code.
* (dependencied could be easily removed if necessary).
* It is assumed that the user will add all missing functionality that is needed.
*/
@ -110,7 +110,7 @@ namespace pcg_extras {
/*
* C++ requires us to be able to serialize RNG state by printing or reading
* it from a stream. Because we use 128-bit ints, we also need to be able
* ot print them, so here is code to do so.
* or print them, so here is code to do so.
*
* This code provides enough functionality to print 128-bit ints in decimal
* and zero-padded in hex. It's not a full-featured implementation.
@ -253,7 +253,7 @@ inline std::istream& operator>>(std::istream& in, uint8_t& value)
|
*/

/*
* XorShifts are invertable, but they are someting of a pain to invert.
* XorShifts are invertable, but they are something of a pain to invert.
* This function backs them out. It's used by the whacky "inside out"
* generator defined later.
*/
@ -174,7 +174,7 @@ PCG_DEFINE_CONSTANT(pcg128_t, default, increment,
* period
* specific stream - the constant can be changed at any time, selecting
* a different random sequence
* unique stream - the constant is based on the memory addresss of the
* unique stream - the constant is based on the memory address of the
* object, thus every RNG has its own unique sequence
*
* This variation is provided though mixin classes which define a function
@ -352,7 +352,7 @@ protected:
* (reducing register pressure).
*
* Given the high level of parameterization, the code has to use some
* template-metaprogramming tricks to handle some of the suble variations
* template-metaprogramming tricks to handle some of the subtle variations
* involved.
*/
@ -5,3 +5,4 @@ add_library(readpassphrase readpassphrase.c)
set_target_properties(readpassphrase PROPERTIES LINKER_LANGUAGE C)
target_compile_options(readpassphrase PRIVATE -Wno-unused-result -Wno-reserved-id-macro)
target_include_directories(readpassphrase PUBLIC .)
|
/* OPENBSD ORIGINAL: lib/libc/gen/readpassphrase.c */

#include "includes.h"
#ifndef _PATH_TTY
#define _PATH_TTY "/dev/tty"
#endif

#include <termios.h>
#include <signal.h>
@ -4,4 +4,5 @@ RECURSE(
loggers
pcg-random
widechar_width
readpassphrase
)
1774
benchmark/monetdb/aws.log
Normal file
File diff suppressed because it is too large
10
benchmark/monetdb/benchmark.sh
Executable file
@ -0,0 +1,10 @@
#!/bin/bash

grep -v -P '^#' queries.sql | sed -e 's/{table}/hits/' | while read query; do
    echo 3 | sudo tee /proc/sys/vm/drop_caches

    echo "$query";
    for i in {1..3}; do
        ./send-query "$query" 2>&1 | grep -P '\d+ tuple|clk: |unknown|overflow|error';
    done;
done;
@ -1,4 +0,0 @@
CONF_DIR=/home/kartavyy/benchmark/monetdb
expect_file=$CONF_DIR/expect.tcl
test_file=$CONF_DIR/queries.sql
etc_init_d_service=/etc/init.d/monetdb5-sql
@ -1,3 +0,0 @@
create table hits_10m ( WatchID BIGINT, JavaEnable SMALLINT, Title VARCHAR(1400), GoodEvent SMALLINT, EventTime TIMESTAMP, EventDate DATE, CounterID BIGINT, ClientIP BIGINT, RegionID BIGINT, UserID BIGINT, CounterClass TINYINT, OS SMALLINT, UserAgent SMALLINT, URL VARCHAR(7800), Referer VARCHAR(3125), Refresh TINYINT, RefererCategoryID INT, RefererRegionID BIGINT, URLCategoryID INT, URLRegionID BIGINT, ResolutionWidth INT, ResolutionHeight INT, ResolutionDepth SMALLINT, FlashMajor SMALLINT, FlashMinor SMALLINT, FlashMinor2 VARCHAR(256), NetMajor SMALLINT, NetMinor SMALLINT, UserAgentMajor INT, UserAgentMinor CHAR(2), CookieEnable SMALLINT, JavascriptEnable SMALLINT, IsMobile SMALLINT, MobilePhone SMALLINT, MobilePhoneModel VARCHAR(80), Params VARCHAR(2925), IPNetworkID BIGINT, TraficSourceID SMALLINT, SearchEngineID INT, SearchPhrase VARCHAR(2008), AdvEngineID SMALLINT, IsArtifical SMALLINT, WindowClientWidth INT, WindowClientHeight INT, ClientTimeZone INTEGER, ClientEventTime TIMESTAMP, SilverlightVersion1 SMALLINT, SilverlightVersion2 SMALLINT, SilverlightVersion3 BIGINT, SilverlightVersion4 INT, PageCharset VARCHAR(80), CodeVersion BIGINT, IsLink SMALLINT, IsDownload SMALLINT, IsNotBounce SMALLINT, FUniqID BIGINT, OriginalURL VARCHAR(8181), HID BIGINT, IsOldCounter SMALLINT, IsEvent SMALLINT, IsParameter SMALLINT, DontCountHits SMALLINT, WithHash SMALLINT, HitColor CHAR(1), LocalEventTime TIMESTAMP, Age SMALLINT, Sex SMALLINT, Income SMALLINT, Interests INT, Robotness SMALLINT, RemoteIP BIGINT, WindowName INT, OpenerName INT, HistoryLength SMALLINT, BrowserLanguage CHAR(2), BrowserCountry CHAR(2), SocialNetwork VARCHAR(128), SocialAction VARCHAR(128), HTTPError INT, SendTiming BIGINT, DNSTiming BIGINT, ConnectTiming BIGINT, ResponseStartTiming BIGINT, ResponseEndTiming BIGINT, FetchTiming BIGINT, SocialSourceNetworkID SMALLINT, SocialSourcePage VARCHAR(256), ParamPrice BIGINT, ParamOrderID VARCHAR(80), ParamCurrency CHAR(3), ParamCurrencyID INT, OpenstatServiceName VARCHAR(80), OpenstatCampaignID VARCHAR(512), OpenstatAdID VARCHAR(80), OpenstatSourceID VARCHAR(256), UTMSource VARCHAR(256), UTMMedium VARCHAR(256), UTMCampaign VARCHAR(407), UTMContent VARCHAR(256), UTMTerm VARCHAR(437), FromTag VARCHAR(428), HasGCLID SMALLINT, RefererHash BIGINT, URLHash BIGINT, CLID BIGINT, UserIDHash BIGINT ); CREATE INDEX hits_10m_ind ON hits_10m(CounterID, EventDate, UserIDHash, EventTime);
COPY INTO hits_10m FROM ('/opt/dump/dump_0.3/dump_hits_10m_meshed_utf8.tsv') DELIMITERS '\t';
356
benchmark/monetdb/instruction.md
Normal file
356
benchmark/monetdb/instruction.md
Normal file
@ -0,0 +1,356 @@
|
|||||||
|
Go to https://www.monetdb.org/
|
||||||
|
|
||||||
|
Dowload now.
|
||||||
|
Latest binary releases.
|
||||||
|
Ubuntu & Debian.
|
||||||
|
|
||||||
|
https://www.monetdb.org/downloads/deb/
|
||||||
|
|
||||||
|
Go to the server where you want to install MonetDB.
|
||||||
|
```
|
||||||
|
$ sudo mcedit /etc/apt/sources.list.d/monetdb.list
|
||||||
|
```
|
||||||
|
Write:
|
||||||
|
```
|
||||||
|
deb https://dev.monetdb.org/downloads/deb/ bionic monetdb
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
$ wget --output-document=- https://www.monetdb.org/downloads/MonetDB-GPG-KEY | sudo apt-key add -
|
||||||
|
|
||||||
|
$ sudo apt update
|
||||||
|
$ sudo apt install monetdb5-sql monetdb-client
|
||||||
|
|
||||||
|
$ sudo systemctl enable monetdbd
|
||||||
|
$ sudo systemctl start monetdbd
|
||||||
|
$ sudo usermod -a -G monetdb $USER
|
||||||
|
```
|
||||||
|
|
||||||
|
Logout and login back to your server.
|
||||||
|
|
||||||
|
Tutorial:
|
||||||
|
https://www.monetdb.org/Documentation/UserGuide/Tutorial
|
||||||
|
|
||||||
|
Creating the database:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ sudo mkdir /opt/monetdb
|
||||||
|
$ sudo chmod 777 /opt/monetdb
|
||||||
|
$ monetdbd create /opt/monetdb
|
||||||
|
|
||||||
|
$ monetdbd start /opt/monetdb
|
||||||
|
cannot remove socket files
|
||||||
|
```
|
||||||
|
|
||||||
|
Now you have to stop MonetDB, copy the contents of `/var/monetdb5` to `/opt/monetdb` and replace the `/var/monetdb5` with symlink to `/opt/monetdb`. This is necessary, because I don't have free space in `/var` and creation of database in `/opt` did not succeed.
|
||||||
|
|
||||||
|
Start MonetDB again.
|
||||||
|
|
||||||
|
```
|
||||||
|
$ sudo systemctl start monetdbd
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
$ monetdb create test
|
||||||
|
created database in maintenance mode: test
|
||||||
|
|
||||||
|
$ monetdb release test
|
||||||
|
taken database out of maintenance mode: test
|
||||||
|
```
|
||||||
|
|
||||||
|
Run client:
|
||||||
|
```
|
||||||
|
$ mclient -u monetdb -d test
|
||||||
|
```
|
||||||
|
|
||||||
|
Type password: monetdb
|
||||||
|
|
||||||
|
```
|
||||||
|
CREATE TABLE hits
|
||||||
|
(
|
||||||
|
"WatchID" BIGINT,
|
||||||
|
"JavaEnable" TINYINT,
|
||||||
|
"Title" TEXT,
|
||||||
|
"GoodEvent" SMALLINT,
|
||||||
|
"EventTime" TIMESTAMP,
|
||||||
|
"EventDate" Date,
|
||||||
|
"CounterID" INTEGER,
|
||||||
|
"ClientIP" INTEGER,
|
||||||
|
"RegionID" INTEGER,
|
||||||
|
"UserID" BIGINT,
|
||||||
|
"CounterClass" TINYINT,
|
||||||
|
"OS" TINYINT,
|
||||||
|
"UserAgent" TINYINT,
|
||||||
|
"URL" TEXT,
|
||||||
|
"Referer" TEXT,
|
||||||
|
"Refresh" TINYINT,
|
||||||
|
"RefererCategoryID" SMALLINT,
|
||||||
|
"RefererRegionID" INTEGER,
|
||||||
|
"URLCategoryID" SMALLINT,
|
||||||
|
"URLRegionID" INTEGER,
|
||||||
|
"ResolutionWidth" SMALLINT,
|
||||||
|
"ResolutionHeight" SMALLINT,
|
||||||
|
"ResolutionDepth" TINYINT,
|
||||||
|
"FlashMajor" TINYINT,
|
||||||
|
"FlashMinor" TINYINT,
|
||||||
|
"FlashMinor2" TEXT,
|
||||||
|
"NetMajor" TINYINT,
|
||||||
|
"NetMinor" TINYINT,
|
||||||
|
"UserAgentMajor" SMALLINT,
|
||||||
|
"UserAgentMinor" TEXT,
|
||||||
|
"CookieEnable" TINYINT,
|
||||||
|
"JavascriptEnable" TINYINT,
|
||||||
|
"IsMobile" TINYINT,
|
||||||
|
"MobilePhone" TINYINT,
|
||||||
|
"MobilePhoneModel" TEXT,
|
||||||
|
"Params" TEXT,
|
||||||
|
"IPNetworkID" INTEGER,
|
||||||
|
"TraficSourceID" TINYINT,
|
||||||
|
"SearchEngineID" SMALLINT,
|
||||||
|
"SearchPhrase" TEXT,
|
||||||
|
"AdvEngineID" TINYINT,
|
||||||
|
"IsArtifical" TINYINT,
|
||||||
|
"WindowClientWidth" SMALLINT,
|
||||||
|
"WindowClientHeight" SMALLINT,
|
||||||
|
"ClientTimeZone" SMALLINT,
|
||||||
|
"ClientEventTime" TIMESTAMP,
|
||||||
|
"SilverlightVersion1" TINYINT,
|
||||||
|
"SilverlightVersion2" TINYINT,
|
||||||
|
"SilverlightVersion3" INTEGER,
|
||||||
|
"SilverlightVersion4" SMALLINT,
|
||||||
|
"PageCharset" TEXT,
|
||||||
|
"CodeVersion" INTEGER,
|
||||||
|
"IsLink" TINYINT,
|
||||||
|
"IsDownload" TINYINT,
|
||||||
|
"IsNotBounce" TINYINT,
|
||||||
|
"FUniqID" BIGINT,
|
||||||
|
"OriginalURL" TEXT,
|
||||||
|
"HID" INTEGER,
|
||||||
|
"IsOldCounter" TINYINT,
|
||||||
|
"IsEvent" TINYINT,
|
||||||
|
"IsParameter" TINYINT,
|
||||||
|
"DontCountHits" TINYINT,
|
||||||
|
"WithHash" TINYINT,
|
||||||
|
"HitColor" TEXT,
|
||||||
|
"LocalEventTime" TIMESTAMP,
|
||||||
|
"Age" TINYINT,
|
||||||
|
"Sex" TINYINT,
|
||||||
|
"Income" TINYINT,
|
||||||
|
"Interests" SMALLINT,
|
||||||
|
"Robotness" TINYINT,
|
||||||
|
"RemoteIP" INTEGER,
|
||||||
|
"WindowName" INTEGER,
|
||||||
|
"OpenerName" INTEGER,
|
||||||
|
"HistoryLength" SMALLINT,
|
||||||
|
"BrowserLanguage" TEXT,
|
||||||
|
"BrowserCountry" TEXT,
|
||||||
|
"SocialNetwork" TEXT,
|
||||||
|
"SocialAction" TEXT,
|
||||||
|
"HTTPError" SMALLINT,
|
||||||
|
"SendTiming" INTEGER,
|
||||||
|
"DNSTiming" INTEGER,
|
||||||
|
"ConnectTiming" INTEGER,
|
||||||
|
"ResponseStartTiming" INTEGER,
|
||||||
|
"ResponseEndTiming" INTEGER,
|
||||||
|
"FetchTiming" INTEGER,
|
||||||
|
"SocialSourceNetworkID" TINYINT,
|
||||||
|
"SocialSourcePage" TEXT,
|
||||||
|
"ParamPrice" BIGINT,
|
||||||
|
"ParamOrderID" TEXT,
|
||||||
|
"ParamCurrency" TEXT,
|
||||||
|
"ParamCurrencyID" SMALLINT,
|
||||||
|
"OpenstatServiceName" TEXT,
|
||||||
|
"OpenstatCampaignID" TEXT,
|
||||||
|
"OpenstatAdID" TEXT,
|
||||||
|
"OpenstatSourceID" TEXT,
|
||||||
|
"UTMSource" TEXT,
|
||||||
|
"UTMMedium" TEXT,
|
||||||
|
"UTMCampaign" TEXT,
|
||||||
|
"UTMContent" TEXT,
|
||||||
|
"UTMTerm" TEXT,
|
||||||
|
"FromTag" TEXT,
|
||||||
|
"HasGCLID" TINYINT,
|
||||||
|
"RefererHash" BIGINT,
|
||||||
|
"URLHash" BIGINT,
|
||||||
|
"CLID" INTEGER
|
||||||
|
);
|
||||||
|
```
|
||||||
|
|
||||||
|
# How to prepare data

Download the 100 million rows dataset from here and insert into ClickHouse:

https://clickhouse.tech/docs/en/getting-started/example-datasets/metrica/

Create the dataset from ClickHouse:

```
SELECT
toInt64(WatchID) = -9223372036854775808 ? -9223372036854775807 : toInt64(WatchID),
toInt8(JavaEnable) = -128 ? -127 : toInt8(JavaEnable),
toValidUTF8(toString(Title)),
toInt16(GoodEvent) = -32768 ? -32767 : toInt16(GoodEvent),
EventTime,
EventDate,
toInt32(CounterID) = -2147483648 ? -2147483647 : toInt32(CounterID),
toInt32(ClientIP) = -2147483648 ? -2147483647 : toInt32(ClientIP),
toInt32(RegionID) = -2147483648 ? -2147483647 : toInt32(RegionID),
toInt64(UserID) = -9223372036854775808 ? -9223372036854775807 : toInt64(UserID),
toInt8(CounterClass) = -128 ? -127 : toInt8(CounterClass),
toInt8(OS) = -128 ? -127 : toInt8(OS),
toInt8(UserAgent) = -128 ? -127 : toInt8(UserAgent),
toValidUTF8(toString(URL)),
toValidUTF8(toString(Referer)),
toInt8(Refresh) = -128 ? -127 : toInt8(Refresh),
toInt16(RefererCategoryID) = -32768 ? -32767 : toInt16(RefererCategoryID),
toInt32(RefererRegionID) = -2147483648 ? -2147483647 : toInt32(RefererRegionID),
toInt16(URLCategoryID) = -32768 ? -32767 : toInt16(URLCategoryID),
toInt32(URLRegionID) = -2147483648 ? -2147483647 : toInt32(URLRegionID),
toInt16(ResolutionWidth) = -32768 ? -32767 : toInt16(ResolutionWidth),
toInt16(ResolutionHeight) = -32768 ? -32767 : toInt16(ResolutionHeight),
toInt8(ResolutionDepth) = -128 ? -127 : toInt8(ResolutionDepth),
toInt8(FlashMajor) = -128 ? -127 : toInt8(FlashMajor),
toInt8(FlashMinor) = -128 ? -127 : toInt8(FlashMinor),
toValidUTF8(toString(FlashMinor2)),
toInt8(NetMajor) = -128 ? -127 : toInt8(NetMajor),
toInt8(NetMinor) = -128 ? -127 : toInt8(NetMinor),
toInt16(UserAgentMajor) = -32768 ? -32767 : toInt16(UserAgentMajor),
toValidUTF8(toString(UserAgentMinor)),
toInt8(CookieEnable) = -128 ? -127 : toInt8(CookieEnable),
toInt8(JavascriptEnable) = -128 ? -127 : toInt8(JavascriptEnable),
toInt8(IsMobile) = -128 ? -127 : toInt8(IsMobile),
toInt8(MobilePhone) = -128 ? -127 : toInt8(MobilePhone),
toValidUTF8(toString(MobilePhoneModel)),
toValidUTF8(toString(Params)),
toInt32(IPNetworkID) = -2147483648 ? -2147483647 : toInt32(IPNetworkID),
toInt8(TraficSourceID) = -128 ? -127 : toInt8(TraficSourceID),
toInt16(SearchEngineID) = -32768 ? -32767 : toInt16(SearchEngineID),
toValidUTF8(toString(SearchPhrase)),
toInt8(AdvEngineID) = -128 ? -127 : toInt8(AdvEngineID),
toInt8(IsArtifical) = -128 ? -127 : toInt8(IsArtifical),
toInt16(WindowClientWidth) = -32768 ? -32767 : toInt16(WindowClientWidth),
toInt16(WindowClientHeight) = -32768 ? -32767 : toInt16(WindowClientHeight),
toInt16(ClientTimeZone) = -32768 ? -32767 : toInt16(ClientTimeZone),
ClientEventTime,
toInt8(SilverlightVersion1) = -128 ? -127 : toInt8(SilverlightVersion1),
toInt8(SilverlightVersion2) = -128 ? -127 : toInt8(SilverlightVersion2),
toInt32(SilverlightVersion3) = -2147483648 ? -2147483647 : toInt32(SilverlightVersion3),
toInt16(SilverlightVersion4) = -32768 ? -32767 : toInt16(SilverlightVersion4),
toValidUTF8(toString(PageCharset)),
toInt32(CodeVersion) = -2147483648 ? -2147483647 : toInt32(CodeVersion),
toInt8(IsLink) = -128 ? -127 : toInt8(IsLink),
toInt8(IsDownload) = -128 ? -127 : toInt8(IsDownload),
toInt8(IsNotBounce) = -128 ? -127 : toInt8(IsNotBounce),
toInt64(FUniqID) = -9223372036854775808 ? -9223372036854775807 : toInt64(FUniqID),
toValidUTF8(toString(OriginalURL)),
toInt32(HID) = -2147483648 ? -2147483647 : toInt32(HID),
toInt8(IsOldCounter) = -128 ? -127 : toInt8(IsOldCounter),
toInt8(IsEvent) = -128 ? -127 : toInt8(IsEvent),
toInt8(IsParameter) = -128 ? -127 : toInt8(IsParameter),
toInt8(DontCountHits) = -128 ? -127 : toInt8(DontCountHits),
toInt8(WithHash) = -128 ? -127 : toInt8(WithHash),
toValidUTF8(toString(HitColor)),
LocalEventTime,
toInt8(Age) = -128 ? -127 : toInt8(Age),
toInt8(Sex) = -128 ? -127 : toInt8(Sex),
toInt8(Income) = -128 ? -127 : toInt8(Income),
toInt16(Interests) = -32768 ? -32767 : toInt16(Interests),
toInt8(Robotness) = -128 ? -127 : toInt8(Robotness),
toInt32(RemoteIP) = -2147483648 ? -2147483647 : toInt32(RemoteIP),
toInt32(WindowName) = -2147483648 ? -2147483647 : toInt32(WindowName),
toInt32(OpenerName) = -2147483648 ? -2147483647 : toInt32(OpenerName),
toInt16(HistoryLength) = -32768 ? -32767 : toInt16(HistoryLength),
toValidUTF8(toString(BrowserLanguage)),
toValidUTF8(toString(BrowserCountry)),
toValidUTF8(toString(SocialNetwork)),
toValidUTF8(toString(SocialAction)),
toInt16(HTTPError) = -32768 ? -32767 : toInt16(HTTPError),
toInt32(SendTiming) = -2147483648 ? -2147483647 : toInt32(SendTiming),
toInt32(DNSTiming) = -2147483648 ? -2147483647 : toInt32(DNSTiming),
toInt32(ConnectTiming) = -2147483648 ? -2147483647 : toInt32(ConnectTiming),
toInt32(ResponseStartTiming) = -2147483648 ? -2147483647 : toInt32(ResponseStartTiming),
toInt32(ResponseEndTiming) = -2147483648 ? -2147483647 : toInt32(ResponseEndTiming),
toInt32(FetchTiming) = -2147483648 ? -2147483647 : toInt32(FetchTiming),
toInt8(SocialSourceNetworkID) = -128 ? -127 : toInt8(SocialSourceNetworkID),
toValidUTF8(toString(SocialSourcePage)),
toInt64(ParamPrice) = -9223372036854775808 ? -9223372036854775807 : toInt64(ParamPrice),
toValidUTF8(toString(ParamOrderID)),
toValidUTF8(toString(ParamCurrency)),
toInt16(ParamCurrencyID) = -32768 ? -32767 : toInt16(ParamCurrencyID),
toValidUTF8(toString(OpenstatServiceName)),
toValidUTF8(toString(OpenstatCampaignID)),
toValidUTF8(toString(OpenstatAdID)),
toValidUTF8(toString(OpenstatSourceID)),
toValidUTF8(toString(UTMSource)),
toValidUTF8(toString(UTMMedium)),
toValidUTF8(toString(UTMCampaign)),
toValidUTF8(toString(UTMContent)),
toValidUTF8(toString(UTMTerm)),
toValidUTF8(toString(FromTag)),
toInt8(HasGCLID) = -128 ? -127 : toInt8(HasGCLID),
toInt64(RefererHash) = -9223372036854775808 ? -9223372036854775807 : toInt64(RefererHash),
toInt64(URLHash) = -9223372036854775808 ? -9223372036854775807 : toInt64(URLHash),
toInt32(CLID) = -2147483648 ? -2147483647 : toInt32(CLID)
FROM hits_100m_obfuscated
INTO OUTFILE '/home/milovidov/example_datasets/hits_100m_obfuscated_monetdb.tsv'
FORMAT TSV;
```

Note that MonetDB does not support the most negative value of each integer type, such as -128, so we have to convert such values by adding one (-128 is stored as -127, and -9223372036854775808 as -9223372036854775807). This makes it impossible to store the full range of 64-bit identifiers in BIGINT. Maybe it's a trick to optimize NULLs?

Upload the data:

```
$ mclient -u monetdb -d test
```

Type the password: monetdb
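
mclient can also read the credentials from a `~/.monetdb` file, so the password does not have to be typed interactively. A minimal sketch, assuming the default `monetdb`/`monetdb` credentials used above (check the file name and keys against your MonetDB version):

```
# Write the credentials once; mclient picks up ~/.monetdb automatically.
cat > ~/.monetdb <<'EOF'
user=monetdb
password=monetdb
EOF
```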

```
COPY INTO hits FROM '/home/milovidov/example_datasets/hits_100m_obfuscated_monetdb.tsv' USING DELIMITERS '\t';
```

It takes 28 minutes 02 seconds on a server (Linux Ubuntu, Xeon E5-2560v2, 32 logical CPU, 128 GiB RAM, 8xHDD RAID-5, 40 TB). That is roughly 60 000 rows per second.

Validate the data (the count should come back as 100 million rows):

```
SELECT count(*) FROM hits;
```

Create an index:

```
CREATE INDEX hits_idx ON hits ("CounterID", "EventDate");
```

(it takes 5 seconds)

Run the benchmark:

```
./benchmark.sh | tee log.txt
```

You can find the log in the `log.txt` file.

Postprocess the data:

```
grep clk log.txt | tr -d '\r' | awk '{ if ($3 == "ms") { print $2 / 1000; } else if ($3 == "sec") { print $2 } else { print } }'
```

Then replace the values reported with "min" (minutes) timing manually and save to `tmp.txt`.
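
The manual "min" replacement can also be scripted. A sketch of an extended awk filter, assuming every minute timing has the `clk: M:SS min` shape seen in the log below:

```
grep clk log.txt | tr -d '\r' | awk '{
if ($3 == "ms") { print $2 / 1000 }
else if ($3 == "sec") { print $2 }
else if ($3 == "min") { split($2, t, ":"); print t[1] * 60 + t[2] } # "1:24" becomes 84 seconds
else { print }
}'
```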

Then process to JSON format:

```
awk '{
if (i % 3 == 0) { a = $1 }
else if (i % 3 == 1) { b = $1 }
else if (i % 3 == 2) { c = $1; print "[" a ", " b ", " c "]," };
++i; }' < tmp.txt
```

And paste to `/website/benchmark/dbms/results/005_monetdb.json` in the repository.
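
Each query contributes three timings, one per run, so every group of three lines in `tmp.txt` collapses into one JSON row. For example, the first two queries in the log below would come out as:

```
[0.001262, 0.00142, 0.00119],
[1.530, 1.489, 1.490],
```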
341 benchmark/monetdb/log.txt Normal file
@@ -0,0 +1,341 @@
3
SELECT count(*) FROM hits;
1 tuple
clk: 1.262 ms
1 tuple
clk: 1.420 ms
1 tuple
clk: 1.190 ms
3
SELECT count(*) FROM hits WHERE "AdvEngineID" <> 0;
1 tuple
clk: 1.530 sec
1 tuple
clk: 1.489 sec
1 tuple
clk: 1.490 sec
3
SELECT sum("AdvEngineID"), count(*), avg("ResolutionWidth") FROM hits;
1 tuple
clk: 597.512 ms
1 tuple
clk: 579.383 ms
1 tuple
clk: 598.220 ms
3
SELECT sum("UserID") FROM hits;
overflow in calculation.
clk: 568.003 ms
overflow in calculation.
clk: 554.572 ms
overflow in calculation.
clk: 552.076 ms
3
SELECT COUNT(DISTINCT "UserID") FROM hits;
1 tuple
clk: 6.688 sec
1 tuple
clk: 6.689 sec
1 tuple
clk: 6.652 sec
3
SELECT COUNT(DISTINCT "SearchPhrase") FROM hits;
1 tuple
clk: 15.702 sec
1 tuple
clk: 17.189 sec
1 tuple
clk: 15.514 sec
3
SELECT min("EventDate"), max("EventDate") FROM hits;
1 tuple
clk: 697.770 ms
1 tuple
clk: 711.870 ms
1 tuple
clk: 697.177 ms
3
SELECT "AdvEngineID", count(*) FROM hits WHERE "AdvEngineID" <> 0 GROUP BY "AdvEngineID" ORDER BY count(*) DESC;
18 tuples
clk: 1.536 sec
18 tuples
clk: 1.505 sec
18 tuples
clk: 1.492 sec
3
SELECT "RegionID", COUNT(DISTINCT "UserID") AS u FROM hits GROUP BY "RegionID" ORDER BY u DESC LIMIT 10;
10 tuples
clk: 9.965 sec
10 tuples
clk: 10.106 sec
10 tuples
clk: 10.136 sec
3
SELECT "RegionID", sum("AdvEngineID"), count(*) AS c, avg("ResolutionWidth"), COUNT(DISTINCT "UserID") FROM hits GROUP BY "RegionID" ORDER BY c DESC LIMIT 10;
10 tuples
clk: 8.329 sec
10 tuples
clk: 8.601 sec
10 tuples
clk: 8.039 sec
3
SELECT "MobilePhoneModel", COUNT(DISTINCT "UserID") AS u FROM hits WHERE "MobilePhoneModel" <> '' GROUP BY "MobilePhoneModel" ORDER BY u DESC LIMIT 10;
10 tuples
clk: 3.385 sec
10 tuples
clk: 3.321 sec
10 tuples
clk: 3.326 sec
3
SELECT "MobilePhone", "MobilePhoneModel", COUNT(DISTINCT "UserID") AS u FROM hits WHERE "MobilePhoneModel" <> '' GROUP BY "MobilePhone", "MobilePhoneModel" ORDER BY u DESC LIMIT 10;
10 tuples
clk: 3.510 sec
10 tuples
clk: 3.431 sec
10 tuples
clk: 3.382 sec
3
SELECT "SearchPhrase", count(*) AS c FROM hits WHERE "SearchPhrase" <> '' GROUP BY "SearchPhrase" ORDER BY c DESC LIMIT 10;
10 tuples
clk: 10.891 sec
10 tuples
clk: 11.483 sec
10 tuples
clk: 10.352 sec
3
SELECT "SearchPhrase", COUNT(DISTINCT "UserID") AS u FROM hits WHERE "SearchPhrase" <> '' GROUP BY "SearchPhrase" ORDER BY u DESC LIMIT 10;
10 tuples
clk: 15.711 sec
10 tuples
clk: 15.444 sec
10 tuples
clk: 15.503 sec
3
SELECT "SearchEngineID", "SearchPhrase", count(*) AS c FROM hits WHERE "SearchPhrase" <> '' GROUP BY "SearchEngineID", "SearchPhrase" ORDER BY c DESC LIMIT 10;
10 tuples
clk: 11.433 sec
10 tuples
clk: 11.399 sec
10 tuples
clk: 11.285 sec
3
SELECT "UserID", count(*) FROM hits GROUP BY "UserID" ORDER BY count(*) DESC LIMIT 10;
10 tuples
clk: 7.184 sec
10 tuples
clk: 7.015 sec
10 tuples
clk: 6.849 sec
3
SELECT "UserID", "SearchPhrase", count(*) FROM hits GROUP BY "UserID", "SearchPhrase" ORDER BY count(*) DESC LIMIT 10;
10 tuples
clk: 29.096 sec
10 tuples
clk: 28.328 sec
10 tuples
clk: 29.247 sec
3
SELECT "UserID", "SearchPhrase", count(*) FROM hits GROUP BY "UserID", "SearchPhrase" LIMIT 10;
10 tuples
clk: 29.457 sec
10 tuples
clk: 29.364 sec
10 tuples
clk: 29.269 sec
3
SELECT "UserID", extract(minute FROM "EventTime") AS m, "SearchPhrase", count(*) FROM hits GROUP BY "UserID", m, "SearchPhrase" ORDER BY count(*) DESC LIMIT 10;
10 tuples
clk: 47.141 sec
10 tuples
clk: 46.495 sec
10 tuples
clk: 46.472 sec
3
SELECT "UserID" FROM hits WHERE "UserID" = -6101065172474983726;
0 tuples
clk: 783.332 ms
0 tuples
clk: 771.157 ms
0 tuples
clk: 783.082 ms
3
SELECT count(*) FROM hits WHERE "URL" LIKE '%metrika%';
1 tuple
clk: 3.963 sec
1 tuple
clk: 3.930 sec
1 tuple
clk: 3.964 sec
3
SELECT "SearchPhrase", min("URL"), count(*) AS c FROM hits WHERE "URL" LIKE '%metrika%' AND "SearchPhrase" <> '' GROUP BY "SearchPhrase" ORDER BY c DESC LIMIT 10;
10 tuples
clk: 3.925 sec
10 tuples
clk: 3.817 sec
10 tuples
clk: 3.802 sec
3
SELECT "SearchPhrase", min("URL"), min("Title"), count(*) AS c, COUNT(DISTINCT "UserID") FROM hits WHERE "Title" LIKE '%Яндекс%' AND "URL" NOT LIKE '%.yandex.%' AND "SearchPhrase" <> '' GROUP BY "SearchPhrase" ORDER BY c DESC LIMIT 10;
10 tuples
clk: 6.067 sec
10 tuples
clk: 6.120 sec
10 tuples
clk: 6.012 sec
3
SELECT * FROM hits WHERE "URL" LIKE '%metrika%' ORDER BY "EventTime" LIMIT 10;
10 tuples !87 columns dropped, 29 fields truncated!
clk: 4.251 sec
10 tuples !87 columns dropped, 29 fields truncated!
clk: 4.190 sec
10 tuples !87 columns dropped, 29 fields truncated!
clk: 4.379 sec
3
SELECT "SearchPhrase" FROM hits WHERE "SearchPhrase" <> '' ORDER BY "EventTime" LIMIT 10;
10 tuples
clk: 6.699 sec
10 tuples
clk: 6.718 sec
10 tuples
clk: 6.802 sec
3
SELECT "SearchPhrase" FROM hits WHERE "SearchPhrase" <> '' ORDER BY "SearchPhrase" LIMIT 10;
10 tuples
clk: 6.887 sec
10 tuples
clk: 6.838 sec
10 tuples
clk: 6.844 sec
3
SELECT "SearchPhrase" FROM hits WHERE "SearchPhrase" <> '' ORDER BY "EventTime", "SearchPhrase" LIMIT 10;
10 tuples
clk: 6.806 sec
10 tuples
clk: 6.878 sec
10 tuples
clk: 6.807 sec
3
SELECT "CounterID", avg(length("URL")) AS l, count(*) AS c FROM hits WHERE "URL" <> '' GROUP BY "CounterID" HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
25 tuples
clk: 1:01 min
25 tuples
clk: 55.553 sec
25 tuples
clk: 56.188 sec
3
SELECT sys.getdomain("Referer") AS key, avg(length("Referer")) AS l, count(*) AS c, min("Referer") FROM hits WHERE "Referer" <> '' GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
clk: 1:00 min
clk: 1:00 min
clk: 1:00 min
3
SELECT sum("ResolutionWidth"), sum("ResolutionWidth" + 1), sum("ResolutionWidth" + 2), sum("ResolutionWidth" + 3), sum("ResolutionWidth" + 4), sum("ResolutionWidth" + 5), sum("ResolutionWidth" + 6), sum("ResolutionWidth" + 7), sum("ResolutionWidth" + 8), sum("ResolutionWidth" + 9), sum("ResolutionWidth" + 10), sum("ResolutionWidth" + 11), sum("ResolutionWidth" + 12), sum("ResolutionWidth" + 13), sum("ResolutionWidth" + 14), sum("ResolutionWidth" + 15), sum("ResolutionWidth" + 16), sum("ResolutionWidth" + 17), sum("ResolutionWidth" + 18), sum("ResolutionWidth" + 19), sum("ResolutionWidth" + 20), sum("ResolutionWidth" + 21), sum("ResolutionWidth" + 22), sum("ResolutionWidth" + 23), sum("ResolutionWidth" + 24), sum("ResolutionWidth" + 25), sum("ResolutionWidth" + 26), sum("ResolutionWidth" + 27), sum("ResolutionWidth" + 28), sum("ResolutionWidth" + 29), sum("ResolutionWidth" + 30), sum("ResolutionWidth" + 31), sum("ResolutionWidth" + 32), sum("ResolutionWidth" + 33), sum("ResolutionWidth" + 34), sum("ResolutionWidth" + 35), sum("ResolutionWidth" + 36), sum("ResolutionWidth" + 37), sum("ResolutionWidth" + 38), sum("ResolutionWidth" + 39), sum("ResolutionWidth" + 40), sum("ResolutionWidth" + 41), sum("ResolutionWidth" + 42), sum("ResolutionWidth" + 43), sum("ResolutionWidth" + 44), sum("ResolutionWidth" + 45), sum("ResolutionWidth" + 46), sum("ResolutionWidth" + 47), sum("ResolutionWidth" + 48), sum("ResolutionWidth" + 49), sum("ResolutionWidth" + 50), sum("ResolutionWidth" + 51), sum("ResolutionWidth" + 52), sum("ResolutionWidth" + 53), sum("ResolutionWidth" + 54), sum("ResolutionWidth" + 55), sum("ResolutionWidth" + 56), sum("ResolutionWidth" + 57), sum("ResolutionWidth" + 58), sum("ResolutionWidth" + 59), sum("ResolutionWidth" + 60), sum("ResolutionWidth" + 61), sum("ResolutionWidth" + 62), sum("ResolutionWidth" + 63), sum("ResolutionWidth" + 64), sum("ResolutionWidth" + 65), sum("ResolutionWidth" + 66), sum("ResolutionWidth" + 67), sum("ResolutionWidth" + 68), sum("ResolutionWidth" + 69), sum("ResolutionWidth" + 70), sum("ResolutionWidth" + 71), sum("ResolutionWidth" + 72), sum("ResolutionWidth" + 73), sum("ResolutionWidth" + 74), sum("ResolutionWidth" + 75), sum("ResolutionWidth" + 76), sum("ResolutionWidth" + 77), sum("ResolutionWidth" + 78), sum("ResolutionWidth" + 79), sum("ResolutionWidth" + 80), sum("ResolutionWidth" + 81), sum("ResolutionWidth" + 82), sum("ResolutionWidth" + 83), sum("ResolutionWidth" + 84), sum("ResolutionWidth" + 85), sum("ResolutionWidth" + 86), sum("ResolutionWidth" + 87), sum("ResolutionWidth" + 88), sum("ResolutionWidth" + 89) FROM hits;
1 tuple !77 columns dropped!
clk: 6.221 sec
1 tuple !77 columns dropped!
clk: 6.170 sec
1 tuple !77 columns dropped!
clk: 6.382 sec
3
SELECT "SearchEngineID", "ClientIP", count(*) AS c, sum("Refresh"), avg("ResolutionWidth") FROM hits WHERE "SearchPhrase" <> '' GROUP BY "SearchEngineID", "ClientIP" ORDER BY c DESC LIMIT 10;
10 tuples
clk: 5.684 sec
10 tuples
clk: 5.585 sec
10 tuples
clk: 5.463 sec
3
SELECT "WatchID", "ClientIP", count(*) AS c, sum("Refresh"), avg("ResolutionWidth") FROM hits WHERE "SearchPhrase" <> '' GROUP BY "WatchID", "ClientIP" ORDER BY c DESC LIMIT 10;
10 tuples
clk: 6.281 sec
10 tuples
clk: 6.574 sec
10 tuples
clk: 6.243 sec
3
SELECT "WatchID", "ClientIP", count(*) AS c, sum("Refresh"), avg("ResolutionWidth") FROM hits GROUP BY "WatchID", "ClientIP" ORDER BY c DESC LIMIT 10;
10 tuples
clk: 44.641 sec
10 tuples
clk: 41.904 sec
10 tuples
clk: 43.218 sec
3
SELECT "URL", count(*) AS c FROM hits GROUP BY "URL" ORDER BY c DESC LIMIT 10;
10 tuples
clk: 1:24 min
10 tuples
clk: 1:31 min
10 tuples
clk: 1:24 min
3
SELECT 1, "URL", count(*) AS c FROM hits GROUP BY 1, "URL" ORDER BY c DESC LIMIT 10;
10 tuples
clk: 1:24 min
10 tuples
clk: 1:25 min
10 tuples
clk: 1:24 min
3
SELECT "ClientIP", "ClientIP" - 1, "ClientIP" - 2, "ClientIP" - 3, count(*) AS c FROM hits GROUP BY "ClientIP", "ClientIP" - 1, "ClientIP" - 2, "ClientIP" - 3 ORDER BY c DESC LIMIT 10;
10 tuples
clk: 26.438 sec
10 tuples
clk: 26.033 sec
10 tuples
clk: 26.147 sec
3
SELECT "URL", count(*) AS "PageViews" FROM hits WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-31' AND "DontCountHits" = 0 AND "Refresh" = 0 AND "URL" <> '' GROUP BY "URL" ORDER BY "PageViews" DESC LIMIT 10;
10 tuples
clk: 4.825 sec
10 tuples
clk: 4.618 sec
10 tuples
clk: 4.623 sec
3
SELECT "Title", count(*) AS "PageViews" FROM hits WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-31' AND "DontCountHits" = 0 AND "Refresh" = 0 AND "Title" <> '' GROUP BY "Title" ORDER BY "PageViews" DESC LIMIT 10;
10 tuples
clk: 4.380 sec
10 tuples
clk: 4.418 sec
10 tuples
clk: 4.413 sec
3
SELECT "URL", count(*) AS "PageViews" FROM hits WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-31' AND "Refresh" = 0 AND "IsLink" <> 0 AND "IsDownload" = 0 GROUP BY "URL" ORDER BY "PageViews" DESC LIMIT 1000;
1000 tuples
clk: 4.259 sec
1000 tuples
clk: 4.195 sec
1000 tuples
clk: 4.195 sec
3
SELECT "TraficSourceID", "SearchEngineID", "AdvEngineID", CASE WHEN ("SearchEngineID" = 0 AND "AdvEngineID" = 0) THEN "Referer" ELSE '' END AS Src, "URL" AS Dst, count(*) AS "PageViews" FROM hits WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-31' AND "Refresh" = 0 GROUP BY "TraficSourceID", "SearchEngineID", "AdvEngineID", CASE WHEN ("SearchEngineID" = 0 AND "AdvEngineID" = 0) THEN "Referer" ELSE '' END, "URL" ORDER BY "PageViews" DESC LIMIT 1000;
1000 tuples
clk: 3.233 sec
1000 tuples
clk: 3.180 sec
1000 tuples
clk: 3.181 sec
3
SELECT "URLHash", "EventDate", count(*) AS "PageViews" FROM hits WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-31' AND "Refresh" = 0 AND "TraficSourceID" IN (-1, 6) AND "RefererHash" = 686716256552154761 GROUP BY "URLHash", "EventDate" ORDER BY "PageViews" DESC LIMIT 100;
0 tuples
clk: 2.656 sec
0 tuples
clk: 2.557 sec
0 tuples
clk: 2.561 sec
3
SELECT "WindowClientWidth", "WindowClientHeight", count(*) AS "PageViews" FROM hits WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-31' AND "Refresh" = 0 AND "DontCountHits" = 0 AND "URLHash" = 686716256552154761 GROUP BY "WindowClientWidth", "WindowClientHeight" ORDER BY "PageViews" DESC LIMIT 10000;
0 tuples
clk: 4.161 sec
0 tuples
clk: 4.243 sec
0 tuples
clk: 4.166 sec
3
SELECT DATE_TRUNC('minute', "EventTime") AS "Minute", count(*) AS "PageViews" FROM hits WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-02' AND "Refresh" = 0 AND "DontCountHits" = 0 GROUP BY DATE_TRUNC('minute', "EventTime") ORDER BY DATE_TRUNC('minute', "EventTime");
0 tuples
clk: 4.199 sec
0 tuples
clk: 4.211 sec
0 tuples
clk: 4.190 sec
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
File diff suppressed because it is too large
Binary file not shown.
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,111 +1,43 @@
-SELECT count(*) FROM hits_10m;
-SELECT count(*) FROM hits_10m WHERE AdvEngineID <> 0;
-SELECT sum(AdvEngineID), count(*), avg(ResolutionWidth) FROM hits_10m;
-SELECT sum(UserID) FROM hits_10m;
-SELECT count(DISTINCT UserID) FROM hits_10m;
-SELECT count(DISTINCT SearchPhrase) FROM hits_10m;
-SELECT min(EventDate), max(EventDate) FROM hits_10m;
-
-SELECT AdvEngineID, count(*) FROM hits_10m WHERE AdvEngineID <> 0 GROUP BY AdvEngineID ORDER BY count(*) DESC;
--- heavy filtering. Almost nothing is left after filtering, but we still do an aggregation.;
-
-SELECT RegionID, count(DISTINCT UserID) AS u FROM hits_10m GROUP BY RegionID ORDER BY u DESC LIMIT 10;
--- aggregation, average number of keys.;
-
-SELECT RegionID, sum(AdvEngineID), count(*) AS c, avg(ResolutionWidth), count(DISTINCT UserID) FROM hits_10m GROUP BY RegionID ORDER BY count(*) DESC LIMIT 10;
--- aggregation, average number of keys, several aggregate functions.;
-
-SELECT MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel <> '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
--- heavy filtering on strings, then aggregation by strings.;
-
-SELECT MobilePhone, MobilePhoneModel, count(DISTINCT UserID) AS u FROM hits_10m WHERE MobilePhoneModel <> '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
--- heavy filtering on strings, then aggregation by a pair of a number and a string.;
-
-SELECT SearchPhrase, count(*) FROM hits_10m WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY count(*) DESC LIMIT 10;
--- moderate filtering on strings, then aggregation by strings, a large number of keys.;
-
-SELECT SearchPhrase, count(DISTINCT UserID) AS u FROM hits_10m WHERE SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
--- slightly more complex aggregation.;
-
-SELECT SearchEngineID, SearchPhrase, count(*) FROM hits_10m WHERE SearchPhrase <> '' GROUP BY SearchEngineID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
--- aggregation by a number and a string, a large number of keys.;
-
-SELECT UserID, count(*) FROM hits_10m GROUP BY UserID ORDER BY count(*) DESC LIMIT 10;
--- aggregation by a very large number of keys, may run out of RAM.;
-
-SELECT UserID, SearchPhrase, count(*) FROM hits_10m GROUP BY UserID, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
--- even more complex aggregation.;
-
-SELECT UserID, SearchPhrase, count(*) FROM hits_10m GROUP BY UserID, SearchPhrase LIMIT 10;
--- the same, but without sorting.;
-
-SELECT UserID, extract (minute from EventTime) AS m, SearchPhrase, count(*) FROM hits_10m GROUP BY UserID, m, SearchPhrase ORDER BY count(*) DESC LIMIT 10;
--- even more complex aggregation, should not be run on large tables.;
-
-SELECT UserID FROM hits_10m WHERE UserID = 1234567890;
--- heavy filtering on a UInt64 column.;
-
-SELECT count(*) FROM hits_10m WHERE URL LIKE '%metrika%';
--- filtering by substring search in a string.;
-
-SELECT SearchPhrase, MAX(URL), count(*) FROM hits_10m WHERE URL LIKE '%metrika%' AND SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY count(*) DESC LIMIT 10;
--- we pull out large columns, filtering by a string.;
-
-SELECT SearchPhrase, MAX(URL), MAX(Title), count(*) AS c, count(DISTINCT UserID) FROM hits_10m WHERE Title LIKE '%Яндекс%' AND URL <> '%.yandex.%' AND SearchPhrase <> '' GROUP BY SearchPhrase ORDER BY count(*) DESC LIMIT 10;
--- slightly larger columns.;
-
-SELECT * FROM hits_10m WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10;
--- a bad query - we pull out all columns.;
-
-SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase <> '' ORDER BY EventTime LIMIT 10;
--- a big sort.;
-
-SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase <> '' ORDER BY SearchPhrase LIMIT 10;
--- a big sort by strings.;
-
-SELECT SearchPhrase FROM hits_10m WHERE SearchPhrase <> '' ORDER BY EventTime, SearchPhrase LIMIT 10;
--- a big sort by a tuple.;
-
-SELECT CounterID, avg(length(URL)) AS l, count(*) FROM hits_10m WHERE URL <> '' GROUP BY CounterID HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
--- compute the average URL length for large counters.;
-
-SELECT SUBSTRING( SUBSTRING(Referer, POSITION('//' IN Referer) + 2), 1, ifthenelse( (0 > POSITION('/' IN SUBSTRING(Referer, POSITION('//' IN Referer) + 2)) - 1), 0, POSITION('/' IN SUBSTRING(Referer, POSITION('//' IN Referer) + 2)) - 1 ) ) AS k, avg(length(Referer)) AS l, count(*) AS c, MAX(Referer) FROM hits_10m WHERE Referer <> '' GROUP BY k HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
--- the same, but broken down by domain.;
-
-SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM hits_10m;
--- a lot of dumb aggregate functions.;
-
-SELECT SearchEngineID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase <> '' GROUP BY SearchEngineID, ClientIP ORDER BY count(*) DESC LIMIT 10;
--- complex aggregation, may run out of RAM for large tables.;
-
-SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m WHERE SearchPhrase <> '' GROUP BY WatchID, ClientIP ORDER BY count(*) DESC LIMIT 10;
--- aggregation by two fields that does not aggregate anything. Will not work for large tables.;
-
-SELECT WatchID, ClientIP, count(*) AS c, sum(Refresh), avg(ResolutionWidth) FROM hits_10m GROUP BY WatchID, ClientIP ORDER BY count(*) DESC LIMIT 10;
--- the same, but also without filtering.;
-
-SELECT URL, count(*) FROM hits_10m GROUP BY URL ORDER BY count(*) DESC LIMIT 10;
--- aggregation by URL.;
-
-SELECT 1, URL, count(*) FROM hits_10m GROUP BY 1, URL ORDER BY count(*) DESC LIMIT 10;
--- aggregation by URL and a number.;
-
-SELECT ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3, count(*) FROM hits_10m GROUP BY ClientIP, ClientIP - 1, ClientIP - 2, ClientIP - 3 ORDER BY count(*) DESC LIMIT 10;
-
-SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT DontCountHits AND NOT Refresh AND URL <> '' GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
-
-SELECT Title, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT DontCountHits AND NOT Refresh AND Title <> '' GROUP BY Title ORDER BY PageViews DESC LIMIT 10;
-
-SELECT URL, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
-
-SELECT TraficSourceID, SearchEngineID, AdvEngineID, CASE WHEN SearchEngineID = 0 AND AdvEngineID = 0 THEN Referer ELSE '' END AS Src, URL AS Dst, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews DESC LIMIT 1000;
-
-SELECT URLHash, EventDate, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = 6202628419148573758 GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100000;
-
-SELECT WindowClientWidth, WindowClientHeight, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND NOT DontCountHits AND URLHash = 6202628419148573758 GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
-
-SELECT EventTime - extract (SECOND from EventTime) AS M, count(*) AS PageViews FROM hits_10m WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND NOT Refresh AND NOT DontCountHits GROUP BY M ORDER BY M;
+SELECT count(*) FROM {table};
+SELECT count(*) FROM {table} WHERE "AdvEngineID" <> 0;
+SELECT sum("AdvEngineID"), count(*), avg("ResolutionWidth") FROM {table};
+SELECT sum("UserID") FROM {table};
+SELECT COUNT(DISTINCT "UserID") FROM {table};
+SELECT COUNT(DISTINCT "SearchPhrase") FROM {table};
+SELECT min("EventDate"), max("EventDate") FROM {table};
+SELECT "AdvEngineID", count(*) FROM {table} WHERE "AdvEngineID" <> 0 GROUP BY "AdvEngineID" ORDER BY count(*) DESC;
+SELECT "RegionID", COUNT(DISTINCT "UserID") AS u FROM {table} GROUP BY "RegionID" ORDER BY u DESC LIMIT 10;
+SELECT "RegionID", sum("AdvEngineID"), count(*) AS c, avg("ResolutionWidth"), COUNT(DISTINCT "UserID") FROM {table} GROUP BY "RegionID" ORDER BY c DESC LIMIT 10;
+SELECT "MobilePhoneModel", COUNT(DISTINCT "UserID") AS u FROM {table} WHERE "MobilePhoneModel" <> '' GROUP BY "MobilePhoneModel" ORDER BY u DESC LIMIT 10;
+SELECT "MobilePhone", "MobilePhoneModel", COUNT(DISTINCT "UserID") AS u FROM {table} WHERE "MobilePhoneModel" <> '' GROUP BY "MobilePhone", "MobilePhoneModel" ORDER BY u DESC LIMIT 10;
+SELECT "SearchPhrase", count(*) AS c FROM {table} WHERE "SearchPhrase" <> '' GROUP BY "SearchPhrase" ORDER BY c DESC LIMIT 10;
+SELECT "SearchPhrase", COUNT(DISTINCT "UserID") AS u FROM {table} WHERE "SearchPhrase" <> '' GROUP BY "SearchPhrase" ORDER BY u DESC LIMIT 10;
+SELECT "SearchEngineID", "SearchPhrase", count(*) AS c FROM {table} WHERE "SearchPhrase" <> '' GROUP BY "SearchEngineID", "SearchPhrase" ORDER BY c DESC LIMIT 10;
+SELECT "UserID", count(*) FROM {table} GROUP BY "UserID" ORDER BY count(*) DESC LIMIT 10;
+SELECT "UserID", "SearchPhrase", count(*) FROM {table} GROUP BY "UserID", "SearchPhrase" ORDER BY count(*) DESC LIMIT 10;
+SELECT "UserID", "SearchPhrase", count(*) FROM {table} GROUP BY "UserID", "SearchPhrase" LIMIT 10;
+SELECT "UserID", extract(minute FROM "EventTime") AS m, "SearchPhrase", count(*) FROM {table} GROUP BY "UserID", m, "SearchPhrase" ORDER BY count(*) DESC LIMIT 10;
+SELECT "UserID" FROM {table} WHERE "UserID" = -6101065172474983726;
+SELECT count(*) FROM {table} WHERE "URL" LIKE '%metrika%';
+SELECT "SearchPhrase", min("URL"), count(*) AS c FROM {table} WHERE "URL" LIKE '%metrika%' AND "SearchPhrase" <> '' GROUP BY "SearchPhrase" ORDER BY c DESC LIMIT 10;
+SELECT "SearchPhrase", min("URL"), min("Title"), count(*) AS c, COUNT(DISTINCT "UserID") FROM {table} WHERE "Title" LIKE '%Яндекс%' AND "URL" NOT LIKE '%.yandex.%' AND "SearchPhrase" <> '' GROUP BY "SearchPhrase" ORDER BY c DESC LIMIT 10;
+SELECT * FROM {table} WHERE "URL" LIKE '%metrika%' ORDER BY "EventTime" LIMIT 10;
+SELECT "SearchPhrase" FROM {table} WHERE "SearchPhrase" <> '' ORDER BY "EventTime" LIMIT 10;
+SELECT "SearchPhrase" FROM {table} WHERE "SearchPhrase" <> '' ORDER BY "SearchPhrase" LIMIT 10;
+SELECT "SearchPhrase" FROM {table} WHERE "SearchPhrase" <> '' ORDER BY "EventTime", "SearchPhrase" LIMIT 10;
+SELECT "CounterID", avg(length("URL")) AS l, count(*) AS c FROM {table} WHERE "URL" <> '' GROUP BY "CounterID" HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
+SELECT sys.getdomain("Referer") AS key, avg(length("Referer")) AS l, count(*) AS c, min("Referer") FROM {table} WHERE "Referer" <> '' GROUP BY key HAVING count(*) > 100000 ORDER BY l DESC LIMIT 25;
+SELECT sum("ResolutionWidth"), sum("ResolutionWidth" + 1), sum("ResolutionWidth" + 2), sum("ResolutionWidth" + 3), sum("ResolutionWidth" + 4), sum("ResolutionWidth" + 5), sum("ResolutionWidth" + 6), sum("ResolutionWidth" + 7), sum("ResolutionWidth" + 8), sum("ResolutionWidth" + 9), sum("ResolutionWidth" + 10), sum("ResolutionWidth" + 11), sum("ResolutionWidth" + 12), sum("ResolutionWidth" + 13), sum("ResolutionWidth" + 14), sum("ResolutionWidth" + 15), sum("ResolutionWidth" + 16), sum("ResolutionWidth" + 17), sum("ResolutionWidth" + 18), sum("ResolutionWidth" + 19), sum("ResolutionWidth" + 20), sum("ResolutionWidth" + 21), sum("ResolutionWidth" + 22), sum("ResolutionWidth" + 23), sum("ResolutionWidth" + 24), sum("ResolutionWidth" + 25), sum("ResolutionWidth" + 26), sum("ResolutionWidth" + 27), sum("ResolutionWidth" + 28), sum("ResolutionWidth" + 29), sum("ResolutionWidth" + 30), sum("ResolutionWidth" + 31), sum("ResolutionWidth" + 32), sum("ResolutionWidth" + 33), sum("ResolutionWidth" + 34), sum("ResolutionWidth" + 35), sum("ResolutionWidth" + 36), sum("ResolutionWidth" + 37), sum("ResolutionWidth" + 38), sum("ResolutionWidth" + 39), sum("ResolutionWidth" + 40), sum("ResolutionWidth" + 41), sum("ResolutionWidth" + 42), sum("ResolutionWidth" + 43), sum("ResolutionWidth" + 44), sum("ResolutionWidth" + 45), sum("ResolutionWidth" + 46), sum("ResolutionWidth" + 47), sum("ResolutionWidth" + 48), sum("ResolutionWidth" + 49), sum("ResolutionWidth" + 50), sum("ResolutionWidth" + 51), sum("ResolutionWidth" + 52), sum("ResolutionWidth" + 53), sum("ResolutionWidth" + 54), sum("ResolutionWidth" + 55), sum("ResolutionWidth" + 56), sum("ResolutionWidth" + 57), sum("ResolutionWidth" + 58), sum("ResolutionWidth" + 59), sum("ResolutionWidth" + 60), sum("ResolutionWidth" + 61), sum("ResolutionWidth" + 62), sum("ResolutionWidth" + 63), sum("ResolutionWidth" + 64), sum("ResolutionWidth" + 65), sum("ResolutionWidth" + 66), sum("ResolutionWidth" + 67), sum("ResolutionWidth" + 68), sum("ResolutionWidth" + 69), sum("ResolutionWidth" + 70), sum("ResolutionWidth" + 71), sum("ResolutionWidth" + 72), sum("ResolutionWidth" + 73), sum("ResolutionWidth" + 74), sum("ResolutionWidth" + 75), sum("ResolutionWidth" + 76), sum("ResolutionWidth" + 77), sum("ResolutionWidth" + 78), sum("ResolutionWidth" + 79), sum("ResolutionWidth" + 80), sum("ResolutionWidth" + 81), sum("ResolutionWidth" + 82), sum("ResolutionWidth" + 83), sum("ResolutionWidth" + 84), sum("ResolutionWidth" + 85), sum("ResolutionWidth" + 86), sum("ResolutionWidth" + 87), sum("ResolutionWidth" + 88), sum("ResolutionWidth" + 89) FROM {table};
+SELECT "SearchEngineID", "ClientIP", count(*) AS c, sum("Refresh"), avg("ResolutionWidth") FROM {table} WHERE "SearchPhrase" <> '' GROUP BY "SearchEngineID", "ClientIP" ORDER BY c DESC LIMIT 10;
+SELECT "WatchID", "ClientIP", count(*) AS c, sum("Refresh"), avg("ResolutionWidth") FROM {table} WHERE "SearchPhrase" <> '' GROUP BY "WatchID", "ClientIP" ORDER BY c DESC LIMIT 10;
+SELECT "WatchID", "ClientIP", count(*) AS c, sum("Refresh"), avg("ResolutionWidth") FROM {table} GROUP BY "WatchID", "ClientIP" ORDER BY c DESC LIMIT 10;
+SELECT "URL", count(*) AS c FROM {table} GROUP BY "URL" ORDER BY c DESC LIMIT 10;
+SELECT 1, "URL", count(*) AS c FROM {table} GROUP BY 1, "URL" ORDER BY c DESC LIMIT 10;
+SELECT "ClientIP", "ClientIP" - 1, "ClientIP" - 2, "ClientIP" - 3, count(*) AS c FROM {table} GROUP BY "ClientIP", "ClientIP" - 1, "ClientIP" - 2, "ClientIP" - 3 ORDER BY c DESC LIMIT 10;
+SELECT "URL", count(*) AS "PageViews" FROM {table} WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-31' AND "DontCountHits" = 0 AND "Refresh" = 0 AND "URL" <> '' GROUP BY "URL" ORDER BY "PageViews" DESC LIMIT 10;
+SELECT "Title", count(*) AS "PageViews" FROM {table} WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-31' AND "DontCountHits" = 0 AND "Refresh" = 0 AND "Title" <> '' GROUP BY "Title" ORDER BY "PageViews" DESC LIMIT 10;
+SELECT "URL", count(*) AS "PageViews" FROM {table} WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-31' AND "Refresh" = 0 AND "IsLink" <> 0 AND "IsDownload" = 0 GROUP BY "URL" ORDER BY "PageViews" DESC LIMIT 1000;
+SELECT "TraficSourceID", "SearchEngineID", "AdvEngineID", CASE WHEN ("SearchEngineID" = 0 AND "AdvEngineID" = 0) THEN "Referer" ELSE '' END AS Src, "URL" AS Dst, count(*) AS "PageViews" FROM {table} WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-31' AND "Refresh" = 0 GROUP BY "TraficSourceID", "SearchEngineID", "AdvEngineID", CASE WHEN ("SearchEngineID" = 0 AND "AdvEngineID" = 0) THEN "Referer" ELSE '' END, "URL" ORDER BY "PageViews" DESC LIMIT 1000;
+SELECT "URLHash", "EventDate", count(*) AS "PageViews" FROM {table} WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-31' AND "Refresh" = 0 AND "TraficSourceID" IN (-1, 6) AND "RefererHash" = 686716256552154761 GROUP BY "URLHash", "EventDate" ORDER BY "PageViews" DESC LIMIT 100;
+SELECT "WindowClientWidth", "WindowClientHeight", count(*) AS "PageViews" FROM {table} WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-31' AND "Refresh" = 0 AND "DontCountHits" = 0 AND "URLHash" = 686716256552154761 GROUP BY "WindowClientWidth", "WindowClientHeight" ORDER BY "PageViews" DESC LIMIT 10000;
+SELECT DATE_TRUNC('minute', "EventTime") AS "Minute", count(*) AS "PageViews" FROM {table} WHERE "CounterID" = 62 AND "EventDate" >= '2013-07-01' AND "EventDate" <= '2013-07-02' AND "Refresh" = 0 AND "DontCountHits" = 0 GROUP BY DATE_TRUNC('minute', "EventTime") ORDER BY DATE_TRUNC('minute', "EventTime");
7 benchmark/monetdb/expect.tcl → benchmark/monetdb/send-query (Normal file → Executable file)
@@ -1,5 +1,4 @@
-#!/usr/bin/env bash
-#!/bin/expect
+#!/usr/bin/expect

 # Set timeout
 set timeout 600
@@ -7,12 +6,12 @@ set timeout 600
 # Get arguments
 set query [lindex $argv 0]

-spawn mclient -u monetdb -d hits
+spawn mclient -u monetdb -d test --timer=clock
 expect "password:"
 send "monetdb\r"

 expect "sql>"
-send "$query\r"
+send "$query;\r"

 expect "sql>"
 send "\\q\r"
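
For reference, a hypothetical invocation of the renamed script, matching the postprocessing step above (note that the script now appends the trailing semicolon itself):

```
./send-query "SELECT count(*) FROM hits" | grep clk
```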
1222 benchmark/monetdb/usability.md Normal file
File diff suppressed because it is too large
@@ -1,9 +1,9 @@
 # This strings autochanged from release_lib.sh:
-SET(VERSION_REVISION 54437)
+SET(VERSION_REVISION 54438)
 SET(VERSION_MAJOR 20)
-SET(VERSION_MINOR 7)
+SET(VERSION_MINOR 8)
 SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH d64e51d1a78c1b53c33915ca0f75c97b2333844f)
+SET(VERSION_GITHASH 5d60ab33a511efd149c7c3de77c0dd4b81e65b13)
-SET(VERSION_DESCRIBE v20.7.1.1-prestable)
+SET(VERSION_DESCRIBE v20.8.1.1-prestable)
-SET(VERSION_STRING 20.7.1.1)
+SET(VERSION_STRING 20.8.1.1)
 # end of autochange
@@ -1,6 +1,10 @@
 option(ENABLE_CASSANDRA "Enable Cassandra" ${ENABLE_LIBRARIES})

 if (ENABLE_CASSANDRA)
+if (APPLE)
+SET(CMAKE_MACOSX_RPATH ON)
+endif()
+
 if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libuv")
 message (ERROR "submodule contrib/libuv is missing. to fix try run: \n git submodule update --init --recursive")
 elseif (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/cassandra")
@@ -1,8 +0,0 @@
-find_library (TERMCAP_LIBRARY tinfo)
-if (NOT TERMCAP_LIBRARY)
-find_library (TERMCAP_LIBRARY ncurses)
-endif()
-if (NOT TERMCAP_LIBRARY)
-find_library (TERMCAP_LIBRARY termcap)
-endif()
-message (STATUS "Using termcap: ${TERMCAP_LIBRARY}")
@@ -20,6 +20,12 @@ endif ()

 option (WEVERYTHING "Enables -Weverything option with some exceptions. This is intended for exploration of new compiler warnings that may be found to be useful. Only makes sense for clang." ON)

+# Control maximum size of stack frames. It can be important if the code is run in fibers with small stack size.
+# Only in release build because debug has too large stack frames.
+if ((NOT CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") AND (NOT SANITIZE))
+add_warning(frame-larger-than=16384)
+endif ()
+
 if (COMPILER_CLANG)
 add_warning(pedantic)
 no_warning(vla-extension)
@@ -18,7 +18,6 @@ file(GLOB AWS_CORE_SOURCES
 "${AWS_CORE_LIBRARY_DIR}/source/client/*.cpp"
 "${AWS_CORE_LIBRARY_DIR}/source/http/*.cpp"
 "${AWS_CORE_LIBRARY_DIR}/source/http/standard/*.cpp"
-"${AWS_CORE_LIBRARY_DIR}/source/http/curl/*.cpp"
 "${AWS_CORE_LIBRARY_DIR}/source/config/*.cpp"
 "${AWS_CORE_LIBRARY_DIR}/source/external/cjson/*.cpp"
 "${AWS_CORE_LIBRARY_DIR}/source/external/tinyxml2/*.cpp"
@@ -91,7 +90,6 @@ set_property(TARGET aws_s3_checksums PROPERTY C_STANDARD 99)

 add_library(aws_s3 ${S3_UNIFIED_SRC})

-target_compile_definitions(aws_s3 PUBLIC -DENABLE_CURL_CLIENT)
 target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_MAJOR=1")
 target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_MINOR=7")
 target_compile_definitions(aws_s3 PUBLIC "AWS_SDK_VERSION_PATCH=231")
@@ -102,4 +100,4 @@ if (OPENSSL_FOUND)
 target_link_libraries(aws_s3 PRIVATE ${OPENSSL_LIBRARIES})
 endif()

-target_link_libraries(aws_s3 PRIVATE aws_s3_checksums curl)
+target_link_libraries(aws_s3 PRIVATE aws_s3_checksums)
@@ -26,14 +26,7 @@ if (USE_INTERNAL_CCTZ)

 # Build a libray with embedded tzdata

-if (OS_LINUX AND ARCH_AMD64)
+if (OS_LINUX)
-find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy")
-if (OBJCOPY_PATH)
-message(STATUS "Using objcopy: ${OBJCOPY_PATH}.")
-else ()
-message(FATAL_ERROR "Cannot find objcopy.")
-endif ()
-
 set (TIMEZONES
 Africa/Abidjan
@@ -609,7 +602,7 @@ if (USE_INTERNAL_CCTZ)

 # https://stackoverflow.com/questions/14776463/compile-and-add-an-object-file-from-a-binary-with-cmake
 add_custom_command(OUTPUT ${TZ_OBJ}
-COMMAND cd ${TZDIR} && ${OBJCOPY_PATH} -I binary -O elf64-x86-64 -B i386 ${TIMEZONE} ${CMAKE_CURRENT_BINARY_DIR}/${TZ_OBJ}
+COMMAND cd ${TZDIR} && ${OBJCOPY_PATH} -I binary ${OBJCOPY_ARCH_OPTIONS} ${TIMEZONE} ${CMAKE_CURRENT_BINARY_DIR}/${TZ_OBJ}
 COMMAND ${OBJCOPY_PATH} --rename-section .data=.rodata,alloc,load,readonly,data,contents
 ${CMAKE_CURRENT_BINARY_DIR}/${TZ_OBJ} ${CMAKE_CURRENT_BINARY_DIR}/${TZ_OBJ})

@@ -623,7 +616,7 @@ if (USE_INTERNAL_CCTZ)
 # libraries in linker command. To avoid this we hardcode whole-archive
 # library into single string.
 add_dependencies(cctz tzdata)
-target_link_libraries(cctz INTERFACE "-Wl,--whole-archive $<TARGET_FILE:tzdata> -Wl,--no-whole-archive")
+target_link_libraries(cctz INTERFACE "-Wl,${WHOLE_ARCHIVE} $<TARGET_FILE:tzdata> -Wl,${NO_WHOLE_ARCHIVE}")
 endif ()

 else ()
@@ -54,7 +54,7 @@ endif ()
 # Example: DwarfInstructions.hpp: register unsigned long long x16 __asm("x16") = cfa;
 check_cxx_compiler_flag(-Wregister HAVE_WARNING_REGISTER)
 if (HAVE_WARNING_REGISTER)
-target_compile_options(unwind PRIVATE -Wno-register)
+target_compile_options(unwind PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:-Wno-register>")
 endif ()

 install(
2 contrib/rapidjson vendored
@@ -1 +1 @@
-Subproject commit 8f4c021fa2f1e001d2376095928fc0532adf2ae6
+Subproject commit c4ef90ccdbc21d5d5a628d08316bfd301e32d6fa

2 contrib/sentry-native vendored
@@ -1 +1 @@
-Subproject commit f91ed3f95b5653f247189d720ab00765b4899d6f
+Subproject commit 94644e92f0a3ff14bd35ed902a8622a2d15f7be4

2 contrib/simdjson vendored
@@ -1 +1 @@
-Subproject commit 1e4aa116e5a39e4ba23b9a93e6c7f048c5105b20
+Subproject commit 3190d66a49059092a1753dc35595923debfc1698
4 debian/changelog vendored
@@ -1,5 +1,5 @@
-clickhouse (20.7.1.1) unstable; urgency=low
+clickhouse (20.8.1.1) unstable; urgency=low

 * Modified source code

- -- clickhouse-release <clickhouse-release@yandex-team.ru> Mon, 13 Jul 2020 18:25:58 +0300
+ -- clickhouse-release <clickhouse-release@yandex-team.ru> Fri, 07 Aug 2020 21:45:46 +0300
8 debian/clickhouse-server.init vendored
@ -18,7 +18,6 @@ EXTRACT_FROM_CONFIG=${CLICKHOUSE_GENERIC_PROGRAM}-extract-from-config
 CLICKHOUSE_CONFDIR=/etc/$PROGRAM
 CLICKHOUSE_LOGDIR=/var/log/clickhouse-server
 CLICKHOUSE_LOGDIR_USER=root
-CLICKHOUSE_DATADIR_OLD=/opt/clickhouse
 CLICKHOUSE_DATADIR=/var/lib/clickhouse
 if [ -d "/var/lock" ]; then
     LOCALSTATEDIR=/var/lock
@ -34,7 +33,6 @@ CLICKHOUSE_BINDIR=/usr/bin
 CLICKHOUSE_CRONFILE=/etc/cron.d/clickhouse-server
 CLICKHOUSE_CONFIG=$CLICKHOUSE_CONFDIR/config.xml
 LOCKFILE=$LOCALSTATEDIR/$PROGRAM
-RETVAL=0
 CLICKHOUSE_PIDDIR=/var/run/$PROGRAM
 CLICKHOUSE_PIDFILE="$CLICKHOUSE_PIDDIR/$PROGRAM.pid"
 # CLICKHOUSE_STOP_TIMEOUT=60 # Disabled by default. Place to /etc/default/clickhouse if you need.
@ -124,11 +122,11 @@ initdb()
         CLICKHOUSE_DATADIR_FROM_CONFIG=$CLICKHOUSE_DATADIR
     fi

-    if ! getent group ${CLICKHOUSE_USER} >/dev/null; then
+    if ! getent passwd ${CLICKHOUSE_USER} >/dev/null; then
         echo "Can't chown to non-existing user ${CLICKHOUSE_USER}"
         return
     fi
-    if ! getent passwd ${CLICKHOUSE_GROUP} >/dev/null; then
+    if ! getent group ${CLICKHOUSE_GROUP} >/dev/null; then
         echo "Can't chown to non-existing group ${CLICKHOUSE_GROUP}"
         return
     fi
@ -182,7 +180,7 @@ start()
         su -s $SHELL ${CLICKHOUSE_USER} -c "$FLOCK -u 9; $CLICKHOUSE_PROGRAM_ENV exec -a \"$PROGRAM\" \"$CLICKHOUSE_BINDIR/$PROGRAM\" --daemon --pid-file=\"$CLICKHOUSE_PIDFILE\" --config-file=\"$CLICKHOUSE_CONFIG\""
         EXIT_STATUS=$?
         if [ $EXIT_STATUS -ne 0 ]; then
-            break
+            return $EXIT_STATUS
         fi
     fi
 fi
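The `getent` hunk above is the substance of this change: the old init script validated the user against the group database and the group against the passwd database, so both checks were testing the wrong thing. A minimal standalone sketch of the corrected checks, assuming `CLICKHOUSE_USER` and `CLICKHOUSE_GROUP` are already set (the defaults below are illustrative):

```bash
#!/usr/bin/env bash
CLICKHOUSE_USER=${CLICKHOUSE_USER:-clickhouse}
CLICKHOUSE_GROUP=${CLICKHOUSE_GROUP:-clickhouse}

# Users live in the passwd database, groups in the group database.
if ! getent passwd "${CLICKHOUSE_USER}" >/dev/null; then
    echo "Can't chown to non-existing user ${CLICKHOUSE_USER}" >&2
    exit 1
fi
if ! getent group "${CLICKHOUSE_GROUP}" >/dev/null; then
    echo "Can't chown to non-existing group ${CLICKHOUSE_GROUP}" >&2
    exit 1
fi
```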
4 debian/clickhouse-server.postinst vendored
@ -62,13 +62,13 @@ if [ "$1" = configure ] || [ -n "$not_deb_os" ]; then
     fi

     # check validity of user and group
-    if [ "`id -u ${CLICKHOUSE_USER}`" -eq 0 ]; then
+    if [ "$(id -u ${CLICKHOUSE_USER})" -eq 0 ]; then
         echo "The ${CLICKHOUSE_USER} system user must not have uid 0 (root).
 Please fix this and reinstall this package." >&2
         exit 1
     fi

-    if [ "`id -g ${CLICKHOUSE_GROUP}`" -eq 0 ]; then
+    if [ "$(id -g ${CLICKHOUSE_GROUP})" -eq 0 ]; then
         echo "The ${CLICKHOUSE_USER} system user must not have root as primary group.
 Please fix this and reinstall this package." >&2
         exit 1
@ -34,4 +34,5 @@ sudo chroot . /clickhouse server

 - creation of `clickhouse` user to run the server;
 - VOLUME for server;
-- most of the details, see other docker images for comparison.
+- CA Certificates;
+- most of the details, see other docker images for comparison;
@ -1,7 +1,7 @@
 FROM ubuntu:18.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=20.7.1.*
+ARG version=20.8.1.*

 RUN apt-get update \
     && apt-get install --yes --no-install-recommends \
@ -92,6 +92,10 @@
         "name": "yandex/clickhouse-fasttest",
         "dependent": []
     },
+    "docker/test/style": {
+        "name": "yandex/clickhouse-style-test",
+        "dependent": []
+    },
     "docker/test/integration/s3_proxy": {
         "name": "yandex/clickhouse-s3-proxy",
         "dependent": []
@ -103,5 +107,25 @@
     "docker/test/integration/helper_container": {
         "name": "yandex/clickhouse-integration-helper",
         "dependent": []
+    },
+    "docker/test/integration/mysql_golang_client": {
+        "name": "yandex/clickhouse-mysql-golang-client",
+        "dependent": []
+    },
+    "docker/test/integration/mysql_java_client": {
+        "name": "yandex/clickhouse-mysql-java-client",
+        "dependent": []
+    },
+    "docker/test/integration/mysql_js_client": {
+        "name": "yandex/clickhouse-mysql-js-client",
+        "dependent": []
+    },
+    "docker/test/integration/mysql_php_client": {
+        "name": "yandex/clickhouse-mysql-php-client",
+        "dependent": []
+    },
+    "docker/test/integration/postgresql_java_client": {
+        "name": "yandex/clickhouse-postgresql-java-client",
+        "dependent": []
     }
 }
@ -2,6 +2,9 @@

 set -x -e

+# Update tzdata to the latest version. It is embedded into clickhouse binary.
+sudo apt-get update && sudo apt-get install tzdata
+
 mkdir -p build/cmake/toolchain/darwin-x86_64
 tar xJf MacOSX10.14.sdk.tar.xz -C build/cmake/toolchain/darwin-x86_64 --strip-components=1

@ -21,7 +21,7 @@ RUN apt-get --allow-unauthenticated update -y \
 # Special dpkg-deb (https://github.com/ClickHouse-Extras/dpkg) version which is able
 # to compress files using pigz (https://zlib.net/pigz/) instead of gzip.
 # Significantly increase deb packaging speed and compatible with old systems
-RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/dpkg-deb
+RUN curl -O https://clickhouse-builds.s3.yandex.net/utils/1/dpkg-deb
 RUN chmod +x dpkg-deb
 RUN cp dpkg-deb /usr/bin

@ -2,6 +2,9 @@

 set -x -e

+# Update tzdata to the latest version. It is embedded into clickhouse binary.
+sudo apt-get update && sudo apt-get install tzdata
+
 ccache --show-stats ||:
 ccache --zero-stats ||:
 build/release --no-pbuilder $ALIEN_PKGS | ts '%Y-%m-%d %H:%M:%S'
@ -1,7 +1,7 @@
 FROM ubuntu:20.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=20.7.1.*
+ARG version=20.8.1.*
 ARG gosu_ver=1.10

 RUN apt-get update \
@ -1,7 +1,7 @@
 FROM ubuntu:18.04

 ARG repository="deb https://repo.clickhouse.tech/deb/stable/ main/"
-ARG version=20.7.1.*
+ARG version=20.8.1.*

 RUN apt-get update && \
     apt-get install -y apt-transport-https dirmngr && \
@ -53,7 +53,7 @@ mkdir -p /etc/clickhouse-server
 mkdir -p /etc/clickhouse-client
 mkdir -p /etc/clickhouse-server/config.d
 mkdir -p /etc/clickhouse-server/users.d
-mkdir -p /var/log/clickhouse-server
+ln -s /test_output /var/log/clickhouse-server
 cp $CLICKHOUSE_DIR/programs/server/config.xml /etc/clickhouse-server/
 cp $CLICKHOUSE_DIR/programs/server/users.xml /etc/clickhouse-server/

@ -66,7 +66,6 @@ ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config
 ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
 ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
 ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
-ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
 ln -s /usr/share/clickhouse-test/config/custom_settings_prefixes.xml /etc/clickhouse-server/config.d/
 ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
 ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
@ -84,28 +83,98 @@ ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
 ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
 ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml

+# Keep original query_masking_rules.xml
+ln -s --backup=simple --suffix=_original.xml /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
+
 clickhouse-server --config /etc/clickhouse-server/config.xml --daemon

+counter=0
 until clickhouse-client --query "SELECT 1"
 do
     sleep 0.1
+    if [ "$counter" -gt 1200 ]
+    then
+        break
+    fi
+
+    counter=$(($counter + 1))
 done

-TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 00302_http_compression 00417_kill_query 01294_lazy_database_concurrent 01193_metadata_loading base64 01031_mutations_interpreter_and_context json client 01305_replica_create_drop_zookeeper 01092_memory_profiler 01355_ilike 01281_unsucceeded_insert_select_queries_counter live_view limit_memory memory_limit memory_leak 00110_external_sort 00682_empty_parts_merge 00701_rollup 00109_shard_totals_after_having ddl_dictionaries 01251_dict_is_in_infinite_loop 01259_dictionary_custom_settings_ddl 01268_dictionary_direct_layout 01280_ssd_complex_key_dictionary 00652_replicated_mutations_zookeeper 01411_bayesian_ab_testing"
+TESTS_TO_SKIP=(
+    parquet
+    avro
+    h3
+    odbc
+    mysql
+    sha256
+    _orc_
+    arrow
+    01098_temporary_and_external_tables
+    01083_expressions_in_engine_arguments
+    hdfs
+    00911_tautological_compare
+    protobuf
+    capnproto
+    java_hash
+    hashing
+    secure
+    00490_special_line_separators_and_characters_outside_of_bmp
+    00436_convert_charset
+    00105_shard_collations
+    01354_order_by_tuple_collate_const
+    01292_create_user
+    01098_msgpack_format
+    00929_multi_match_edit_distance
+    00926_multimatch
+    00834_cancel_http_readonly_queries_on_client_close
+    brotli
+    parallel_alter
+    00302_http_compression
+    00417_kill_query
+    01294_lazy_database_concurrent
+    01193_metadata_loading
+    base64
+    01031_mutations_interpreter_and_context
+    json
+    client
+    01305_replica_create_drop_zookeeper
+    01092_memory_profiler
+    01355_ilike
+    01281_unsucceeded_insert_select_queries_counter
+    live_view
+    limit_memory
+    memory_limit
+    memory_leak
+    00110_external_sort
+    00682_empty_parts_merge
+    00701_rollup
+    00109_shard_totals_after_having
+    ddl_dictionaries
+    01251_dict_is_in_infinite_loop
+    01259_dictionary_custom_settings_ddl
+    01268_dictionary_direct_layout
+    01280_ssd_complex_key_dictionary
+    00652_replicated_mutations_zookeeper
+    01411_bayesian_ab_testing
+    01238_http_memory_tracking # max_memory_usage_for_user can interfere another queries running concurrently
+    01281_group_by_limit_memory_tracking # max_memory_usage_for_user can interfere another queries running concurrently
+)

-clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip $TESTS_TO_SKIP 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt
+clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip ${TESTS_TO_SKIP[*]} 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt


 kill_clickhouse () {
-    kill `ps ax | grep clickhouse-server | grep -v 'grep' | awk '{print $1}'` 2>/dev/null
+    killall clickhouse-server ||:

     for i in {1..10}
     do
-        if ! kill -0 `ps ax | grep clickhouse-server | grep -v 'grep' | awk '{print $1}'`; then
+        if ! killall -0 clickhouse-server; then
             echo "No clickhouse process"
             break
         else
-            echo "Process" `ps ax | grep clickhouse-server | grep -v 'grep' | awk '{print $1}'` "still alive"
+            echo "Clickhouse server process" $(pgrep -f clickhouse-server) "still alive"
             sleep 10
         fi
     done
@ -120,9 +189,16 @@ if [[ ! -z "$FAILED_TESTS" ]]; then

     clickhouse-server --config /etc/clickhouse-server/config.xml --daemon

+    counter=0
     until clickhouse-client --query "SELECT 1"
     do
         sleep 0.1
+        if [ "$counter" -gt 1200 ]
+        then
+            break
+        fi
+
+        counter=$(($counter + 1))
     done

     echo "Going to run again: $FAILED_TESTS"
@ -131,5 +207,3 @@ if [[ ! -z "$FAILED_TESTS" ]]; then
 else
     echo "No failed tests"
 fi

-mv /var/log/clickhouse-server/* /test_output
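The counter hunks above bound the server wait loop so a hung server cannot stall the fasttest job forever. A standalone sketch of the same pattern, assuming `clickhouse-client` is on `PATH` (the 1200-iteration cap mirrors the hunk above, roughly two minutes at 0.1 s per try):

```bash
#!/usr/bin/env bash
counter=0
until clickhouse-client --query "SELECT 1"
do
    sleep 0.1
    if [ "$counter" -gt 1200 ]
    then
        break  # give up instead of waiting forever
    fi
    counter=$(($counter + 1))
done
```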
@ -1,3 +1,6 @@
+# docker build -t yandex/clickhouse-mysql-golang-client .
+# MySQL golang client docker container
+
 FROM golang:1.12.2

 RUN go get "github.com/go-sql-driver/mysql"
@ -1,3 +1,6 @@
+# docker build -t yandex/clickhouse-mysql-java-client .
+# MySQL Java client docker container
+
 FROM ubuntu:18.04

 RUN apt-get update && \

8 docker/test/integration/mysql_js_client/Dockerfile Normal file
@ -0,0 +1,8 @@
+# docker build -t yandex/clickhouse-mysql-js-client .
+# MySQL JavaScript client docker container
+
+FROM node:8
+
+RUN npm install mysql
+
+COPY ./test.js test.js
@ -1,3 +1,6 @@
+# docker build -t yandex/clickhouse-mysql-php-client .
+# MySQL PHP client docker container
+
 FROM php:7.3-cli

 COPY ./client.crt client.crt
@ -1,3 +1,6 @@
+# docker build -t yandex/clickhouse-postgresql-java-client .
+# PostgreSQL Java client docker container
+
 FROM ubuntu:18.04

 RUN apt-get update && \
@ -1,8 +1,6 @@
 version: '2.3'
 services:
   golang1:
-    build:
-      context: ./
-      network: host
+    image: yandex/clickhouse-mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG}
     # to keep container running
     command: sleep infinity
@ -1,8 +1,6 @@
 version: '2.3'
 services:
   java1:
-    build:
-      context: ./
-      network: host
+    image: yandex/clickhouse-mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG}
     # to keep container running
     command: sleep infinity
@ -1,8 +1,6 @@
 version: '2.3'
 services:
   mysqljs1:
-    build:
-      context: ./
-      network: host
+    image: yandex/clickhouse-mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG}
     # to keep container running
     command: sleep infinity
@ -1,7 +1,6 @@
 version: '2.3'
 services:
   php1:
-    build:
-      context: ./
+    image: yandex/clickhouse-mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG}
     # to keep container running
     command: sleep infinity
@ -1,8 +1,6 @@
 version: '2.2'
 services:
   java:
-    build:
-      context: ./
-      network: host
+    image: yandex/clickhouse-postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG}
     # to keep container running
     command: sleep infinity
@ -22,5 +22,11 @@ export CLICKHOUSE_TESTS_CLIENT_BIN_PATH=/clickhouse
 export CLICKHOUSE_TESTS_BASE_CONFIG_DIR=/clickhouse-config
 export CLICKHOUSE_ODBC_BRIDGE_BINARY_PATH=/clickhouse-odbc-bridge

+export DOCKER_MYSQL_GOLANG_CLIENT_TAG=${DOCKER_MYSQL_GOLANG_CLIENT_TAG:=latest}
+export DOCKER_MYSQL_JAVA_CLIENT_TAG=${DOCKER_MYSQL_JAVA_CLIENT_TAG:=latest}
+export DOCKER_MYSQL_JS_CLIENT_TAG=${DOCKER_MYSQL_JS_CLIENT_TAG:=latest}
+export DOCKER_MYSQL_PHP_CLIENT_TAG=${DOCKER_MYSQL_PHP_CLIENT_TAG:=latest}
+export DOCKER_POSTGRESQL_JAVA_CLIENT_TAG=${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:=latest}
+
 cd /ClickHouse/tests/integration
 exec "$@"
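The `${VAR:=latest}` expansion above both assigns a default and leaves a caller-provided value untouched, so CI can pin a client image tag while local runs fall back to `latest`. A small sketch of the same idiom (the variable name follows the hunk above; the script name is illustrative):

```bash
#!/usr/bin/env bash
# Uses the caller's value if set; otherwise assigns and uses "latest".
export DOCKER_MYSQL_GOLANG_CLIENT_TAG=${DOCKER_MYSQL_GOLANG_CLIENT_TAG:=latest}
echo "golang client tag: ${DOCKER_MYSQL_GOLANG_CLIENT_TAG}"

# Example: DOCKER_MYSQL_GOLANG_CLIENT_TAG=20.8 ./entrypoint.sh prints "20.8".
```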
@ -318,10 +318,10 @@ create view right_query_log as select *

 create view query_logs as
     select 0 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
-        query_duration_ms from left_query_log
+        query_duration_ms, memory_usage from left_query_log
     union all
     select 1 version, query_id, ProfileEvents.Names, ProfileEvents.Values,
-        query_duration_ms from right_query_log
+        query_duration_ms, memory_usage from right_query_log
     ;

 -- This is a single source of truth on all metrics we have for query runs. The
@ -345,10 +345,11 @@ create table query_run_metric_arrays engine File(TSV, 'analyze/query-run-metric-
                 arrayMap(x->toFloat64(x), ProfileEvents.Values))]
             ),
             arrayReduce('sumMapState', [(
-                ['client_time', 'server_time'],
+                ['client_time', 'server_time', 'memory_usage'],
                 arrayMap(x->if(x != 0., x, -0.), [
                     toFloat64(query_runs.time),
-                    toFloat64(query_duration_ms / 1000.)]))])
+                    toFloat64(query_duration_ms / 1000.),
+                    toFloat64(memory_usage)]))])
         ]
     )) as metrics_tuple).1 metric_names,
     metrics_tuple.2 metric_values
@ -514,16 +515,20 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
 ;

 create table changed_perf_report engine File(TSV, 'report/changed-perf.tsv') as
-    select
-        toDecimal64(left, 3), toDecimal64(right, 3),
+    with
         -- server_time is sometimes reported as zero (if it's less than 1 ms),
         -- so we have to work around this to not get an error about conversion
         -- of NaN to decimal.
-        left > right
-            ? '- ' || toString(toDecimal64(left / (right + 0.001), 3)) || 'x'
-            : '+ ' || toString(toDecimal64(right / (left + 0.001), 3)) || 'x',
-        toDecimal64(diff, 3), toDecimal64(stat_threshold, 3),
-        changed_fail, test, query_index, query_display_name
+        (left > right ? left / right : right / left) as times_change_float,
+        isFinite(times_change_float) as times_change_finite,
+        toDecimal64(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
+        times_change_finite
+            ? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x'
+            : '--' as times_change_str
+    select
+        toDecimal64(left, 3), toDecimal64(right, 3), times_change_str,
+        toDecimal64(diff, 3), toDecimal64(stat_threshold, 3),
+        changed_fail, test, query_index, query_display_name
     from queries where changed_show order by abs(diff) desc;

 create table unstable_queries_report engine File(TSV, 'report/unstable-queries.tsv') as
@ -603,11 +608,18 @@ create table test_times_report engine File(TSV, 'report/test-times.tsv') as

 -- report for all queries page, only main metric
 create table all_tests_report engine File(TSV, 'report/all-queries.tsv') as
+    with
+        -- server_time is sometimes reported as zero (if it's less than 1 ms),
+        -- so we have to work around this to not get an error about conversion
+        -- of NaN to decimal.
+        (left > right ? left / right : right / left) as times_change_float,
+        isFinite(times_change_float) as times_change_finite,
+        toDecimal64(times_change_finite ? times_change_float : 1., 3) as times_change_decimal,
+        times_change_finite
+            ? (left > right ? '-' : '+') || toString(times_change_decimal) || 'x'
+            : '--' as times_change_str
     select changed_fail, unstable_fail,
-        toDecimal64(left, 3), toDecimal64(right, 3),
-        left > right
-            ? '- ' || toString(toDecimal64(left / (right + 0.001), 3)) || 'x'
-            : '+ ' || toString(toDecimal64(right / (left + 0.001), 3)) || 'x',
+        toDecimal64(left, 3), toDecimal64(right, 3), times_change_str,
         toDecimal64(isFinite(diff) ? diff : 0, 3),
         toDecimal64(isFinite(stat_threshold) ? stat_threshold : 0, 3),
         test, query_index, query_display_name
|
|||||||
time configure
|
time configure
|
||||||
;&
|
;&
|
||||||
"restart")
|
"restart")
|
||||||
|
numactl --hardware ||:
|
||||||
|
lscpu ||:
|
||||||
time restart
|
time restart
|
||||||
;&
|
;&
|
||||||
"run_tests")
|
"run_tests")
|
||||||
@ -967,7 +981,7 @@ case "$stage" in
|
|||||||
# to collect the logs. Prefer not to restart, because addresses might change
|
# to collect the logs. Prefer not to restart, because addresses might change
|
||||||
# and we won't be able to process trace_log data. Start in a subshell, so that
|
# and we won't be able to process trace_log data. Start in a subshell, so that
|
||||||
# it doesn't interfere with the watchdog through `wait`.
|
# it doesn't interfere with the watchdog through `wait`.
|
||||||
( get_profiles || restart || get_profiles ||: )
|
( get_profiles || restart && get_profiles ||: )
|
||||||
|
|
||||||
# Kill the whole process group, because somehow when the subshell is killed,
|
# Kill the whole process group, because somehow when the subshell is killed,
|
||||||
# the sleep inside remains alive and orphaned.
|
# the sleep inside remains alive and orphaned.
|
||||||
|
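The `;&` terminators in the hunk above are what let the comparison script resume from any named stage: in bash, `;&` falls through into the next case body instead of stopping at the first match. A toy sketch of the same staging pattern (stage names are illustrative):

```bash
#!/usr/bin/env bash
stage=${1:-configure}

case "$stage" in
"configure")
    echo "configure"
    ;&  # fall through to the next stage
"restart")
    echo "restart"
    ;&
"run_tests")
    echo "run tests"
    ;;
esac
# `./script.sh restart` skips configure but still runs restart and run_tests.
```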
@ -33,13 +33,16 @@ function download
         fi
     done

-# Might have the same version on left and right (for testing).
+# Might have the same version on left and right (for testing) -- in this case we just copy
+# already downloaded 'right' to the 'left. There is the third case when we don't have to
+# download anything, for example in some manual runs. In this case, SHAs are not set.
 if ! [ "$left_sha" = "$right_sha" ]
 then
     wget -nv -nd -c "$left_path" -O- | tar -C left --strip-components=1 -zxv &
-else
+elif [ "$right_sha" != "" ]
+then
     mkdir left ||:
-    cp -a right/* left &
+    cp -an right/* left &
 fi

 for dataset_name in $datasets
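The new `elif` distinguishes three cases: different SHAs (download the left binary), equal non-empty SHAs (copy the already-downloaded right over to left), and unset SHAs (manual run, nothing to do). The added `-n` (no-clobber) flag keeps any pre-existing left copy intact. A sketch of just that branch logic, under the assumption that both SHAs arrive as positional arguments:

```bash
#!/usr/bin/env bash
left_sha=$1; right_sha=$2   # may be empty in manual runs

if ! [ "$left_sha" = "$right_sha" ]
then
    echo "would download the left binary separately"
elif [ "$right_sha" != "" ]
then
    mkdir left ||:
    # -a preserves attributes, -n never overwrites existing files
    cp -an right/* left
fi
# If both SHAs are empty, nothing is downloaded or copied.
```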
@ -14,7 +14,7 @@ function find_reference_sha
     # master, the merge base is the tested commit itself, so we have to step back
     # once.
     start_ref=$(git -C right/ch merge-base origin/master pr)
-    if [ "PR_TO_TEST" == "0" ]
+    if [ "$PR_TO_TEST" == "0" ]
     then
         start_ref=$start_ref~
     fi
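The one-character fix above matters: `[ "PR_TO_TEST" == "0" ]` compares the literal string `PR_TO_TEST` to `0` and is always false, so the step-back to the parent commit never ran for master builds. A minimal reproduction of the bug and the fix:

```bash
#!/usr/bin/env bash
PR_TO_TEST=0

if [ "PR_TO_TEST" == "0" ]; then echo "never printed"; fi   # literal string, always false
if [ "$PR_TO_TEST" == "0" ]; then echo "printed"; fi        # expands the variable
```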
@ -15,24 +15,24 @@ function download
     mkdir left right db0 ||:

     "$script_dir/download.sh" ||: &
-    cp -vP "$repo_dir"/../build-gcc9-rel/programs/clickhouse* right &
-    cp -vP "$repo_dir"/../build-clang10-rel/programs/clickhouse* left &
+    cp -nvP "$repo_dir"/../build-gcc9-rel/programs/clickhouse* left &
+    cp -nvP "$repo_dir"/../build-clang10-rel/programs/clickhouse* right &
     wait
 }

 function configure
 {
     # Test files
-    cp -av "$repo_dir/tests/performance" right
-    cp -av "$repo_dir/tests/performance" left
+    cp -nav "$repo_dir/tests/performance" right
+    cp -nav "$repo_dir/tests/performance" left

     # Configs
-    cp -av "$script_dir/config" right
-    cp -av "$script_dir/config" left
-    cp -av "$repo_dir"/programs/server/config* right/config
-    cp -av "$repo_dir"/programs/server/user* right/config
-    cp -av "$repo_dir"/programs/server/config* left/config
-    cp -av "$repo_dir"/programs/server/user* left/config
+    cp -nav "$script_dir/config" right
+    cp -nav "$script_dir/config" left
+    cp -nav "$repo_dir"/programs/server/config* right/config
+    cp -nav "$repo_dir"/programs/server/user* right/config
+    cp -nav "$repo_dir"/programs/server/config* left/config
+    cp -nav "$repo_dir"/programs/server/user* left/config

     tree left
 }
@ -222,17 +222,22 @@ for query_index, q in enumerate(test_queries):
             query_error_on_connection[conn_index] = traceback.format_exc();
             continue

-    # If prewarm fails for the query on both servers -- report the error, skip
-    # the query and continue testing the next query.
-    if query_error_on_connection.count(None) == 0:
-        print(query_error_on_connection[0], file = sys.stderr)
-        continue
-
+    # Report all errors that ocurred during prewarm and decide what to do next.
+    # If prewarm fails for the query on all servers -- skip the query and
+    # continue testing the next query.
     # If prewarm fails on one of the servers, run the query on the rest of them.
-    # Useful for queries that use new functions added in the new server version.
-    if query_error_on_connection.count(None) < len(query_error_on_connection):
-        no_error = [i for i, e in enumerate(query_error_on_connection) if not e]
-        print(f'partial\t{query_index}\t{no_error}')
+    no_errors = []
+    for i, e in enumerate(query_error_on_connection):
+        if e:
+            print(e, file = sys.stderr)
+        else:
+            no_errors.append(i)
+
+    if len(no_errors) == 0:
+        continue
+    elif len(no_errors) < len(connections):
+        print(f'partial\t{query_index}\t{no_errors}')

     # Now, perform measured runs.
     # Track the time spent by the client to process this query, so that we can
@ -245,7 +250,15 @@ for query_index, q in enumerate(test_queries):
         for conn_index, c in enumerate(connections):
             if query_error_on_connection[conn_index]:
                 continue
-            res = c.execute(q, query_id = run_id)
+
+            try:
+                res = c.execute(q, query_id = run_id)
+            except Exception as e:
+                # Add query id to the exception to make debugging easier.
+                e.args = (run_id, *e.args)
+                e.message = run_id + ': ' + e.message
+                raise
+
             print(f'query\t{query_index}\t{run_id}\t{conn_index}\t{c.last_query.elapsed}')
             server_seconds += c.last_query.elapsed
@ -17,6 +17,8 @@ parser.add_argument('--report', default='main', choices=['main', 'all-queries'],
     help='Which report to build')
 args = parser.parse_args()

+tables = []
+errors_explained = []
 report_errors = []
 error_tests = 0
 slow_average_tests = 0
@ -145,21 +147,42 @@ tr:nth-child(odd) td {{filter: brightness(90%);}}
 table_anchor = 0
 row_anchor = 0

-def nextTableAnchor():
+def currentTableAnchor():
+    global table_anchor
+    return f'{table_anchor}'
+
+def newTableAnchor():
     global table_anchor
     table_anchor += 1
-    return str(table_anchor)
+    return currentTableAnchor()
+
+def currentRowAnchor():
+    global row_anchor
+    global table_anchor
+    return f'{table_anchor}.{row_anchor}'
+
 def nextRowAnchor():
     global row_anchor
     global table_anchor
-    row_anchor += 1
-    return str(table_anchor) + "." + str(row_anchor)
+    return f'{table_anchor}.{row_anchor + 1}'

-def tr(x):
-    a = nextRowAnchor()
+def setRowAnchor(anchor_row_part):
+    global row_anchor
+    global table_anchor
+    row_anchor = anchor_row_part
+    return currentRowAnchor()
+
+def advanceRowAnchor():
+    global row_anchor
+    global table_anchor
+    row_anchor += 1
+    return currentRowAnchor()
+
+def tr(x, anchor=None):
     #return '<tr onclick="location.href=\'#{a}\'" id={a}>{x}</tr>'.format(a=a, x=str(x))
-    return '<tr id={a}>{x}</tr>'.format(a=a, x=str(x))
+    anchor = anchor if anchor else advanceRowAnchor()
+    return f'<tr id={anchor}>{x}</tr>'

 def td(value, cell_attributes = ''):
     return '<td {cell_attributes}>{value}</td>'.format(
@ -169,19 +192,23 @@ def td(value, cell_attributes = ''):
 def th(x):
     return '<th>' + str(x) + '</th>'

-def tableRow(cell_values, cell_attributes = []):
-    return tr(''.join([td(v, a)
-        for v, a in itertools.zip_longest(
-            cell_values, cell_attributes,
-            fillvalue = '')
-        if a is not None and v is not None]))
+def tableRow(cell_values, cell_attributes = [], anchor=None):
+    return tr(
+        ''.join([td(v, a)
+            for v, a in itertools.zip_longest(
+                cell_values, cell_attributes,
+                fillvalue = '')
+            if a is not None and v is not None]),
+        anchor)

 def tableHeader(r):
     return tr(''.join([th(f) for f in r]))

 def tableStart(title):
-    anchor = nextTableAnchor();
     cls = '-'.join(title.lower().split(' ')[:3]);
+    global table_anchor
+    table_anchor = cls
+    anchor = currentTableAnchor()
     return f"""
 <h2 id="{anchor}">
 <a class="cancela" href="#{anchor}">{title}</a>
@ -211,20 +238,23 @@ def htmlRows(n):
         result += tableRow(row)
     return result

-def printSimpleTable(caption, columns, rows):
+def addSimpleTable(caption, columns, rows, pos=None):
+    global tables
+    text = ''
     if not rows:
         return

-    print(tableStart(caption))
-    print(tableHeader(columns))
+    text += tableStart(caption)
+    text += tableHeader(columns)
     for row in rows:
-        print(tableRow(row))
-    print(tableEnd())
+        text += tableRow(row)
+    text += tableEnd()
+    tables.insert(pos if pos else len(tables), text)

-def print_tested_commits():
+def add_tested_commits():
     global report_errors
     try:
-        printSimpleTable('Tested commits', ['Old', 'New'],
+        addSimpleTable('Tested commits', ['Old', 'New'],
             [['<pre>{}</pre>'.format(x) for x in
                 [open('left-commit.txt').read(),
                  open('right-commit.txt').read()]]])
@ -235,7 +265,8 @@ def print_tested_commits():
                 *sys.exc_info()[:2])[-1])
         pass

-def print_report_errors():
+def add_report_errors():
+    global tables
     global report_errors
     # Add the errors reported by various steps of comparison script
     try:
@ -246,67 +277,100 @@ def print_report_errors():
                 *sys.exc_info()[:2])[-1])
         pass

-    if len(report_errors):
-        print(tableStart('Errors while building the report'))
-        print(tableHeader(['Error']))
-        for x in report_errors:
-            print(tableRow([x]))
-        print(tableEnd())
+    if not report_errors:
+        return
+
+    text = tableStart('Errors while building the report')
+    text += tableHeader(['Error'])
+    for x in report_errors:
+        text += tableRow([x])
+    text += tableEnd()
+    # Insert after Tested Commits
+    tables.insert(1, text)
+    errors_explained.append([f'<a href="#{currentTableAnchor()}">There were some errors while building the report</a>']);
+
+def add_errors_explained():
+    if not errors_explained:
+        return
+
+    text = '<a name="fail1"/>'
+    text += tableStart('Error summary')
+    text += tableHeader(['Description'])
+    for row in errors_explained:
+        text += tableRow(row)
+    text += tableEnd()
+
+    global tables
+    tables.insert(1, text)
+

 if args.report == 'main':
     print(header_template.format())

-    print_tested_commits()
+    add_tested_commits()

     run_error_rows = tsvRows('run-errors.tsv')
     error_tests += len(run_error_rows)
-    printSimpleTable('Run errors', ['Test', 'Error'], run_error_rows)
+    addSimpleTable('Run errors', ['Test', 'Error'], run_error_rows)
+    if run_error_rows:
+        errors_explained.append([f'<a href="#{currentTableAnchor()}">There were some errors while running the tests</a>']);
+

     slow_on_client_rows = tsvRows('report/slow-on-client.tsv')
     error_tests += len(slow_on_client_rows)
-    printSimpleTable('Slow on client',
+    addSimpleTable('Slow on client',
         ['Client time, s', 'Server time, s', 'Ratio', 'Test', 'Query'],
         slow_on_client_rows)
+    if slow_on_client_rows:
+        errors_explained.append([f'<a href="#{currentTableAnchor()}">Some queries are taking noticeable time client-side (missing `FORMAT Null`?)</a>']);

     unmarked_short_rows = tsvRows('report/unmarked-short-queries.tsv')
     error_tests += len(unmarked_short_rows)
-    printSimpleTable('Short queries not marked as short',
+    addSimpleTable('Short queries not marked as short',
         ['New client time, s', 'Test', '#', 'Query'],
         unmarked_short_rows)
+    if unmarked_short_rows:
+        errors_explained.append([f'<a href="#{currentTableAnchor()}">Some queries have short duration but are not explicitly marked as "short"</a>']);

-    def print_partial():
+    def add_partial():
         rows = tsvRows('report/partial-queries-report.tsv')
         if not rows:
             return
-        global unstable_partial_queries, slow_average_tests
-        print(tableStart('Partial queries'))
+        global unstable_partial_queries, slow_average_tests, tables
+        text = tableStart('Partial queries')
         columns = ['Median time, s', 'Relative time variance', 'Test', '#', 'Query']
-        print(tableHeader(columns))
+        text += tableHeader(columns)
         attrs = ['' for c in columns]
         for row in rows:
+            anchor = f'{currentTableAnchor()}.{row[2]}.{row[3]}'
             if float(row[1]) > 0.10:
                 attrs[1] = f'style="background: {color_bad}"'
                 unstable_partial_queries += 1
+                errors_explained.append([f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' has excessive variance of run time. Keep it below 10%</a>'])
             else:
                 attrs[1] = ''
             if float(row[0]) > allowed_single_run_time:
                 attrs[0] = f'style="background: {color_bad}"'
+                errors_explained.append([f'<a href="#{anchor}">The query no. {row[3]} of test \'{row[2]}\' is taking too long to run. Keep the run time below {allowed_single_run_time} seconds"</a>'])
                 slow_average_tests += 1
             else:
                 attrs[0] = ''
-            print(tableRow(row, attrs))
-        print(tableEnd())
+            text += tableRow(row, attrs, anchor)
+        text += tableEnd()
+        tables.append(text)

-    print_partial()
+    add_partial()

-    def print_changes():
+    def add_changes():
         rows = tsvRows('report/changed-perf.tsv')
         if not rows:
             return

-        global faster_queries, slower_queries
+        global faster_queries, slower_queries, tables

-        print(tableStart('Changes in performance'))
+        text = tableStart('Changes in performance')
         columns = [
             'Old, s', # 0
             'New, s', # 1
@ -319,11 +383,12 @@ if args.report == 'main':
             'Query', # 8
         ]

-        print(tableHeader(columns))
+        text += tableHeader(columns)

         attrs = ['' for c in columns]
         attrs[5] = None
         for row in rows:
+            anchor = f'{currentTableAnchor()}.{row[6]}.{row[7]}'
             if int(row[5]):
                 if float(row[3]) < 0.:
                     faster_queries += 1
@ -331,18 +396,19 @@ if args.report == 'main':
                 else:
                     slower_queries += 1
                     attrs[2] = attrs[3] = f'style="background: {color_bad}"'
+                    errors_explained.append([f'<a href="#{anchor}">The query no. {row[7]} of test \'{row[6]}\' has slowed down</a>'])
             else:
                 attrs[2] = attrs[3] = ''

-            print(tableRow(row, attrs))
+            text += tableRow(row, attrs, anchor)

-        print(tableEnd())
+        text += tableEnd()
+        tables.append(text)

-    print_changes()
+    add_changes()

-    def print_unstable_queries():
-        global unstable_queries
-        global very_unstable_queries
+    def add_unstable_queries():
+        global unstable_queries, very_unstable_queries, tables

         unstable_rows = tsvRows('report/unstable-queries.tsv')
         if not unstable_rows:
@ -361,33 +427,35 @@ if args.report == 'main':
             'Query' #7
         ]

-        print(tableStart('Unstable queries'))
-        print(tableHeader(columns))
+        text = tableStart('Unstable queries')
+        text += tableHeader(columns)

         attrs = ['' for c in columns]
         attrs[4] = None
         for r in unstable_rows:
+            anchor = f'{currentTableAnchor()}.{r[5]}.{r[6]}'
             if int(r[4]):
                 very_unstable_queries += 1
                 attrs[3] = f'style="background: {color_bad}"'
             else:
                 attrs[3] = ''

-            print(tableRow(r, attrs))
+            text += tableRow(r, attrs, anchor)

-        print(tableEnd())
+        text += tableEnd()
+        tables.append(text)

-    print_unstable_queries()
+    add_unstable_queries()

     skipped_tests_rows = tsvRows('analyze/skipped-tests.tsv')
-    printSimpleTable('Skipped tests', ['Test', 'Reason'], skipped_tests_rows)
+    addSimpleTable('Skipped tests', ['Test', 'Reason'], skipped_tests_rows)

-    printSimpleTable('Test performance changes',
+    addSimpleTable('Test performance changes',
         ['Test', 'Queries', 'Unstable', 'Changed perf', 'Total not OK', 'Avg relative time diff'],
         tsvRows('report/test-perf-changes.tsv'))

-    def print_test_times():
-        global slow_average_tests
+    def add_test_times():
+        global slow_average_tests, tables
         rows = tsvRows('report/test-times.tsv')
         if not rows:
             return
@ -403,8 +471,8 @@ if args.report == 'main':
             'Shortest query<br>(sum for all runs), s', #7
         ]

-        print(tableStart('Test times'))
-        print(tableHeader(columns))
+        text = tableStart('Test times')
+        text += tableHeader(columns)

         nominal_runs = 13 # FIXME pass this as an argument
         total_runs = (nominal_runs + 1) * 2 # one prewarm run, two servers
@ -414,22 +482,25 @@ if args.report == 'main':
                 # FIXME should be 15s max -- investigate parallel_insert
                 slow_average_tests += 1
                 attrs[6] = f'style="background: {color_bad}"'
+                errors_explained.append([f'<a href="./all-queries.html#all-query-times.{r[0]}.0">The test \'{r[0]}\' is too slow to run as a whole. Investigate whether the create and fill queries can be sped up'])
             else:
                 attrs[6] = ''

             if float(r[5]) > allowed_single_run_time * total_runs:
                 slow_average_tests += 1
                 attrs[5] = f'style="background: {color_bad}"'
+                errors_explained.append([f'<a href="./all-queries.html#all-query-times.{r[0]}.0">Some query of the test \'{r[0]}\' is too slow to run. See the all queries report'])
             else:
                 attrs[5] = ''

-            print(tableRow(r, attrs))
+            text += tableRow(r, attrs)

-        print(tableEnd())
+        text += tableEnd()
+        tables.append(text)

-    print_test_times()
+    add_test_times()

-    def print_benchmark_results():
+    def add_benchmark_results():
         if not os.path.isfile('benchmark/website-left.json'):
             return

@ -479,26 +550,33 @@ if args.report == 'main':

             all_rows.append([row, attrs])

-        print(tableStart('Concurrent benchmarks'))
-        print(tableHeader(header))
-        for row, attrs in all_rows:
-            print(tableRow(row, attrs))
-        print(tableEnd())
+        text = tableStart('Concurrent benchmarks')
+        text += tableHeader(header)
+        for row, attrs in all_rows:
+            text += tableRow(row, attrs)
+        text += tableEnd()
+
+        global tables
+        tables.append(text)

     try:
-        print_benchmark_results()
+        add_benchmark_results()
     except:
         report_errors.append(
             traceback.format_exception_only(
                 *sys.exc_info()[:2])[-1])
         pass

-    printSimpleTable('Metric changes',
+    addSimpleTable('Metric changes',
         ['Metric', 'Old median value', 'New median value',
          'Relative difference', 'Times difference'],
         tsvRows('metrics/changes.tsv'))

-    print_report_errors()
+    add_report_errors()
+    add_errors_explained()
+
+    for t in tables:
+        print(t)

     print("""
 <p class="links">
@ -559,9 +637,9 @@ elif args.report == 'all-queries':

     print(header_template.format())

-    print_tested_commits()
+    add_tested_commits()

-    def print_all_queries():
+    def add_all_queries():
         rows = tsvRows('report/all-queries.tsv')
         if not rows:
             return
@ -579,13 +657,14 @@ elif args.report == 'all-queries':
             'Query', #9
         ]

-        print(tableStart('All query times'))
-        print(tableHeader(columns))
+        text = tableStart('All query times')
+        text += tableHeader(columns)

         attrs = ['' for c in columns]
         attrs[0] = None
         attrs[1] = None
         for r in rows:
+            anchor = f'{currentTableAnchor()}.{r[7]}.{r[8]}'
             if int(r[1]):
                 attrs[6] = f'style="background: {color_bad}"'
             else:
@ -606,13 +685,15 @@ elif args.report == 'all-queries':
                 attrs[2] = ''
                 attrs[3] = ''

-            print(tableRow(r, attrs))
+            text += tableRow(r, attrs, anchor)

-        print(tableEnd())
+        text += tableEnd()
+        tables.append(text)

-    print_all_queries()
+    add_all_queries()
+    add_report_errors()

-    print_report_errors()
+    for t in tables:
+        print(t)

     print("""
 <p class="links">
@ -29,7 +29,7 @@ echo "TSAN_OPTIONS='halt_on_error=1 history_size=7 ignore_noninstrumented_module
 echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment
 echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment

-service clickhouse-server start
+timeout 120 service clickhouse-server start

 wait_server

@ -37,7 +37,9 @@ wait_server
 chmod 777 -R /var/lib/clickhouse
 clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary"
 clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"
-service clickhouse-server restart
+
+timeout 120 service clickhouse-server stop
+timeout 120 service clickhouse-server start

 wait_server

@ -49,7 +51,8 @@ clickhouse-client --query "SHOW TABLES FROM test"

 ./stress --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION"

-service clickhouse-server restart
+timeout 120 service clickhouse-server stop
+timeout 120 service clickhouse-server start

 wait_server

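Wrapping the init-script calls in `timeout 120` turns a wedged `service clickhouse-server stop`/`start` into a bounded failure the CI job can detect instead of an indefinite hang. A sketch of the pattern under the assumption the script runs as root with the service installed (`timeout` returns 124 when it kills the command):

```bash
#!/usr/bin/env bash
if ! timeout 120 service clickhouse-server start; then
    # Either the start failed outright or it hung past the 120s budget.
    echo "server failed to start within 120s" >&2
    exit 1
fi
```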
Some files were not shown because too many files have changed in this diff