Merge branch 'master' into random_timezone_for_stateless_tests

Author: Alexey Milovidov, 2021-07-18 01:12:24 +03:00
Commit: da94ed8a87
7358 changed files with 317377 additions and 227703 deletions

.gitattributes

@@ -1,2 +1,4 @@
contrib/* linguist-vendored
*.h linguist-language=C++
# to avoid frequent conflicts
tests/queries/0_stateless/arcadia_skip_list.txt text merge=union


@@ -7,15 +7,29 @@ assignees: ''
---
(you don't have to strictly follow this form)
You have to provide the following information whenever possible.
**Describe the bug**
A clear and concise description of what is not working as intended.
**Does it reproduce on recent release?**
[The list of releases](https://github.com/ClickHouse/ClickHouse/blob/master/utils/list-versions/version_date.tsv)
**Enable crash reporting**
If possible, change `<enabled>` to `true` in the `send_crash_reports` section of `config.xml`:
```
<send_crash_reports>
<!-- Changing <enabled> to true allows sending crash reports to -->
<!-- the ClickHouse core developers team via Sentry https://sentry.io -->
<enabled>false</enabled>
</send_crash_reports>
```
**How to reproduce**
* Which ClickHouse server version to use
* Which interface to use, if it matters
* Non-default settings, if any
@@ -24,10 +38,13 @@ A clear and concise description of what is not working as intended.
* Queries to run that lead to unexpected result
**Expected behavior**
A clear and concise description of what you expected to happen.
**Error message and/or stacktrace**
If applicable, add the error message, stack trace, or screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.

.gitignore

@@ -14,6 +14,11 @@
/build-*
/tests/venv
# logs
*.log
*.stderr
*.stdout
/docs/build
/docs/publish
/docs/edit
@@ -27,6 +32,7 @@
/docs/zh/single.md
/docs/ja/single.md
/docs/fa/single.md
/docs/en/development/cmake-in-clickhouse.md
# callgrind files
callgrind.out.*

.gitmodules

@@ -17,6 +17,7 @@
[submodule "contrib/zlib-ng"]
path = contrib/zlib-ng
url = https://github.com/ClickHouse-Extras/zlib-ng.git
branch = clickhouse-new
[submodule "contrib/googletest"]
path = contrib/googletest
url = https://github.com/google/googletest.git
@@ -93,7 +94,7 @@
url = https://github.com/ClickHouse-Extras/libunwind.git
[submodule "contrib/simdjson"]
path = contrib/simdjson
url = https://github.com/ClickHouse-Extras/simdjson.git
url = https://github.com/simdjson/simdjson.git
[submodule "contrib/rapidjson"]
path = contrib/rapidjson
url = https://github.com/ClickHouse-Extras/rapidjson
@@ -102,7 +103,7 @@
url = https://github.com/ClickHouse-Extras/fastops
[submodule "contrib/orc"]
path = contrib/orc
url = https://github.com/apache/orc
url = https://github.com/ClickHouse-Extras/orc
[submodule "contrib/sparsehash-c11"]
path = contrib/sparsehash-c11
url = https://github.com/sparsehash/sparsehash-c11.git
@@ -133,7 +134,7 @@
url = https://github.com/unicode-org/icu.git
[submodule "contrib/flatbuffers"]
path = contrib/flatbuffers
url = https://github.com/google/flatbuffers.git
url = https://github.com/ClickHouse-Extras/flatbuffers.git
[submodule "contrib/libc-headers"]
path = contrib/libc-headers
url = https://github.com/ClickHouse-Extras/libc-headers.git
@@ -167,9 +168,6 @@
[submodule "contrib/fmtlib"]
path = contrib/fmtlib
url = https://github.com/fmtlib/fmt.git
[submodule "contrib/antlr4-runtime"]
path = contrib/antlr4-runtime
url = https://github.com/ClickHouse-Extras/antlr4-runtime.git
[submodule "contrib/sentry-native"]
path = contrib/sentry-native
url = https://github.com/ClickHouse-Extras/sentry-native.git
@@ -195,7 +193,7 @@
url = https://github.com/danlark1/miniselect
[submodule "contrib/rocksdb"]
path = contrib/rocksdb
url = https://github.com/ClickHouse-Extras/rocksdb.git
[submodule "contrib/xz"]
path = contrib/xz
url = https://github.com/xz-mirror/xz
@@ -209,9 +207,6 @@
[submodule "contrib/fast_float"]
path = contrib/fast_float
url = https://github.com/fastfloat/fast_float
[submodule "contrib/libpqxx"]
path = contrib/libpqxx
url = https://github.com/jtv/libpqxx
[submodule "contrib/libpq"]
path = contrib/libpq
url = https://github.com/ClickHouse-Extras/libpq
@@ -221,3 +216,21 @@
[submodule "contrib/NuRaft"]
path = contrib/NuRaft
url = https://github.com/ClickHouse-Extras/NuRaft.git
[submodule "contrib/nanodbc"]
path = contrib/nanodbc
url = https://github.com/ClickHouse-Extras/nanodbc.git
[submodule "contrib/datasketches-cpp"]
path = contrib/datasketches-cpp
url = https://github.com/ClickHouse-Extras/datasketches-cpp.git
[submodule "contrib/yaml-cpp"]
path = contrib/yaml-cpp
url = https://github.com/ClickHouse-Extras/yaml-cpp.git
[submodule "contrib/libpqxx"]
path = contrib/libpqxx
url = https://github.com/ClickHouse-Extras/libpqxx.git
[submodule "contrib/sqlite-amalgamation"]
path = contrib/sqlite-amalgamation
url = https://github.com/azadkuh/sqlite-amalgamation
[submodule "contrib/s2geometry"]
path = contrib/s2geometry
url = https://github.com/ClickHouse-Extras/s2geometry.git


@@ -1,3 +1,733 @@
### ClickHouse release v21.7, 2021-07-09
#### Backward Incompatible Change
* Improved performance of queries with explicitly defined large sets. Added compatibility setting `legacy_column_name_of_tuple_literal`. It makes sense to set it to `true` while doing a rolling update of the cluster from a version lower than 21.7 to any higher version; otherwise distributed queries with explicitly defined sets in the `IN` clause may fail during the update (a usage sketch follows this list). [#25371](https://github.com/ClickHouse/ClickHouse/pull/25371) ([Anton Popov](https://github.com/CurtizJ)).
* Forward/backward incompatible change of maximum buffer size in clickhouse-keeper (an experimental alternative to ZooKeeper). Better to do it now (before production), than later. [#25421](https://github.com/ClickHouse/ClickHouse/pull/25421) ([alesapin](https://github.com/alesapin)).
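A minimal sketch of the compatibility setting mentioned above; setting it per session is shown here as an illustration, a profile-level setting works the same way:

```sql
-- During a rolling upgrade from a pre-21.7 version, keep the old naming of
-- tuple literals so that distributed queries with explicit IN sets keep
-- working; revert after all replicas are upgraded.
SET legacy_column_name_of_tuple_literal = 1;
```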
#### New Feature
* Support configuration in YAML format as alternative to XML. This closes [#3607](https://github.com/ClickHouse/ClickHouse/issues/3607). [#21858](https://github.com/ClickHouse/ClickHouse/pull/21858) ([BoloniniD](https://github.com/BoloniniD)).
* Provide a way to restore a replicated table when the data is (possibly) present but the ZooKeeper metadata is lost. Resolves [#13458](https://github.com/ClickHouse/ClickHouse/issues/13458). [#13652](https://github.com/ClickHouse/ClickHouse/pull/13652) ([Mike Kot](https://github.com/myrrc)).
* Support structs and maps in Arrow/Parquet/ORC and dictionaries in Arrow input/output formats. Adds a new setting `output_format_arrow_low_cardinality_as_dictionary`. [#24341](https://github.com/ClickHouse/ClickHouse/pull/24341) ([Kruglov Pavel](https://github.com/Avogar)).
* Added support for `Array` type in dictionaries. [#25119](https://github.com/ClickHouse/ClickHouse/pull/25119) ([Maksim Kita](https://github.com/kitaisreal)).
* Added function `bitPositionsToArray`. Closes [#23792](https://github.com/ClickHouse/ClickHouse/issues/23792). Author [Kevin Wan](https://github.com/MaxWk) (see the combined example after this list). [#25394](https://github.com/ClickHouse/ClickHouse/pull/25394) ([Maksim Kita](https://github.com/kitaisreal)).
* Added function `dateName` to return names like 'Friday' or 'April'. Author Daniil Kondratyev (@dankondr). [#25372](https://github.com/ClickHouse/ClickHouse/pull/25372) ([Maksim Kita](https://github.com/kitaisreal)).
* Add `toJSONString` function to serialize columns to their JSON representations. [#25164](https://github.com/ClickHouse/ClickHouse/pull/25164) ([Amos Bird](https://github.com/amosbird)).
* Now `query_log` has two new columns: `initial_query_start_time`, `initial_query_start_time_microsecond` that record the starting time of a distributed query if any. [#25022](https://github.com/ClickHouse/ClickHouse/pull/25022) ([Amos Bird](https://github.com/amosbird)).
* Add aggregate function `segmentLengthSum`. [#24250](https://github.com/ClickHouse/ClickHouse/pull/24250) ([flynn](https://github.com/ucasfl)).
* Add a new boolean setting `prefer_global_in_and_join` which makes all `IN`/`JOIN` operators default to `GLOBAL IN`/`GLOBAL JOIN`. [#23434](https://github.com/ClickHouse/ClickHouse/pull/23434) ([Amos Bird](https://github.com/amosbird)).
* Support `ALTER DELETE` queries for `Join` table engine. [#23260](https://github.com/ClickHouse/ClickHouse/pull/23260) ([foolchi](https://github.com/foolchi)).
* Add `quantileBFloat16` aggregate function as well as the corresponding `quantilesBFloat16` and `medianBFloat16`. It is a very simple and fast quantile estimator with a relative error of no more than 0.390625%. This closes [#16641](https://github.com/ClickHouse/ClickHouse/issues/16641). [#23204](https://github.com/ClickHouse/ClickHouse/pull/23204) ([Ivan Novitskiy](https://github.com/RedClusive)).
* Implement `sequenceNextNode()` function useful for `flow analysis`. [#19766](https://github.com/ClickHouse/ClickHouse/pull/19766) ([achimbab](https://github.com/achimbab)).
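A few of the new functions from this list in action (a hedged sketch; the literal inputs are illustrative only):

```sql
SELECT bitPositionsToArray(toUInt8(5));            -- [0, 2]: bits 0 and 2 are set
SELECT dateName('weekday', toDate('2021-07-09'));  -- 'Friday'
SELECT toJSONString(map('a', 1, 'b', 2));          -- '{"a":1,"b":2}'
SELECT quantileBFloat16(0.5)(number) FROM numbers(1000);  -- approximate median
```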
#### Experimental Feature
* Add support for virtual filesystem over HDFS. [#11058](https://github.com/ClickHouse/ClickHouse/pull/11058) ([overshov](https://github.com/overshov)) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Now clickhouse-keeper (an experimental alternative to ZooKeeper) supports ZooKeeper-like `digest` ACLs. [#24448](https://github.com/ClickHouse/ClickHouse/pull/24448) ([alesapin](https://github.com/alesapin)).
#### Performance Improvement
* Added an optimization that transforms some functions into reading of subcolumns, to reduce the amount of data read. E.g., the expression `col IS NULL` is transformed into reading of the subcolumn `col.null`. The optimization can be enabled with the setting `optimize_functions_to_subcolumns`, which is currently off by default (a sketch follows this list). [#24406](https://github.com/ClickHouse/ClickHouse/pull/24406) ([Anton Popov](https://github.com/CurtizJ)).
* Rewrite more columns to possible alias expressions. This may enable better optimization, such as projections. [#24405](https://github.com/ClickHouse/ClickHouse/pull/24405) ([Amos Bird](https://github.com/amosbird)).
* Index of type `bloom_filter` can be used for expressions with `hasAny` function with constant arrays. This closes: [#24291](https://github.com/ClickHouse/ClickHouse/issues/24291). [#24900](https://github.com/ClickHouse/ClickHouse/pull/24900) ([Vasily Nemkov](https://github.com/Enmk)).
* Add exponential backoff to reschedule read attempt in case RabbitMQ queues are empty. (ClickHouse has support for importing data from RabbitMQ). Closes [#24340](https://github.com/ClickHouse/ClickHouse/issues/24340). [#24415](https://github.com/ClickHouse/ClickHouse/pull/24415) ([Kseniia Sumarokova](https://github.com/kssenii)).
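A sketch of the subcolumn optimization described above (the table `t` and column `col` are hypothetical):

```sql
SET optimize_functions_to_subcolumns = 1;
-- With the setting on, `col IS NULL` is rewritten to read only the small
-- `col.null` subcolumn instead of the whole Nullable column:
SELECT count() FROM t WHERE col IS NULL;
```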
#### Improvement
* Allow limiting bandwidth for replication. Add two Replicated\*MergeTree settings, `max_replicated_fetches_network_bandwidth` and `max_replicated_sends_network_bandwidth`, which allow limiting the maximum speed of replicated fetches/sends per table, and two server-wide settings (in the `default` user profile), `max_replicated_fetches_network_bandwidth_for_server` and `max_replicated_sends_network_bandwidth_for_server`, which limit the maximum speed of replication for all tables (a usage sketch follows this list). The settings are not followed perfectly accurately. Turned off by default. Fixes [#1821](https://github.com/ClickHouse/ClickHouse/issues/1821). [#24573](https://github.com/ClickHouse/ClickHouse/pull/24573) ([alesapin](https://github.com/alesapin)).
* Resource constraints and isolation for ODBC and Library bridges. Use a separate `clickhouse-bridge` group and user for bridge processes. Set `oom_score_adj` so the bridges will be the first candidates for the OOM killer. Set maximum RSS to 1 GiB. Closes [#23861](https://github.com/ClickHouse/ClickHouse/issues/23861). [#25280](https://github.com/ClickHouse/ClickHouse/pull/25280) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add standalone `clickhouse-keeper` symlink to the main `clickhouse` binary. Now it's possible to run coordination without the main clickhouse server. [#24059](https://github.com/ClickHouse/ClickHouse/pull/24059) ([alesapin](https://github.com/alesapin)).
* Use global settings for queries to `VIEW`. Fixed the behavior when queries to `VIEW` used local settings, which led to errors if the settings of `CREATE VIEW` and `SELECT` differed. As for now, `VIEW` won't use these modified settings, but you can still pass additional settings in the `SETTINGS` section of the `CREATE VIEW` query. Closes [#20551](https://github.com/ClickHouse/ClickHouse/issues/20551). [#24095](https://github.com/ClickHouse/ClickHouse/pull/24095) ([Vladimir](https://github.com/vdimir)).
* On server start, parts with an incorrect partition ID are never removed, but always detached. [#25070](https://github.com/ClickHouse/ClickHouse/issues/25070). [#25166](https://github.com/ClickHouse/ClickHouse/pull/25166) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Increase the size of the background schedule pool to 128 (`background_schedule_pool_size` setting). It helps avoid the replication queue hanging on a slow ZooKeeper connection. [#25072](https://github.com/ClickHouse/ClickHouse/pull/25072) ([alesapin](https://github.com/alesapin)).
* Add merge tree setting `max_parts_to_merge_at_once` which limits the number of parts that can be merged in the background at once. Doesn't affect `OPTIMIZE FINAL` query. Fixes [#1820](https://github.com/ClickHouse/ClickHouse/issues/1820). [#24496](https://github.com/ClickHouse/ClickHouse/pull/24496) ([alesapin](https://github.com/alesapin)).
* Allow `NOT IN` operator to be used in partition pruning. [#24894](https://github.com/ClickHouse/ClickHouse/pull/24894) ([Amos Bird](https://github.com/amosbird)).
* Recognize IPv4 addresses like `127.0.1.1` as local. This is controversial and closes [#23504](https://github.com/ClickHouse/ClickHouse/issues/23504). Michael Filimonov will test this feature. [#24316](https://github.com/ClickHouse/ClickHouse/pull/24316) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* A ClickHouse database created with MaterializeMySQL (an experimental feature) now contains all column comments from the materialized MySQL database. [#25199](https://github.com/ClickHouse/ClickHouse/pull/25199) ([Storozhuk Kostiantyn](https://github.com/sand6255)).
* Add settings (`connection_auto_close`/`connection_max_tries`/`connection_pool_size`) for MySQL storage engine. [#24146](https://github.com/ClickHouse/ClickHouse/pull/24146) ([Azat Khuzhin](https://github.com/azat)).
* Improve startup time of Distributed engine. [#25663](https://github.com/ClickHouse/ClickHouse/pull/25663) ([Azat Khuzhin](https://github.com/azat)).
* Improvement for Distributed tables. Drop replicas from dirname for `internal_replication=true` (allows INSERT into Distributed with a cluster of any number of replicas; before, only 15 replicas were supported and anything more would fail with ENAMETOOLONG while creating the directory for async blocks). [#25513](https://github.com/ClickHouse/ClickHouse/pull/25513) ([Azat Khuzhin](https://github.com/azat)).
* Added support for the `Interval` type in `LowCardinality`. It is needed for intermediate values of some expressions. Closes [#21730](https://github.com/ClickHouse/ClickHouse/issues/21730). [#25410](https://github.com/ClickHouse/ClickHouse/pull/25410) ([Vladimir](https://github.com/vdimir)).
* Add `==` operator on time conditions for `sequenceMatch` and `sequenceCount` functions. E.g.: `sequenceMatch('(?1)(?t==1)(?2)')(time, data = 1, data = 2)`. [#25299](https://github.com/ClickHouse/ClickHouse/pull/25299) ([Christophe Kalenzaga](https://github.com/mga-chka)).
* Add settings `http_max_fields`, `http_max_field_name_size`, `http_max_field_value_size`. [#25296](https://github.com/ClickHouse/ClickHouse/pull/25296) ([Ivan](https://github.com/abyss7)).
* Add support for function `if` with `Decimal` and `Int` types on its branches. This closes [#20549](https://github.com/ClickHouse/ClickHouse/issues/20549). This closes [#10142](https://github.com/ClickHouse/ClickHouse/issues/10142). [#25283](https://github.com/ClickHouse/ClickHouse/pull/25283) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Update prompt in `clickhouse-client` and display a message when reconnecting. This closes [#10577](https://github.com/ClickHouse/ClickHouse/issues/10577). [#25281](https://github.com/ClickHouse/ClickHouse/pull/25281) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Correct memory tracking in aggregate function `topK`. This closes [#25259](https://github.com/ClickHouse/ClickHouse/issues/25259). [#25260](https://github.com/ClickHouse/ClickHouse/pull/25260) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix `topLevelDomain` for IDN hosts (i.e. `example.рф`), before it returns empty string for such hosts. [#25103](https://github.com/ClickHouse/ClickHouse/pull/25103) ([Azat Khuzhin](https://github.com/azat)).
* Detect the Linux kernel version at runtime (to make nested epoll work, which is required for `async_socket_for_remote`/`use_hedged_requests`; otherwise remote queries may get stuck). [#25067](https://github.com/ClickHouse/ClickHouse/pull/25067) ([Azat Khuzhin](https://github.com/azat)).
* For distributed queries, when `optimize_skip_unused_shards=1`, allow skipping a shard with a condition like `(sharding key) IN (one-element-tuple)`. (Tuples with many elements were already supported; a tuple with a single element did not work because it is parsed as a literal.) [#24930](https://github.com/ClickHouse/ClickHouse/pull/24930) ([Amos Bird](https://github.com/amosbird)).
* Improved log messages of S3 errors, no more double whitespaces in case of empty keys and buckets. [#24897](https://github.com/ClickHouse/ClickHouse/pull/24897) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Some queries require multi-pass semantic analysis. Try reusing built sets for `IN` in this case. [#24874](https://github.com/ClickHouse/ClickHouse/pull/24874) ([Amos Bird](https://github.com/amosbird)).
* Respect `max_distributed_connections` for `insert_distributed_sync` (otherwise for huge clusters and sync insert it may run out of `max_thread_pool_size`). [#24754](https://github.com/ClickHouse/ClickHouse/pull/24754) ([Azat Khuzhin](https://github.com/azat)).
* Avoid hiding errors like `Limit for rows or bytes to read exceeded` for scalar subqueries. [#24545](https://github.com/ClickHouse/ClickHouse/pull/24545) ([nvartolomei](https://github.com/nvartolomei)).
* Make String-to-Int parser stricter so that `toInt64('+')` will throw. [#24475](https://github.com/ClickHouse/ClickHouse/pull/24475) ([Amos Bird](https://github.com/amosbird)).
* If `SSD_CACHE` is created with DDL query, it can be created only inside `user_files` directory. [#24466](https://github.com/ClickHouse/ClickHouse/pull/24466) ([Maksim Kita](https://github.com/kitaisreal)).
* Support specifying a non-default PostgreSQL schema for insert queries. Closes [#24149](https://github.com/ClickHouse/ClickHouse/issues/24149). [#24413](https://github.com/ClickHouse/ClickHouse/pull/24413) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix IPv6 addresses resolving (i.e. fixes `select * from remote('[::1]', system.one)`). [#24319](https://github.com/ClickHouse/ClickHouse/pull/24319) ([Azat Khuzhin](https://github.com/azat)).
* Fix trailing whitespaces in the FROM clause with subqueries in multiline mode; this also makes the query output slightly more human-friendly. [#24151](https://github.com/ClickHouse/ClickHouse/pull/24151) ([Azat Khuzhin](https://github.com/azat)).
* Improvement for Distributed tables. Add ability to split distributed batch on failures (i.e. due to memory limits, corruptions), under `distributed_directory_monitor_split_batch_on_failure` (OFF by default). [#23864](https://github.com/ClickHouse/ClickHouse/pull/23864) ([Azat Khuzhin](https://github.com/azat)).
* Handle column name clashes for `Join` table engine. Closes [#20309](https://github.com/ClickHouse/ClickHouse/issues/20309). [#23769](https://github.com/ClickHouse/ClickHouse/pull/23769) ([Vladimir](https://github.com/vdimir)).
* Display progress for `File` table engine in `clickhouse-local` and on INSERT query in `clickhouse-client` when data is passed to stdin. Closes [#18209](https://github.com/ClickHouse/ClickHouse/issues/18209). [#23656](https://github.com/ClickHouse/ClickHouse/pull/23656) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Bugfixes and improvements of `clickhouse-copier`. Allow to copy tables with different (but compatible) schemas. Closes [#9159](https://github.com/ClickHouse/ClickHouse/issues/9159). Added test to copy ReplacingMergeTree. Closes [#22711](https://github.com/ClickHouse/ClickHouse/issues/22711). Support TTL on columns and Data Skipping Indices: they are simply removed when creating the internal Distributed table (the underlying table will keep TTL and skipping indices). Closes [#19384](https://github.com/ClickHouse/ClickHouse/issues/19384). Allow to copy MATERIALIZED and ALIAS columns. There are some cases in which it could be helpful (e.g. if such a column is in the PRIMARY KEY). Now it can be allowed by setting the `allow_to_copy_alias_and_materialized_columns` property to true in the task configuration. Closes [#9177](https://github.com/ClickHouse/ClickHouse/issues/9177). Closes [#11007](https://github.com/ClickHouse/ClickHouse/issues/11007). Closes [#9514](https://github.com/ClickHouse/ClickHouse/issues/9514). Added a property `allow_to_drop_target_partitions` in the task configuration to drop a partition in the original table before moving helping tables. Closes [#20957](https://github.com/ClickHouse/ClickHouse/issues/20957). Got rid of the `OPTIMIZE DEDUPLICATE` query. This hack was needed because `ALTER TABLE MOVE PARTITION` was retried many times and plain MergeTree tables don't have deduplication. Closes [#17966](https://github.com/ClickHouse/ClickHouse/issues/17966). Write progress to a ZooKeeper node on path `task_path + /status` in JSON format. Closes [#20955](https://github.com/ClickHouse/ClickHouse/issues/20955). Support for ReplicatedTables without arguments. Closes [#24834](https://github.com/ClickHouse/ClickHouse/issues/24834). [#23518](https://github.com/ClickHouse/ClickHouse/pull/23518) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Added sleep with backoff between read retries from S3. [#23461](https://github.com/ClickHouse/ClickHouse/pull/23461) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Respect `insert_allow_materialized_columns` (allows materialized columns) for INSERT into `Distributed` table. [#23349](https://github.com/ClickHouse/ClickHouse/pull/23349) ([Azat Khuzhin](https://github.com/azat)).
* Add ability to push down LIMIT for distributed queries. [#23027](https://github.com/ClickHouse/ClickHouse/pull/23027) ([Azat Khuzhin](https://github.com/azat)).
* Fix zero-copy replication with several S3 volumes (Fixes [#22679](https://github.com/ClickHouse/ClickHouse/issues/22679)). [#22864](https://github.com/ClickHouse/ClickHouse/pull/22864) ([ianton-ru](https://github.com/ianton-ru)).
* Resolve the actual port number bound when a user requests any available port from the operating system to show it in the log message. [#25569](https://github.com/ClickHouse/ClickHouse/pull/25569) ([bnaecker](https://github.com/bnaecker)).
* Fixed a case where conversion of PostgreSQL arrays sometimes resulted in the `String` data type instead of an n-dimensional array, because `attndims` works incorrectly in some cases. Closes [#24804](https://github.com/ClickHouse/ClickHouse/issues/24804). [#25538](https://github.com/ClickHouse/ClickHouse/pull/25538) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix conversion of DateTime with timezone for MySQL, PostgreSQL, ODBC. Closes [#5057](https://github.com/ClickHouse/ClickHouse/issues/5057). [#25528](https://github.com/ClickHouse/ClickHouse/pull/25528) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Distinguish KILL MUTATION for different tables (fixes unexpected `Cancelled mutating parts` error). [#25025](https://github.com/ClickHouse/ClickHouse/pull/25025) ([Azat Khuzhin](https://github.com/azat)).
* Allow to declare S3 disk at root of bucket (S3 virtual filesystem is an experimental feature under development). [#24898](https://github.com/ClickHouse/ClickHouse/pull/24898) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Enable reading of subcolumns (e.g. components of Tuples) for distributed tables. [#24472](https://github.com/ClickHouse/ClickHouse/pull/24472) ([Anton Popov](https://github.com/CurtizJ)).
* A feature for MySQL compatibility protocol: make the `user` function return correct output. Closes [#25697](https://github.com/ClickHouse/ClickHouse/pull/25697). [#25697](https://github.com/ClickHouse/ClickHouse/pull/25697) ([sundyli](https://github.com/sundy-li)).
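A usage sketch for the throttling and merge-limit settings from this list (the table name `t` and the numeric values are assumptions):

```sql
-- Limit replicated fetches for one table to ~100 MiB/s and cap the number of
-- parts merged in one background merge:
ALTER TABLE t
    MODIFY SETTING
        max_replicated_fetches_network_bandwidth = 104857600,
        max_parts_to_merge_at_once = 8;
```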
#### Bug Fix
* Improvement for backward compatibility. Use old modulo function version when used in partition key. Closes [#23508](https://github.com/ClickHouse/ClickHouse/issues/23508). [#24157](https://github.com/ClickHouse/ClickHouse/pull/24157) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix extremely rare bug on low-memory servers which can lead to the inability to perform merges without restart. Possibly fixes [#24603](https://github.com/ClickHouse/ClickHouse/issues/24603). [#24872](https://github.com/ClickHouse/ClickHouse/pull/24872) ([alesapin](https://github.com/alesapin)).
* Fix extremely rare error `Tagging already tagged part` in replication queue during concurrent `alter move/replace partition`. Possibly fixes [#22142](https://github.com/ClickHouse/ClickHouse/issues/22142). [#24961](https://github.com/ClickHouse/ClickHouse/pull/24961) ([alesapin](https://github.com/alesapin)).
* Fix potential crash when calculating aggregate function states by aggregation of aggregate function states of other aggregate functions (not a practical use case). See [#24523](https://github.com/ClickHouse/ClickHouse/issues/24523). [#25015](https://github.com/ClickHouse/ClickHouse/pull/25015) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed the behavior when the query `SYSTEM RESTART REPLICA` or `SYSTEM SYNC REPLICA` does not finish. This was detected on a server with an extremely low amount of RAM. [#24457](https://github.com/ClickHouse/ClickHouse/pull/24457) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix a bug which can lead to the ZooKeeper client hanging inside clickhouse-server. [#24721](https://github.com/ClickHouse/ClickHouse/pull/24721) ([alesapin](https://github.com/alesapin)).
* If ZooKeeper connection was lost and replica was cloned after restoring the connection, its replication queue might contain outdated entries. Fixed failed assertion when replication queue contains intersecting virtual parts. It may rarely happen if some data part was lost. Print error in log instead of terminating. [#24777](https://github.com/ClickHouse/ClickHouse/pull/24777) ([tavplubix](https://github.com/tavplubix)).
* Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix bug which can lead to intersecting parts after merges with TTL: `Part all_40_40_0 is covered by all_40_40_1 but should be merged into all_40_41_1. This shouldn't happen often.`. [#25549](https://github.com/ClickHouse/ClickHouse/pull/25549) ([alesapin](https://github.com/alesapin)).
* On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([tavplubix](https://github.com/tavplubix)).
* Fix error `Key expression contains comparison between inconvertible types` for queries with `ARRAY JOIN` in case if array is used in primary key. Fixes [#8247](https://github.com/ClickHouse/ClickHouse/issues/8247). [#25546](https://github.com/ClickHouse/ClickHouse/pull/25546) ([Anton Popov](https://github.com/CurtizJ)).
* Fix wrong totals for queries with `WITH TOTALS` and `WITH FILL`. Fixes [#20872](https://github.com/ClickHouse/ClickHouse/issues/20872). [#25539](https://github.com/ClickHouse/ClickHouse/pull/25539) ([Anton Popov](https://github.com/CurtizJ)).
* Fix data race when querying `system.clusters` while reloading the cluster configuration at the same time. [#25737](https://github.com/ClickHouse/ClickHouse/pull/25737) ([Amos Bird](https://github.com/amosbird)).
* Fixed `No such file or directory` error on moving `Distributed` table between databases. Fixes [#24971](https://github.com/ClickHouse/ClickHouse/issues/24971). [#25667](https://github.com/ClickHouse/ClickHouse/pull/25667) ([tavplubix](https://github.com/tavplubix)).
* `REPLACE PARTITION` might be ignored in rare cases if the source partition was empty. It's fixed. Fixes [#24869](https://github.com/ClickHouse/ClickHouse/issues/24869). [#25665](https://github.com/ClickHouse/ClickHouse/pull/25665) ([tavplubix](https://github.com/tavplubix)).
* Fixed a bug in `Replicated` database engine that might rarely cause some replica to skip enqueued DDL query. [#24805](https://github.com/ClickHouse/ClickHouse/pull/24805) ([tavplubix](https://github.com/tavplubix)).
* Fix null pointer dereference in `EXPLAIN AST` without query. [#25631](https://github.com/ClickHouse/ClickHouse/pull/25631) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix waiting for the automatic dropping of empty parts. It could lead to the background pool filling up completely and replication getting stuck. [#23315](https://github.com/ClickHouse/ClickHouse/pull/23315) ([Anton Popov](https://github.com/CurtizJ)).
* Fix restore of a table stored in S3 virtual filesystem (it is an experimental feature not ready for production). [#25601](https://github.com/ClickHouse/ClickHouse/pull/25601) ([ianton-ru](https://github.com/ianton-ru)).
* Fix nullptr dereference in `Arrow` format when using `Decimal256`. Add `Decimal256` support for `Arrow` format. [#25531](https://github.com/ClickHouse/ClickHouse/pull/25531) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix excessive underscore before the names of the preprocessed configuration files. [#25431](https://github.com/ClickHouse/ClickHouse/pull/25431) ([Vitaly Baranov](https://github.com/vitlibar)).
* A fix for `clickhouse-copier` tool: Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix the `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Support `SimpleAggregateFunction(LowCardinality)` for `SummingMergeTree`. Fixes [#25134](https://github.com/ClickHouse/ClickHouse/issues/25134). [#25300](https://github.com/ClickHouse/ClickHouse/pull/25300) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix logical error with exception message "Cannot sum Array/Tuple in min/maxMap". [#25298](https://github.com/ClickHouse/ClickHouse/pull/25298) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix error `Bad cast from type DB::ColumnLowCardinality to DB::ColumnVector<char8_t>` for queries where `LowCardinality` argument was used for IN (this bug appeared in 21.6). Fixes [#25187](https://github.com/ClickHouse/ClickHouse/issues/25187). [#25290](https://github.com/ClickHouse/ClickHouse/pull/25290) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix incorrect behaviour of `joinGetOrNull` with not-nullable columns. This fixes [#24261](https://github.com/ClickHouse/ClickHouse/issues/24261). [#25288](https://github.com/ClickHouse/ClickHouse/pull/25288) ([Amos Bird](https://github.com/amosbird)).
* Fix incorrect behaviour and UBSan report in big integers. In previous versions `CAST(1e19 AS UInt128)` returned zero. [#25279](https://github.com/ClickHouse/ClickHouse/pull/25279) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed an error which occurred while inserting a subset of columns using CSVWithNames format. Fixes [#25129](https://github.com/ClickHouse/ClickHouse/issues/25129). [#25169](https://github.com/ClickHouse/ClickHouse/pull/25169) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Do not use table's projection for `SELECT` with `FINAL`. It is not supported yet. [#25163](https://github.com/ClickHouse/ClickHouse/pull/25163) ([Amos Bird](https://github.com/amosbird)).
* Fix possible loss of parts after updating to 21.5 in case the table used `UUID` in the partition key (it is not recommended to use `UUID` in the partition key). Fixes [#25070](https://github.com/ClickHouse/ClickHouse/issues/25070). [#25127](https://github.com/ClickHouse/ClickHouse/pull/25127) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix crash in query with cross join and `joined_subquery_requires_alias = 0`. Fixes [#24011](https://github.com/ClickHouse/ClickHouse/issues/24011). [#25082](https://github.com/ClickHouse/ClickHouse/pull/25082) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a bug with constant maps in the `mapContains` function that led to the error `empty column was returned by function mapContains`. Closes [#25077](https://github.com/ClickHouse/ClickHouse/issues/25077). [#25080](https://github.com/ClickHouse/ClickHouse/pull/25080) ([Kruglov Pavel](https://github.com/Avogar)).
* Remove possibility to create tables with columns referencing themselves like `a UInt32 ALIAS a + 1` or `b UInt32 MATERIALIZED b`. Fixes [#24910](https://github.com/ClickHouse/ClickHouse/issues/24910), [#24292](https://github.com/ClickHouse/ClickHouse/issues/24292). [#25059](https://github.com/ClickHouse/ClickHouse/pull/25059) ([alesapin](https://github.com/alesapin)).
* Fix wrong result when using an aggregate projection with a *non-empty* `GROUP BY` key to execute a query with an *empty* `GROUP BY` key. [#25055](https://github.com/ClickHouse/ClickHouse/pull/25055) ([Amos Bird](https://github.com/amosbird)).
* Fix serialization of split nested messages in Protobuf format. This PR fixes [#24647](https://github.com/ClickHouse/ClickHouse/issues/24647). [#25000](https://github.com/ClickHouse/ClickHouse/pull/25000) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix limit/offset settings for distributed queries (ignore on the remote nodes). [#24940](https://github.com/ClickHouse/ClickHouse/pull/24940) ([Azat Khuzhin](https://github.com/azat)).
* Fix possible heap-buffer-overflow in `Arrow` format. [#24922](https://github.com/ClickHouse/ClickHouse/pull/24922) ([Kruglov Pavel](https://github.com/Avogar)).
* Fixed possible error 'Cannot read from istream at offset 0' when reading a file from DiskS3 (S3 virtual filesystem is an experimental feature under development that should not be used in production). [#24885](https://github.com/ClickHouse/ClickHouse/pull/24885) ([Pavel Kovalenko](https://github.com/Jokser)).
* Fix "Missing columns" exception when joining Distributed Materialized View. [#24870](https://github.com/ClickHouse/ClickHouse/pull/24870) ([Azat Khuzhin](https://github.com/azat)).
* Allow `NULL` values in postgresql compatibility protocol. Closes [#22622](https://github.com/ClickHouse/ClickHouse/issues/22622). [#24857](https://github.com/ClickHouse/ClickHouse/pull/24857) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix a bug where the exception `Mutation was killed` could be thrown to a client waiting for a mutation that was not yet loaded into memory. [#24809](https://github.com/ClickHouse/ClickHouse/pull/24809) ([alesapin](https://github.com/alesapin)).
* Fixed a bug in deserialization of random generator state which might cause some data types such as `AggregateFunction(groupArraySample(N), T)` to behave in a non-deterministic way. [#24538](https://github.com/ClickHouse/ClickHouse/pull/24538) ([tavplubix](https://github.com/tavplubix)).
* Disallow building uniqXXXXStates of other aggregation states. [#24523](https://github.com/ClickHouse/ClickHouse/pull/24523) ([Raúl Marín](https://github.com/Algunenano)). Then allow it back by actually eliminating the root cause of the related issue. ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix usage of tuples in `CREATE .. AS SELECT` queries. [#24464](https://github.com/ClickHouse/ClickHouse/pull/24464) ([Anton Popov](https://github.com/CurtizJ)).
* Fix computation of total bytes in `Buffer` tables. In the current ClickHouse version the `total_writes.bytes` counter decreases too much during the buffer flush. It leads to counter overflow, and `totalBytes` returns something around 17.44 EB some time after the flush. [#24450](https://github.com/ClickHouse/ClickHouse/pull/24450) ([DimasKovas](https://github.com/DimasKovas)).
* Fix incorrect information about the monotonicity of the `toWeek` function. This fixes [#24422](https://github.com/ClickHouse/ClickHouse/issues/24422). This bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/5212 and was exposed later by a smarter partition pruner. [#24446](https://github.com/ClickHouse/ClickHouse/pull/24446) ([Amos Bird](https://github.com/amosbird)).
* Fixed a potential deadlock that can happen during LDAP role (re)mapping (when user authentication is managed by LDAP) if an LDAP group is mapped to a nonexistent local role. [#24431](https://github.com/ClickHouse/ClickHouse/pull/24431) ([Denis Glazachev](https://github.com/traceon)).
* In "multipart/form-data" message consider the CRLF preceding a boundary as part of it. Fixes [#23905](https://github.com/ClickHouse/ClickHouse/issues/23905). [#24399](https://github.com/ClickHouse/ClickHouse/pull/24399) ([Ivan](https://github.com/abyss7)).
* Fix dropping a partition with intersecting fake parts. In rare cases there might be parts with a mutation version greater than the current block number. [#24321](https://github.com/ClickHouse/ClickHouse/pull/24321) ([Amos Bird](https://github.com/amosbird)).
* Fixed a bug in moving Materialized View from Ordinary to Atomic database (`RENAME TABLE` query). Now inner table is moved to new database together with Materialized View. Fixes [#23926](https://github.com/ClickHouse/ClickHouse/issues/23926). [#24309](https://github.com/ClickHouse/ClickHouse/pull/24309) ([tavplubix](https://github.com/tavplubix)).
* Allow empty HTTP headers. Fixes [#23901](https://github.com/ClickHouse/ClickHouse/issues/23901). [#24285](https://github.com/ClickHouse/ClickHouse/pull/24285) ([Ivan](https://github.com/abyss7)).
* Correct processing of mutations (ALTER UPDATE/DELETE) in Memory tables. Closes [#24274](https://github.com/ClickHouse/ClickHouse/issues/24274). [#24275](https://github.com/ClickHouse/ClickHouse/pull/24275) ([flynn](https://github.com/ucasfl)).
* Make column LowCardinality property in JOIN output the same as in the input, close [#23351](https://github.com/ClickHouse/ClickHouse/issues/23351), close [#20315](https://github.com/ClickHouse/ClickHouse/issues/20315). [#24061](https://github.com/ClickHouse/ClickHouse/pull/24061) ([Vladimir](https://github.com/vdimir)).
* A fix for Kafka tables. Fix the bug in failover behavior when Engine = Kafka was not able to start consumption if the same consumer had an empty assignment previously. Closes [#21118](https://github.com/ClickHouse/ClickHouse/issues/21118). [#21267](https://github.com/ClickHouse/ClickHouse/pull/21267) ([filimonov](https://github.com/filimonov)).
#### Build/Testing/Packaging Improvement
* Add `darwin-aarch64` (Mac M1 / Apple Silicon) builds in CI [#25560](https://github.com/ClickHouse/ClickHouse/pull/25560) ([Ivan](https://github.com/abyss7)) and put the links to the docs and website ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Adds cross-platform embedding of binary resources into executables. It works on Illumos. [#25146](https://github.com/ClickHouse/ClickHouse/pull/25146) ([bnaecker](https://github.com/bnaecker)).
* Add join related options to stress tests to improve fuzzing. [#25200](https://github.com/ClickHouse/ClickHouse/pull/25200) ([Vladimir](https://github.com/vdimir)).
* Enable build with the S3 module on macOS [#25217](https://github.com/ClickHouse/ClickHouse/issues/25217). [#25218](https://github.com/ClickHouse/ClickHouse/pull/25218) ([kevin wan](https://github.com/MaxWk)).
* Add integration test cases to cover JDBC bridge. [#25047](https://github.com/ClickHouse/ClickHouse/pull/25047) ([Zhichun Wu](https://github.com/zhicwu)).
* Integration tests configuration has special treatment for dictionaries. Removed remaining dictionaries manual setup. [#24728](https://github.com/ClickHouse/ClickHouse/pull/24728) ([Ilya Yatsishin](https://github.com/qoega)).
* Add libfuzzer tests for YAMLParser class. [#24480](https://github.com/ClickHouse/ClickHouse/pull/24480) ([BoloniniD](https://github.com/BoloniniD)).
* Ubuntu 20.04 is now used to run integration tests, docker-compose version used to run integration tests is updated to 1.28.2. Environment variables now take effect on docker-compose. Rework test_dictionaries_all_layouts_separate_sources to allow parallel run. [#20393](https://github.com/ClickHouse/ClickHouse/pull/20393) ([Ilya Yatsishin](https://github.com/qoega)).
* Fix TOCTOU error in installation script. [#25277](https://github.com/ClickHouse/ClickHouse/pull/25277) ([alexey-milovidov](https://github.com/alexey-milovidov)).
### ClickHouse release 21.6, 2021-06-05
#### Upgrade Notes
* The `zstd` compression library is updated to v1.5.0. You may get messages about "checksum does not match" in replication. These messages are expected due to the update of the compression algorithm, and you can ignore them. They are informational and do not indicate any kind of undesired behaviour.
* The setting `compile_expressions` is enabled by default. Although it has been heavily tested on variety of scenarios, if you find some undesired behaviour on your servers, you can try turning this setting off.
* Values of `UUID` type cannot be compared with integers. For example, instead of writing `uuid != 0`, write `uuid != '00000000-0000-0000-0000-000000000000'` (see the example after this list).
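For example (a hedged sketch; the table `t` and column `uuid` are hypothetical):

```sql
-- No longer allowed:
-- SELECT count() FROM t WHERE uuid != 0;
-- Compare with a UUID literal instead:
SELECT count() FROM t WHERE uuid != '00000000-0000-0000-0000-000000000000';
```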
#### New Feature
* Add Postgres-like cast operator (`::`). E.g.: `[1, 2]::Array(UInt8)`, `0.1::Decimal(4, 4)`, `number::UInt16` (examples after this list). [#23871](https://github.com/ClickHouse/ClickHouse/pull/23871) ([Anton Popov](https://github.com/CurtizJ)).
* Make big integers production ready. Add support for `UInt128` data type. Fix known issues with the `Decimal256` data type. Support big integers in dictionaries. Support `gcd`/`lcm` functions for big integers. Support big integers in array search and conditional functions. Support `LowCardinality(UUID)`. Support big integers in `generateRandom` table function and `clickhouse-obfuscator`. Fix error with returning `UUID` from scalar subqueries. This fixes [#7834](https://github.com/ClickHouse/ClickHouse/issues/7834). This fixes [#23936](https://github.com/ClickHouse/ClickHouse/issues/23936). This fixes [#4176](https://github.com/ClickHouse/ClickHouse/issues/4176). This fixes [#24018](https://github.com/ClickHouse/ClickHouse/issues/24018). Backward incompatible change: values of `UUID` type cannot be compared with integers; for example, instead of writing `uuid != 0`, write `uuid != '00000000-0000-0000-0000-000000000000'`. [#23631](https://github.com/ClickHouse/ClickHouse/pull/23631) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Support `Array` data type for inserting and selecting data in `Arrow`, `Parquet` and `ORC` formats. [#21770](https://github.com/ClickHouse/ClickHouse/pull/21770) ([taylor12805](https://github.com/taylor12805)).
* Implement table comments. Closes [#23225](https://github.com/ClickHouse/ClickHouse/issues/23225). [#23548](https://github.com/ClickHouse/ClickHouse/pull/23548) ([flynn](https://github.com/ucasFL)).
* Support creating dictionaries with DDL queries in `clickhouse-local`. Closes [#22354](https://github.com/ClickHouse/ClickHouse/issues/22354). Added support for `DETACH DICTIONARY PERMANENTLY`. Added support for `EXCHANGE DICTIONARIES` for `Atomic` database engine. Added support for moving dictionaries between databases using `RENAME DICTIONARY`. [#23436](https://github.com/ClickHouse/ClickHouse/pull/23436) ([Maksim Kita](https://github.com/kitaisreal)).
* Add aggregate function `uniqTheta` to support [Theta Sketch](https://datasketches.apache.org/docs/Theta/ThetaSketchFramework.html) in ClickHouse. [#23894](https://github.com/ClickHouse/ClickHouse/pull/23894). [#22609](https://github.com/ClickHouse/ClickHouse/pull/22609) ([Ping Yu](https://github.com/pingyu)).
* Add function `splitByRegexp`. [#24077](https://github.com/ClickHouse/ClickHouse/pull/24077) ([abel-cheng](https://github.com/abel-cheng)).
* Add function `arrayProduct` which accepts an array as a parameter and returns the product of all its elements. Closes [#21613](https://github.com/ClickHouse/ClickHouse/issues/21613). [#23782](https://github.com/ClickHouse/ClickHouse/pull/23782) ([Maksim Kita](https://github.com/kitaisreal)).
* Add `thread_name` column in `system.stack_trace`. This closes [#23256](https://github.com/ClickHouse/ClickHouse/issues/23256). [#24124](https://github.com/ClickHouse/ClickHouse/pull/24124) ([abel-cheng](https://github.com/abel-cheng)).
* If `insert_null_as_default` = 1, insert default values instead of NULL in `INSERT ... SELECT` and `INSERT ... SELECT ... UNION ALL ...` queries. Closes [#22832](https://github.com/ClickHouse/ClickHouse/issues/22832). [#23524](https://github.com/ClickHouse/ClickHouse/pull/23524) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add support for progress indication in `clickhouse-local` with `--progress` option. [#23196](https://github.com/ClickHouse/ClickHouse/pull/23196) ([Egor Savin](https://github.com/Amesaru)).
* Add support for HTTP compression (determined by `Content-Encoding` HTTP header) in `http` dictionary source. This fixes [#8912](https://github.com/ClickHouse/ClickHouse/issues/8912). [#23946](https://github.com/ClickHouse/ClickHouse/pull/23946) ([FArthur-cmd](https://github.com/FArthur-cmd)).
* Added `SYSTEM QUERY RELOAD MODEL`, `SYSTEM QUERY RELOAD MODELS`. Closes [#18722](https://github.com/ClickHouse/ClickHouse/issues/18722). [#23182](https://github.com/ClickHouse/ClickHouse/pull/23182) ([Maksim Kita](https://github.com/kitaisreal)).
* Add setting `json` (boolean, 0 by default) for `EXPLAIN PLAN` query. When enabled, query output will be a single `JSON` row. It is recommended to use `TSVRaw` format to avoid unnecessary escaping. [#23082](https://github.com/ClickHouse/ClickHouse/pull/23082) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add setting `indexes` (boolean, disabled by default) to `EXPLAIN PIPELINE` query. When enabled, shows used indexes, number of filtered parts and granules for every index applied. Supported for `MergeTree*` tables. [#22352](https://github.com/ClickHouse/ClickHouse/pull/22352) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* LDAP: implemented user DN detection functionality to use when mapping Active Directory groups to ClickHouse roles. [#22228](https://github.com/ClickHouse/ClickHouse/pull/22228) ([Denis Glazachev](https://github.com/traceon)).
* New aggregate function `deltaSumTimestamp` for summing the difference between consecutive rows while maintaining ordering during merge by storing timestamps. [#21888](https://github.com/ClickHouse/ClickHouse/pull/21888) ([Russ Frank](https://github.com/rf)).
* Added less secure IMDS credentials provider for S3 which works under docker correctly. [#21852](https://github.com/ClickHouse/ClickHouse/pull/21852) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Add back `indexHint` function. This is for [#21238](https://github.com/ClickHouse/ClickHouse/issues/21238). This reverts [#9542](https://github.com/ClickHouse/ClickHouse/pull/9542). This fixes [#9540](https://github.com/ClickHouse/ClickHouse/issues/9540). [#21304](https://github.com/ClickHouse/ClickHouse/pull/21304) ([Amos Bird](https://github.com/amosbird)).
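Examples of the cast operator and the new array/string functions from this list (illustrative inputs only):

```sql
SELECT [1, 2]::Array(UInt8), 0.1::Decimal(4, 4), number::UInt16 FROM numbers(1);
SELECT splitByRegexp('[0-9]+', 'aaa1bbb22ccc');  -- ['aaa', 'bbb', 'ccc']
SELECT arrayProduct([1, 2, 3, 4]);               -- 24
```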
#### Experimental Feature
* Add `PROJECTION` support for `MergeTree*` tables. [#20202](https://github.com/ClickHouse/ClickHouse/pull/20202) ([Amos Bird](https://github.com/amosbird)).
#### Performance Improvement
* Enable the `compile_expressions` setting by default. When this setting is enabled, compositions of simple functions and operators are compiled to native code with LLVM at runtime. [#8482](https://github.com/ClickHouse/ClickHouse/pull/8482) ([Maksim Kita](https://github.com/kitaisreal), [alexey-milovidov](https://github.com/alexey-milovidov)). Note: if you run into trouble, turn this setting off.
* Update `re2` library. Performance of regular expressions matching is improved. Also this PR adds compatibility with gcc-11. [#24196](https://github.com/ClickHouse/ClickHouse/pull/24196) ([Raúl Marín](https://github.com/Algunenano)).
* The ORC input format now reads stripe by stripe instead of reading the entire table into memory at once, which costs a lot of memory when the file size is huge. [#23102](https://github.com/ClickHouse/ClickHouse/pull/23102) ([Chao Ma](https://github.com/godliness)).
* Fusion of the aggregate functions `sum`, `count` and `avg` in a query into a single aggregate function. The optimization is controlled with the `optimize_fuse_sum_count_avg` setting. This is implemented with a new aggregate function `sumCount`, which returns a tuple of two fields: `sum` and `count` (see the example after this list). [#21337](https://github.com/ClickHouse/ClickHouse/pull/21337) ([hexiaoting](https://github.com/hexiaoting)).
* Update `zstd` to v1.5.0. The performance of compression is improved by single-digit percentages. [#24135](https://github.com/ClickHouse/ClickHouse/pull/24135) ([Raúl Marín](https://github.com/Algunenano)). Note: you may get messages about "checksum does not match" in replication. These messages are expected due to the update of the compression algorithm and you can ignore them.
* Improved performance of `Buffer` tables: do not acquire lock for total_bytes/total_rows for `Buffer` engine. [#24066](https://github.com/ClickHouse/ClickHouse/pull/24066) ([Azat Khuzhin](https://github.com/azat)).
* Preallocation support for `hashed`/`sparse_hashed` dictionaries is restored. [#23979](https://github.com/ClickHouse/ClickHouse/pull/23979) ([Azat Khuzhin](https://github.com/azat)).
* Enable `async_socket_for_remote` by default (lower amount of threads in querying Distributed tables with large fanout). [#23683](https://github.com/ClickHouse/ClickHouse/pull/23683) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
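A sketch of the sum/count fusion described above:

```sql
SET optimize_fuse_sum_count_avg = 1;
-- sum(x), count(x) and avg(x) over the same column are computed via a single
-- sumCount(x), which returns the tuple (sum, count):
SELECT sumCount(number) FROM numbers(10);  -- (45, 10)
```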
#### Improvement
* Add `_partition_value` virtual column to MergeTree table family. It can be used to prune partition in a deterministic way. It's needed to implement partition matcher for mutations. [#23673](https://github.com/ClickHouse/ClickHouse/pull/23673) ([Amos Bird](https://github.com/amosbird)).
* Added `region` parameter for S3 storage and disk. [#23846](https://github.com/ClickHouse/ClickHouse/pull/23846) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Allow configuring different log levels for different logging channels. Closes [#19569](https://github.com/ClickHouse/ClickHouse/issues/19569). [#23857](https://github.com/ClickHouse/ClickHouse/pull/23857) ([filimonov](https://github.com/filimonov)).
* Keep the default timezone on `DateTime` operations if it was not provided explicitly. For example, if you add one second to a value of `DateTime` type without timezone, it will remain `DateTime` without timezone. In previous versions the value of the default timezone was placed into the returned data type explicitly, so it became `DateTime('something')`. This closes [#4854](https://github.com/ClickHouse/ClickHouse/issues/4854). [#23392](https://github.com/ClickHouse/ClickHouse/pull/23392) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Allow the user to specify an empty string instead of a database name for the `MySQL` storage; the default database will be used for queries. In previous versions this worked for SELECT queries; support for INSERT has now been added as well. This closes [#19281](https://github.com/ClickHouse/ClickHouse/issues/19281). This can be useful when working with `Sphinx` or other MySQL-compatible foreign databases. [#23319](https://github.com/ClickHouse/ClickHouse/pull/23319) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed `quantile(s)TDigest`. Added special handling of singleton centroids according to tdunning/t-digest 3.2+. Also a bug with over-compression of centroids in implementation of earlier version of the algorithm was fixed. [#23314](https://github.com/ClickHouse/ClickHouse/pull/23314) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Function `now64` now supports an optional timezone argument (example after this list). [#24091](https://github.com/ClickHouse/ClickHouse/pull/24091) ([Vasily Nemkov](https://github.com/Enmk)).
* Fix the case when the progress bar in interactive mode of clickhouse-client, appearing in the middle of the data, could overwrite some parts of visible data in the terminal. This closes [#19283](https://github.com/ClickHouse/ClickHouse/issues/19283). [#23050](https://github.com/ClickHouse/ClickHouse/pull/23050) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix crash when memory allocation fails in simdjson. https://github.com/simdjson/simdjson/pull/1567 . Mark as improvement because it's a very rare bug. [#24147](https://github.com/ClickHouse/ClickHouse/pull/24147) ([Amos Bird](https://github.com/amosbird)).
* Preserve dictionaries until storage shutdown (this will avoid possible `external dictionary 'DICT' not found` errors at server shutdown during final flush of the `Buffer` engine). [#24068](https://github.com/ClickHouse/ClickHouse/pull/24068) ([Azat Khuzhin](https://github.com/azat)).
* Flush `Buffer` tables before shutting down tables (within one database), to avoid discarding blocks because the underlying table had already been detached (and the `Destination table default.a_data_01870 doesn't exist. Block of data is discarded` error in the log). [#24067](https://github.com/ClickHouse/ClickHouse/pull/24067) ([Azat Khuzhin](https://github.com/azat)).
* Now `prefer_column_name_to_alias = 1` will also favor column names for `group by`, `having` and `order by`. This fixes [#23882](https://github.com/ClickHouse/ClickHouse/issues/23882). [#24022](https://github.com/ClickHouse/ClickHouse/pull/24022) ([Amos Bird](https://github.com/amosbird)).
* Add support for `ORDER BY WITH FILL` with `DateTime64`. [#24016](https://github.com/ClickHouse/ClickHouse/pull/24016) ([kevin wan](https://github.com/MaxWk)).
* Enable `DateTime64` to be a version column in `ReplacingMergeTree`. [#23992](https://github.com/ClickHouse/ClickHouse/pull/23992) ([kevin wan](https://github.com/MaxWk)).
* Log information about OS name, kernel version and CPU architecture on server startup. [#23988](https://github.com/ClickHouse/ClickHouse/pull/23988) ([Azat Khuzhin](https://github.com/azat)).
* Support specifying table schema for `postgresql` dictionary source. Closes [#23958](https://github.com/ClickHouse/ClickHouse/issues/23958). [#23980](https://github.com/ClickHouse/ClickHouse/pull/23980) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add hints for names of `Enum` elements (suggest names in case of typos). Closes [#17112](https://github.com/ClickHouse/ClickHouse/issues/17112). [#23919](https://github.com/ClickHouse/ClickHouse/pull/23919) ([flynn](https://github.com/ucasFL)).
* Measure found rate (the percentage for which the value was found) for dictionaries (see `found_rate` in `system.dictionaries`). [#23916](https://github.com/ClickHouse/ClickHouse/pull/23916) ([Azat Khuzhin](https://github.com/azat)).
* Allow adding specific queue settings via the table setting `rabbitmq_queue_settings_list`. (Closes [#23737](https://github.com/ClickHouse/ClickHouse/issues/23737) and [#23918](https://github.com/ClickHouse/ClickHouse/issues/23918)). Allow the user to control the whole RabbitMQ setup: if the table setting `rabbitmq_queue_consume` is set to `1`, the RabbitMQ table engine will only connect to the specified queue and will not perform any RabbitMQ consumer-side setup like declaring exchanges, queues, bindings. (Closes [#21757](https://github.com/ClickHouse/ClickHouse/issues/21757)). Add proper cleanup when a RabbitMQ table is dropped: delete the queues which the table has declared and all bound exchanges, if they were created by the table. [#23887](https://github.com/ClickHouse/ClickHouse/pull/23887) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add `broken_data_files`/`broken_data_compressed_bytes` into `system.distribution_queue`. Add a metric for the number of files for asynchronous insertion into Distributed tables that have been marked as broken (`BrokenDistributedFilesToInsert`). [#23885](https://github.com/ClickHouse/ClickHouse/pull/23885) ([Azat Khuzhin](https://github.com/azat)).
* Querying `system.tables` does not go to ZooKeeper anymore. [#23793](https://github.com/ClickHouse/ClickHouse/pull/23793) ([Fuwang Hu](https://github.com/fuwhu)).
* Respect `lock_acquire_timeout_for_background_operations` for `OPTIMIZE` queries. [#23623](https://github.com/ClickHouse/ClickHouse/pull/23623) ([Azat Khuzhin](https://github.com/azat)).
* Possibility to change `S3` disk settings at runtime via the new `SYSTEM RESTART DISK` SQL command. [#23429](https://github.com/ClickHouse/ClickHouse/pull/23429) ([Pavel Kovalenko](https://github.com/Jokser)).
* If a user mistakenly set `max_distributed_connections` to zero, every query to a `Distributed` table threw an exception with a message containing "logical error". But it is really expected behaviour, not a logical error, so the exception message was slightly incorrect. It also triggered checks in our CI environment that ensure no logical errors ever happen. Instead, we now treat `max_distributed_connections` misconfigured to zero as the minimum possible value (one). [#23348](https://github.com/ClickHouse/ClickHouse/pull/23348) ([Azat Khuzhin](https://github.com/azat)).
* Disable `min_bytes_to_use_mmap_io` by default. [#23322](https://github.com/ClickHouse/ClickHouse/pull/23322) ([Azat Khuzhin](https://github.com/azat)).
* Support `LowCardinality` nullability with `join_use_nulls`, close [#15101](https://github.com/ClickHouse/ClickHouse/issues/15101). [#23237](https://github.com/ClickHouse/ClickHouse/pull/23237) ([vdimir](https://github.com/vdimir)).
* Added possibility to restore `MergeTree` parts to `detached` directory for `S3` disk. [#23112](https://github.com/ClickHouse/ClickHouse/pull/23112) ([Pavel Kovalenko](https://github.com/Jokser)).
* Retries on HTTP connection drops in S3. [#22988](https://github.com/ClickHouse/ClickHouse/pull/22988) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Add settings `external_storage_max_read_rows` and `external_storage_max_read_bytes` for the MySQL table engine, dictionary source and MaterializeMySQL minor data fetches. [#22697](https://github.com/ClickHouse/ClickHouse/pull/22697) ([TCeason](https://github.com/TCeason)).
* `MaterializeMySQL` (experimental feature): Previously, MySQL 5.7.9 was not supported due to SQL incompatibility. Now MySQL parameter verification is left to MaterializeMySQL itself. [#23413](https://github.com/ClickHouse/ClickHouse/pull/23413) ([TCeason](https://github.com/TCeason)).
* Enable reading of subcolumns for distributed tables. [#24472](https://github.com/ClickHouse/ClickHouse/pull/24472) ([Anton Popov](https://github.com/CurtizJ)).
* Fix usage of tuples in `CREATE .. AS SELECT` queries. [#24464](https://github.com/ClickHouse/ClickHouse/pull/24464) ([Anton Popov](https://github.com/CurtizJ)).
* Support for `Parquet` format in `Kafka` tables. [#23412](https://github.com/ClickHouse/ClickHouse/pull/23412) ([Chao Ma](https://github.com/godliness)).
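A minimal sketch of the `prefer_column_name_to_alias` change mentioned above; the query is illustrative, not taken from the PR:

```sql
-- With the default value (0), `number` in GROUP BY resolves to the alias and the
-- query fails, because an aggregate function would end up inside the key.
-- With prefer_column_name_to_alias = 1 it resolves to the source column instead.
SET prefer_column_name_to_alias = 1;

SELECT sum(number) AS number
FROM numbers(10)
GROUP BY number;
```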
#### Bug Fix
* Use the old version of the modulo function when it is used in the partition key or primary key. Closes [#23508](https://github.com/ClickHouse/ClickHouse/issues/23508). [#24157](https://github.com/ClickHouse/ClickHouse/pull/24157) ([Kseniia Sumarokova](https://github.com/kssenii)). It was a source of backward incompatibility in previous releases.
* Fixed the behavior when a `SYSTEM RESTART REPLICA` or `SYSTEM SYNC REPLICA` query is processed infinitely. This was detected on a server with an extremely small amount of RAM. [#24457](https://github.com/ClickHouse/ClickHouse/pull/24457) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix incorrect monotonicity of `toWeek` function. This fixes [#24422](https://github.com/ClickHouse/ClickHouse/issues/24422) . This bug was introduced in [#5212](https://github.com/ClickHouse/ClickHouse/pull/5212), and was exposed later by smarter partition pruner. [#24446](https://github.com/ClickHouse/ClickHouse/pull/24446) ([Amos Bird](https://github.com/amosbird)).
* Fix dropping a partition with intersecting fake parts. In rare cases there might be parts with a mutation version greater than the current block number. [#24321](https://github.com/ClickHouse/ClickHouse/pull/24321) ([Amos Bird](https://github.com/amosbird)).
* Fixed a bug in moving a Materialized View from an Ordinary to an Atomic database (`RENAME TABLE` query). Now the inner table is moved to the new database together with the Materialized View. Fixes [#23926](https://github.com/ClickHouse/ClickHouse/issues/23926). [#24309](https://github.com/ClickHouse/ClickHouse/pull/24309) ([tavplubix](https://github.com/tavplubix)).
* Allow empty HTTP headers in client requests. Fixes [#23901](https://github.com/ClickHouse/ClickHouse/issues/23901). [#24285](https://github.com/ClickHouse/ClickHouse/pull/24285) ([Ivan](https://github.com/abyss7)).
* Set `max_threads = 1` to fix mutation fail of `Memory` tables. Closes [#24274](https://github.com/ClickHouse/ClickHouse/issues/24274). [#24275](https://github.com/ClickHouse/ClickHouse/pull/24275) ([flynn](https://github.com/ucasFL)).
* Fix typo in implementation of `Memory` tables, this bug was introduced at [#15127](https://github.com/ClickHouse/ClickHouse/issues/15127). Closes [#24192](https://github.com/ClickHouse/ClickHouse/issues/24192). [#24193](https://github.com/ClickHouse/ClickHouse/pull/24193) ([张中南](https://github.com/plugine)).
* Fix abnormal server termination due to `HDFS` becoming inaccessible during query execution. Closes [#24117](https://github.com/ClickHouse/ClickHouse/issues/24117). [#24191](https://github.com/ClickHouse/ClickHouse/pull/24191) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix crash on updating of `Nested` column with const condition. [#24183](https://github.com/ClickHouse/ClickHouse/pull/24183) ([hexiaoting](https://github.com/hexiaoting)).
* Fix a race condition which could happen in RBAC under a heavy load. This PR fixes [#24090](https://github.com/ClickHouse/ClickHouse/issues/24090) and [#24134](https://github.com/ClickHouse/ClickHouse/issues/24134). [#24176](https://github.com/ClickHouse/ClickHouse/pull/24176) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix a rare bug that could lead to a partially initialized table that can serve write requests (insert/alter/so on). Now such tables will be in readonly mode. [#24122](https://github.com/ClickHouse/ClickHouse/pull/24122) ([alesapin](https://github.com/alesapin)).
* Fix an issue where `EXPLAIN PIPELINE` with `SELECT xxx FINAL` showed a wrong pipeline. ([hexiaoting](https://github.com/hexiaoting)).
* Fixed using const `DateTime` value vs `DateTime64` column in `WHERE`. [#24100](https://github.com/ClickHouse/ClickHouse/pull/24100) ([Vasily Nemkov](https://github.com/Enmk)).
* Fix crash in merge JOIN, closes [#24010](https://github.com/ClickHouse/ClickHouse/issues/24010). [#24013](https://github.com/ClickHouse/ClickHouse/pull/24013) ([vdimir](https://github.com/vdimir)).
* Some `ALTER PARTITION` queries might cause `Part A intersects previous part B` and `Unexpected merged part C intersecting drop range D` errors in replication queue. It's fixed. Fixes [#23296](https://github.com/ClickHouse/ClickHouse/issues/23296). [#23997](https://github.com/ClickHouse/ClickHouse/pull/23997) ([tavplubix](https://github.com/tavplubix)).
* Fix SIGSEGV for external GROUP BY and overflow row (i.e. queries like `SELECT FROM GROUP BY WITH TOTALS SETTINGS max_bytes_before_external_group_by>0, max_rows_to_group_by>0, group_by_overflow_mode='any', totals_mode='before_having'`; a sketch of this query class follows this list). [#23962](https://github.com/ClickHouse/ClickHouse/pull/23962) ([Azat Khuzhin](https://github.com/azat)).
* Fix keys metrics accounting for `CACHE` dictionary with duplicates in the source (leads to `DictCacheKeysRequestedMiss` overflows). [#23929](https://github.com/ClickHouse/ClickHouse/pull/23929) ([Azat Khuzhin](https://github.com/azat)).
* Fix implementation of connection pool of `PostgreSQL` engine. Closes [#23897](https://github.com/ClickHouse/ClickHouse/issues/23897). [#23909](https://github.com/ClickHouse/ClickHouse/pull/23909) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix `distributed_group_by_no_merge = 2` with `GROUP BY` and aggregate function wrapped into regular function (had been broken in [#23546](https://github.com/ClickHouse/ClickHouse/issues/23546)). Throw exception in case of someone trying to use `distributed_group_by_no_merge = 2` with window functions. Disable `optimize_distributed_group_by_sharding_key` for queries with window functions. [#23906](https://github.com/ClickHouse/ClickHouse/pull/23906) ([Azat Khuzhin](https://github.com/azat)).
* A fix for `s3` table function: better handling of HTTP errors. Response bodies of HTTP errors were being ignored earlier. [#23844](https://github.com/ClickHouse/ClickHouse/pull/23844) ([Vladimir Chebotarev](https://github.com/excitoon)).
* A fix for `s3` table function: better handling of URI's. Fixed an incompatibility with URLs containing `+` symbol, data with such keys could not be read previously. [#23822](https://github.com/ClickHouse/ClickHouse/pull/23822) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Fix error `Can't initialize pipeline with empty pipe` for queries with `GLOBAL IN/JOIN` and `use_hedged_requests`. Fixes [#23431](https://github.com/ClickHouse/ClickHouse/issues/23431). [#23805](https://github.com/ClickHouse/ClickHouse/pull/23805) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix `CLEAR COLUMN` does not work when it is referenced by materialized view. Close [#23764](https://github.com/ClickHouse/ClickHouse/issues/23764). [#23781](https://github.com/ClickHouse/ClickHouse/pull/23781) ([flynn](https://github.com/ucasFL)).
* Fix heap use after free when reading from HDFS if `Values` format is used. [#23761](https://github.com/ClickHouse/ClickHouse/pull/23761) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Avoid possible "Cannot schedule a task" error (in case some exception had been occurred) on INSERT into Distributed. [#23744](https://github.com/ClickHouse/ClickHouse/pull/23744) ([Azat Khuzhin](https://github.com/azat)).
* Fixed a bug in recovery of a stale `ReplicatedMergeTree` replica. Some metadata updates could be ignored by the stale replica if an `ALTER` query was executed during its downtime. [#23742](https://github.com/ClickHouse/ClickHouse/pull/23742) ([tavplubix](https://github.com/tavplubix)).
* Fix a bug with `Join` and `WITH TOTALS`, close [#17718](https://github.com/ClickHouse/ClickHouse/issues/17718). [#23549](https://github.com/ClickHouse/ClickHouse/pull/23549) ([vdimir](https://github.com/vdimir)).
* Fix possible `Block structure mismatch` error for queries with `UNION` which could possibly happen after filter-pushdown optimization. Fixes [#23029](https://github.com/ClickHouse/ClickHouse/issues/23029). [#23359](https://github.com/ClickHouse/ClickHouse/pull/23359) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Add type conversion when the setting `optimize_skip_unused_shards_rewrite_in` is enabled. This fixes MSan report. [#23219](https://github.com/ClickHouse/ClickHouse/pull/23219) ([Azat Khuzhin](https://github.com/azat)).
* Add a missing check when updating nested subcolumns, close issue: [#22353](https://github.com/ClickHouse/ClickHouse/issues/22353). [#22503](https://github.com/ClickHouse/ClickHouse/pull/22503) ([hexiaoting](https://github.com/hexiaoting)).
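For reference, the shape of the query class from the external `GROUP BY` SIGSEGV fix above, reconstructed from the settings listed in the entry (the table expression is a stand-in):

```sql
-- Overflow row plus external aggregation: the combination that used to crash.
SELECT number % 10 AS key, count()
FROM numbers(1000000)
GROUP BY key WITH TOTALS
SETTINGS
    max_bytes_before_external_group_by = 1,
    max_rows_to_group_by = 5,
    group_by_overflow_mode = 'any',
    totals_mode = 'before_having';
```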
#### Build/Testing/Packaging Improvement
* Support building on Illumos. [#24144](https://github.com/ClickHouse/ClickHouse/pull/24144). Adds support for building on Solaris-derived operating systems. [#23746](https://github.com/ClickHouse/ClickHouse/pull/23746) ([bnaecker](https://github.com/bnaecker)).
* Add more benchmarks for hash tables, including the Swiss Table from Google (that appeared to be slower than ClickHouse hash map in our specific usage scenario). [#24111](https://github.com/ClickHouse/ClickHouse/pull/24111) ([Maksim Kita](https://github.com/kitaisreal)).
* Update librdkafka from 1.6.0-RC3 to 1.6.1. [#23874](https://github.com/ClickHouse/ClickHouse/pull/23874) ([filimonov](https://github.com/filimonov)).
* Always enable `asynchronous-unwind-tables` explicitly. It may fix query profiler on AArch64. [#23602](https://github.com/ClickHouse/ClickHouse/pull/23602) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Avoid possible build dependency on locale and filesystem order. This allows reproducible builds. [#23600](https://github.com/ClickHouse/ClickHouse/pull/23600) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Remove a source of nondeterminism from build. Now builds at different point of time will produce byte-identical binaries. Partially addressed [#22113](https://github.com/ClickHouse/ClickHouse/issues/22113). [#23559](https://github.com/ClickHouse/ClickHouse/pull/23559) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add simple tool for benchmarking (Zoo)Keeper. [#23038](https://github.com/ClickHouse/ClickHouse/pull/23038) ([alesapin](https://github.com/alesapin)).
## ClickHouse release 21.5, 2021-05-20
#### Backward Incompatible Change
* Change comparison of integers and floating point numbers when the integer is not exactly representable in the floating point data type. In the new version the comparison will return false, as a rounding error occurs. Example: `9223372036854775808.0 != 9223372036854775808`, because the number `9223372036854775808` is not representable as a floating point number exactly (and `9223372036854775808.0` is rounded to `9223372036854776000.0`). But in previous versions the comparison returned true, because if the floating point number `9223372036854776000.0` is converted back to UInt64, it yields `9223372036854775808`. For reference, the Python programming language also treats these numbers as equal. But this behaviour was dependent on the CPU model (different results on AMD64 and AArch64 for some out-of-range numbers), so we made the comparison more precise. It will treat int and float numbers as equal only if the int is represented in the floating point type exactly (see the example after this list). [#22595](https://github.com/ClickHouse/ClickHouse/pull/22595) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Remove support for `argMin` and `argMax` for single `Tuple` argument. The code was not memory-safe. The feature was added by mistake and it is confusing for people. These functions can be reintroduced under different names later. This fixes [#22384](https://github.com/ClickHouse/ClickHouse/issues/22384) and reverts [#17359](https://github.com/ClickHouse/ClickHouse/issues/17359). [#23393](https://github.com/ClickHouse/ClickHouse/pull/23393) ([alexey-milovidov](https://github.com/alexey-milovidov)).
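An illustration of the new comparison semantics described in the first item of this list; the value is chosen so that it is certainly not representable as `Float64`:

```sql
-- 9223372036854775807 (2^63 - 1) is not exactly representable as Float64;
-- it rounds to 2^63, so under the new rules the comparison returns 0 (false).
SELECT toFloat64(9223372036854775807) = 9223372036854775807;
```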
#### New Feature
* Added functions `dictGetChildren(dictionary, key)`, `dictGetDescendants(dictionary, key, level)`. Function `dictGetChildren` returns all children as an array of indexes. It is an inverse transformation for `dictGetHierarchy`. Function `dictGetDescendants` returns all descendants as if `dictGetChildren` was applied `level` times recursively. A zero `level` value is equivalent to infinity (a usage sketch follows this list). Improved performance of `dictGetHierarchy`, `dictIsIn` functions. Closes [#14656](https://github.com/ClickHouse/ClickHouse/issues/14656). [#22096](https://github.com/ClickHouse/ClickHouse/pull/22096) ([Maksim Kita](https://github.com/kitaisreal)).
* Added function `dictGetOrNull`. It works like `dictGet`, but returns `Null` if the key was not found in the dictionary. Closes [#22375](https://github.com/ClickHouse/ClickHouse/issues/22375). [#22413](https://github.com/ClickHouse/ClickHouse/pull/22413) ([Maksim Kita](https://github.com/kitaisreal)).
* Added a table function `s3Cluster`, which allows processing files from `s3` in parallel on every node of a specified cluster. [#22012](https://github.com/ClickHouse/ClickHouse/pull/22012) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Added support for replicas and shards in MySQL/PostgreSQL table engine / table function. You can write `SELECT * FROM mysql('host{1,2}-{1|2}', ...)`. Closes [#20969](https://github.com/ClickHouse/ClickHouse/issues/20969). [#22217](https://github.com/ClickHouse/ClickHouse/pull/22217) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Added `ALTER TABLE ... FETCH PART ...` query. It's similar to `FETCH PARTITION`, but fetches only one part. [#22706](https://github.com/ClickHouse/ClickHouse/pull/22706) ([turbo jason](https://github.com/songenjie)).
* Added a setting `max_distributed_depth` that limits the depth of recursive queries to `Distributed` tables. Closes [#20229](https://github.com/ClickHouse/ClickHouse/issues/20229). [#21942](https://github.com/ClickHouse/ClickHouse/pull/21942) ([flynn](https://github.com/ucasFL)).
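A hedged usage sketch for the hierarchical dictionary functions above; the dictionary name `hier_dict` and its layout (key 1 has children 2 and 3, key 2 has child 4) are hypothetical:

```sql
SELECT dictGetChildren('hier_dict', toUInt64(1));        -- [2, 3]
SELECT dictGetDescendants('hier_dict', toUInt64(1), 1);  -- [2, 3] (one level down)
SELECT dictGetDescendants('hier_dict', toUInt64(1), 0);  -- [2, 3, 4] (0 = unlimited depth)
```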
#### Performance Improvement
* Improved performance of `intDiv` by dynamic dispatch for AVX2. This closes [#22314](https://github.com/ClickHouse/ClickHouse/issues/22314). [#23000](https://github.com/ClickHouse/ClickHouse/pull/23000) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Improved performance of reading from the `ArrowStream` input format for sources other than local files (e.g. URL). [#22673](https://github.com/ClickHouse/ClickHouse/pull/22673) ([nvartolomei](https://github.com/nvartolomei)).
* Disabled compression by default when interacting with localhost (with clickhouse-client or server to server with distributed queries) via native protocol. It may improve performance of some import/export operations. This closes [#22234](https://github.com/ClickHouse/ClickHouse/issues/22234). [#22237](https://github.com/ClickHouse/ClickHouse/pull/22237) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Exclude values that do not belong to the shard from the right part of the IN section for distributed queries (under `optimize_skip_unused_shards_rewrite_in`, enabled by default, since it still requires `optimize_skip_unused_shards`; an illustrative query follows this list). [#21511](https://github.com/ClickHouse/ClickHouse/pull/21511) ([Azat Khuzhin](https://github.com/azat)).
* Improved performance of reading a subset of columns with File-like table engines and column-oriented formats like Parquet, Arrow or ORC. This closes [#20129](https://github.com/ClickHouse/ClickHouse/issues/20129). [#21302](https://github.com/ClickHouse/ClickHouse/pull/21302) ([keenwolf](https://github.com/keen-wolf)).
* Allow moving more conditions to `PREWHERE`, as it was before version 21.1 (an adjustment of internal heuristics). An insufficient number of moved conditions could lead to worse performance. [#23397](https://github.com/ClickHouse/ClickHouse/pull/23397) ([Anton Popov](https://github.com/CurtizJ)).
* Improved performance of ODBC connections and fixed all the outstanding issues from the backlog. Using `nanodbc` library instead of `Poco::ODBC`. Closes [#9678](https://github.com/ClickHouse/ClickHouse/issues/9678). Add support for DateTime64 and Decimal* for ODBC table engine. Closes [#21961](https://github.com/ClickHouse/ClickHouse/issues/21961). Fixed issue with cyrillic text being truncated. Closes [#16246](https://github.com/ClickHouse/ClickHouse/issues/16246). Added connection pools for odbc bridge. [#21972](https://github.com/ClickHouse/ClickHouse/pull/21972) ([Kseniia Sumarokova](https://github.com/kssenii)).
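An illustrative query for the IN-rewrite optimization above; `dist` is a hypothetical `Distributed` table sharded by `key % 2`:

```sql
-- With the rewrite enabled, the IN list forwarded to each shard keeps only
-- the values that can actually live on that shard (e.g. shard 0 gets (0, 2)).
SELECT count()
FROM dist
WHERE key IN (0, 1, 2, 3)
SETTINGS optimize_skip_unused_shards = 1,
         optimize_skip_unused_shards_rewrite_in = 1;
```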
#### Improvement
* Increase `max_uri_size` (the maximum size of URL in HTTP interface) to 1 MiB by default. This closes [#21197](https://github.com/ClickHouse/ClickHouse/issues/21197). [#22997](https://github.com/ClickHouse/ClickHouse/pull/22997) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Set `background_fetches_pool_size` to `8` that is better for production usage with frequent small insertions or slow ZooKeeper cluster. [#22945](https://github.com/ClickHouse/ClickHouse/pull/22945) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Added `initial_array_size` and `max_array_size` options for `FlatDictionary`. [#22521](https://github.com/ClickHouse/ClickHouse/pull/22521) ([Maksim Kita](https://github.com/kitaisreal)).
* Add new setting `non_replicated_deduplication_window` for non-replicated MergeTree inserts deduplication. [#22514](https://github.com/ClickHouse/ClickHouse/pull/22514) ([alesapin](https://github.com/alesapin)).
* Update paths to the `CatBoost` model configs in config reloading. [#22434](https://github.com/ClickHouse/ClickHouse/pull/22434) ([Kruglov Pavel](https://github.com/Avogar)).
* Added `Decimal256` type support in dictionaries. `Decimal256` is an experimental feature. Closes [#20979](https://github.com/ClickHouse/ClickHouse/issues/20979). [#22960](https://github.com/ClickHouse/ClickHouse/pull/22960) ([Maksim Kita](https://github.com/kitaisreal)).
* Enabled `async_socket_for_remote` by default (using less amount of OS threads for distributed queries). [#23683](https://github.com/ClickHouse/ClickHouse/pull/23683) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed `quantile(s)TDigest`. Added special handling of singleton centroids according to tdunning/t-digest 3.2+. Also a bug with over-compression of centroids in implementation of earlier version of the algorithm was fixed. [#23314](https://github.com/ClickHouse/ClickHouse/pull/23314) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Make function name `unhex` case insensitive for compatibility with MySQL. [#23229](https://github.com/ClickHouse/ClickHouse/pull/23229) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Implement functions `arrayHasAny`, `arrayHasAll`, `has`, `indexOf`, `countEqual` for the generic case when the types of array elements are different. In previous versions the functions `arrayHasAny`, `arrayHasAll` returned false and `has`, `indexOf`, `countEqual` threw an exception. Also add support for `Decimal` and big integer types in the function `has` and similar ones. This closes [#20272](https://github.com/ClickHouse/ClickHouse/issues/20272). [#23044](https://github.com/ClickHouse/ClickHouse/pull/23044) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Raised the threshold on max number of matches in result of the function `extractAllGroupsHorizontal`. [#23036](https://github.com/ClickHouse/ClickHouse/pull/23036) ([Vasily Nemkov](https://github.com/Enmk)).
* Do not perform `optimize_skip_unused_shards` for clusters with a single node. [#22999](https://github.com/ClickHouse/ClickHouse/pull/22999) ([Azat Khuzhin](https://github.com/azat)).
* Added ability to run clickhouse-keeper (experimental drop-in replacement to ZooKeeper) with SSL. Config settings `keeper_server.tcp_port_secure` can be used for secure interaction between client and keeper-server. `keeper_server.raft_configuration.secure` can be used to enable internal secure communication between nodes. [#22992](https://github.com/ClickHouse/ClickHouse/pull/22992) ([alesapin](https://github.com/alesapin)).
* Added ability to flush buffer only in background for `Buffer` tables. [#22986](https://github.com/ClickHouse/ClickHouse/pull/22986) ([Azat Khuzhin](https://github.com/azat)).
* When selecting from a MergeTree table with NULL in the WHERE condition, in rare cases an exception was thrown. This closes [#20019](https://github.com/ClickHouse/ClickHouse/issues/20019). [#22978](https://github.com/ClickHouse/ClickHouse/pull/22978) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix error handling in Poco HTTP Client for AWS. [#22973](https://github.com/ClickHouse/ClickHouse/pull/22973) ([kreuzerkrieg](https://github.com/kreuzerkrieg)).
* Respect `max_part_removal_threads` for `ReplicatedMergeTree`. [#22971](https://github.com/ClickHouse/ClickHouse/pull/22971) ([Azat Khuzhin](https://github.com/azat)).
* Fix an obscure corner case of MergeTree settings `inactive_parts_to_throw_insert = 0` with `inactive_parts_to_delay_insert > 0`. [#22947](https://github.com/ClickHouse/ClickHouse/pull/22947) ([Azat Khuzhin](https://github.com/azat)).
* `dateDiff` now works with `DateTime64` arguments (even for values outside of `DateTime` range) [#22931](https://github.com/ClickHouse/ClickHouse/pull/22931) ([Vasily Nemkov](https://github.com/Enmk)).
* MaterializeMySQL (experimental feature): added an ability to replicate MySQL databases containing views without failing. This is accomplished by ignoring the views. [#22760](https://github.com/ClickHouse/ClickHouse/pull/22760) ([Christian](https://github.com/cfroystad)).
* Allow RBAC row policies via the PostgreSQL protocol. Closes [#22658](https://github.com/ClickHouse/ClickHouse/issues/22658). The PostgreSQL protocol is enabled in the configuration by default. [#22755](https://github.com/ClickHouse/ClickHouse/pull/22755) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Add a metric to track how much time is spent waiting for the Buffer layer lock. [#22725](https://github.com/ClickHouse/ClickHouse/pull/22725) ([Azat Khuzhin](https://github.com/azat)).
* Allow using CTEs in VIEW definitions (a sketch follows this list). This closes [#22491](https://github.com/ClickHouse/ClickHouse/issues/22491). [#22657](https://github.com/ClickHouse/ClickHouse/pull/22657) ([Amos Bird](https://github.com/amosbird)).
* Clear the rest of the screen and show cursor in `clickhouse-client` if previous program has left garbage in terminal. This closes [#16518](https://github.com/ClickHouse/ClickHouse/issues/16518). [#22634](https://github.com/ClickHouse/ClickHouse/pull/22634) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Make the `round` function behave consistently on non-x86_64 platforms. Rounding half to nearest even (banker's rounding) is used. [#22582](https://github.com/ClickHouse/ClickHouse/pull/22582) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Correctly check the structure of data blocks that are sent by Distributed tables. [#22325](https://github.com/ClickHouse/ClickHouse/pull/22325) ([Azat Khuzhin](https://github.com/azat)).
* Allow publishing Kafka errors to a virtual column of Kafka engine, controlled by the `kafka_handle_error_mode` setting. [#21850](https://github.com/ClickHouse/ClickHouse/pull/21850) ([fastio](https://github.com/fastio)).
* Add aliases `simpleJSONExtract/simpleJSONHas` to `visitParam/visitParamExtract{UInt, Int, Bool, Float, Raw, String}`. Fixes [#21383](https://github.com/ClickHouse/ClickHouse/issues/21383). [#21519](https://github.com/ClickHouse/ClickHouse/pull/21519) ([fastio](https://github.com/fastio)).
* Add `clickhouse-library-bridge` for library dictionary source. Closes [#9502](https://github.com/ClickHouse/ClickHouse/issues/9502). [#21509](https://github.com/ClickHouse/ClickHouse/pull/21509) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Forbid dropping a column if it is referenced by a materialized view. Closes [#21164](https://github.com/ClickHouse/ClickHouse/issues/21164). [#21303](https://github.com/ClickHouse/ClickHouse/pull/21303) ([flynn](https://github.com/ucasFL)).
* Support dynamic interserver credentials (rotating credentials without downtime). [#14113](https://github.com/ClickHouse/ClickHouse/pull/14113) ([johnskopis](https://github.com/johnskopis)).
* Add support for Kafka storage with `Arrow` and `ArrowStream` format messages. [#23415](https://github.com/ClickHouse/ClickHouse/pull/23415) ([Chao Ma](https://github.com/godliness)).
* Fixed missing semicolon in exception message. The user may find this exception message unpleasant to read. [#23208](https://github.com/ClickHouse/ClickHouse/pull/23208) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed missing whitespace in some exception messages about `LowCardinality` type. [#23207](https://github.com/ClickHouse/ClickHouse/pull/23207) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Some values were formatted with alignment in center in table cells in `Markdown` format. Not anymore. [#23096](https://github.com/ClickHouse/ClickHouse/pull/23096) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Remove non-essential details from suggestions in clickhouse-client. This closes [#22158](https://github.com/ClickHouse/ClickHouse/issues/22158). [#23040](https://github.com/ClickHouse/ClickHouse/pull/23040) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Correct calculation of the `bytes_allocated` field in `system.dictionaries` for `sparse_hashed` dictionaries. [#22867](https://github.com/ClickHouse/ClickHouse/pull/22867) ([Azat Khuzhin](https://github.com/azat)).
* Fixed approximate total rows accounting for reverse reading from MergeTree. [#22726](https://github.com/ClickHouse/ClickHouse/pull/22726) ([Azat Khuzhin](https://github.com/azat)).
* Fix the case when it was possible to configure a dictionary with a ClickHouse source pointing to itself, which led to an infinite loop. Closes [#14314](https://github.com/ClickHouse/ClickHouse/issues/14314). [#22479](https://github.com/ClickHouse/ClickHouse/pull/22479) ([Maksim Kita](https://github.com/kitaisreal)).
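A minimal sketch of the CTE-in-VIEW support mentioned above; all names are illustrative:

```sql
CREATE VIEW doubled_numbers AS
WITH small AS
(
    SELECT number AS n
    FROM numbers(10)
)
SELECT n * 2 AS doubled
FROM small;

SELECT * FROM doubled_numbers;
```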
#### Bug Fix
* Multiple fixes for hedged requests. Fixed an error `Can't initialize pipeline with empty pipe` for queries with `GLOBAL IN/JOIN` when the setting `use_hedged_requests` is enabled. Fixes [#23431](https://github.com/ClickHouse/ClickHouse/issues/23431). [#23805](https://github.com/ClickHouse/ClickHouse/pull/23805) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). Fixed a race condition in hedged connections which leads to a crash. This fixes [#22161](https://github.com/ClickHouse/ClickHouse/issues/22161). [#22443](https://github.com/ClickHouse/ClickHouse/pull/22443) ([Kruglov Pavel](https://github.com/Avogar)). Fix a possible crash if an `unknown packet` was received from a remote query (with `async_socket_for_remote` enabled). Fixes [#21167](https://github.com/ClickHouse/ClickHouse/issues/21167). [#23309](https://github.com/ClickHouse/ClickHouse/pull/23309) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed the behavior where disabling the `input_format_with_names_use_header` setting discarded all input in the CSVWithNames format. This fixes [#22406](https://github.com/ClickHouse/ClickHouse/issues/22406). [#23202](https://github.com/ClickHouse/ClickHouse/pull/23202) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fixed remote JDBC bridge timeout connection issue. Closes [#9609](https://github.com/ClickHouse/ClickHouse/issues/9609). [#23771](https://github.com/ClickHouse/ClickHouse/pull/23771) ([Maksim Kita](https://github.com/kitaisreal), [alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix the logic of the initial load of `complex_key_hashed` dictionaries if `update_field` is specified. Closes [#23800](https://github.com/ClickHouse/ClickHouse/issues/23800). [#23824](https://github.com/ClickHouse/ClickHouse/pull/23824) ([Maksim Kita](https://github.com/kitaisreal)).
* Fixed crash when `PREWHERE` and row policy filter are both in effect with empty result. [#23763](https://github.com/ClickHouse/ClickHouse/pull/23763) ([Amos Bird](https://github.com/amosbird)).
* Avoid possible "Cannot schedule a task" error (in case some exception had been occurred) on INSERT into Distributed. [#23744](https://github.com/ClickHouse/ClickHouse/pull/23744) ([Azat Khuzhin](https://github.com/azat)).
* Added an exception for the case of completely identical values in both samples in the aggregate function `mannWhitneyUTest`. This fixes [#23646](https://github.com/ClickHouse/ClickHouse/issues/23646). [#23654](https://github.com/ClickHouse/ClickHouse/pull/23654) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fixed server fault when inserting data through HTTP caused an exception. This fixes [#23512](https://github.com/ClickHouse/ClickHouse/issues/23512). [#23643](https://github.com/ClickHouse/ClickHouse/pull/23643) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fixed misinterpretation of some `LIKE` expressions with escape sequences. [#23610](https://github.com/ClickHouse/ClickHouse/pull/23610) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed restart / stop command hanging. Closes [#20214](https://github.com/ClickHouse/ClickHouse/issues/20214). [#23552](https://github.com/ClickHouse/ClickHouse/pull/23552) ([filimonov](https://github.com/filimonov)).
* Fixed `COLUMNS` matcher in case of multiple JOINs in select query. Closes [#22736](https://github.com/ClickHouse/ClickHouse/issues/22736). [#23501](https://github.com/ClickHouse/ClickHouse/pull/23501) ([Maksim Kita](https://github.com/kitaisreal)).
* Fixed a crash when modifying column's default value when a column itself is used as `ReplacingMergeTree`'s parameter. [#23483](https://github.com/ClickHouse/ClickHouse/pull/23483) ([hexiaoting](https://github.com/hexiaoting)).
* Fixed corner cases in vertical merges with `ReplacingMergeTree`. In rare cases they could lead to fails of merges with exceptions like `Incomplete granules are not allowed while blocks are granules size`. [#23459](https://github.com/ClickHouse/ClickHouse/pull/23459) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed a bug that did not allow casting an empty array literal to an array with dimensions greater than 1, e.g. `CAST([] AS Array(Array(String)))` (see the example after this list). Closes [#14476](https://github.com/ClickHouse/ClickHouse/issues/14476). [#23456](https://github.com/ClickHouse/ClickHouse/pull/23456) ([Maksim Kita](https://github.com/kitaisreal)).
* Fixed a bug when `deltaSum` aggregate function produced incorrect result after resetting the counter. [#23437](https://github.com/ClickHouse/ClickHouse/pull/23437) ([Russ Frank](https://github.com/rf)).
* Fixed `Cannot unlink file` error on unsuccessful creation of ReplicatedMergeTree table with multidisk configuration. This closes [#21755](https://github.com/ClickHouse/ClickHouse/issues/21755). [#23433](https://github.com/ClickHouse/ClickHouse/pull/23433) ([tavplubix](https://github.com/tavplubix)).
* Fixed incompatible constant expression generation during partition pruning based on virtual columns. This fixes https://github.com/ClickHouse/ClickHouse/pull/21401#discussion_r611888913. [#23366](https://github.com/ClickHouse/ClickHouse/pull/23366) ([Amos Bird](https://github.com/amosbird)).
* Fixed a crash when the setting `join_algorithm` is set to `'auto'` and a Join is performed with a Dictionary. Close [#23002](https://github.com/ClickHouse/ClickHouse/issues/23002). [#23312](https://github.com/ClickHouse/ClickHouse/pull/23312) ([Vladimir](https://github.com/vdimir)).
* Don't relax NOT conditions during partition pruning. This fixes [#23305](https://github.com/ClickHouse/ClickHouse/issues/23305) and [#21539](https://github.com/ClickHouse/ClickHouse/issues/21539). [#23310](https://github.com/ClickHouse/ClickHouse/pull/23310) ([Amos Bird](https://github.com/amosbird)).
* Fixed very rare race condition on background cleanup of old blocks. It might cause a block not to be deduplicated if it's too close to the end of deduplication window. [#23301](https://github.com/ClickHouse/ClickHouse/pull/23301) ([tavplubix](https://github.com/tavplubix)).
* Fixed very rare (distributed) race condition between creation and removal of ReplicatedMergeTree tables. It might cause exceptions like `node doesn't exist` on attempt to create replicated table. Fixes [#21419](https://github.com/ClickHouse/ClickHouse/issues/21419). [#23294](https://github.com/ClickHouse/ClickHouse/pull/23294) ([tavplubix](https://github.com/tavplubix)).
* Fixed simple key dictionary from DDL creation if primary key is not first attribute. Fixes [#23236](https://github.com/ClickHouse/ClickHouse/issues/23236). [#23262](https://github.com/ClickHouse/ClickHouse/pull/23262) ([Maksim Kita](https://github.com/kitaisreal)).
* Fixed reading from ODBC when there are many long column names in a table. Closes [#8853](https://github.com/ClickHouse/ClickHouse/issues/8853). [#23215](https://github.com/ClickHouse/ClickHouse/pull/23215) ([Kseniia Sumarokova](https://github.com/kssenii)).
* MaterializeMySQL (experimental feature): fixed `Not found column` error when selecting from `MaterializeMySQL` with condition on key column. Fixes [#22432](https://github.com/ClickHouse/ClickHouse/issues/22432). [#23200](https://github.com/ClickHouse/ClickHouse/pull/23200) ([tavplubix](https://github.com/tavplubix)).
* Correct alias handling if a subquery was optimized to a constant. Fixes [#22924](https://github.com/ClickHouse/ClickHouse/issues/22924). Fixes [#10401](https://github.com/ClickHouse/ClickHouse/issues/10401). [#23191](https://github.com/ClickHouse/ClickHouse/pull/23191) ([Maksim Kita](https://github.com/kitaisreal)).
* The server might fail to start if the `data_type_default_nullable` setting was enabled in the default profile; it's fixed. Fixes [#22573](https://github.com/ClickHouse/ClickHouse/issues/22573). [#23185](https://github.com/ClickHouse/ClickHouse/pull/23185) ([tavplubix](https://github.com/tavplubix)).
* Fixed a crash on shutdown which happened because of wrong accounting of current connections. [#23154](https://github.com/ClickHouse/ClickHouse/pull/23154) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fixed `Table .inner_id... doesn't exist` error when selecting from Materialized View after detaching it from Atomic database and attaching back. [#23047](https://github.com/ClickHouse/ClickHouse/pull/23047) ([tavplubix](https://github.com/tavplubix)).
* Fix error `Cannot find column in ActionsDAG result` which may happen if subquery uses `untuple`. Fixes [#22290](https://github.com/ClickHouse/ClickHouse/issues/22290). [#22991](https://github.com/ClickHouse/ClickHouse/pull/22991) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix usage of constant columns of type `Map` with nullable values. [#22939](https://github.com/ClickHouse/ClickHouse/pull/22939) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed `formatDateTime()` on `DateTime64` with the `%C` format specifier, and fixed `toDateTime64()` for large values and non-zero scale. [#22937](https://github.com/ClickHouse/ClickHouse/pull/22937) ([Vasily Nemkov](https://github.com/Enmk)).
* Fixed a crash when using `mannWhitneyUTest` and `rankCorr` with window functions. This fixes [#22728](https://github.com/ClickHouse/ClickHouse/issues/22728). [#22876](https://github.com/ClickHouse/ClickHouse/pull/22876) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* LIVE VIEW (experimental feature): fixed possible hanging in concurrent DROP/CREATE of TEMPORARY LIVE VIEW in `TemporaryLiveViewCleaner`, [see](https://gist.github.com/vzakaznikov/0c03195960fc86b56bfe2bc73a90019e). [#22858](https://github.com/ClickHouse/ClickHouse/pull/22858) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fixed pushdown of `HAVING` in the case when the filter column is used in aggregation. [#22763](https://github.com/ClickHouse/ClickHouse/pull/22763) ([Anton Popov](https://github.com/CurtizJ)).
* Fixed possible hangs in Zookeeper requests in case of OOM exception. Fixes [#22438](https://github.com/ClickHouse/ClickHouse/issues/22438). [#22684](https://github.com/ClickHouse/ClickHouse/pull/22684) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fixed wait for mutations on several replicas for ReplicatedMergeTree table engines. Previously, mutation/alter query may finish before mutation actually executed on other replicas. [#22669](https://github.com/ClickHouse/ClickHouse/pull/22669) ([alesapin](https://github.com/alesapin)).
* Fixed exception for Log with nested types without columns in the SELECT clause. [#22654](https://github.com/ClickHouse/ClickHouse/pull/22654) ([Azat Khuzhin](https://github.com/azat)).
* Fix unlimited wait for auxiliary AWS requests. [#22594](https://github.com/ClickHouse/ClickHouse/pull/22594) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Fixed a crash when client closes connection very early [#22579](https://github.com/ClickHouse/ClickHouse/issues/22579). [#22591](https://github.com/ClickHouse/ClickHouse/pull/22591) ([nvartolomei](https://github.com/nvartolomei)).
* `Map` data type (experimental feature): fixed an incorrect formatting of function `map` in distributed queries. [#22588](https://github.com/ClickHouse/ClickHouse/pull/22588) ([foolchi](https://github.com/foolchi)).
* Fixed deserialization of empty string without newline at end of TSV format. This closes [#20244](https://github.com/ClickHouse/ClickHouse/issues/20244). Possible workaround without version update: set `input_format_null_as_default` to zero. It was zero in old versions. [#22527](https://github.com/ClickHouse/ClickHouse/pull/22527) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fixed wrong cast of a column of `LowCardinality` type in Merge Join algorithm. Close [#22386](https://github.com/ClickHouse/ClickHouse/issues/22386), close [#22388](https://github.com/ClickHouse/ClickHouse/issues/22388). [#22510](https://github.com/ClickHouse/ClickHouse/pull/22510) ([Vladimir](https://github.com/vdimir)).
* Buffer overflow (on read) was possible in `tokenbf_v1` full text index. The excessive bytes are not used but the read operation may lead to crash in rare cases. This closes [#19233](https://github.com/ClickHouse/ClickHouse/issues/19233). [#22421](https://github.com/ClickHouse/ClickHouse/pull/22421) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Do not limit HTTP chunk size. Fixes [#21907](https://github.com/ClickHouse/ClickHouse/issues/21907). [#22322](https://github.com/ClickHouse/ClickHouse/pull/22322) ([Ivan](https://github.com/abyss7)).
* Fixed a bug which led to underaggregation of data in case of enabled `optimize_aggregation_in_order` and many parts in a table. Slightly improved performance of aggregation with enabled `optimize_aggregation_in_order`. [#21889](https://github.com/ClickHouse/ClickHouse/pull/21889) ([Anton Popov](https://github.com/CurtizJ)).
* Check if the table function `view` is used as a column. This complements #20350. [#21465](https://github.com/ClickHouse/ClickHouse/pull/21465) ([Amos Bird](https://github.com/amosbird)).
* Fix "unknown column" error for tables with `Merge` engine in queris with `JOIN` and aggregation. Closes [#18368](https://github.com/ClickHouse/ClickHouse/issues/18368), close [#22226](https://github.com/ClickHouse/ClickHouse/issues/22226). [#21370](https://github.com/ClickHouse/ClickHouse/pull/21370) ([Vladimir](https://github.com/vdimir)).
* Fixed name clashes in pushdown optimization. It caused incorrect `WHERE` filtration after FULL JOIN. Close [#20497](https://github.com/ClickHouse/ClickHouse/issues/20497). [#20622](https://github.com/ClickHouse/ClickHouse/pull/20622) ([Vladimir](https://github.com/vdimir)).
* Fixed very rare bug when quorum insert with `quorum_parallel=1` is not really "quorum" because of deduplication. [#18215](https://github.com/ClickHouse/ClickHouse/pull/18215) ([filimonov](https://github.com/filimonov) - reported, [alesapin](https://github.com/alesapin) - fixed).
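The empty-array cast from the fix above, restated as a runnable query:

```sql
-- Previously this cast failed; now it yields an empty Array(Array(String)).
SELECT CAST([] AS Array(Array(String)));
```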
#### Build/Testing/Packaging Improvement
* Run stateless tests in parallel in CI. [#22300](https://github.com/ClickHouse/ClickHouse/pull/22300) ([alesapin](https://github.com/alesapin)).
* Simplify debian packages. This fixes [#21698](https://github.com/ClickHouse/ClickHouse/issues/21698). [#22976](https://github.com/ClickHouse/ClickHouse/pull/22976) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Added support for ClickHouse build on Apple M1. [#21639](https://github.com/ClickHouse/ClickHouse/pull/21639) ([changvvb](https://github.com/changvvb)).
* Fixed ClickHouse Keeper build for macOS. [#22860](https://github.com/ClickHouse/ClickHouse/pull/22860) ([alesapin](https://github.com/alesapin)).
* Fixed some tests on AArch64 platform. [#22596](https://github.com/ClickHouse/ClickHouse/pull/22596) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Added function alignment for possibly better performance. [#21431](https://github.com/ClickHouse/ClickHouse/pull/21431) ([Danila Kutenin](https://github.com/danlark1)).
* Adjust some tests to output identical results on amd64 and aarch64 (qemu). The result was depending on implementation specific CPU behaviour. [#22590](https://github.com/ClickHouse/ClickHouse/pull/22590) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Allow query profiling only on x86_64. See [#15174](https://github.com/ClickHouse/ClickHouse/issues/15174#issuecomment-812954965) and [#15638](https://github.com/ClickHouse/ClickHouse/issues/15638#issuecomment-703805337). This closes [#15638](https://github.com/ClickHouse/ClickHouse/issues/15638). [#22580](https://github.com/ClickHouse/ClickHouse/pull/22580) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Allow building with unbundled xz (lzma) using `USE_INTERNAL_XZ_LIBRARY=OFF` CMake option. [#22571](https://github.com/ClickHouse/ClickHouse/pull/22571) ([Kfir Itzhak](https://github.com/mastertheknife)).
* Enable bundled `openldap` on `ppc64le` [#22487](https://github.com/ClickHouse/ClickHouse/pull/22487) ([Kfir Itzhak](https://github.com/mastertheknife)).
* Disable incompatible libraries (platform specific typically) on `ppc64le` [#22475](https://github.com/ClickHouse/ClickHouse/pull/22475) ([Kfir Itzhak](https://github.com/mastertheknife)).
* Add Jepsen test in CI for clickhouse Keeper. [#22373](https://github.com/ClickHouse/ClickHouse/pull/22373) ([alesapin](https://github.com/alesapin)).
* Build `jemalloc` with support for [heap profiling](https://github.com/jemalloc/jemalloc/wiki/Use-Case%3A-Heap-Profiling). [#22834](https://github.com/ClickHouse/ClickHouse/pull/22834) ([nvartolomei](https://github.com/nvartolomei)).
* Avoid UB in `*Log` engines for rwlock unlock due to unlock from another thread. [#22583](https://github.com/ClickHouse/ClickHouse/pull/22583) ([Azat Khuzhin](https://github.com/azat)).
* Fixed UB by unlocking the rwlock of the TinyLog from the same thread. [#22560](https://github.com/ClickHouse/ClickHouse/pull/22560) ([Azat Khuzhin](https://github.com/azat)).
## ClickHouse release 21.4
### ClickHouse release 21.4.1, 2021-04-12
#### Backward Incompatible Change
* The `toStartOfInterval` function will align hour intervals to midnight (in previous versions they were aligned to the start of the Unix epoch). For example, `toStartOfInterval(x, INTERVAL 11 HOUR)` will split every day into three intervals: `00:00:00..10:59:59`, `11:00:00..21:59:59` and `22:00:00..23:59:59`. This behaviour is better suited for practical needs (see the example after this list). This closes [#9510](https://github.com/ClickHouse/ClickHouse/issues/9510). [#22060](https://github.com/ClickHouse/ClickHouse/pull/22060) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* `Age` and `Precision` in graphite rollup configs should increase from retention to retention. Now it's checked and the wrong config raises an exception. [#21496](https://github.com/ClickHouse/ClickHouse/pull/21496) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix `cutToFirstSignificantSubdomainCustom()`/`firstSignificantSubdomainCustom()` returning wrong result for 3+ level domains present in custom top-level domain list. For input domains matching these custom top-level domains, the third-level domain was considered to be the first significant one. This is now fixed. This change may introduce incompatibility if the function is used in e.g. the sharding key. [#21946](https://github.com/ClickHouse/ClickHouse/pull/21946) ([Azat Khuzhin](https://github.com/azat)).
* Column `keys` in table `system.dictionaries` was replaced by columns `key.names` and `key.types`. Columns `key.names`, `key.types`, `attribute.names`, `attribute.types` in the `system.dictionaries` table do not require the dictionary to be loaded. [#21884](https://github.com/ClickHouse/ClickHouse/pull/21884) ([Maksim Kita](https://github.com/kitaisreal)).
* Now replicas that are processing the `ALTER TABLE ATTACH PART[ITION]` command search in their `detached/` folders before fetching the data from other replicas. As an implementation detail, a new command `ATTACH_PART` is introduced in the replicated log. Parts are searched and compared by their checksums. [#18978](https://github.com/ClickHouse/ClickHouse/pull/18978) ([Mike Kot](https://github.com/myrrc)). **Note**:
  * `ATTACH PART[ITION]` queries may not work during cluster upgrade.
  * It's not possible to roll back to an older ClickHouse version after executing an `ALTER ... ATTACH` query in the new version, as the old servers would fail to pass the `ATTACH_PART` entry in the replicated log.
* In this version, empty `<remote_url_allow_hosts></remote_url_allow_hosts>` will block all access to remote hosts while in previous versions it did nothing. If you want to keep old behaviour and you have empty `remote_url_allow_hosts` element in configuration file, remove it. [#20058](https://github.com/ClickHouse/ClickHouse/pull/20058) ([Vladimir Chebotarev](https://github.com/excitoon)).
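An example of the new interval alignment described in the first item of this list; the timestamp is chosen arbitrarily:

```sql
-- Hour intervals are aligned to midnight, so INTERVAL 11 HOUR splits a day into
-- 00:00:00..10:59:59, 11:00:00..21:59:59 and 22:00:00..23:59:59.
SELECT toStartOfInterval(toDateTime('2021-04-12 12:34:56'), INTERVAL 11 HOUR);
-- returns 2021-04-12 11:00:00
```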
#### New Feature
* Extended the range of `DateTime64` to support dates from year 1925 to 2283. Improved support of `DateTime` around the zero date (`1970-01-01`). [#9404](https://github.com/ClickHouse/ClickHouse/pull/9404) ([alexey-milovidov](https://github.com/alexey-milovidov), [Vasily Nemkov](https://github.com/Enmk)). Not all time and date functions work for the extended range of dates.
* Added support of Kerberos authentication for preconfigured users and HTTP requests (GSS-SPNEGO). [#14995](https://github.com/ClickHouse/ClickHouse/pull/14995) ([Denis Glazachev](https://github.com/traceon)).
* Add `prefer_column_name_to_alias` setting to use original column names instead of aliases. It is needed to be more compatible with common databases' aliasing rules. This is for [#9715](https://github.com/ClickHouse/ClickHouse/issues/9715) and [#9887](https://github.com/ClickHouse/ClickHouse/issues/9887). [#22044](https://github.com/ClickHouse/ClickHouse/pull/22044) ([Amos Bird](https://github.com/amosbird)).
* Added functions `dictGetChildren(dictionary, key)`, `dictGetDescendants(dictionary, key, level)`. Function `dictGetChildren` returns all children as an array of indexes. It is an inverse transformation for `dictGetHierarchy`. Function `dictGetDescendants` returns all descendants as if `dictGetChildren` was applied `level` times recursively. A zero `level` value is equivalent to infinity. Closes [#14656](https://github.com/ClickHouse/ClickHouse/issues/14656). [#22096](https://github.com/ClickHouse/ClickHouse/pull/22096) ([Maksim Kita](https://github.com/kitaisreal)).
* Added `executable_pool` dictionary source. Close [#14528](https://github.com/ClickHouse/ClickHouse/issues/14528). [#21321](https://github.com/ClickHouse/ClickHouse/pull/21321) ([Maksim Kita](https://github.com/kitaisreal)).
* Added table function `dictionary`. It works the same way as `Dictionary` engine. Closes [#21560](https://github.com/ClickHouse/ClickHouse/issues/21560). [#21910](https://github.com/ClickHouse/ClickHouse/pull/21910) ([Maksim Kita](https://github.com/kitaisreal)).
* Support `Nullable` type for `PolygonDictionary` attribute. [#21890](https://github.com/ClickHouse/ClickHouse/pull/21890) ([Maksim Kita](https://github.com/kitaisreal)).
* Functions `dictGet`, `dictHas` use current database name if it is not specified for dictionaries created with DDL. Closes [#21632](https://github.com/ClickHouse/ClickHouse/issues/21632). [#21859](https://github.com/ClickHouse/ClickHouse/pull/21859) ([Maksim Kita](https://github.com/kitaisreal)).
* Added function `dictGetOrNull`. It works like `dictGet`, but returns `Null` if the key was not found in the dictionary. Closes [#22375](https://github.com/ClickHouse/ClickHouse/issues/22375). [#22413](https://github.com/ClickHouse/ClickHouse/pull/22413) ([Maksim Kita](https://github.com/kitaisreal)).
* Added async update in `ComplexKeyCache`, `SSDCache`, `SSDComplexKeyCache` dictionaries. Added support for `Nullable` type in `Cache`, `ComplexKeyCache`, `SSDCache`, `SSDComplexKeyCache` dictionaries. Added support for multiple attributes fetch with `dictGet`, `dictGetOrDefault` functions. Fixes [#21517](https://github.com/ClickHouse/ClickHouse/issues/21517). [#20595](https://github.com/ClickHouse/ClickHouse/pull/20595) ([Maksim Kita](https://github.com/kitaisreal)).
* Support `dictHas` function for `RangeHashedDictionary`. Fixes [#6680](https://github.com/ClickHouse/ClickHouse/issues/6680). [#19816](https://github.com/ClickHouse/ClickHouse/pull/19816) ([Maksim Kita](https://github.com/kitaisreal)).
* Add function `timezoneOf` that returns the timezone name of `DateTime` or `DateTime64` data types. This does not close [#9959](https://github.com/ClickHouse/ClickHouse/issues/9959). Fix inconsistencies in function names: add aliases `timezone` and `timeZone` as well as `toTimezone` and `toTimeZone` and `timezoneOf` and `timeZoneOf`. [#22001](https://github.com/ClickHouse/ClickHouse/pull/22001) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add new optional clause `GRANTEES` for `CREATE/ALTER USER` commands. It specifies users or roles which are allowed to receive grants from this user on condition this user has also all required access granted with grant option. By default `GRANTEES ANY` is used which means a user with grant option can grant to anyone. Syntax: `CREATE USER ... GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]`. [#21641](https://github.com/ClickHouse/ClickHouse/pull/21641) ([Vitaly Baranov](https://github.com/vitlibar)).
* Add new column `slowdowns_count` to `system.clusters`. When using hedged requests, it shows how many times we switched to another replica because this replica was responding slowly. Also show actual value of `errors_count` in `system.clusters`. [#21480](https://github.com/ClickHouse/ClickHouse/pull/21480) ([Kruglov Pavel](https://github.com/Avogar)).
* Add `_partition_id` virtual column for `MergeTree*` engines. Allow to prune partitions by `_partition_id`. Add `partitionID()` function to calculate partition id string. [#21401](https://github.com/ClickHouse/ClickHouse/pull/21401) ([Amos Bird](https://github.com/amosbird)).
* Add function `isIPAddressInRange` to test if an IPv4 or IPv6 address is contained in a given CIDR network prefix (a usage sketch follows this list). [#21329](https://github.com/ClickHouse/ClickHouse/pull/21329) ([PHO](https://github.com/depressed-pho)).
* Added new SQL command `ALTER TABLE 'table_name' UNFREEZE [PARTITION 'part_expr'] WITH NAME 'backup_name'`. This command is needed to properly remove 'freezed' partitions from all disks. [#21142](https://github.com/ClickHouse/ClickHouse/pull/21142) ([Pavel Kovalenko](https://github.com/Jokser)).
* Support implicit key type conversion for JOIN. [#19885](https://github.com/ClickHouse/ClickHouse/pull/19885) ([Vladimir](https://github.com/vdimir)).
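A usage sketch for `isIPAddressInRange` from the list above; the addresses are arbitrary examples:

```sql
SELECT isIPAddressInRange('192.168.1.17', '192.168.1.0/24');  -- 1
SELECT isIPAddressInRange('192.168.2.17', '192.168.1.0/24');  -- 0
SELECT isIPAddressInRange('2001:db8::1', '2001:db8::/32');    -- 1
```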
#### Experimental Feature
* Support `RANGE OFFSET` frame (for window functions) for floating point types. Implement `lagInFrame`/`leadInFrame` window functions, which are analogous to `lag`/`lead`, but respect the window frame. They are identical when the frame is `between unbounded preceding and unbounded following` (a sketch follows this list). This closes [#5485](https://github.com/ClickHouse/ClickHouse/issues/5485). [#21895](https://github.com/ClickHouse/ClickHouse/pull/21895) ([Alexander Kuzmenkov](https://github.com/akuzm)).
* Zero-copy replication for `ReplicatedMergeTree` over S3 storage. [#16240](https://github.com/ClickHouse/ClickHouse/pull/16240) ([ianton-ru](https://github.com/ianton-ru)).
* Added possibility to migrate existing S3 disk to the schema with backup-restore capabilities. [#22070](https://github.com/ClickHouse/ClickHouse/pull/22070) ([Pavel Kovalenko](https://github.com/Jokser)).
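A hedged sketch of `lagInFrame` with an explicit frame, per the window-function item above (window functions were experimental in this release, hence the setting):

```sql
-- With an unbounded frame, lagInFrame behaves like the conventional lag().
SELECT
    number,
    lagInFrame(number, 1) OVER (
        ORDER BY number
        ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
    ) AS prev
FROM numbers(5)
SETTINGS allow_experimental_window_functions = 1;
```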
#### Performance Improvement
* Supported parallel formatting in `clickhouse-local` and everywhere else. [#21630](https://github.com/ClickHouse/ClickHouse/pull/21630) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Support parallel parsing for `CSVWithNames` and `TSVWithNames` formats. This closes [#21085](https://github.com/ClickHouse/ClickHouse/issues/21085). [#21149](https://github.com/ClickHouse/ClickHouse/pull/21149) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Enable read with mmap IO for file ranges from 64 MiB (the setting `min_bytes_to_use_mmap_io`). It may lead to a moderate performance improvement. [#22326](https://github.com/ClickHouse/ClickHouse/pull/22326) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add cache for files read with `min_bytes_to_use_mmap_io` setting. It makes significant (2x and more) performance improvement when the value of the setting is small by avoiding frequent mmap/munmap calls and the consequent page faults. Note that mmap IO has major drawbacks that makes it less reliable in production (e.g. hung or SIGBUS on faulty disks; less controllable memory usage). Nevertheless it is good in benchmarks. [#22206](https://github.com/ClickHouse/ClickHouse/pull/22206) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Avoid unnecessary data copy when using codec `NONE`. Please note that codec `NONE` is mostly useless - it's recommended to always use compression (`LZ4` is by default). Despite the common belief, disabling compression may not improve performance (the opposite effect is possible). The `NONE` codec is useful in some cases: - when data is uncompressable; - for synthetic benchmarks. [#22145](https://github.com/ClickHouse/ClickHouse/pull/22145) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Faster `GROUP BY` with small `max_rows_to_group_by` and `group_by_overflow_mode='any'`. [#21856](https://github.com/ClickHouse/ClickHouse/pull/21856) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Optimize performance of queries like `SELECT ... FINAL ... WHERE`. Now in queries with `FINAL` it's allowed to move columns that are part of the sorting key to `PREWHERE`. [#21830](https://github.com/ClickHouse/ClickHouse/pull/21830) ([foolchi](https://github.com/foolchi)).
* Improved performance by replacing `memcpy` with another implementation. This closes [#18583](https://github.com/ClickHouse/ClickHouse/issues/18583). [#21520](https://github.com/ClickHouse/ClickHouse/pull/21520) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Improve performance of aggregation in order of sorting key (with enabled setting `optimize_aggregation_in_order`). [#19401](https://github.com/ClickHouse/ClickHouse/pull/19401) ([Anton Popov](https://github.com/CurtizJ)).
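For the `CODEC(NONE)` note above, a minimal sketch of where the codec is declared (the table and columns are made up):

```sql
-- Skip compression for an already-compressed or incompressible payload.
CREATE TABLE blobs
(
    id UInt64,
    payload String CODEC(NONE)
)
ENGINE = MergeTree
ORDER BY id;
```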
#### Improvement
* Add connection pool for PostgreSQL table/database engine and dictionary source. Should fix [#21444](https://github.com/ClickHouse/ClickHouse/issues/21444). [#21839](https://github.com/ClickHouse/ClickHouse/pull/21839) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support non-default table schema for postgres storage/table-function. Closes [#21701](https://github.com/ClickHouse/ClickHouse/issues/21701). [#21711](https://github.com/ClickHouse/ClickHouse/pull/21711) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Support replicas priority for postgres dictionary source. [#21710](https://github.com/ClickHouse/ClickHouse/pull/21710) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Introduce a new merge tree setting `min_bytes_to_rebalance_partition_over_jbod` which allows assigning new parts to different disks of a JBOD volume in a balanced way. [#16481](https://github.com/ClickHouse/ClickHouse/pull/16481) ([Amos Bird](https://github.com/amosbird)).
* Added `Grant`, `Revoke` and `System` values of `query_kind` column for corresponding queries in `system.query_log`. [#21102](https://github.com/ClickHouse/ClickHouse/pull/21102) ([Vasily Nemkov](https://github.com/Enmk)).
* Allow customizing timeouts for HTTP connections used for replication independently from other HTTP timeouts. [#20088](https://github.com/ClickHouse/ClickHouse/pull/20088) ([nvartolomei](https://github.com/nvartolomei)).
* Better exception message in client in case of exception while server is writing blocks. In previous versions client may get misleading message like `Data compressed with different methods`. [#22427](https://github.com/ClickHouse/ClickHouse/pull/22427) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix error `Directory tmp_fetch_XXX already exists` which could happen after failed fetch part. Delete temporary fetch directory if it already exists. Fixes [#14197](https://github.com/ClickHouse/ClickHouse/issues/14197). [#22411](https://github.com/ClickHouse/ClickHouse/pull/22411) ([nvartolomei](https://github.com/nvartolomei)).
* Fix MSan report for function `range` with `UInt256` argument (support for large integers is experimental). This closes [#22157](https://github.com/ClickHouse/ClickHouse/issues/22157). [#22387](https://github.com/ClickHouse/ClickHouse/pull/22387) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add `current_database` column to `system.processes` table. It contains the current database of the query. [#22365](https://github.com/ClickHouse/ClickHouse/pull/22365) ([Alexander Kuzmenkov](https://github.com/akuzm)).
* Add case-insensitive history search/navigation and subword movement features to `clickhouse-client`. [#22105](https://github.com/ClickHouse/ClickHouse/pull/22105) ([Amos Bird](https://github.com/amosbird)).
* If a tuple of NULLs, e.g. `(NULL, NULL)`, is on the left hand side of the `IN` operator with tuples of non-NULLs on the right hand side, e.g. `SELECT (NULL, NULL) IN ((0, 0), (3, 1))`, return 0 instead of throwing an exception about incompatible types. The expression may also appear due to optimization of something like `SELECT (NULL, NULL) = (8, 0) OR (NULL, NULL) = (3, 2) OR (NULL, NULL) = (0, 0) OR (NULL, NULL) = (3, 1)`. This closes [#22017](https://github.com/ClickHouse/ClickHouse/issues/22017). [#22063](https://github.com/ClickHouse/ClickHouse/pull/22063) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Update used version of simdjson to 0.9.1. This fixes [#21984](https://github.com/ClickHouse/ClickHouse/issues/21984). [#22057](https://github.com/ClickHouse/ClickHouse/pull/22057) ([Vitaly Baranov](https://github.com/vitlibar)).
* Added case insensitive aliases for `CONNECTION_ID()` and `VERSION()` functions. This fixes [#22028](https://github.com/ClickHouse/ClickHouse/issues/22028). [#22042](https://github.com/ClickHouse/ClickHouse/pull/22042) ([Eugene Klimov](https://github.com/Slach)).
* Add option `strict_increase` to `windowFunnel` function to calculate each event once (resolves [#21835](https://github.com/ClickHouse/ClickHouse/issues/21835)). See the sketch after this list. [#22025](https://github.com/ClickHouse/ClickHouse/pull/22025) ([Vladimir](https://github.com/vdimir)).
* If partition key of a `MergeTree` table does not include `Date` or `DateTime` columns but includes exactly one `DateTime64` column, expose its values in the `min_time` and `max_time` columns in `system.parts` and `system.parts_columns` tables. Add `min_time` and `max_time` columns to `system.parts_columns` table (this was an inconsistency with the `system.parts` table). This closes [#18244](https://github.com/ClickHouse/ClickHouse/issues/18244). [#22011](https://github.com/ClickHouse/ClickHouse/pull/22011) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Supported `replication_alter_partitions_sync=1` setting in `clickhouse-copier` for moving partitions from the helper table to the destination. Decreased default timeouts. Fixes [#21911](https://github.com/ClickHouse/ClickHouse/issues/21911). [#21912](https://github.com/ClickHouse/ClickHouse/pull/21912) ([turbo jason](https://github.com/songenjie)).
* Show path to data directory of `EmbeddedRocksDB` tables in system tables. [#21903](https://github.com/ClickHouse/ClickHouse/pull/21903) ([tavplubix](https://github.com/tavplubix)).
* Add profile event `HedgedRequestsChangeReplica`, change the read data timeout from seconds to milliseconds. [#21886](https://github.com/ClickHouse/ClickHouse/pull/21886) ([Kruglov Pavel](https://github.com/Avogar)).
* DiskS3 (experimental feature under development). Fixed a bug that made it impossible to move a directory if the destination is not empty and a cache disk is used. [#21837](https://github.com/ClickHouse/ClickHouse/pull/21837) ([Pavel Kovalenko](https://github.com/Jokser)).
* Better formatting for `Array` and `Map` data types in Web UI. [#21798](https://github.com/ClickHouse/ClickHouse/pull/21798) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Update clusters only if their configurations were updated. [#21685](https://github.com/ClickHouse/ClickHouse/pull/21685) ([Kruglov Pavel](https://github.com/Avogar)).
* Propagate query and session settings for distributed DDL queries. Set `distributed_ddl_entry_format_version` to 2 to enable this. Added `distributed_ddl_output_mode` setting. Supported modes: `none`, `throw` (default), `null_status_on_timeout` and `never_throw`. Miscellaneous fixes and improvements for `Replicated` database engine. [#21535](https://github.com/ClickHouse/ClickHouse/pull/21535) ([tavplubix](https://github.com/tavplubix)).
* If `PODArray` was instantiated with an element size that is neither a fraction nor a multiple of 16, buffer overflow was possible. No bugs in current releases exist. [#21533](https://github.com/ClickHouse/ClickHouse/pull/21533) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add `last_error_time`/`last_error_message`/`last_error_stacktrace`/`remote` columns for `system.errors`. [#21529](https://github.com/ClickHouse/ClickHouse/pull/21529) ([Azat Khuzhin](https://github.com/azat)).
* Add aliases `simpleJSONExtract/simpleJSONHas` to `visitParam/visitParamExtract{UInt, Int, Bool, Float, Raw, String}`. Fixes #21383. [#21519](https://github.com/ClickHouse/ClickHouse/pull/21519) ([fastio](https://github.com/fastio)).
* Add setting `optimize_skip_unused_shards_limit` to limit the number of sharding key values for `optimize_skip_unused_shards`. [#21512](https://github.com/ClickHouse/ClickHouse/pull/21512) ([Azat Khuzhin](https://github.com/azat)).
* Improve `clickhouse-format` to not throw an exception when there are extra spaces or a comment after the last query, and to throw an exception early with a readable message when formatting `ASTInsertQuery` with data. [#21311](https://github.com/ClickHouse/ClickHouse/pull/21311) ([flynn](https://github.com/ucasFL)).
* Improve support of integer keys in data type `Map`. [#21157](https://github.com/ClickHouse/ClickHouse/pull/21157) ([Anton Popov](https://github.com/CurtizJ)).
* MaterializeMySQL: attempt to reconnect to MySQL if the connection is lost. [#20961](https://github.com/ClickHouse/ClickHouse/pull/20961) ([Håvard Kvålen](https://github.com/havardk)).
* Support more cases to rewrite `CROSS JOIN` to `INNER JOIN`. [#20392](https://github.com/ClickHouse/ClickHouse/pull/20392) ([Vladimir](https://github.com/vdimir)).
* Do not create empty parts on INSERT when the `optimize_on_insert` setting is enabled. Fixes [#20304](https://github.com/ClickHouse/ClickHouse/issues/20304). [#20387](https://github.com/ClickHouse/ClickHouse/pull/20387) ([Kruglov Pavel](https://github.com/Avogar)).
* `MaterializeMySQL`: add minmax skipping index for `_version` column. [#20382](https://github.com/ClickHouse/ClickHouse/pull/20382) ([Stig Bakken](https://github.com/stigsb)).
* Add option `--backslash` for `clickhouse-format`, which can add a backslash at the end of each line of the formatted query. [#21494](https://github.com/ClickHouse/ClickHouse/pull/21494) ([flynn](https://github.com/ucasFL)).
* Now ClickHouse will not throw a `LOGICAL_ERROR` exception when we try to mutate an already covered part. Fixes [#22013](https://github.com/ClickHouse/ClickHouse/issues/22013). [#22291](https://github.com/ClickHouse/ClickHouse/pull/22291) ([alesapin](https://github.com/alesapin)).
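A minimal sketch of `windowFunnel` with the new `strict_increase` mode (the `events` table and its columns are hypothetical):

```sql
-- 'strict_increase' only advances the funnel on strictly increasing timestamps,
-- so a repeated event with the same timestamp is counted at most once.
SELECT
    user_id,
    windowFunnel(3600, 'strict_increase')(
        event_time,
        event = 'page_view',
        event = 'add_to_cart',
        event = 'purchase'
    ) AS level
FROM events
GROUP BY user_id;
```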
#### Bug Fix
* Remove socket from epoll before cancelling packet receiver in `HedgedConnections` to prevent possible race. Fixes [#22161](https://github.com/ClickHouse/ClickHouse/issues/22161). [#22443](https://github.com/ClickHouse/ClickHouse/pull/22443) ([Kruglov Pavel](https://github.com/Avogar)).
* Add (missing) memory accounting in parallel parsing routines. In previous versions OOM was possible when the result set contains very large blocks of data. This closes [#22008](https://github.com/ClickHouse/ClickHouse/issues/22008). [#22425](https://github.com/ClickHouse/ClickHouse/pull/22425) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix exception which may happen when `SELECT` has a constant `WHERE` condition and the source table has columns whose names are digits. [#22270](https://github.com/ClickHouse/ClickHouse/pull/22270) ([LiuNeng](https://github.com/liuneng1994)).
* Fix query cancellation with `use_hedged_requests=0` and `async_socket_for_remote=1`. [#22183](https://github.com/ClickHouse/ClickHouse/pull/22183) ([Azat Khuzhin](https://github.com/azat)).
* Fix uncaught exception in `InterserverIOHTTPHandler`. [#22146](https://github.com/ClickHouse/ClickHouse/pull/22146) ([Azat Khuzhin](https://github.com/azat)).
* Fix docker entrypoint in case `http_port` is not in the config. [#22132](https://github.com/ClickHouse/ClickHouse/pull/22132) ([Ewout](https://github.com/devwout)).
* Fix error `Invalid number of rows in Chunk` in `JOIN` with `TOTALS` and `arrayJoin`. Closes [#19303](https://github.com/ClickHouse/ClickHouse/issues/19303). [#22129](https://github.com/ClickHouse/ClickHouse/pull/22129) ([Vladimir](https://github.com/vdimir)).
* Fix the name of the background thread pool which is used to poll messages from Kafka. With the broken thread pool, the Kafka engine would not consume messages from the message queue. [#22122](https://github.com/ClickHouse/ClickHouse/pull/22122) ([fastio](https://github.com/fastio)).
* Fix waiting for `OPTIMIZE` and `ALTER` queries for `ReplicatedMergeTree` table engines. Now the query will not hang when the table was detached or restarted. [#22118](https://github.com/ClickHouse/ClickHouse/pull/22118) ([alesapin](https://github.com/alesapin)).
* Disable `async_socket_for_remote`/`use_hedged_requests` for buggy Linux kernels. [#22109](https://github.com/ClickHouse/ClickHouse/pull/22109) ([Azat Khuzhin](https://github.com/azat)).
* Docker entrypoint: avoid chown of `.` in case when `LOG_PATH` is empty. Closes [#22100](https://github.com/ClickHouse/ClickHouse/issues/22100). [#22102](https://github.com/ClickHouse/ClickHouse/pull/22102) ([filimonov](https://github.com/filimonov)).
* The function `decrypt` was lacking a check for the minimal size of data encrypted in `AEAD` mode. This closes [#21897](https://github.com/ClickHouse/ClickHouse/issues/21897). [#22064](https://github.com/ClickHouse/ClickHouse/pull/22064) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* In a rare case, merge for `CollapsingMergeTree` may create a granule with `index_granularity + 1` rows. Because of this, an internal check, added in [#18928](https://github.com/ClickHouse/ClickHouse/issues/18928) (affects 21.2 and 21.3), may fail with error `Incomplete granules are not allowed while blocks are granules size`. This error did not allow parts to merge. [#21976](https://github.com/ClickHouse/ClickHouse/pull/21976) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Reverted [#15454](https://github.com/ClickHouse/ClickHouse/issues/15454) that may cause significant increase in memory usage while loading external dictionaries of hashed type. This closes [#21935](https://github.com/ClickHouse/ClickHouse/issues/21935). [#21948](https://github.com/ClickHouse/ClickHouse/pull/21948) ([Maksim Kita](https://github.com/kitaisreal)).
* Prevent hedged connections overlaps (`Unknown packet 9 from server` error). [#21941](https://github.com/ClickHouse/ClickHouse/pull/21941) ([Azat Khuzhin](https://github.com/azat)).
* Fix reading the HTTP POST request with "multipart/form-data" content type in some cases. [#21936](https://github.com/ClickHouse/ClickHouse/pull/21936) ([Ivan](https://github.com/abyss7)).
* Fix wrong `ORDER BY` results when a query contains window functions, and optimization for reading in primary key order is applied. Fixes [#21828](https://github.com/ClickHouse/ClickHouse/issues/21828). [#21915](https://github.com/ClickHouse/ClickHouse/pull/21915) ([Alexander Kuzmenkov](https://github.com/akuzm)).
* Fix deadlock in first catboost model execution. Closes [#13832](https://github.com/ClickHouse/ClickHouse/issues/13832). [#21844](https://github.com/ClickHouse/ClickHouse/pull/21844) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix incorrect query result (and possible crash) which could happen when `WHERE` or `HAVING` condition is pushed before `GROUP BY`. Fixes [#21773](https://github.com/ClickHouse/ClickHouse/issues/21773). [#21841](https://github.com/ClickHouse/ClickHouse/pull/21841) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Better error handling and logging in `WriteBufferFromS3`. [#21836](https://github.com/ClickHouse/ClickHouse/pull/21836) ([Pavel Kovalenko](https://github.com/Jokser)).
* Fix possible crashes in aggregate functions with combinator `Distinct` while using two-level aggregation. This is a follow-up fix of [#18365](https://github.com/ClickHouse/ClickHouse/pull/18365). It could only be reproduced in a production environment. [#21818](https://github.com/ClickHouse/ClickHouse/pull/21818) ([Amos Bird](https://github.com/amosbird)).
* Fix scalar subquery index analysis. This fixes [#21717](https://github.com/ClickHouse/ClickHouse/issues/21717), which was introduced in [#18896](https://github.com/ClickHouse/ClickHouse/pull/18896). [#21766](https://github.com/ClickHouse/ClickHouse/pull/21766) ([Amos Bird](https://github.com/amosbird)).
* Fix bug for `ReplicatedMergeTree` table engines when `ALTER MODIFY COLUMN` query doesn't change the type of a `Decimal` column if its size (32 bit or 64 bit) doesn't change. [#21728](https://github.com/ClickHouse/ClickHouse/pull/21728) ([alesapin](https://github.com/alesapin)).
* Fix possible infinite waiting when concurrent `OPTIMIZE` and `DROP` are run for `ReplicatedMergeTree`. [#21716](https://github.com/ClickHouse/ClickHouse/pull/21716) ([Azat Khuzhin](https://github.com/azat)).
* Fix function `arrayElement` with type `Map` for constant integer arguments. [#21699](https://github.com/ClickHouse/ClickHouse/pull/21699) ([Anton Popov](https://github.com/CurtizJ)).
* Fix SIGSEGV on not existing attributes from `ip_trie` with `access_to_key_from_attributes`. [#21692](https://github.com/ClickHouse/ClickHouse/pull/21692) ([Azat Khuzhin](https://github.com/azat)).
* The server now starts accepting connections only after `DDLWorker` and dictionary initialization. [#21676](https://github.com/ClickHouse/ClickHouse/pull/21676) ([Azat Khuzhin](https://github.com/azat)).
* Add type conversion for keys of tables of type `Join` (previously led to SIGSEGV). [#21646](https://github.com/ClickHouse/ClickHouse/pull/21646) ([Azat Khuzhin](https://github.com/azat)).
* Fix distributed requests cancellation (for example simple select from multiple shards with limit, i.e. `select * from remote('127.{2,3}', system.numbers) limit 100`) with `async_socket_for_remote=1`. [#21643](https://github.com/ClickHouse/ClickHouse/pull/21643) ([Azat Khuzhin](https://github.com/azat)).
* Fix `fsync_part_directory` for horizontal merge. [#21642](https://github.com/ClickHouse/ClickHouse/pull/21642) ([Azat Khuzhin](https://github.com/azat)).
* Remove unknown columns from joined table in `WHERE` for queries to external database engines (MySQL, PostgreSQL). close [#14614](https://github.com/ClickHouse/ClickHouse/issues/14614), close [#19288](https://github.com/ClickHouse/ClickHouse/issues/19288) (dup), close [#19645](https://github.com/ClickHouse/ClickHouse/issues/19645) (dup). [#21640](https://github.com/ClickHouse/ClickHouse/pull/21640) ([Vladimir](https://github.com/vdimir)).
* `std::terminate` was called if there was an error writing data into S3. [#21624](https://github.com/ClickHouse/ClickHouse/pull/21624) ([Vladimir](https://github.com/vdimir)).
* Fix possible error `Cannot find column` when `optimize_skip_unused_shards` is enabled and zero shards are used. [#21579](https://github.com/ClickHouse/ClickHouse/pull/21579) ([Azat Khuzhin](https://github.com/azat)).
* Fixed the case when a query has a constant `WHERE` condition and the setting `optimize_skip_unused_shards` is enabled: all shards might be skipped and the query could return an incorrect empty result. [#21550](https://github.com/ClickHouse/ClickHouse/pull/21550) ([Amos Bird](https://github.com/amosbird)).
* Fix table function `clusterAllReplicas` returning wrong `_shard_num`. close [#21481](https://github.com/ClickHouse/ClickHouse/issues/21481). [#21498](https://github.com/ClickHouse/ClickHouse/pull/21498) ([flynn](https://github.com/ucasFL)).
* Fix S3 table holding old credentials after config update. [#21457](https://github.com/ClickHouse/ClickHouse/pull/21457) ([Grigory Pervakov](https://github.com/GrigoryPervakov)).
* Fixed race on SSL object inside `SecureSocket` in Poco. [#21456](https://github.com/ClickHouse/ClickHouse/pull/21456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix `Avro` format parsing for `Kafka`. Fixes [#21437](https://github.com/ClickHouse/ClickHouse/issues/21437). [#21438](https://github.com/ClickHouse/ClickHouse/pull/21438) ([Ilya Golshtein](https://github.com/ilejn)).
* Fix receive and send timeouts and non-blocking read in secure socket. [#21429](https://github.com/ClickHouse/ClickHouse/pull/21429) ([Kruglov Pavel](https://github.com/Avogar)).
* `force_drop_table` flag didn't work for `MATERIALIZED VIEW`, it's fixed. Fixes [#18943](https://github.com/ClickHouse/ClickHouse/issues/18943). [#20626](https://github.com/ClickHouse/ClickHouse/pull/20626) ([tavplubix](https://github.com/tavplubix)).
* Fix name clashes in `PredicateRewriteVisitor`. It caused incorrect `WHERE` filtration after full join. Close [#20497](https://github.com/ClickHouse/ClickHouse/issues/20497). [#20622](https://github.com/ClickHouse/ClickHouse/pull/20622) ([Vladimir](https://github.com/vdimir)).
#### Build/Testing/Packaging Improvement
* Add [Jepsen](https://github.com/jepsen-io/jepsen) tests for ClickHouse Keeper. [#21677](https://github.com/ClickHouse/ClickHouse/pull/21677) ([alesapin](https://github.com/alesapin)).
* Run stateless tests in parallel in CI. Depends on [#22181](https://github.com/ClickHouse/ClickHouse/issues/22181). [#22300](https://github.com/ClickHouse/ClickHouse/pull/22300) ([alesapin](https://github.com/alesapin)).
* Enable status check for [SQLancer](https://github.com/sqlancer/sqlancer) CI run. [#22015](https://github.com/ClickHouse/ClickHouse/pull/22015) ([Ilya Yatsishin](https://github.com/qoega)).
* Multiple preparations for PowerPC builds: Enable the bundled openldap on `ppc64le`. [#22487](https://github.com/ClickHouse/ClickHouse/pull/22487) ([Kfir Itzhak](https://github.com/mastertheknife)). Enable compiling on `ppc64le` with Clang. [#22476](https://github.com/ClickHouse/ClickHouse/pull/22476) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix compiling boost on `ppc64le`. [#22474](https://github.com/ClickHouse/ClickHouse/pull/22474) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix CMake error about internal CMake variable `CMAKE_ASM_COMPILE_OBJECT` not set on `ppc64le`. [#22469](https://github.com/ClickHouse/ClickHouse/pull/22469) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix Fedora/RHEL/CentOS not finding `libclang_rt.builtins` on `ppc64le`. [#22458](https://github.com/ClickHouse/ClickHouse/pull/22458) ([Kfir Itzhak](https://github.com/mastertheknife)). Enable building with `jemalloc` on `ppc64le`. [#22447](https://github.com/ClickHouse/ClickHouse/pull/22447) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix ClickHouse's config embedding and cctz's timezone embedding on `ppc64le`. [#22445](https://github.com/ClickHouse/ClickHouse/pull/22445) ([Kfir Itzhak](https://github.com/mastertheknife)). Fixed compiling on `ppc64le` and use the correct instruction pointer register on `ppc64le`. [#22430](https://github.com/ClickHouse/ClickHouse/pull/22430) ([Kfir Itzhak](https://github.com/mastertheknife)).
* Re-enable the S3 (AWS) library on `aarch64`. [#22484](https://github.com/ClickHouse/ClickHouse/pull/22484) ([Kfir Itzhak](https://github.com/mastertheknife)).
* Add `tzdata` to Docker containers because reading `ORC` formats requires it. This closes [#14156](https://github.com/ClickHouse/ClickHouse/issues/14156). [#22000](https://github.com/ClickHouse/ClickHouse/pull/22000) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Introduce 2 arguments for `clickhouse-server` image Dockerfile: `deb_location` & `single_binary_location`. [#21977](https://github.com/ClickHouse/ClickHouse/pull/21977) ([filimonov](https://github.com/filimonov)).
* Allow to use clang-tidy with release builds by enabling assertions if it is used. [#21914](https://github.com/ClickHouse/ClickHouse/pull/21914) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add llvm-12 binary names to search in CMake scripts. Implicit constant conversions to mute clang warnings. Updated submodules to build with CMake 3.19. Mute recursion in macro expansion in `readpassphrase` library. Deprecated `-fuse-ld` changed to `--ld-path` for clang. [#21597](https://github.com/ClickHouse/ClickHouse/pull/21597) ([Ilya Yatsishin](https://github.com/qoega)).
* Updating `docker/test/testflows/runner/dockerd-entrypoint.sh` to use the Yandex dockerhub-proxy, because Docker Hub has enabled very restrictive rate limits. [#21551](https://github.com/ClickHouse/ClickHouse/pull/21551) ([vzakaznikov](https://github.com/vzakaznikov)).
* Fix macOS shared lib build. [#20184](https://github.com/ClickHouse/ClickHouse/pull/20184) ([nvartolomei](https://github.com/nvartolomei)).
* Add `ctime` option to `zookeeper-dump-tree`. It allows to dump node creation time. [#21842](https://github.com/ClickHouse/ClickHouse/pull/21842) ([Ilya](https://github.com/HumanUser)).
## ClickHouse release 21.3 (LTS)
### ClickHouse release v21.3, 2021-03-12
#### Backward Incompatible Change
* Now it's not allowed to create MergeTree tables in the old syntax with table TTL, because it was just ignored. Attaching old tables is still possible. [#20282](https://github.com/ClickHouse/ClickHouse/pull/20282) ([alesapin](https://github.com/alesapin)).
* Now all case-insensitive function names will be rewritten to their canonical representations. This is needed for projection query routing (the upcoming feature). [#20174](https://github.com/ClickHouse/ClickHouse/pull/20174) ([Amos Bird](https://github.com/amosbird)).
* Fix creation of `TTL` in cases when its expression is a function and is the same as the `ORDER BY` key. Now it's allowed to set custom aggregation for primary key columns in `TTL` with `GROUP BY`. Backward incompatible: for primary key columns which are not in `GROUP BY` and aren't set explicitly, the function `any` is now applied instead of `max` when the TTL expires. Also, if you use TTL with `WHERE` or `GROUP BY`, you may see exceptions during merges while performing a rolling update. See the sketch after this list. [#15450](https://github.com/ClickHouse/ClickHouse/pull/15450) ([Anton Popov](https://github.com/CurtizJ)).
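A minimal sketch of `TTL ... GROUP BY` with a custom aggregation for a value column (the schema is made up; the `GROUP BY` expressions must form a prefix of the primary key):

```sql
CREATE TABLE ttl_rollup
(
    d DateTime,
    k UInt64,
    v UInt64
)
ENGINE = MergeTree
ORDER BY (toStartOfDay(d), k)
-- After a month, expired rows are collapsed per (day, k); v keeps its maximum,
-- while non-key columns without an explicit SET get the `any` value.
TTL d + INTERVAL 1 MONTH GROUP BY toStartOfDay(d), k SET v = max(v);
```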
#### New Feature
* Add file engine settings: `engine_file_empty_if_not_exists` and `engine_file_truncate_on_insert`. [#20620](https://github.com/ClickHouse/ClickHouse/pull/20620) ([M0r64n](https://github.com/M0r64n)).
* Add aggregate function `deltaSum` for summing the differences between consecutive rows. See the sketch after this list. [#20057](https://github.com/ClickHouse/ClickHouse/pull/20057) ([Russ Frank](https://github.com/rf)).
* New `event_time_microseconds` column in `system.part_log` table. [#20027](https://github.com/ClickHouse/ClickHouse/pull/20027) ([Bharat Nallan](https://github.com/bharatnc)).
* Added `timezoneOffset(datetime)` function which will give the offset from UTC in seconds. This closes [#19850](https://github.com/ClickHouse/ClickHouse/issues/19850). [#19962](https://github.com/ClickHouse/ClickHouse/pull/19962) ([keenwolf](https://github.com/keen-wolf)).
* Add setting `insert_shard_id` to support insert data into specific shard from distributed table. [#19961](https://github.com/ClickHouse/ClickHouse/pull/19961) ([flynn](https://github.com/ucasFL)).
* Function `reinterpretAs` updated to support big integers. Fixes [#19691](https://github.com/ClickHouse/ClickHouse/issues/19691). [#19858](https://github.com/ClickHouse/ClickHouse/pull/19858) ([Maksim Kita](https://github.com/kitaisreal)).
* Added Server Side Encryption Customer Keys (the `x-amz-server-side-encryption-customer-(key/md5)` header) support in S3 client. See [the link](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). Closes [#19428](https://github.com/ClickHouse/ClickHouse/issues/19428). [#19748](https://github.com/ClickHouse/ClickHouse/pull/19748) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Added `implicit_key` option for `executable` dictionary source. It allows to avoid printing the key for every record if records come in the same order as the input keys. Implements [#14527](https://github.com/ClickHouse/ClickHouse/issues/14527). [#19677](https://github.com/ClickHouse/ClickHouse/pull/19677) ([Maksim Kita](https://github.com/kitaisreal)).
* Add quota type `query_selects` and `query_inserts`. [#19603](https://github.com/ClickHouse/ClickHouse/pull/19603) ([JackyWoo](https://github.com/JackyWoo)).
* Add function `extractTextFromHTML` [#19600](https://github.com/ClickHouse/ClickHouse/pull/19600) ([zlx19950903](https://github.com/zlx19950903)), ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Tables with `MergeTree*` engine now have two new table-level settings for query concurrency control. Setting `max_concurrent_queries` limits the number of concurrently executed queries which are related to this table. Setting `min_marks_to_honor_max_concurrent_queries` tells to apply previous setting only if query reads at least this number of marks. [#19544](https://github.com/ClickHouse/ClickHouse/pull/19544) ([Amos Bird](https://github.com/amosbird)).
* Added `file` function to read a file from the user_files directory as a String. This is different from the `file` table function. This implements [#18851](https://github.com/ClickHouse/ClickHouse/issues/18851). [#19204](https://github.com/ClickHouse/ClickHouse/pull/19204) ([keenwolf](https://github.com/keen-wolf)).
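A minimal sketch of `deltaSum` (arbitrary values, assuming rows arrive in the listed order; negative differences between consecutive rows are ignored):

```sql
SELECT deltaSum(x)
FROM (SELECT arrayJoin([1, 2, 3, 5, 2, 4]) AS x);
-- (2-1) + (3-2) + (5-3) + (4-2) = 6; the drop from 5 to 2 contributes nothing.
```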
#### Experimental Feature
* Add experimental `Replicated` database engine. It replicates DDL queries across multiple hosts. [#16193](https://github.com/ClickHouse/ClickHouse/pull/16193) ([tavplubix](https://github.com/tavplubix)).
* Introduce experimental support for window functions, enabled with `allow_experimental_window_functions = 1`. This is a preliminary, alpha-quality implementation that is not suitable for production use and will change in backward-incompatible ways in future releases. Please see [the documentation](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/sql-reference/window-functions/index.md#experimental-window-functions) for the list of supported features. See the sketch after this list. [#20337](https://github.com/ClickHouse/ClickHouse/pull/20337) ([Alexander Kuzmenkov](https://github.com/akuzm)).
* Add the ability to backup/restore metadata files for DiskS3. [#18377](https://github.com/ClickHouse/ClickHouse/pull/18377) ([Pavel Kovalenko](https://github.com/Jokser)).
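A minimal sketch of the experimental window functions (the `payments` table and its columns are hypothetical):

```sql
SET allow_experimental_window_functions = 1;

-- Running total of payments per user, in timestamp order.
SELECT
    user_id,
    amount,
    sum(amount) OVER (PARTITION BY user_id ORDER BY ts) AS running_total
FROM payments;
```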
#### Performance Improvement
* Hedged requests for remote queries. When the setting `use_hedged_requests` is enabled (off by default), allow to establish many connections with different replicas for a query. A new connection is opened in case existing connection(s) with replica(s) were not established within `hedged_connection_timeout` or no data was received within `receive_data_timeout`. The query uses the first connection which sends a non-empty progress packet (or a data packet, if `allow_changing_replica_until_first_data_packet`); other connections are cancelled. Queries with `max_parallel_replicas > 1` are supported. [#19291](https://github.com/ClickHouse/ClickHouse/pull/19291) ([Kruglov Pavel](https://github.com/Avogar)). This allows to significantly reduce tail latencies on very large clusters.
* Added support for `PREWHERE` (and enable the corresponding optimization) when tables have row-level security expressions specified. [#19576](https://github.com/ClickHouse/ClickHouse/pull/19576) ([Denis Glazachev](https://github.com/traceon)).
* The setting `distributed_aggregation_memory_efficient` is enabled by default. It will lower memory usage and improve performance of distributed queries. [#20599](https://github.com/ClickHouse/ClickHouse/pull/20599) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Improve performance of GROUP BY multiple fixed size keys. [#20472](https://github.com/ClickHouse/ClickHouse/pull/20472) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Improve performance of aggregate functions by more strict aliasing. [#19946](https://github.com/ClickHouse/ClickHouse/pull/19946) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Speed up reading from `Memory` tables in extreme cases (when reading speed is in order of 50 GB/sec) by simplification of pipeline and (consequently) less lock contention in pipeline scheduling. [#20468](https://github.com/ClickHouse/ClickHouse/pull/20468) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Partially reimplement HTTP server to make fewer copies of incoming and outgoing data. It gives up to 1.5x performance improvement on inserting long records over HTTP. [#19516](https://github.com/ClickHouse/ClickHouse/pull/19516) ([Ivan](https://github.com/abyss7)).
* Add `compress` setting for `Memory` tables. If it's enabled the table will use less RAM. On some machines and datasets it can also work faster on SELECT, but it is not always the case. This closes [#20093](https://github.com/ClickHouse/ClickHouse/issues/20093). Note: there are reasons why Memory tables can work slower than MergeTree: (1) lack of compression (2) static size of blocks (3) lack of indices and prewhere... [#20168](https://github.com/ClickHouse/ClickHouse/pull/20168) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Slightly better code in aggregation. [#20978](https://github.com/ClickHouse/ClickHouse/pull/20978) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add back `intDiv`/`modulo` specializations for better performance. This fixes [#21293](https://github.com/ClickHouse/ClickHouse/issues/21293). The regression was introduced in [#18145](https://github.com/ClickHouse/ClickHouse/pull/18145). [#21307](https://github.com/ClickHouse/ClickHouse/pull/21307) ([Amos Bird](https://github.com/amosbird)).
* Do not squash blocks too much on INSERT SELECT if inserting into Memory table. In previous versions inefficient data representation was created in Memory table after INSERT SELECT. This closes [#13052](https://github.com/ClickHouse/ClickHouse/issues/13052). [#20169](https://github.com/ClickHouse/ClickHouse/pull/20169) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix at least one case when DataType parser may have exponential complexity (found by fuzzer). This closes [#20096](https://github.com/ClickHouse/ClickHouse/issues/20096). [#20132](https://github.com/ClickHouse/ClickHouse/pull/20132) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Parallelize SELECT with FINAL for a single part with level > 0 when the `do_not_merge_across_partitions_select_final` setting is 1. See the sketch after this list. [#19375](https://github.com/ClickHouse/ClickHouse/pull/19375) ([Kruglov Pavel](https://github.com/Avogar)).
* Fill only requested columns when querying `system.parts` and `system.parts_columns`. Closes [#19570](https://github.com/ClickHouse/ClickHouse/issues/19570). [#21035](https://github.com/ClickHouse/ClickHouse/pull/21035) ([Anmol Arora](https://github.com/anmolarora)).
* Perform algebraic optimizations of arithmetic expressions inside `avg` aggregate function. close [#20092](https://github.com/ClickHouse/ClickHouse/issues/20092). [#20183](https://github.com/ClickHouse/ClickHouse/pull/20183) ([flynn](https://github.com/ucasFL)).
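A minimal sketch of the `FINAL` parallelization mentioned above (the table is hypothetical and assumed to use a `ReplacingMergeTree`-style engine):

```sql
SET do_not_merge_across_partitions_select_final = 1;

-- With this setting, parts whose data does not span partitions can be
-- processed in parallel even under FINAL.
SELECT *
FROM replacing_table FINAL
WHERE key = 42;
```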
#### Improvement
* Case-insensitive compression methods for table functions. Also fixed LZMA compression method which was checked in upper case. [#21416](https://github.com/ClickHouse/ClickHouse/pull/21416) ([Vladimir Chebotarev](https://github.com/excitoon)).
* Add two settings to delay or throw error during insertion when there are too many inactive parts. This is useful when server fails to clean up parts quickly enough. [#20178](https://github.com/ClickHouse/ClickHouse/pull/20178) ([Amos Bird](https://github.com/amosbird)).
* Provide better compatibility for MySQL clients: 1. mysql jdbc, 2. mycli. [#21367](https://github.com/ClickHouse/ClickHouse/pull/21367) ([Amos Bird](https://github.com/amosbird)).
* Forbid to drop a column if it's referenced by materialized view. Closes [#21164](https://github.com/ClickHouse/ClickHouse/issues/21164). [#21303](https://github.com/ClickHouse/ClickHouse/pull/21303) ([flynn](https://github.com/ucasFL)).
* MySQL dictionary source will now retry unexpected connection failures (Lost connection to MySQL server during query) which sometimes happen on SSL/TLS connections. [#21237](https://github.com/ClickHouse/ClickHouse/pull/21237) ([Alexander Kazakov](https://github.com/Akazz)).
* Usability improvement: more consistent `DateTime64` parsing: recognize the case when unix timestamp with subsecond resolution is specified as scaled integer (like `1111111111222` instead of `1111111111.222`). This closes [#13194](https://github.com/ClickHouse/ClickHouse/issues/13194). [#21053](https://github.com/ClickHouse/ClickHouse/pull/21053) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Only merge sorted blocks on the initiator with `distributed_group_by_no_merge`. [#20882](https://github.com/ClickHouse/ClickHouse/pull/20882) ([Azat Khuzhin](https://github.com/azat)).
* When loading config for the mysql source, ClickHouse will now randomize the list of replicas with the same priority to ensure the round-robin logic of picking a mysql endpoint. This closes [#20629](https://github.com/ClickHouse/ClickHouse/issues/20629). [#20632](https://github.com/ClickHouse/ClickHouse/pull/20632) ([Alexander Kazakov](https://github.com/Akazz)).
* Function `reinterpretAs(x, Type)` renamed to `reinterpret(x, Type)`. [#20611](https://github.com/ClickHouse/ClickHouse/pull/20611) ([Maksim Kita](https://github.com/kitaisreal)).
* Support vhost for RabbitMQ engine [#20576](https://github.com/ClickHouse/ClickHouse/issues/20576). [#20596](https://github.com/ClickHouse/ClickHouse/pull/20596) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Improved serialization for data types combined of Arrays and Tuples. Improved matching enum data types to protobuf enum type. Fixed serialization of the `Map` data type. Omitted values are now set by default. [#20506](https://github.com/ClickHouse/ClickHouse/pull/20506) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fixed race between execution of distributed DDL tasks and cleanup of DDL queue. Now DDL task cannot be removed from ZooKeeper if there are active workers. Fixes [#20016](https://github.com/ClickHouse/ClickHouse/issues/20016). [#20448](https://github.com/ClickHouse/ClickHouse/pull/20448) ([tavplubix](https://github.com/tavplubix)).
* Make FQDN and other DNS related functions work correctly in alpine images. [#20336](https://github.com/ClickHouse/ClickHouse/pull/20336) ([filimonov](https://github.com/filimonov)).
* Do not allow early constant folding of explicitly forbidden functions. [#20303](https://github.com/ClickHouse/ClickHouse/pull/20303) ([Azat Khuzhin](https://github.com/azat)).
* Implicit conversion from integer to Decimal type might succeed if the integer value does not fit into the Decimal type. Now it throws `ARGUMENT_OUT_OF_BOUND`. [#20232](https://github.com/ClickHouse/ClickHouse/pull/20232) ([tavplubix](https://github.com/tavplubix)).
* Lockless `SYSTEM FLUSH DISTRIBUTED`. [#20215](https://github.com/ClickHouse/ClickHouse/pull/20215) ([Azat Khuzhin](https://github.com/azat)).
* Normalize count(constant), sum(1) to count(). This is needed for projection query routing. [#20175](https://github.com/ClickHouse/ClickHouse/pull/20175) ([Amos Bird](https://github.com/amosbird)).
* Support all native integer types in bitmap functions. [#20171](https://github.com/ClickHouse/ClickHouse/pull/20171) ([Amos Bird](https://github.com/amosbird)).
* Updated `CacheDictionary`, `ComplexCacheDictionary`, `SSDCacheDictionary`, `SSDComplexKeyDictionary` to use LRUHashMap as underlying index. [#20164](https://github.com/ClickHouse/ClickHouse/pull/20164) ([Maksim Kita](https://github.com/kitaisreal)).
* The setting `access_management` is now configurable on startup by providing `CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT`, defaults to disabled (`0`) which was the prior value. [#20139](https://github.com/ClickHouse/ClickHouse/pull/20139) ([Marquitos](https://github.com/sonirico)).
* Fix toDateTime64(toDate()/toDateTime()) for DateTime64 - Implement DateTime64 clamping to match DateTime behaviour. [#20131](https://github.com/ClickHouse/ClickHouse/pull/20131) ([Azat Khuzhin](https://github.com/azat)).
* Quota improvements: SHOW TABLES is now considered as one query in the quota calculations, not two queries. SYSTEM queries now consume quota. Fix calculation of interval's end in quota consumption. [#20106](https://github.com/ClickHouse/ClickHouse/pull/20106) ([Vitaly Baranov](https://github.com/vitlibar)).
* Support `path IN (set)` expressions for the `system.zookeeper` table. See the sketch after this list. [#20105](https://github.com/ClickHouse/ClickHouse/pull/20105) ([小路](https://github.com/nicelulu)).
* Show full details of `MaterializeMySQL` tables in `system.tables`. [#20051](https://github.com/ClickHouse/ClickHouse/pull/20051) ([Stig Bakken](https://github.com/stigsb)).
* Fix data race in executable dictionary that was possible only on misuse (when the script returns data ignoring its input). [#20045](https://github.com/ClickHouse/ClickHouse/pull/20045) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* The value of MYSQL_OPT_RECONNECT option can now be controlled by "opt_reconnect" parameter in the config section of mysql replica. [#19998](https://github.com/ClickHouse/ClickHouse/pull/19998) ([Alexander Kazakov](https://github.com/Akazz)).
* If user calls `JSONExtract` function with `Float32` type requested, allow inaccurate conversion to the result type. For example the number `0.1` in JSON is double precision and is not representable in Float32, but the user still wants to get it. Previous versions return 0 for non-Nullable type and NULL for Nullable type to indicate that conversion is imprecise. The logic was 100% correct but it was surprising to users and leading to questions. This closes [#13962](https://github.com/ClickHouse/ClickHouse/issues/13962). [#19960](https://github.com/ClickHouse/ClickHouse/pull/19960) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add conversion of block structure for INSERT into Distributed tables if it does not match. [#19947](https://github.com/ClickHouse/ClickHouse/pull/19947) ([Azat Khuzhin](https://github.com/azat)).
* Improvement for the `system.distributed_ddl_queue` table. Initialize MaxDDLEntryID to the last value after restarting. Before this PR, MaxDDLEntryID will remain zero until a new DDLTask is processed. [#19924](https://github.com/ClickHouse/ClickHouse/pull/19924) ([Amos Bird](https://github.com/amosbird)).
* Show `MaterializeMySQL` tables in `system.parts`. [#19770](https://github.com/ClickHouse/ClickHouse/pull/19770) ([Stig Bakken](https://github.com/stigsb)).
* Add separate config directive for `Buffer` profile. [#19721](https://github.com/ClickHouse/ClickHouse/pull/19721) ([Azat Khuzhin](https://github.com/azat)).
* Move conditions that are not related to JOIN to WHERE clause. [#18720](https://github.com/ClickHouse/ClickHouse/issues/18720). [#19685](https://github.com/ClickHouse/ClickHouse/pull/19685) ([hexiaoting](https://github.com/hexiaoting)).
* Add ability to throttle INSERT into Distributed based on amount of pending bytes for async send (`bytes_to_delay_insert`/`max_delay_to_insert` and `bytes_to_throw_insert` settings for `Distributed` engine has been added). [#19673](https://github.com/ClickHouse/ClickHouse/pull/19673) ([Azat Khuzhin](https://github.com/azat)).
* Fix some rare cases when write errors can be ignored in destructors. [#19451](https://github.com/ClickHouse/ClickHouse/pull/19451) ([Azat Khuzhin](https://github.com/azat)).
* Print inline frames in stack traces for fatal errors. [#19317](https://github.com/ClickHouse/ClickHouse/pull/19317) ([Ivan](https://github.com/abyss7)).
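A minimal sketch of the `path IN (set)` support for `system.zookeeper` (the paths are illustrative):

```sql
SELECT path, name
FROM system.zookeeper
WHERE path IN ('/clickhouse', '/clickhouse/tables');
```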
#### Bug Fix
* Fix redundant reconnects to ZooKeeper and the possibility of two active sessions for a single clickhouse server. Both problems were introduced in [#14678](https://github.com/ClickHouse/ClickHouse/issues/14678). [#21264](https://github.com/ClickHouse/ClickHouse/pull/21264) ([alesapin](https://github.com/alesapin)).
* Fix error `Bad cast from type ... to DB::ColumnLowCardinality` while inserting into a table with `LowCardinality` column from `Values` format. Fixes [#21140](https://github.com/ClickHouse/ClickHouse/issues/21140). [#21357](https://github.com/ClickHouse/ClickHouse/pull/21357) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix a deadlock in `ALTER DELETE` mutations for non replicated MergeTree table engines when the predicate contains the table itself. Fixes [#20558](https://github.com/ClickHouse/ClickHouse/issues/20558). [#21477](https://github.com/ClickHouse/ClickHouse/pull/21477) ([alesapin](https://github.com/alesapin)).
* Fix SIGSEGV for distributed queries on failures. [#21434](https://github.com/ClickHouse/ClickHouse/pull/21434) ([Azat Khuzhin](https://github.com/azat)).
* Now `ALTER MODIFY COLUMN` queries will correctly affect changes in partition key, skip indices, TTLs, and so on. Fixes [#13675](https://github.com/ClickHouse/ClickHouse/issues/13675). [#21334](https://github.com/ClickHouse/ClickHouse/pull/21334) ([alesapin](https://github.com/alesapin)).
* Fix bug with `join_use_nulls` and joining `TOTALS` from subqueries. This closes [#19362](https://github.com/ClickHouse/ClickHouse/issues/19362) and [#21137](https://github.com/ClickHouse/ClickHouse/issues/21137). [#21248](https://github.com/ClickHouse/ClickHouse/pull/21248) ([vdimir](https://github.com/vdimir)).
* Fix crash in `EXPLAIN` for query with `UNION`. Fixes [#20876](https://github.com/ClickHouse/ClickHouse/issues/20876), [#21170](https://github.com/ClickHouse/ClickHouse/issues/21170). [#21246](https://github.com/ClickHouse/ClickHouse/pull/21246) ([flynn](https://github.com/ucasFL)).
* Now mutations are allowed only for table engines that support them (MergeTree family, Memory, MaterializedView). Other engines will report a more clear error. Fixes [#21168](https://github.com/ClickHouse/ClickHouse/issues/21168). [#21183](https://github.com/ClickHouse/ClickHouse/pull/21183) ([alesapin](https://github.com/alesapin)).
* Fixes [#21112](https://github.com/ClickHouse/ClickHouse/issues/21112). Fixed bug that could cause duplicates with insert query (if one of the callbacks came a little too late). [#21138](https://github.com/ClickHouse/ClickHouse/pull/21138) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix `input_format_null_as_default` to take effect when types are nullable. This fixes [#21116](https://github.com/ClickHouse/ClickHouse/issues/21116). [#21121](https://github.com/ClickHouse/ClickHouse/pull/21121) ([Amos Bird](https://github.com/amosbird)).
* Fix bug related to casting `Tuple` to `Map`. Closes [#21029](https://github.com/ClickHouse/ClickHouse/issues/21029). [#21120](https://github.com/ClickHouse/ClickHouse/pull/21120) ([hexiaoting](https://github.com/hexiaoting)).
* Fix the metadata leak when the Replicated*MergeTree with a custom (non-default) ZooKeeper cluster is dropped. [#21119](https://github.com/ClickHouse/ClickHouse/pull/21119) ([fastio](https://github.com/fastio)).
* Fix type mismatch issue when using LowCardinality keys in joinGet. This fixes [#21114](https://github.com/ClickHouse/ClickHouse/issues/21114). [#21117](https://github.com/ClickHouse/ClickHouse/pull/21117) ([Amos Bird](https://github.com/amosbird)).
* Fix `default_replica_path` and `default_replica_name` values being useless on the `Replicated(*)MergeTree` engine when the engine needs other parameters specified. [#21060](https://github.com/ClickHouse/ClickHouse/pull/21060) ([mxzlxy](https://github.com/mxzlxy)).
* Out of bound memory access was possible when formatting specifically crafted out of range value of type `DateTime64`. This closes [#20494](https://github.com/ClickHouse/ClickHouse/issues/20494). This closes [#20543](https://github.com/ClickHouse/ClickHouse/issues/20543). [#21023](https://github.com/ClickHouse/ClickHouse/pull/21023) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Block parallel insertions into storage join. [#21009](https://github.com/ClickHouse/ClickHouse/pull/21009) ([vdimir](https://github.com/vdimir)).
* Fixed behaviour, when `ALTER MODIFY COLUMN` created mutation, that will knowingly fail. [#21007](https://github.com/ClickHouse/ClickHouse/pull/21007) ([Anton Popov](https://github.com/CurtizJ)).
* Closes [#9969](https://github.com/ClickHouse/ClickHouse/issues/9969). Fixed Brotli http compression error, which was reproduced for large data sizes, slightly complicated structures, and JSON output format. Update Brotli to the latest version to include the fix for "rare access to uninitialized data in ring-buffer". [#20991](https://github.com/ClickHouse/ClickHouse/pull/20991) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix 'Empty task was returned from async task queue' on query cancellation. [#20881](https://github.com/ClickHouse/ClickHouse/pull/20881) ([Azat Khuzhin](https://github.com/azat)).
* `USE database;` query did not work when using MySQL 5.7 client to connect to ClickHouse server, it's fixed. Fixes [#18926](https://github.com/ClickHouse/ClickHouse/issues/18926). [#20878](https://github.com/ClickHouse/ClickHouse/pull/20878) ([tavplubix](https://github.com/tavplubix)).
* Fix usage of `-Distinct` combinator with `-State` combinator in aggregate functions. [#20866](https://github.com/ClickHouse/ClickHouse/pull/20866) ([Anton Popov](https://github.com/CurtizJ)).
* Fix subquery with union distinct and limit clause. close [#20597](https://github.com/ClickHouse/ClickHouse/issues/20597). [#20610](https://github.com/ClickHouse/ClickHouse/pull/20610) ([flynn](https://github.com/ucasFL)).
* Fixed inconsistent behavior of dictionary in case of queries where we look for absent keys in dictionary. [#20578](https://github.com/ClickHouse/ClickHouse/pull/20578) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix the number of threads for scalar subqueries and subqueries for index (after [#19007](https://github.com/ClickHouse/ClickHouse/issues/19007) single thread was always used). Fixes [#20457](https://github.com/ClickHouse/ClickHouse/issues/20457), [#20512](https://github.com/ClickHouse/ClickHouse/issues/20512). [#20550](https://github.com/ClickHouse/ClickHouse/pull/20550) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix crash which could happen if an unknown packet was received from a remote query (was introduced in [#17868](https://github.com/ClickHouse/ClickHouse/issues/17868)). [#20547](https://github.com/ClickHouse/ClickHouse/pull/20547) ([Azat Khuzhin](https://github.com/azat)).
* Add proper checks while parsing directory names for async INSERT (fixes SIGSEGV). [#20498](https://github.com/ClickHouse/ClickHouse/pull/20498) ([Azat Khuzhin](https://github.com/azat)).
* Fix function `transform` does not work properly for floating point keys. Closes [#20460](https://github.com/ClickHouse/ClickHouse/issues/20460). [#20479](https://github.com/ClickHouse/ClickHouse/pull/20479) ([flynn](https://github.com/ucasFL)).
* Fix infinite loop when propagating WITH aliases to subqueries. This fixes [#20388](https://github.com/ClickHouse/ClickHouse/issues/20388). [#20476](https://github.com/ClickHouse/ClickHouse/pull/20476) ([Amos Bird](https://github.com/amosbird)).
* Fix abnormal server termination when http client goes away. [#20464](https://github.com/ClickHouse/ClickHouse/pull/20464) ([Azat Khuzhin](https://github.com/azat)).
* Fix `LOGICAL_ERROR` for `join_use_nulls=1` when JOIN contains const from SELECT. [#20461](https://github.com/ClickHouse/ClickHouse/pull/20461) ([Azat Khuzhin](https://github.com/azat)).
* Check if table function `view` is used in expression list and throw an error. This fixes [#20342](https://github.com/ClickHouse/ClickHouse/issues/20342). [#20350](https://github.com/ClickHouse/ClickHouse/pull/20350) ([Amos Bird](https://github.com/amosbird)).
* Avoid invalid dereference in RANGE_HASHED() dictionary. [#20345](https://github.com/ClickHouse/ClickHouse/pull/20345) ([Azat Khuzhin](https://github.com/azat)).
* Fix null dereference with `join_use_nulls=1`. [#20344](https://github.com/ClickHouse/ClickHouse/pull/20344) ([Azat Khuzhin](https://github.com/azat)).
* Fix incorrect result of binary operations between two constant decimals of different scale. Fixes [#20283](https://github.com/ClickHouse/ClickHouse/issues/20283). [#20339](https://github.com/ClickHouse/ClickHouse/pull/20339) ([Maksim Kita](https://github.com/kitaisreal)).
* Fix too often retries of failed background tasks for `ReplicatedMergeTree` table engines family. This could lead to too verbose logging and increased CPU load. Fixes [#20203](https://github.com/ClickHouse/ClickHouse/issues/20203). [#20335](https://github.com/ClickHouse/ClickHouse/pull/20335) ([alesapin](https://github.com/alesapin)).
* Restrict to `DROP` or `RENAME` version column of `*CollapsingMergeTree` and `ReplacingMergeTree` table engines. [#20300](https://github.com/ClickHouse/ClickHouse/pull/20300) ([alesapin](https://github.com/alesapin)).
* Fixed the behavior when in case of broken JSON we tried to read the whole file into memory which leads to exception from the allocator. Fixes [#19719](https://github.com/ClickHouse/ClickHouse/issues/19719). [#20286](https://github.com/ClickHouse/ClickHouse/pull/20286) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix exception during vertical merge for `MergeTree` table engines family which don't allow to perform vertical merges. Fixes [#20259](https://github.com/ClickHouse/ClickHouse/issues/20259). [#20279](https://github.com/ClickHouse/ClickHouse/pull/20279) ([alesapin](https://github.com/alesapin)).
* Fix rare server crash on config reload during the shutdown. Fixes [#19689](https://github.com/ClickHouse/ClickHouse/issues/19689). [#20224](https://github.com/ClickHouse/ClickHouse/pull/20224) ([alesapin](https://github.com/alesapin)).
* Fix CTE when using in INSERT SELECT. This fixes [#20187](https://github.com/ClickHouse/ClickHouse/issues/20187), fixes [#20195](https://github.com/ClickHouse/ClickHouse/issues/20195). [#20211](https://github.com/ClickHouse/ClickHouse/pull/20211) ([Amos Bird](https://github.com/amosbird)).
* Fixes [#19314](https://github.com/ClickHouse/ClickHouse/issues/19314). [#20156](https://github.com/ClickHouse/ClickHouse/pull/20156) ([Ivan](https://github.com/abyss7)).
* Fix `toMinute` function to handle special timezones correctly. [#20149](https://github.com/ClickHouse/ClickHouse/pull/20149) ([keenwolf](https://github.com/keen-wolf)).
* Fix server crash after query with `if` function with `Tuple` type of then/else branches result. `Tuple` type must contain `Array` or another complex type. Fixes [#18356](https://github.com/ClickHouse/ClickHouse/issues/18356). [#20133](https://github.com/ClickHouse/ClickHouse/pull/20133) ([alesapin](https://github.com/alesapin)).
* The `MongoDB` table engine now establishes connection only when it's going to read data. `ATTACH TABLE` won't try to connect anymore. [#20110](https://github.com/ClickHouse/ClickHouse/pull/20110) ([Vitaly Baranov](https://github.com/vitlibar)).
* Bugfix in StorageJoin. [#20079](https://github.com/ClickHouse/ClickHouse/pull/20079) ([vdimir](https://github.com/vdimir)).
* Fix the case when calculating modulo of division of a negative number by a small divisor, where the resulting data type was not large enough to accommodate the negative result. This closes [#20052](https://github.com/ClickHouse/ClickHouse/issues/20052). [#20067](https://github.com/ClickHouse/ClickHouse/pull/20067) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* MaterializeMySQL: Fix replication for statements that update several tables. [#20066](https://github.com/ClickHouse/ClickHouse/pull/20066) ([Håvard Kvålen](https://github.com/havardk)).
* Prevent "Connection refused" in docker during initialization script execution. [#20012](https://github.com/ClickHouse/ClickHouse/pull/20012) ([filimonov](https://github.com/filimonov)).
* `EmbeddedRocksDB` is an experimental storage. Fix the issue with lack of proper type checking. Simplified code. This closes [#19967](https://github.com/ClickHouse/ClickHouse/issues/19967). [#19972](https://github.com/ClickHouse/ClickHouse/pull/19972) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix a segfault in function `fromModifiedJulianDay` when the argument type is `Nullable(T)` for any integral types other than Int32. [#19959](https://github.com/ClickHouse/ClickHouse/pull/19959) ([PHO](https://github.com/depressed-pho)).
* BloomFilter index crash fix. Fixes [#19757](https://github.com/ClickHouse/ClickHouse/issues/19757). [#19884](https://github.com/ClickHouse/ClickHouse/pull/19884) ([Maksim Kita](https://github.com/kitaisreal)).
* Deadlock was possible if system.text_log is enabled. This fixes [#19874](https://github.com/ClickHouse/ClickHouse/issues/19874). [#19875](https://github.com/ClickHouse/ClickHouse/pull/19875) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix starting the server with tables having default expressions containing dictGet(). Allow getting return type of dictGet() without loading dictionary. [#19805](https://github.com/ClickHouse/ClickHouse/pull/19805) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix clickhouse-client abort exception while executing only `select`. [#19790](https://github.com/ClickHouse/ClickHouse/pull/19790) ([taiyang-li](https://github.com/taiyang-li)).
* Fix a bug that moving pieces to destination table may failed in case of launching multiple clickhouse-copiers. [#19743](https://github.com/ClickHouse/ClickHouse/pull/19743) ([madianjun](https://github.com/mdianjun)).
* Background thread which executes `ON CLUSTER` queries might hang waiting for dropped replicated table to do something. It's fixed. [#19684](https://github.com/ClickHouse/ClickHouse/pull/19684) ([yiguolei](https://github.com/yiguolei)).
#### Build/Testing/Packaging Improvement
* Allow building ClickHouse with AVX-2 enabled globally. It gives slight performance benefits on modern CPUs. Not recommended for production and will not be supported as an official build for now. [#20180](https://github.com/ClickHouse/ClickHouse/pull/20180) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Fix some of the issues found by Coverity. See [#19964](https://github.com/ClickHouse/ClickHouse/issues/19964). [#20010](https://github.com/ClickHouse/ClickHouse/pull/20010) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Allow starting up with a modified binary under gdb. In previous versions, if you set a breakpoint in gdb before start, the server would refuse to start up due to a failed integrity check. [#21258](https://github.com/ClickHouse/ClickHouse/pull/21258) ([alexey-milovidov](https://github.com/alexey-milovidov)).
* Add a test for different compression methods in Kafka. [#21111](https://github.com/ClickHouse/ClickHouse/pull/21111) ([filimonov](https://github.com/filimonov)).
* Fix a port clash in the test_storage_kerberized_hdfs test. [#19974](https://github.com/ClickHouse/ClickHouse/pull/19974) ([Ilya Yatsishin](https://github.com/qoega)).
* Print `stdout` and `stderr` to the log when failing to start Docker in integration tests. Before this PR there was a very short error message in this case, which didn't help investigate the problem. [#20631](https://github.com/ClickHouse/ClickHouse/pull/20631) ([Vitaly Baranov](https://github.com/vitlibar)).
## ClickHouse release 21.2
### ClickHouse release v21.2.2.8-stable, 2021-02-07
@ -189,6 +919,7 @@
* Allow using extended integer types (`Int128`, `Int256`, `UInt256`) in `avg` and `avgWeighted` functions. Also allow using different types (integer, decimal, floating point) for value and for weight in `avgWeighted` function. This is a backward-incompatible change: now the `avg` and `avgWeighted` functions always return `Float64` (as documented). Before this change the return type for `Decimal` arguments was also `Decimal`. [#15419](https://github.com/ClickHouse/ClickHouse/pull/15419) ([Mike](https://github.com/myrrc)).
* Expression `toUUID(N)` no longer works. Replace with `toUUID('00000000-0000-0000-0000-000000000000')`. This change is motivated by non-obvious results of `toUUID(N)` where N is non-zero.
* SSL certificates with incorrect "key usage" are rejected. In previous versions they used to work. See [#19262](https://github.com/ClickHouse/ClickHouse/issues/19262).
* `incl` references to the substitutions file (`/etc/metrika.xml`) were removed from the default config (`<remote_servers>`, `<zookeeper>`, `<macros>`, `<compression>`, `<networks>`). If you were using the substitutions file and were relying on those implicit references, you should put them back manually and explicitly, by adding the corresponding sections with `incl="..."` attributes before the update (see the sketch below). See [#18740](https://github.com/ClickHouse/ClickHouse/pull/18740) ([alexey-milovidov](https://github.com/alexey-milovidov)).
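A minimal sketch of the sections to re-add, assuming the historical default substitution names (the names are an assumption; check your previous `config.xml` for the exact values):
```
<remote_servers incl="clickhouse_remote_servers" />
<zookeeper incl="zookeeper-servers" optional="true" />
<macros incl="macros" optional="true" />
```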
#### New Feature

View File

@ -36,9 +36,11 @@ option(FAIL_ON_UNSUPPORTED_OPTIONS_COMBINATION
if(FAIL_ON_UNSUPPORTED_OPTIONS_COMBINATION)
set(RECONFIGURE_MESSAGE_LEVEL FATAL_ERROR)
else()
set(RECONFIGURE_MESSAGE_LEVEL STATUS)
set(RECONFIGURE_MESSAGE_LEVEL WARNING)
endif()
enable_language(C CXX ASM)
include (cmake/arch.cmake)
include (cmake/target.cmake)
include (cmake/tools.cmake)
@ -66,17 +68,30 @@ endif ()
include (cmake/find/ccache.cmake)
option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling" OFF)
# Take care to add prlimit in command line before ccache, or else ccache thinks that
# prlimit is compiler, and clang++ is its input file, and refuses to work with
# multiple inputs, e.g in ccache log:
# [2021-03-31T18:06:32.655327 36900] Command line: /usr/bin/ccache prlimit --as=10000000000 --data=5000000000 --cpu=600 /usr/bin/clang++-11 - ...... std=gnu++2a -MD -MT src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o -MF src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o.d -o src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o -c ../src/Storages/MergeTree/IMergeTreeDataPart.cpp
#
# [2021-03-31T18:06:32.656704 36900] Multiple input files: /usr/bin/clang++-11 and ../src/Storages/MergeTree/IMergeTreeDataPart.cpp
#
# Another way would be to use --ccache-skip option before clang++-11 to make
# ccache ignore it.
option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling." OFF)
if (ENABLE_CHECK_HEAVY_BUILDS)
# set DATA (since RSS does not work since 2.6.x+) to 2G
set (RLIMIT_DATA 5000000000)
# set VIRT (RLIMIT_AS) to 10G (DATA*10)
set (RLIMIT_AS 10000000000)
# set CPU time limit to 600 seconds
set (RLIMIT_CPU 600)
# gcc10/gcc10/clang -fsanitize=memory is too heavy
if (SANITIZE STREQUAL "memory" OR COMPILER_GCC)
set (RLIMIT_DATA 10000000000)
endif()
set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=600)
set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=${RLIMIT_CPU} ${CMAKE_CXX_COMPILER_LAUNCHER})
endif ()
if (NOT CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "None")
@ -152,10 +167,10 @@ endif ()
# If turned `ON`, assumes the user has either the system GTest library or the bundled one.
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
# Only for Linux, x86_64.
# Implies ${ENABLE_FASTMEMCPY}
if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
# Only for Linux, x86_64 or aarch64.
option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
elseif(GLIBC_COMPATIBILITY)
message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")
@ -168,24 +183,37 @@ endif ()
# Make sure the final executable has symbols exported
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic")
if (OS_LINUX)
find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-11" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy")
if (OBJCOPY_PATH)
message(STATUS "Using objcopy: ${OBJCOPY_PATH}.")
find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-12" "llvm-objcopy-11" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy")
if (ARCH_AMD64)
set(OBJCOPY_ARCH_OPTIONS -O elf64-x86-64 -B i386)
elseif (ARCH_AARCH64)
set(OBJCOPY_ARCH_OPTIONS -O elf64-aarch64 -B aarch64)
if (NOT OBJCOPY_PATH AND OS_DARWIN)
find_program (BREW_PATH NAMES "brew")
if (BREW_PATH)
execute_process (COMMAND ${BREW_PATH} --prefix llvm ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE LLVM_PREFIX)
if (LLVM_PREFIX)
find_program (OBJCOPY_PATH NAMES "llvm-objcopy" PATHS "${LLVM_PREFIX}/bin" NO_DEFAULT_PATH)
endif ()
if (NOT OBJCOPY_PATH)
execute_process (COMMAND ${BREW_PATH} --prefix binutils ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE BINUTILS_PREFIX)
if (BINUTILS_PREFIX)
find_program (OBJCOPY_PATH NAMES "objcopy" PATHS "${BINUTILS_PREFIX}/bin" NO_DEFAULT_PATH)
endif ()
endif ()
else ()
message(FATAL_ERROR "Cannot find objcopy.")
endif ()
endif ()
if (OBJCOPY_PATH)
message (STATUS "Using objcopy: ${OBJCOPY_PATH}")
else ()
message (FATAL_ERROR "Cannot find objcopy.")
endif ()
if (OS_DARWIN)
set(WHOLE_ARCHIVE -all_load)
set(NO_WHOLE_ARCHIVE -noall_load)
# The `-all_load` flag forces loading of all symbols from all libraries,
# and leads to multiply-defined symbols. This flag allows force loading
# from a _specific_ library, which is what we need.
set(WHOLE_ARCHIVE -force_load)
# The `-noall_load` flag is the default and now obsolete.
set(NO_WHOLE_ARCHIVE "")
else ()
set(WHOLE_ARCHIVE --whole-archive)
set(NO_WHOLE_ARCHIVE --no-whole-archive)
@ -241,35 +269,52 @@ else()
message(STATUS "Disabling compiler -pipe option (have only ${AVAILABLE_PHYSICAL_MEMORY} mb of memory)")
endif()
if(NOT DISABLE_CPU_OPTIMIZE)
include(cmake/cpu_features.cmake)
endif()
include(cmake/cpu_features.cmake)
option(ARCH_NATIVE "Add -march=native compiler flag")
option(ARCH_NATIVE "Add -march=native compiler flag. This makes your binaries non-portable but more performant code may be generated.")
if (ARCH_NATIVE)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
endif ()
if (COMPILER_GCC OR COMPILER_CLANG)
# to make numeric_limits<__int128> works with GCC
set (_CXX_STANDARD "gnu++2a")
else()
set (_CXX_STANDARD "c++2a")
endif()
# Asynchronous unwind tables are needed for Query Profiler.
# They are already by default on some platforms but possibly not on all platforms.
# Enable it explicitly.
set (COMPILER_FLAGS "${COMPILER_FLAGS} -fasynchronous-unwind-tables")
# cmake < 3.12 doesn't support 20. We'll set CMAKE_CXX_FLAGS for now
# set (CMAKE_CXX_STANDARD 20)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}")
if (${CMAKE_VERSION} VERSION_LESS "3.12.4")
# CMake < 3.12 doesn't support setting 20 as a C++ standard version.
# We will add C++ standard controlling flag in CMAKE_CXX_FLAGS manually for now.
set (CMAKE_CXX_EXTENSIONS 0) # https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html#prop_tgt:CXX_EXTENSIONS
set (CMAKE_CXX_STANDARD_REQUIRED ON)
if (COMPILER_GCC OR COMPILER_CLANG)
# to make numeric_limits<__int128> works with GCC
set (_CXX_STANDARD "gnu++2a")
else ()
set (_CXX_STANDARD "c++2a")
endif ()
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}")
else ()
set (CMAKE_CXX_STANDARD 20)
set (CMAKE_CXX_EXTENSIONS ON) # Same as gnu++2a (ON) vs c++2a (OFF): https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html
set (CMAKE_CXX_STANDARD_REQUIRED ON)
endif ()
set (CMAKE_C_STANDARD 11)
set (CMAKE_C_EXTENSIONS ON)
set (CMAKE_C_STANDARD_REQUIRED ON)
if (COMPILER_GCC OR COMPILER_CLANG)
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
endif ()
# falign-functions=32 prevents random performance regressions with code changes, thus providing more stable
# benchmarks.
if (COMPILER_GCC OR COMPILER_CLANG)
set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
endif ()
# Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
@ -331,7 +376,7 @@ if (COMPILER_CLANG)
endif ()
# Always prefer llvm tools when using clang. For instance, we cannot use GNU ar when llvm LTO is enabled
find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
if (LLVM_AR_PATH)
message(STATUS "Using llvm-ar: ${LLVM_AR_PATH}.")
@ -340,7 +385,7 @@ if (COMPILER_CLANG)
message(WARNING "Cannot find llvm-ar. System ar will be used instead. It does not work with ThinLTO.")
endif ()
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9" "llvm-ranlib-8")
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9" "llvm-ranlib-8")
if (LLVM_RANLIB_PATH)
message(STATUS "Using llvm-ranlib: ${LLVM_RANLIB_PATH}.")
@ -457,6 +502,7 @@ find_contrib_lib(double-conversion) # Must be before parquet
include (cmake/find/ssl.cmake)
include (cmake/find/ldap.cmake) # after ssl
include (cmake/find/icu.cmake)
include (cmake/find/xz.cmake)
include (cmake/find/zlib.cmake)
include (cmake/find/zstd.cmake)
include (cmake/find/ltdl.cmake) # for odbc
@ -467,10 +513,10 @@ include (cmake/find/krb5.cmake)
include (cmake/find/libgsasl.cmake)
include (cmake/find/cyrus-sasl.cmake)
include (cmake/find/rdkafka.cmake)
include (cmake/find/libuv.cmake) # for amqpcpp and cassandra
include (cmake/find/amqpcpp.cmake)
include (cmake/find/capnp.cmake)
include (cmake/find/llvm.cmake)
include (cmake/find/termcap.cmake) # for external static llvm
include (cmake/find/h3.cmake)
include (cmake/find/libxml2.cmake)
include (cmake/find/brotli.cmake)
@ -489,10 +535,13 @@ include (cmake/find/fast_float.cmake)
include (cmake/find/rapidjson.cmake)
include (cmake/find/fastops.cmake)
include (cmake/find/odbc.cmake)
include (cmake/find/nanodbc.cmake)
include (cmake/find/sqlite.cmake)
include (cmake/find/rocksdb.cmake)
include (cmake/find/libpqxx.cmake)
include (cmake/find/nuraft.cmake)
include (cmake/find/yaml-cpp.cmake)
include (cmake/find/s2geometry.cmake)
if(NOT USE_INTERNAL_PARQUET_LIBRARY)
set (ENABLE_ORC OFF CACHE INTERNAL "")
@ -504,6 +553,7 @@ include (cmake/find/msgpack.cmake)
include (cmake/find/cassandra.cmake)
include (cmake/find/sentry.cmake)
include (cmake/find/stats.cmake)
include (cmake/find/datasketches.cmake)
set (USE_INTERNAL_CITYHASH_LIBRARY ON CACHE INTERNAL "")
find_contrib_lib(cityhash)
@ -536,7 +586,7 @@ macro (add_executable target)
# explicitly acquire and interpose malloc symbols by clickhouse_malloc
# if GLIBC_COMPATIBILITY is ON and ENABLE_THINLTO is on then provide memcpy symbol explicitly to neutralize thinlto's libcall generation.
if (GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc> $<TARGET_OBJECTS:clickhouse_memcpy>)
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc> $<TARGET_OBJECTS:memcpy>)
else ()
_add_executable (${ARGV} $<TARGET_OBJECTS:clickhouse_malloc>)
endif ()
@ -557,6 +607,9 @@ include_directories(${ConfigIncludePath})
# Add as many warnings as possible for our own code.
include (cmake/warnings.cmake)
# Check if needed compiler flags are supported
include (cmake/check_flags.cmake)
add_subdirectory (base)
add_subdirectory (src)
add_subdirectory (programs)

View File

@ -8,7 +8,7 @@ ClickHouse® is an open-source column-oriented database management system that a
* [Tutorial](https://clickhouse.tech/docs/en/getting_started/tutorial/) shows how to set up and query a small ClickHouse cluster.
* [Documentation](https://clickhouse.tech/docs/en/) provides more in-depth information.
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-ly9m4w1x-6j7x5Ts_pQZqrctAbRZ3cg) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-rxm3rdrk-lIUmhLC3V8WTaL0TGxsOmg) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
* [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
* [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.

View File

@ -8,6 +8,7 @@ add_subdirectory (loggers)
add_subdirectory (pcg-random)
add_subdirectory (widechar_width)
add_subdirectory (readpassphrase)
add_subdirectory (bridge)
if (USE_MYSQL)
add_subdirectory (mysqlxx)

View File

@ -0,0 +1,13 @@
add_library (bridge
IBridge.cpp
)
target_include_directories (daemon PUBLIC ..)
target_link_libraries (bridge
PRIVATE
daemon
dbms
Poco::Data
Poco::Data::ODBC
)

266
base/bridge/IBridge.cpp Normal file
View File

@ -0,0 +1,266 @@
#include "IBridge.h"
#include <boost/program_options.hpp>
#include <Poco/Net/NetException.h>
#include <Poco/Util/HelpFormatter.h>
#include <common/logger_useful.h>
#include <common/range.h>
#include <Common/StringUtils/StringUtils.h>
#include <Common/SensitiveDataMasker.h>
#include <common/errnoToString.h>
#include <IO/ReadHelpers.h>
#include <Formats/registerFormats.h>
#include <Server/HTTP/HTTPServer.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/WriteHelpers.h>
#include <sys/time.h>
#include <sys/resource.h>
#if USE_ODBC
# include <Poco/Data/ODBC/Connector.h>
#endif
namespace DB
{
namespace ErrorCodes
{
extern const int ARGUMENT_OUT_OF_BOUND;
}
namespace
{
Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log)
{
Poco::Net::SocketAddress socket_address;
try
{
socket_address = Poco::Net::SocketAddress(host, port);
}
catch (const Poco::Net::DNSException & e)
{
const auto code = e.code();
if (code == EAI_FAMILY
#if defined(EAI_ADDRFAMILY)
|| code == EAI_ADDRFAMILY
#endif
)
{
LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. If it is an IPv6 address and your host has disabled IPv6, then consider to specify IPv4 address to listen in <listen_host> element of configuration file. Example: <listen_host>0.0.0.0</listen_host>", host, e.code(), e.message());
}
throw;
}
return socket_address;
}
Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, Poco::Logger * log)
{
auto address = makeSocketAddress(host, port, log);
#if POCO_VERSION < 0x01080000
socket.bind(address, /* reuseAddress = */ true);
#else
socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ false);
#endif
socket.listen(/* backlog = */ 64);
return address;
}
}
void IBridge::handleHelp(const std::string &, const std::string &)
{
Poco::Util::HelpFormatter help_formatter(options());
help_formatter.setCommand(commandName());
help_formatter.setHeader("HTTP-proxy for odbc requests");
help_formatter.setUsage("--http-port <port>");
help_formatter.format(std::cerr);
stopOptionsProcessing();
}
void IBridge::defineOptions(Poco::Util::OptionSet & options)
{
options.addOption(
Poco::Util::Option("http-port", "", "port to listen").argument("http-port", true) .binding("http-port"));
options.addOption(
Poco::Util::Option("listen-host", "", "hostname or address to listen, default 127.0.0.1").argument("listen-host").binding("listen-host"));
options.addOption(
Poco::Util::Option("http-timeout", "", "http timeout for socket, default 1800").argument("http-timeout").binding("http-timeout"));
options.addOption(
Poco::Util::Option("max-server-connections", "", "max connections to server, default 1024").argument("max-server-connections").binding("max-server-connections"));
options.addOption(
Poco::Util::Option("keep-alive-timeout", "", "keepalive timeout, default 10").argument("keep-alive-timeout").binding("keep-alive-timeout"));
options.addOption(
Poco::Util::Option("log-level", "", "sets log level, default info") .argument("log-level").binding("logger.level"));
options.addOption(
Poco::Util::Option("log-path", "", "log path for all logs, default console").argument("log-path").binding("logger.log"));
options.addOption(
Poco::Util::Option("err-log-path", "", "err log path for all logs, default no").argument("err-log-path").binding("logger.errorlog"));
options.addOption(
Poco::Util::Option("stdout-path", "", "stdout log path, default console").argument("stdout-path").binding("logger.stdout"));
options.addOption(
Poco::Util::Option("stderr-path", "", "stderr log path, default console").argument("stderr-path").binding("logger.stderr"));
using Me = std::decay_t<decltype(*this)>;
options.addOption(
Poco::Util::Option("help", "", "produce this help message").binding("help").callback(Poco::Util::OptionCallback<Me>(this, &Me::handleHelp)));
ServerApplication::defineOptions(options); // NOLINT Don't need complex BaseDaemon's .xml config
}
void IBridge::initialize(Application & self)
{
BaseDaemon::closeFDs();
is_help = config().has("help");
if (is_help)
return;
config().setString("logger", bridgeName());
/// Redirect stdout, stderr to specified files.
/// Some libraries and sanitizers write to stderr in case of errors.
const auto stdout_path = config().getString("logger.stdout", "");
if (!stdout_path.empty())
{
if (!freopen(stdout_path.c_str(), "a+", stdout))
throw Poco::OpenFileException("Cannot attach stdout to " + stdout_path);
/// Disable buffering for stdout.
setbuf(stdout, nullptr);
}
const auto stderr_path = config().getString("logger.stderr", "");
if (!stderr_path.empty())
{
if (!freopen(stderr_path.c_str(), "a+", stderr))
throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path);
/// Disable buffering for stderr.
setbuf(stderr, nullptr);
}
buildLoggers(config(), logger(), self.commandName());
BaseDaemon::logRevision();
log = &logger();
hostname = config().getString("listen-host", "127.0.0.1");
port = config().getUInt("http-port");
if (port > 0xFFFF)
throw Exception("Out of range 'http-port': " + std::to_string(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND);
http_timeout = config().getUInt64("http-timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT);
max_server_connections = config().getUInt("max-server-connections", 1024);
keep_alive_timeout = config().getUInt64("keep-alive-timeout", 10);
struct rlimit limit;
const UInt64 gb = 1024 * 1024 * 1024;
/// Set maximum RSS to 1 GiB.
limit.rlim_max = limit.rlim_cur = gb;
if (setrlimit(RLIMIT_RSS, &limit))
LOG_WARNING(log, "Unable to set maximum RSS to 1GB: {} (current rlim_cur={}, rlim_max={})",
errnoToString(errno), limit.rlim_cur, limit.rlim_max);
if (!getrlimit(RLIMIT_RSS, &limit))
LOG_INFO(log, "RSS limit: cur={}, max={}", limit.rlim_cur, limit.rlim_max);
try
{
const auto oom_score = toString(config().getUInt64("bridge_oom_score", 500));
WriteBufferFromFile buf("/proc/self/oom_score_adj");
buf.write(oom_score.data(), oom_score.size());
buf.close();
LOG_INFO(log, "OOM score is set to {}", oom_score);
}
catch (const Exception & e)
{
LOG_WARNING(log, "Failed to set OOM score, error: {}", e.what());
}
initializeTerminationAndSignalProcessing();
ServerApplication::initialize(self); // NOLINT
}
void IBridge::uninitialize()
{
BaseDaemon::uninitialize();
}
int IBridge::main(const std::vector<std::string> & /*args*/)
{
if (is_help)
return Application::EXIT_OK;
registerFormats();
LOG_INFO(log, "Starting up {} on host: {}, port: {}", bridgeName(), hostname, port);
Poco::Net::ServerSocket socket;
auto address = socketBindListen(socket, hostname, port, log);
socket.setReceiveTimeout(http_timeout);
socket.setSendTimeout(http_timeout);
Poco::ThreadPool server_pool(3, max_server_connections);
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
http_params->setTimeout(http_timeout);
http_params->setKeepAliveTimeout(keep_alive_timeout);
auto shared_context = Context::createShared();
auto context = Context::createGlobal(shared_context.get());
context->makeGlobalContext();
if (config().has("query_masking_rules"))
SensitiveDataMasker::setInstance(std::make_unique<SensitiveDataMasker>(config(), "query_masking_rules"));
auto server = HTTPServer(
context,
getHandlerFactoryPtr(context),
server_pool,
socket,
http_params);
SCOPE_EXIT({
LOG_DEBUG(log, "Received termination signal.");
LOG_DEBUG(log, "Waiting for current connections to close.");
server.stop();
for (size_t count : collections::range(1, 6))
{
if (server.currentConnections() == 0)
break;
LOG_DEBUG(log, "Waiting for {} connections, try {}", server.currentConnections(), count);
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
}
});
server.start();
LOG_INFO(log, "Listening http://{}", address.toString());
waitForTerminationRequest();
return Application::EXIT_OK;
}
}

51
base/bridge/IBridge.h Normal file
View File

@ -0,0 +1,51 @@
#pragma once
#include <Interpreters/Context.h>
#include <Server/HTTP/HTTPRequestHandlerFactory.h>
#include <daemon/BaseDaemon.h>
#include <Poco/Logger.h>
#include <Poco/Util/ServerApplication.h>
namespace DB
{
/// Base class for the clickhouse-odbc-bridge and clickhouse-library-bridge servers.
/// Listens for incoming HTTP POST and GET requests on the specified port and host.
/// Has two handlers: '/' for all incoming POST requests and '/ping' for GET requests about service status.
class IBridge : public BaseDaemon
{
public:
/// Define command line arguments
void defineOptions(Poco::Util::OptionSet & options) override;
protected:
using HandlerFactoryPtr = std::shared_ptr<HTTPRequestHandlerFactory>;
void initialize(Application & self) override;
void uninitialize() override;
int main(const std::vector<std::string> & args) override;
virtual std::string bridgeName() const = 0;
virtual HandlerFactoryPtr getHandlerFactoryPtr(ContextPtr context) const = 0;
size_t keep_alive_timeout;
private:
void handleHelp(const std::string &, const std::string &);
bool is_help;
std::string hostname;
size_t port;
std::string log_level;
size_t max_server_connections;
size_t http_timeout;
Poco::Logger * log;
};
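/// Illustrative subclass sketch (hypothetical name, shown for clarity):
///
///     class ExampleBridge : public IBridge
///     {
///     protected:
///         std::string bridgeName() const override { return "example-bridge"; }
///         HandlerFactoryPtr getHandlerFactoryPtr(ContextPtr context) const override;
///     };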
}

View File

@ -0,0 +1,156 @@
#pragma once
#include <cstdint>
#include <vector>
#include <chrono>
#include <mutex>
#include <condition_variable>
#include <common/defines.h>
#include <common/MoveOrCopyIfThrow.h>
/** Pool for limited size objects that cannot be used from different threads simultaneously.
* The main use case is to have fixed size of objects that can be reused in difference threads during their lifetime
* and have to be initialized on demand.
* Two main properties of the pool are the allocated objects size and the borrowed objects size.
* Allocated objects size is the number of objects currently allocated by the pool.
* Borrowed objects size is the number of objects currently borrowed by clients.
* If max_size == 0 then pool has unlimited size and objects will be allocated without limit.
*
* Pool provides the following strategy for borrowing an object:
* 1. If the pool has objects that can be borrowed, increase the borrowed objects size and return one of them.
* 2. If the pool's allocated objects size is lower than the max objects size, or the pool has unlimited size,
* allocate a new object, increase the borrowed objects size and return it.
* 3. If the pool is full, wait on a condition variable, with or without a timeout, until some object
* is returned to the pool.
*/
template <typename T>
class BorrowedObjectPool final
{
public:
explicit BorrowedObjectPool(size_t max_size_) : max_size(max_size_) {}
/// Borrow an object from the pool. If the pool is full and all objects were borrowed,
/// the calling thread will wait until some object is returned to the pool.
template <typename FactoryFunc>
void borrowObject(T & dest, FactoryFunc && func)
{
std::unique_lock<std::mutex> lock(objects_mutex);
if (!objects.empty())
{
dest = borrowFromObjects(lock);
return;
}
bool has_unlimited_size = (max_size == 0);
if (unlikely(has_unlimited_size) || allocated_objects_size < max_size)
{
dest = allocateObjectForBorrowing(lock, std::forward<FactoryFunc>(func));
return;
}
condition_variable.wait(lock, [this] { return !objects.empty(); });
dest = borrowFromObjects(lock);
}
/// Same as the borrowObject function, but waits with a timeout.
/// Returns true if an object was borrowed within the timeout.
template <typename FactoryFunc>
bool tryBorrowObject(T & dest, FactoryFunc && func, size_t timeout_in_milliseconds = 0)
{
std::unique_lock<std::mutex> lock(objects_mutex);
if (!objects.empty())
{
dest = borrowFromObjects(lock);
return true;
}
bool has_unlimited_size = (max_size == 0);
if (unlikely(has_unlimited_size) || allocated_objects_size < max_size)
{
dest = allocateObjectForBorrowing(lock, std::forward<FactoryFunc>(func));
return true;
}
bool wait_result = condition_variable.wait_for(lock, std::chrono::milliseconds(timeout_in_milliseconds), [this] { return !objects.empty(); });
if (wait_result)
dest = borrowFromObjects(lock);
return wait_result;
}
/// Return an object to the pool. The client must return the same object that was borrowed.
inline void returnObject(T && object_to_return)
{
std::unique_lock<std::mutex> lck(objects_mutex);
objects.emplace_back(std::move(object_to_return));
--borrowed_objects_size;
condition_variable.notify_one();
}
/// Max pool size
inline size_t maxSize() const
{
return max_size;
}
/// Size of objects allocated by the pool. If allocatedObjectsSize == maxSize, the pool is full.
inline size_t allocatedObjectsSize() const
{
std::unique_lock<std::mutex> lock(objects_mutex);
return allocated_objects_size;
}
/// Returns allocatedObjectsSize == maxSize
inline bool isFull() const
{
std::unique_lock<std::mutex> lock(objects_mutex);
return allocated_objects_size == max_size;
}
/// Borrowed objects size. If borrowedObjectsSize == allocatedObjectsSize and the pool is full,
/// then the client will wait during the borrowObject function call.
inline size_t borrowedObjectsSize() const
{
std::unique_lock<std::mutex> lock(objects_mutex);
return borrowed_objects_size;
}
private:
template <typename FactoryFunc>
inline T allocateObjectForBorrowing(const std::unique_lock<std::mutex> &, FactoryFunc && func)
{
++allocated_objects_size;
++borrowed_objects_size;
return std::forward<FactoryFunc>(func)();
}
inline T borrowFromObjects(const std::unique_lock<std::mutex> &)
{
T dst;
detail::moveOrCopyIfThrow(std::move(objects.back()), dst);
objects.pop_back();
++borrowed_objects_size;
return dst;
}
size_t max_size;
mutable std::mutex objects_mutex;
std::condition_variable condition_variable;
size_t allocated_objects_size = 0;
size_t borrowed_objects_size = 0;
std::vector<T> objects;
};
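/// Illustrative usage sketch (ConnectionPtr and createConnection are hypothetical):
///
///     BorrowedObjectPool<ConnectionPtr> pool(/* max_size_ */ 4);
///     ConnectionPtr connection;
///     pool.borrowObject(connection, [] { return createConnection(); });
///     /// ... use connection in this thread only ...
///     pool.returnObject(std::move(connection));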

View File

@ -29,7 +29,7 @@ elseif (ENABLE_READLINE)
endif ()
if (USE_DEBUG_HELPERS)
set (INCLUDE_DEBUG_HELPERS "-include ${ClickHouse_SOURCE_DIR}/base/common/iostream_debug_helpers.h")
set (INCLUDE_DEBUG_HELPERS "-include \"${ClickHouse_SOURCE_DIR}/base/common/iostream_debug_helpers.h\"")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
endif ()
@ -45,7 +45,11 @@ if (USE_INTERNAL_CCTZ)
set_source_files_properties(DateLUTImpl.cpp PROPERTIES COMPILE_DEFINITIONS USE_INTERNAL_CCTZ)
endif()
target_include_directories(common PUBLIC .. ${CMAKE_CURRENT_BINARY_DIR}/..)
target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..")
if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
endif()
# Allow explicit fallback to readline
if (NOT ENABLE_REPLXX AND ENABLE_READLINE)
@ -74,7 +78,6 @@ target_link_libraries (common
${CITYHASH_LIBRARIES}
boost::headers_only
boost::system
FastMemcpy
Poco::Net
Poco::Net::SSL
Poco::Util

View File

@ -152,7 +152,7 @@ const DateLUTImpl & DateLUT::getImplementation(const std::string & time_zone) co
auto it = impls.emplace(time_zone, nullptr).first;
if (!it->second)
it->second = std::make_unique<DateLUTImpl>(time_zone);
it->second = std::unique_ptr<DateLUTImpl>(new DateLUTImpl(time_zone));
return *it->second;
}

View File

@ -17,7 +17,7 @@ class DateLUT : private boost::noncopyable
{
public:
/// Return singleton DateLUTImpl instance for the default time zone.
static ALWAYS_INLINE const DateLUTImpl & instance()
static ALWAYS_INLINE const DateLUTImpl & instance() // -V1071
{
const auto & date_lut = getInstance();
return *date_lut.default_impl.load(std::memory_order_acquire);
@ -32,7 +32,6 @@ public:
return date_lut.getImplementation(time_zone);
}
static void setDefaultTimezone(const std::string & time_zone)
{
auto & date_lut = getInstance();

View File

@ -46,24 +46,41 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
if (&inside_main)
assert(inside_main);
size_t i = 0;
time_t start_of_day = 0;
cctz::time_zone cctz_time_zone;
if (!cctz::load_time_zone(time_zone, &cctz_time_zone))
throw Poco::Exception("Cannot load time zone " + time_zone_);
cctz::time_zone::absolute_lookup start_of_epoch_lookup = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(start_of_day));
offset_at_start_of_epoch = start_of_epoch_lookup.offset;
offset_is_whole_number_of_hours_everytime = true;
constexpr cctz::civil_day epoch{1970, 1, 1};
constexpr cctz::civil_day lut_start{DATE_LUT_MIN_YEAR, 1, 1};
time_t start_of_day;
cctz::civil_day date{1970, 1, 1};
/// Note: it's validated against all timezones in the system.
static_assert((epoch - lut_start) == daynum_offset_epoch);
offset_at_start_of_epoch = cctz_time_zone.lookup(cctz_time_zone.lookup(epoch).pre).offset;
offset_at_start_of_lut = cctz_time_zone.lookup(cctz_time_zone.lookup(lut_start).pre).offset;
offset_is_whole_number_of_hours_during_epoch = true;
cctz::civil_day date = lut_start;
UInt32 i = 0;
do
{
cctz::time_zone::civil_lookup lookup = cctz_time_zone.lookup(date);
start_of_day = std::chrono::system_clock::to_time_t(lookup.pre); /// Ambiguity is possible.
/// Ambiguity is possible if time was changed backwards at the midnight
/// or after midnight time has been changed back to midnight, for example one hour backwards at 01:00
/// or after midnight time has been changed to the previous day, for example two hours backwards at 01:00
/// Then midnight appears twice. Usually time change happens exactly at 00:00 or 01:00.
/// If transition did not involve previous day, we should use the first midnight as the start of the day,
/// otherwise it's better to use the second midnight.
std::chrono::time_point start_of_day_time_point = lookup.trans < lookup.post
? lookup.post /* Second midnight appears after transition, so there was a piece of previous day after transition */
: lookup.pre;
start_of_day = std::chrono::system_clock::to_time_t(start_of_day_time_point);
Values & values = lut[i];
values.year = date.year();
@ -72,7 +89,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
values.day_of_week = getDayOfWeek(date);
values.date = start_of_day;
assert(values.year >= DATE_LUT_MIN_YEAR && values.year <= DATE_LUT_MAX_YEAR);
assert(values.year >= DATE_LUT_MIN_YEAR && values.year <= DATE_LUT_MAX_YEAR + 1);
assert(values.month >= 1 && values.month <= 12);
assert(values.day_of_month >= 1 && values.day_of_month <= 31);
assert(values.day_of_week >= 1 && values.day_of_week <= 7);
@ -85,50 +102,42 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
else
values.days_in_month = i != 0 ? lut[i - 1].days_in_month : 31;
values.time_at_offset_change = 0;
values.amount_of_offset_change = 0;
values.time_at_offset_change_value = 0;
values.amount_of_offset_change_value = 0;
if (start_of_day % 3600)
offset_is_whole_number_of_hours_everytime = false;
if (offset_is_whole_number_of_hours_during_epoch && start_of_day > 0 && start_of_day % 3600)
offset_is_whole_number_of_hours_during_epoch = false;
/// If UTC offset was changed in previous day.
if (i != 0)
/// If UTC offset was changed this day.
/// Change in time zone without transition is possible, e.g. Moscow 1991 Sun, 31 Mar, 02:00 MSK to EEST
cctz::time_zone::civil_transition transition{};
if (cctz_time_zone.next_transition(start_of_day_time_point - std::chrono::seconds(1), &transition)
&& (cctz::civil_day(transition.from) == date || cctz::civil_day(transition.to) == date)
&& transition.from != transition.to)
{
auto amount_of_offset_change_at_prev_day = 86400 - (lut[i].date - lut[i - 1].date);
if (amount_of_offset_change_at_prev_day)
{
lut[i - 1].amount_of_offset_change = amount_of_offset_change_at_prev_day;
values.time_at_offset_change_value = (transition.from - cctz::civil_second(date)) / Values::OffsetChangeFactor;
values.amount_of_offset_change_value = (transition.to - transition.from) / Values::OffsetChangeFactor;
const auto utc_offset_at_beginning_of_day = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(lut[i - 1].date)).offset;
// std::cerr << time_zone << ", " << date << ": change from " << transition.from << " to " << transition.to << "\n";
// std::cerr << time_zone << ", " << date << ": change at " << values.time_at_offset_change() << " with " << values.amount_of_offset_change() << "\n";
/// Find a time (timestamp offset from beginning of day),
/// when UTC offset was changed. Search is performed with 15-minute granularity, assuming it is enough.
/// We don't support too large changes.
if (values.amount_of_offset_change_value > 24 * 4)
values.amount_of_offset_change_value = 24 * 4;
else if (values.amount_of_offset_change_value < -24 * 4)
values.amount_of_offset_change_value = -24 * 4;
time_t time_at_offset_change = 900;
while (time_at_offset_change < 86400)
{
auto utc_offset_at_current_time = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(
lut[i - 1].date + time_at_offset_change)).offset;
if (utc_offset_at_current_time != utc_offset_at_beginning_of_day)
break;
time_at_offset_change += 900;
}
lut[i - 1].time_at_offset_change = time_at_offset_change;
/// We doesn't support cases when time change results in switching to previous day.
if (static_cast<int>(lut[i - 1].time_at_offset_change) + static_cast<int>(lut[i - 1].amount_of_offset_change) < 0)
lut[i - 1].time_at_offset_change = -lut[i - 1].amount_of_offset_change;
}
/// We don't support cases when time change results in switching to previous day.
/// Shift the point of time change later.
if (values.time_at_offset_change_value + values.amount_of_offset_change_value < 0)
values.time_at_offset_change_value = -values.amount_of_offset_change_value;
}
/// Going to next day.
++date;
++i;
}
while (start_of_day <= DATE_LUT_MAX && i <= DATE_LUT_MAX_DAY_NUM);
while (i < DATE_LUT_SIZE && lut[i - 1].year <= DATE_LUT_MAX_YEAR);
/// Fill excessive part of lookup table. This is needed only to simplify handling of overflow cases.
while (i < DATE_LUT_SIZE)

File diff suppressed because it is too large

View File

@ -7,3 +7,8 @@
* See DateLUTImpl for usage examples.
*/
STRONG_TYPEDEF(UInt16, DayNum)
/** Represents the number of days since 1970-01-01, but in an extended range,
* for dates before 1970-01-01 and after 2105.
*/
STRONG_TYPEDEF(Int32, ExtendedDayNum)

View File

@ -0,0 +1,219 @@
#pragma once
#include <cstdint>
#include <cstddef>
#include <cstring>
#include <common/extended_types.h>
/// Allows checking the internals of an IEEE-754 floating point number.
template <typename T> struct FloatTraits;
template <>
struct FloatTraits<float>
{
using UInt = uint32_t;
static constexpr size_t bits = 32;
static constexpr size_t exponent_bits = 8;
static constexpr size_t mantissa_bits = bits - exponent_bits - 1;
};
template <>
struct FloatTraits<double>
{
using UInt = uint64_t;
static constexpr size_t bits = 64;
static constexpr size_t exponent_bits = 11;
static constexpr size_t mantissa_bits = bits - exponent_bits - 1;
};
/// x = sign * (2 ^ normalized_exponent) * (1 + mantissa * 2 ^ -mantissa_bits)
/// x = sign * (2 ^ normalized_exponent + mantissa * 2 ^ (normalized_exponent - mantissa_bits))
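/// Worked example (illustrative): the double 6.5 is 1.101b * 2^2,
/// so normalized_exponent() == 2 and mantissa() == 0b101 << (mantissa_bits - 3).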
template <typename T>
struct DecomposedFloat
{
using Traits = FloatTraits<T>;
DecomposedFloat(T x)
{
memcpy(&x_uint, &x, sizeof(x));
}
typename Traits::UInt x_uint;
bool is_negative() const
{
return x_uint >> (Traits::bits - 1);
}
/// Returns 0 for both +0. and -0.
int sign() const
{
return (exponent() == 0 && mantissa() == 0)
? 0
: (is_negative()
? -1
: 1);
}
uint16_t exponent() const
{
return (x_uint >> (Traits::mantissa_bits)) & (((1ull << (Traits::exponent_bits + 1)) - 1) >> 1);
}
int16_t normalized_exponent() const
{
return int16_t(exponent()) - ((1ull << (Traits::exponent_bits - 1)) - 1);
}
uint64_t mantissa() const
{
return x_uint & ((1ull << Traits::mantissa_bits) - 1);
}
int64_t mantissa_with_sign() const
{
return is_negative() ? -mantissa() : mantissa();
}
/// NOTE Probably floating point instructions can be better.
bool is_integer_in_representable_range() const
{
return x_uint == 0
|| (normalized_exponent() >= 0 /// The number is not less than one
/// The number is inside the range where every integer has exact representation in float
&& normalized_exponent() <= static_cast<int16_t>(Traits::mantissa_bits)
/// After multiplying by 2^exp, the fractional part becomes zero, means the number is integer
&& ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalized_exponent())) - 1)) == 0));
}
/// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic.
/// This function is generic, big integers (128, 256 bit) are supported as well.
/// Infinities are compared correctly. NaNs are treated similarly to infinities, so they can be less than all numbers.
/// (note that we need total order)
/// Returns -1, 0 or 1.
template <typename Int>
int compare(Int rhs) const
{
if (rhs == 0)
return sign();
/// Different signs
if (is_negative() && rhs > 0)
return -1;
if (!is_negative() && rhs < 0)
return 1;
/// Fractional number with magnitude less than one
if (normalized_exponent() < 0)
{
if (!is_negative())
return rhs > 0 ? -1 : 1;
else
return rhs >= 0 ? -1 : 1;
}
/// The case of the most negative integer
if constexpr (is_signed_v<Int>)
{
if (rhs == std::numeric_limits<Int>::lowest())
{
assert(is_negative());
if (normalized_exponent() < static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
return 1;
if (normalized_exponent() > static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
return -1;
if (mantissa() == 0)
return 0;
else
return -1;
}
}
/// Too large number: abs(float) > abs(rhs). Also the case with infinities and NaN.
if (normalized_exponent() >= static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>))
return is_negative() ? -1 : 1;
using UInt = std::conditional_t<(sizeof(Int) > sizeof(typename Traits::UInt)), make_unsigned_t<Int>, typename Traits::UInt>;
UInt uint_rhs = rhs < 0 ? -rhs : rhs;
/// Smaller octave: abs(rhs) < abs(float)
/// FYI, TIL: octave is also called "binade", https://en.wikipedia.org/wiki/Binade
if (uint_rhs < (static_cast<UInt>(1) << normalized_exponent()))
return is_negative() ? -1 : 1;
/// Larger octave: abs(rhs) > abs(float)
if (normalized_exponent() + 1 < static_cast<int16_t>(8 * sizeof(Int) - is_signed_v<Int>)
&& uint_rhs >= (static_cast<UInt>(1) << (normalized_exponent() + 1)))
return is_negative() ? 1 : -1;
/// The same octave
/// uint_rhs == 2 ^ normalized_exponent + mantissa * 2 ^ (normalized_exponent - mantissa_bits)
bool large_and_always_integer = normalized_exponent() >= static_cast<int16_t>(Traits::mantissa_bits);
UInt a = large_and_always_integer
? static_cast<UInt>(mantissa()) << (normalized_exponent() - Traits::mantissa_bits)
: static_cast<UInt>(mantissa()) >> (Traits::mantissa_bits - normalized_exponent());
UInt b = uint_rhs - (static_cast<UInt>(1) << normalized_exponent());
if (a < b)
return is_negative() ? 1 : -1;
if (a > b)
return is_negative() ? -1 : 1;
/// Float has no fractional part means that the numbers are equal.
if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalized_exponent())) - 1)) == 0)
return 0;
else
/// Float has fractional part means its abs value is larger.
return is_negative() ? -1 : 1;
}
template <typename Int>
bool equals(Int rhs) const
{
return compare(rhs) == 0;
}
template <typename Int>
bool notEquals(Int rhs) const
{
return compare(rhs) != 0;
}
template <typename Int>
bool less(Int rhs) const
{
return compare(rhs) < 0;
}
template <typename Int>
bool greater(Int rhs) const
{
return compare(rhs) > 0;
}
template <typename Int>
bool lessOrEquals(Int rhs) const
{
return compare(rhs) <= 0;
}
template <typename Int>
bool greaterOrEquals(Int rhs) const
{
return compare(rhs) >= 0;
}
};
using DecomposedFloat64 = DecomposedFloat<double>;
using DecomposedFloat32 = DecomposedFloat<float>;

View File

@ -0,0 +1,41 @@
#include <functional>
/** Adapts a functor to a static method where the functor is passed as context.
* The main use case is to convert a lambda into a function that can be passed into JIT code.
*/
template <typename Functor>
class FunctorToStaticMethodAdaptor : public FunctorToStaticMethodAdaptor<decltype(&Functor::operator())>
{
};
template <typename R, typename C, typename ...Args>
class FunctorToStaticMethodAdaptor<R (C::*)(Args...) const>
{
public:
static R call(C * ptr, Args &&... arguments)
{
return std::invoke(&C::operator(), ptr, std::forward<Args>(arguments)...);
}
static R unsafeCall(char * ptr, Args &&... arguments)
{
C * ptr_typed = reinterpret_cast<C*>(ptr);
return std::invoke(&C::operator(), ptr_typed, std::forward<Args>(arguments)...);
}
};
template <typename R, typename C, typename ...Args>
class FunctorToStaticMethodAdaptor<R (C::*)(Args...)>
{
public:
static R call(C * ptr, Args &&... arguments)
{
return std::invoke(&C::operator(), ptr, std::forward<Args>(arguments)...);
}
static R unsafeCall(char * ptr, Args &&... arguments)
{
C * ptr_typed = static_cast<C*>(ptr);
return std::invoke(&C::operator(), ptr_typed, std::forward<Args>(arguments)...);
}
};
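/// Illustrative usage sketch:
///
///     auto functor = [](int x) { return x * 2; };
///     using Adaptor = FunctorToStaticMethodAdaptor<decltype(functor)>;
///     int res = Adaptor::call(&functor, 3); /// res == 6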

View File

@ -92,20 +92,10 @@ public:
LocalDate(const LocalDate &) noexcept = default;
LocalDate & operator= (const LocalDate &) noexcept = default;
LocalDate & operator= (time_t time)
{
init(time);
return *this;
}
operator time_t() const
{
return DateLUT::instance().makeDate(m_year, m_month, m_day);
}
DayNum getDayNum() const
{
return DateLUT::instance().makeDayNum(m_year, m_month, m_day);
const auto & lut = DateLUT::instance();
return DayNum(lut.makeDayNum(m_year, m_month, m_day).toUnderType());
}
operator DayNum() const
@ -166,12 +156,3 @@ public:
};
static_assert(sizeof(LocalDate) == 4);
namespace std
{
inline string to_string(const LocalDate & date)
{
return date.toString();
}
}

View File

@ -29,29 +29,16 @@ private:
/// NOTE We may use attribute packed instead, but it is less portable.
unsigned char pad = 0;
void init(time_t time)
void init(time_t time, const DateLUTImpl & time_zone)
{
if (unlikely(time > DATE_LUT_MAX || time == 0))
{
m_year = 0;
m_month = 0;
m_day = 0;
m_hour = 0;
m_minute = 0;
m_second = 0;
DateLUTImpl::DateTimeComponents components = time_zone.toDateTimeComponents(time);
return;
}
const auto & date_lut = DateLUT::instance();
const auto & values = date_lut.getValues(time);
m_year = values.year;
m_month = values.month;
m_day = values.day_of_month;
m_hour = date_lut.toHour(time);
m_minute = date_lut.toMinute(time);
m_second = date_lut.toSecond(time);
m_year = components.date.year;
m_month = components.date.month;
m_day = components.date.day;
m_hour = components.time.hour;
m_minute = components.time.minute;
m_second = components.time.second;
(void)pad; /// Suppress unused private field warning.
}
@ -73,9 +60,9 @@ private:
}
public:
explicit LocalDateTime(time_t time)
explicit LocalDateTime(time_t time, const DateLUTImpl & time_zone = DateLUT::instance())
{
init(time);
init(time, time_zone);
}
LocalDateTime(unsigned short year_, unsigned char month_, unsigned char day_,
@ -104,19 +91,6 @@ public:
LocalDateTime(const LocalDateTime &) noexcept = default;
LocalDateTime & operator= (const LocalDateTime &) noexcept = default;
LocalDateTime & operator= (time_t time)
{
init(time);
return *this;
}
operator time_t() const
{
return m_year == 0
? 0
: DateLUT::instance().makeDateTime(m_year, m_month, m_day, m_hour, m_minute, m_second);
}
unsigned short year() const { return m_year; }
unsigned char month() const { return m_month; }
unsigned char day() const { return m_day; }
@ -132,8 +106,30 @@ public:
void second(unsigned char x) { m_second = x; }
LocalDate toDate() const { return LocalDate(m_year, m_month, m_day); }
LocalDateTime toStartOfDate() const { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); }
LocalDateTime toStartOfDate() { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); }
std::string toString() const
{
std::string s{"0000-00-00 00:00:00"};
s[0] += m_year / 1000;
s[1] += (m_year / 100) % 10;
s[2] += (m_year / 10) % 10;
s[3] += m_year % 10;
s[5] += m_month / 10;
s[6] += m_month % 10;
s[8] += m_day / 10;
s[9] += m_day % 10;
s[11] += m_hour / 10;
s[12] += m_hour % 10;
s[14] += m_minute / 10;
s[15] += m_minute % 10;
s[17] += m_second / 10;
s[18] += m_second % 10;
return s;
}
bool operator< (const LocalDateTime & other) const
{
@ -167,14 +163,3 @@ public:
};
static_assert(sizeof(LocalDateTime) == 8);
namespace std
{
inline string to_string(const LocalDateTime & datetime)
{
stringstream str;
str << datetime;
return str.str();
}
}

View File

@ -0,0 +1,33 @@
#pragma once
#include <common/types.h>
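/** Moves `src` into `dst` when T is nothrow-move-assignable and copies it otherwise,
  * so that if the assignment throws, the source object is left intact
  * (the same rationale as std::move_if_noexcept).
  */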
namespace detail
{
template <typename T, bool is_nothrow_move_assignable = std::is_nothrow_move_assignable_v<T>>
struct MoveOrCopyIfThrow;
template <typename T>
struct MoveOrCopyIfThrow<T, true>
{
void operator()(T && src, T & dst) const
{
dst = std::forward<T>(src);
}
};
template <typename T>
struct MoveOrCopyIfThrow<T, false>
{
void operator()(T && src, T & dst) const
{
dst = src;
}
};
template <typename T>
void moveOrCopyIfThrow(T && src, T & dst)
{
MoveOrCopyIfThrow<T>()(std::forward<T>(src), dst);
}
}

View File

@ -1,6 +1,6 @@
#include <common/ReadlineLineReader.h>
#include <common/errnoToString.h>
#include <ext/scope_guard.h>
#include <common/scope_guard.h>
#include <errno.h>
#include <signal.h>

View File

@ -1,8 +1,9 @@
#include <common/ReplxxLineReader.h>
#include <common/errnoToString.h>
#include <errno.h>
#include <string.h>
#include <chrono>
#include <cerrno>
#include <cstring>
#include <unistd.h>
#include <functional>
#include <sys/file.h>
@ -24,6 +25,94 @@ void trim(String & s)
s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end());
}
/// Copied from replxx::src/util.cxx::now_ms_str() under the terms of 3-clause BSD license of Replxx.
/// Copyright (c) 2017-2018, Marcin Konarski (amok at codestation.org)
/// Copyright (c) 2010, Salvatore Sanfilippo (antirez at gmail dot com)
/// Copyright (c) 2010, Pieter Noordhuis (pcnoordhuis at gmail dot com)
std::string replxx_now_ms_str()
{
std::chrono::milliseconds ms(std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()));
time_t t = ms.count() / 1000;
tm broken;
if (!localtime_r(&t, &broken))
{
return std::string();
}
static int const BUFF_SIZE(32);
char str[BUFF_SIZE];
strftime(str, BUFF_SIZE, "%Y-%m-%d %H:%M:%S.", &broken);
snprintf(str + sizeof("YYYY-mm-dd HH:MM:SS"), 5, "%03d", static_cast<int>(ms.count() % 1000));
return str;
}
/// Convert from readline to replxx format.
///
/// replxx requires each history line to be prepended with a time line:
///
/// ### YYYY-MM-DD HH:MM:SS.SSS
/// select 1
///
/// Without those service lines it will load all lines from the history file as
/// one history line for suggestion. And if there are lots of lines in the file it
/// will take a lot of time (getline() + tons of reallocations).
///
/// NOTE: this code uses std::ifstream/std::ofstream like original replxx code.
void convertHistoryFile(const std::string & path, replxx::Replxx & rx)
{
std::ifstream in(path);
if (!in)
{
rx.print("Cannot open %s reading (for conversion): %s\n",
path.c_str(), errnoToString(errno).c_str());
return;
}
std::string line;
if (!getline(in, line).good())
{
rx.print("Cannot read from %s (for conversion): %s\n",
path.c_str(), errnoToString(errno).c_str());
return;
}
/// This is the marker of the date, no need to convert.
static char const REPLXX_TIMESTAMP_PATTERN[] = "### dddd-dd-dd dd:dd:dd.ddd";
if (line.starts_with("### ") && line.size() == strlen(REPLXX_TIMESTAMP_PATTERN))
{
return;
}
std::vector<std::string> lines;
in.seekg(0);
while (getline(in, line).good())
{
lines.push_back(line);
}
in.close();
size_t lines_size = lines.size();
std::sort(lines.begin(), lines.end());
lines.erase(std::unique(lines.begin(), lines.end()), lines.end());
rx.print("The history file (%s) is in old format. %zu lines, %zu unique lines.\n",
path.c_str(), lines_size, lines.size());
std::ofstream out(path);
if (!out)
{
rx.print("Cannot open %s for writing (for conversion): %s\n",
path.c_str(), errnoToString(errno).c_str());
return;
}
const std::string & timestamp = replxx_now_ms_str();
for (const auto & out_line : lines)
{
out << "### " << timestamp << "\n" << out_line << std::endl;
}
out.close();
}
}
ReplxxLineReader::ReplxxLineReader(
@ -47,6 +136,8 @@ ReplxxLineReader::ReplxxLineReader(
}
else
{
convertHistoryFile(history_file_path, rx);
if (flock(history_file_fd, LOCK_SH))
{
rx.print("Shared lock of history file failed: %s\n", errnoToString(errno).c_str());
@ -91,6 +182,10 @@ ReplxxLineReader::ReplxxLineReader(
/// it is also bound to M-p/M-n).
rx.bind_key(Replxx::KEY::meta('N'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_NEXT, code); });
rx.bind_key(Replxx::KEY::meta('P'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::COMPLETE_PREVIOUS, code); });
/// By default M-BACKSPACE is KILL_TO_WHITESPACE_ON_LEFT, while in readline it is backward-kill-word
rx.bind_key(Replxx::KEY::meta(Replxx::KEY::BACKSPACE), [this](char32_t code) { return rx.invoke(Replxx::ACTION::KILL_TO_BEGINING_OF_WORD, code); });
/// By default C-w is KILL_TO_BEGINING_OF_WORD, while in readline it is unix-word-rubout
rx.bind_key(Replxx::KEY::control('W'), [this](char32_t code) { return rx.invoke(Replxx::ACTION::KILL_TO_WHITESPACE_ON_LEFT, code); });
rx.bind_key(Replxx::KEY::meta('E'), [this](char32_t) { openEditor(); return Replxx::ACTION_RESULT::CONTINUE; });
}

View File

@ -3,7 +3,7 @@
#include <map>
#include <tuple>
#include <mutex>
#include <ext/function_traits.h>
#include <common/function_traits.h>
/** The simplest cache for a free function.
@ -32,10 +32,11 @@ public:
template <typename... Args>
Result operator() (Args &&... args)
{
Key key{std::forward<Args>(args)...};
{
std::lock_guard lock(mutex);
Key key{std::forward<Args>(args)...};
auto it = cache.find(key);
if (cache.end() != it)
@ -43,7 +44,7 @@ public:
}
/// The calculations themselves are not done under mutex.
Result res = f(std::forward<Args>(args)...);
Result res = std::apply(f, key);
{
std::lock_guard lock(mutex);
@ -57,11 +58,12 @@ public:
template <typename... Args>
void update(Args &&... args)
{
Result res = f(std::forward<Args>(args)...);
Key key{std::forward<Args>(args)...};
Result res = std::apply(f, key);
{
std::lock_guard lock(mutex);
Key key{std::forward<Args>(args)...};
cache[key] = std::move(res);
}
}

View File

@ -25,6 +25,12 @@ namespace common
return x - y;
}
template <typename T>
inline auto NO_SANITIZE_UNDEFINED negateIgnoreOverflow(T x)
{
return -x;
}
template <typename T>
inline bool addOverflow(T x, T y, T & res)
{
@ -50,27 +56,33 @@ namespace common
}
template <>
inline bool addOverflow(__int128 x, __int128 y, __int128 & res)
inline bool addOverflow(Int128 x, Int128 y, Int128 & res)
{
static constexpr __int128 min_int128 = minInt128();
static constexpr __int128 max_int128 = maxInt128();
res = addIgnoreOverflow(x, y);
return (y > 0 && x > max_int128 - y) || (y < 0 && x < min_int128 - y);
return (y > 0 && x > std::numeric_limits<Int128>::max() - y) ||
(y < 0 && x < std::numeric_limits<Int128>::min() - y);
}
template <>
inline bool addOverflow(wInt256 x, wInt256 y, wInt256 & res)
inline bool addOverflow(UInt128 x, UInt128 y, UInt128 & res)
{
res = addIgnoreOverflow(x, y);
return (y > 0 && x > std::numeric_limits<wInt256>::max() - y) ||
(y < 0 && x < std::numeric_limits<wInt256>::min() - y);
return x > std::numeric_limits<UInt128>::max() - y;
}
template <>
inline bool addOverflow(wUInt256 x, wUInt256 y, wUInt256 & res)
inline bool addOverflow(Int256 x, Int256 y, Int256 & res)
{
res = addIgnoreOverflow(x, y);
return x > std::numeric_limits<wUInt256>::max() - y;
return (y > 0 && x > std::numeric_limits<Int256>::max() - y) ||
(y < 0 && x < std::numeric_limits<Int256>::min() - y);
}
template <>
inline bool addOverflow(UInt256 x, UInt256 y, UInt256 & res)
{
res = addIgnoreOverflow(x, y);
return x > std::numeric_limits<UInt256>::max() - y;
}
template <typename T>
@ -98,24 +110,30 @@ namespace common
}
template <>
inline bool subOverflow(__int128 x, __int128 y, __int128 & res)
inline bool subOverflow(Int128 x, Int128 y, Int128 & res)
{
static constexpr __int128 min_int128 = minInt128();
static constexpr __int128 max_int128 = maxInt128();
res = subIgnoreOverflow(x, y);
return (y < 0 && x > max_int128 + y) || (y > 0 && x < min_int128 + y);
return (y < 0 && x > std::numeric_limits<Int128>::max() + y) ||
(y > 0 && x < std::numeric_limits<Int128>::min() + y);
}
template <>
inline bool subOverflow(wInt256 x, wInt256 y, wInt256 & res)
inline bool subOverflow(UInt128 x, UInt128 y, UInt128 & res)
{
res = subIgnoreOverflow(x, y);
return (y < 0 && x > std::numeric_limits<wInt256>::max() + y) ||
(y > 0 && x < std::numeric_limits<wInt256>::min() + y);
return x < y;
}
template <>
inline bool subOverflow(wUInt256 x, wUInt256 y, wUInt256 & res)
inline bool subOverflow(Int256 x, Int256 y, Int256 & res)
{
res = subIgnoreOverflow(x, y);
return (y < 0 && x > std::numeric_limits<Int256>::max() + y) ||
(y > 0 && x < std::numeric_limits<Int256>::min() + y);
}
template <>
inline bool subOverflow(UInt256 x, UInt256 y, UInt256 & res)
{
res = subIgnoreOverflow(x, y);
return x < y;
@ -145,36 +163,33 @@ namespace common
return __builtin_smulll_overflow(x, y, &res);
}
/// Overflow check is not implemented for big integers.
template <>
inline bool mulOverflow(__int128 x, __int128 y, __int128 & res)
inline bool mulOverflow(Int128 x, Int128 y, Int128 & res)
{
res = mulIgnoreOverflow(x, y);
if (!x || !y)
return false;
unsigned __int128 a = (x > 0) ? x : -x;
unsigned __int128 b = (y > 0) ? y : -y;
return mulIgnoreOverflow(a, b) / b != a;
return false;
}
template <>
inline bool mulOverflow(wInt256 x, wInt256 y, wInt256 & res)
inline bool mulOverflow(Int256 x, Int256 y, Int256 & res)
{
res = mulIgnoreOverflow(x, y);
if (!x || !y)
return false;
wInt256 a = (x > 0) ? x : -x;
wInt256 b = (y > 0) ? y : -y;
return mulIgnoreOverflow(a, b) / b != a;
return false;
}
template <>
inline bool mulOverflow(wUInt256 x, wUInt256 y, wUInt256 & res)
inline bool mulOverflow(UInt128 x, UInt128 y, UInt128 & res)
{
res = mulIgnoreOverflow(x, y);
if (!x || !y)
return false;
return res / y != x;
return false;
}
template <>
inline bool mulOverflow(UInt256 x, UInt256 y, UInt256 & res)
{
res = mulIgnoreOverflow(x, y);
return false;
}
}
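
A hedged usage sketch of the contract these helpers share: `res` always receives the (possibly wrapped) result, and the return value reports whether overflow happened. For built-in types the header defers to compiler intrinsics such as the `__builtin_smulll_overflow` call visible above; note the hunk deliberately makes `mulOverflow` for the wide types report no overflow, per its comment.

#include <cstdint>
#include <iostream>

int main()
{
    int64_t res;
    /// Generic GCC/Clang intrinsic; the typed __builtin_s*_overflow calls in
    /// the header behave the same way.
    const bool overflow = __builtin_add_overflow(INT64_MAX, int64_t{1}, &res);
    std::cout << "overflow: " << overflow << ", wrapped value: " << res << '\n';
}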

7
base/common/arraySize.h Normal file
View File

@ -0,0 +1,7 @@
#pragma once
#include <cstdlib>
/** \brief Returns the number of elements in an automatic (built-in) array. */
template <typename T, std::size_t N>
constexpr size_t arraySize(const T (&)[N]) noexcept { return N; }
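
A minimal usage sketch for arraySize() above: the element count comes from the array's type, so it compiles only for true built-in arrays, never for pointers.

#include <cstdlib>

template <typename T, std::size_t N>
constexpr size_t arraySize(const T (&)[N]) noexcept { return N; }

int main()
{
    constexpr int xs[] = {1, 2, 3, 4};
    static_assert(arraySize(xs) == 4);
    /// const int * p = xs; arraySize(p);  /// would not compile: a pointer has no extent
}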

27
base/common/bit_cast.h Normal file
View File

@ -0,0 +1,27 @@
#pragma once
#include <string.h>
#include <algorithm>
#include <type_traits>
/** \brief Returns value `from` converted to type `To` while retaining bit representation.
* `To` and `From` must satisfy `CopyConstructible`.
*/
template <typename To, typename From>
std::decay_t<To> bit_cast(const From & from)
{
To res {};
memcpy(static_cast<void*>(&res), &from, std::min(sizeof(res), sizeof(from)));
return res;
}
/** \brief Returns value `from` converted to type `To` while retaining bit representation.
* `To` and `From` must satisfy `CopyConstructible`.
*/
template <typename To, typename From>
std::decay_t<To> safe_bit_cast(const From & from)
{
static_assert(sizeof(To) == sizeof(From), "bit cast on types of different width");
return bit_cast<To, From>(from);
}
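
A hedged usage sketch of what safe_bit_cast() above provides: reinterpreting a float's bits as a uint32_t, with the same-width requirement checked at compile time.

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    float f = 1.0f;
    uint32_t bits{};
    static_assert(sizeof(bits) == sizeof(f), "bit cast on types of different width");
    std::memcpy(&bits, &f, sizeof(bits));          /// what bit_cast() does under the hood
    std::printf("0x%08" PRIx32 "\n", bits);        /// prints 0x3f800000 on IEEE-754 platforms
}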

46
base/common/chrono_io.h Normal file
View File

@ -0,0 +1,46 @@
#pragma once
#include <chrono>
#include <string>
#include <sstream>
#include <cctz/time_zone.h>
inline std::string to_string(const std::time_t & time)
{
return cctz::format("%Y-%m-%d %H:%M:%S", std::chrono::system_clock::from_time_t(time), cctz::local_time_zone());
}
template <typename Clock, typename Duration = typename Clock::duration>
std::string to_string(const std::chrono::time_point<Clock, Duration> & tp)
{
// Don't use DateLUT because it shows weird characters for
// TimePoint::max(). I wish we could use C++20 format, but it's not
// there yet.
// return DateLUT::instance().timeToString(std::chrono::system_clock::to_time_t(tp));
auto in_time_t = std::chrono::system_clock::to_time_t(tp);
return to_string(in_time_t);
}
template <typename Rep, typename Period = std::ratio<1>>
std::string to_string(const std::chrono::duration<Rep, Period> & duration)
{
auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(duration);
if (seconds_as_int == duration)
return std::to_string(seconds_as_int.count()) + "s";
auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(duration);
return std::to_string(seconds_as_double.count()) + "s";
}
template <typename Clock, typename Duration = typename Clock::duration>
std::ostream & operator<<(std::ostream & o, const std::chrono::time_point<Clock, Duration> & tp)
{
return o << to_string(tp);
}
template <typename Rep, typename Period = std::ratio<1>>
std::ostream & operator<<(std::ostream & o, const std::chrono::duration<Rep, Period> & duration)
{
return o << to_string(duration);
}
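
A hedged sketch of the duration branch above, runnable without cctz: integral second counts print exactly, anything else falls back to a double.

#include <chrono>
#include <iostream>
#include <ratio>
#include <string>

template <typename Rep, typename Period = std::ratio<1>>
std::string to_string(const std::chrono::duration<Rep, Period> & duration)
{
    auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(duration);
    if (seconds_as_int == duration)
        return std::to_string(seconds_as_int.count()) + "s";
    auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(duration);
    return std::to_string(seconds_as_double.count()) + "s";
}

int main()
{
    std::cout << to_string(std::chrono::seconds(90)) << '\n';         /// 90s
    std::cout << to_string(std::chrono::milliseconds(1500)) << '\n';  /// 1.500000s
}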

View File

@ -1,5 +1,20 @@
#pragma once
/// __has_feature supported only by clang.
///
/// But libcxx/libcxxabi overrides it to 0,
/// thus the checks for __has_feature will be wrong.
///
/// NOTE:
/// - __has_feature cannot be simply undefined,
/// since this will be broken if some C++ header will be included after
/// including <common/defines.h>
/// - it should not have fallback to 0,
/// since this may create false-positive detection (common problem)
#if defined(__clang__) && defined(__has_feature)
# define ch_has_feature __has_feature
#endif
#if defined(_MSC_VER)
# if !defined(likely)
# define likely(x) (x)
@ -32,8 +47,8 @@
/// Check for presence of address sanitizer
#if !defined(ADDRESS_SANITIZER)
# if defined(__has_feature)
# if __has_feature(address_sanitizer)
# if defined(ch_has_feature)
# if ch_has_feature(address_sanitizer)
# define ADDRESS_SANITIZER 1
# endif
# elif defined(__SANITIZE_ADDRESS__)
@ -42,8 +57,8 @@
#endif
#if !defined(THREAD_SANITIZER)
# if defined(__has_feature)
# if __has_feature(thread_sanitizer)
# if defined(ch_has_feature)
# if ch_has_feature(thread_sanitizer)
# define THREAD_SANITIZER 1
# endif
# elif defined(__SANITIZE_THREAD__)
@ -52,8 +67,8 @@
#endif
#if !defined(MEMORY_SANITIZER)
# if defined(__has_feature)
# if __has_feature(memory_sanitizer)
# if defined(ch_has_feature)
# if ch_has_feature(memory_sanitizer)
# define MEMORY_SANITIZER 1
# endif
# elif defined(__MEMORY_SANITIZER__)
@ -61,6 +76,16 @@
# endif
#endif
#if !defined(UNDEFINED_BEHAVIOR_SANITIZER)
# if defined(__has_feature)
# if __has_feature(undefined_behavior_sanitizer)
# define UNDEFINED_BEHAVIOR_SANITIZER 1
# endif
# elif defined(__UNDEFINED_BEHAVIOR_SANITIZER__)
# define UNDEFINED_BEHAVIOR_SANITIZER 1
# endif
#endif
#if defined(ADDRESS_SANITIZER)
# define BOOST_USE_ASAN 1
# define BOOST_USE_UCONTEXT 1
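
A hedged sketch of why the ch_has_feature indirection matters: libcxx may override __has_feature to 0, so a plain defined(__has_feature) check is not enough, while gating the alias on __clang__ keeps the GCC fallback macros reachable.

#if defined(__clang__) && defined(__has_feature)
#    define ch_has_feature __has_feature
#endif

#include <cstdio>

int main()
{
#if defined(ch_has_feature)
#    if ch_has_feature(address_sanitizer)
    std::puts("ASan build (clang)");
#    else
    std::puts("clang without ASan");
#    endif
#elif defined(__SANITIZE_ADDRESS__)
    std::puts("ASan build (gcc)");
#else
    std::puts("no ASan detected");
#endif
}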

View File

@ -5,16 +5,14 @@
#include <common/types.h>
#include <common/wide_integer.h>
using Int128 = __int128;
using wInt256 = wide::integer<256, signed>;
using wUInt256 = wide::integer<256, unsigned>;
using Int128 = wide::integer<128, signed>;
using UInt128 = wide::integer<128, unsigned>;
using Int256 = wide::integer<256, signed>;
using UInt256 = wide::integer<256, unsigned>;
static_assert(sizeof(wInt256) == 32);
static_assert(sizeof(wUInt256) == 32);
static constexpr __int128 minInt128() { return static_cast<unsigned __int128>(1) << 127; }
static constexpr __int128 maxInt128() { return (static_cast<unsigned __int128>(1) << 127) - 1; }
static_assert(sizeof(Int256) == 32);
static_assert(sizeof(UInt256) == 32);
/// The standard library type traits, such as std::is_arithmetic, with one exception
/// (std::common_type), are "set in stone". Attempting to specialize them causes undefined behavior.
@ -26,7 +24,7 @@ struct is_signed
};
template <> struct is_signed<Int128> { static constexpr bool value = true; };
template <> struct is_signed<wInt256> { static constexpr bool value = true; };
template <> struct is_signed<Int256> { static constexpr bool value = true; };
template <typename T>
inline constexpr bool is_signed_v = is_signed<T>::value;
@ -37,7 +35,8 @@ struct is_unsigned
static constexpr bool value = std::is_unsigned_v<T>;
};
template <> struct is_unsigned<wUInt256> { static constexpr bool value = true; };
template <> struct is_unsigned<UInt128> { static constexpr bool value = true; };
template <> struct is_unsigned<UInt256> { static constexpr bool value = true; };
template <typename T>
inline constexpr bool is_unsigned_v = is_unsigned<T>::value;
@ -51,8 +50,9 @@ struct is_integer
};
template <> struct is_integer<Int128> { static constexpr bool value = true; };
template <> struct is_integer<wInt256> { static constexpr bool value = true; };
template <> struct is_integer<wUInt256> { static constexpr bool value = true; };
template <> struct is_integer<UInt128> { static constexpr bool value = true; };
template <> struct is_integer<Int256> { static constexpr bool value = true; };
template <> struct is_integer<UInt256> { static constexpr bool value = true; };
template <typename T>
inline constexpr bool is_integer_v = is_integer<T>::value;
@ -64,7 +64,11 @@ struct is_arithmetic
static constexpr bool value = std::is_arithmetic_v<T>;
};
template <> struct is_arithmetic<__int128> { static constexpr bool value = true; };
template <> struct is_arithmetic<Int128> { static constexpr bool value = true; };
template <> struct is_arithmetic<UInt128> { static constexpr bool value = true; };
template <> struct is_arithmetic<Int256> { static constexpr bool value = true; };
template <> struct is_arithmetic<UInt256> { static constexpr bool value = true; };
template <typename T>
inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
@ -75,9 +79,10 @@ struct make_unsigned
typedef std::make_unsigned_t<T> type;
};
template <> struct make_unsigned<Int128> { using type = unsigned __int128; };
template <> struct make_unsigned<wInt256> { using type = wUInt256; };
template <> struct make_unsigned<wUInt256> { using type = wUInt256; };
template <> struct make_unsigned<Int128> { using type = UInt128; };
template <> struct make_unsigned<UInt128> { using type = UInt128; };
template <> struct make_unsigned<Int256> { using type = UInt256; };
template <> struct make_unsigned<UInt256> { using type = UInt256; };
template <typename T> using make_unsigned_t = typename make_unsigned<T>::type;
@ -87,8 +92,10 @@ struct make_signed
typedef std::make_signed_t<T> type;
};
template <> struct make_signed<wInt256> { using type = wInt256; };
template <> struct make_signed<wUInt256> { using type = wInt256; };
template <> struct make_signed<Int128> { using type = Int128; };
template <> struct make_signed<UInt128> { using type = Int128; };
template <> struct make_signed<Int256> { using type = Int256; };
template <> struct make_signed<UInt256> { using type = Int256; };
template <typename T> using make_signed_t = typename make_signed<T>::type;
@ -98,8 +105,10 @@ struct is_big_int
static constexpr bool value = false;
};
template <> struct is_big_int<wInt256> { static constexpr bool value = true; };
template <> struct is_big_int<wUInt256> { static constexpr bool value = true; };
template <> struct is_big_int<Int128> { static constexpr bool value = true; };
template <> struct is_big_int<UInt128> { static constexpr bool value = true; };
template <> struct is_big_int<Int256> { static constexpr bool value = true; };
template <> struct is_big_int<UInt256> { static constexpr bool value = true; };
template <typename T>
inline constexpr bool is_big_int_v = is_big_int<T>::value;
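
A hedged usage sketch for the trait specializations above (assuming the header is included as <common/extended_types.h>, matching the include seen later in this commit):

#include <common/extended_types.h>
#include <type_traits>

static_assert(is_signed_v<Int128>);
static_assert(is_unsigned_v<UInt128>);
static_assert(std::is_same_v<make_unsigned_t<Int256>, UInt256>);
static_assert(std::is_same_v<make_signed_t<UInt128>, Int128>);
static_assert(is_big_int_v<Int256> && !is_big_int_v<long>);

int main() {}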

View File

@ -4,23 +4,42 @@
#include <string>
#include <boost/algorithm/string/replace.hpp>
std::string_view getResource(std::string_view name)
{
// Convert the resource file name into the form generated by `ld -r -b binary`.
std::string name_replaced(name);
std::replace(name_replaced.begin(), name_replaced.end(), '/', '_');
std::replace(name_replaced.begin(), name_replaced.end(), '-', '_');
std::replace(name_replaced.begin(), name_replaced.end(), '.', '_');
boost::replace_all(name_replaced, "+", "_PLUS_");
/// These are the names that are generated by "ld -r -b binary"
std::string symbol_name_data = "_binary_" + name_replaced + "_start";
std::string symbol_name_size = "_binary_" + name_replaced + "_size";
// In most `dlsym(3)` APIs, one passes the symbol name as it appears via
// something like `nm` or `objdump -t`. For example, a symbol `_foo` would be
// looked up with the string `"_foo"`.
//
// Apple's linker is confusingly different. The NOTES on the man page for
// `dlsym(3)` claim that one looks up the symbol with "the name used in C
// source code". In this example, that would mean using the string `"foo"`.
// This apparently applies even in the case where the symbol did not originate
// from C source, such as the embedded binary resource files used here. So
// the symbol name must not have a leading `_` on Apple platforms. It's not
// clear how this applies to other symbols, such as those which _have_ a leading
// underscore in them by design, many leading underscores, etc.
#if defined OS_DARWIN
std::string prefix = "binary_";
#else
std::string prefix = "_binary_";
#endif
std::string symbol_name_start = prefix + name_replaced + "_start";
std::string symbol_name_end = prefix + name_replaced + "_end";
const void * sym_data = dlsym(RTLD_DEFAULT, symbol_name_data.c_str());
const void * sym_size = dlsym(RTLD_DEFAULT, symbol_name_size.c_str());
const char* sym_start = reinterpret_cast<const char*>(dlsym(RTLD_DEFAULT, symbol_name_start.c_str()));
const char* sym_end = reinterpret_cast<const char*>(dlsym(RTLD_DEFAULT, symbol_name_end.c_str()));
if (sym_data && sym_size)
return { static_cast<const char *>(sym_data), unalignedLoad<size_t>(&sym_size) };
if (sym_start && sym_end)
{
auto resource_size = static_cast<size_t>(std::distance(sym_start, sym_end));
return { sym_start, resource_size };
}
return {};
}
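
A hedged demonstration of the name mangling performed before the dlsym() lookup above ('/', '-', '.' become '_', "+" becomes "_PLUS_"); the resource name is illustrative, not taken from this commit.

#include <algorithm>
#include <iostream>
#include <string>
#include <boost/algorithm/string/replace.hpp>

int main()
{
    std::string name_replaced = "config.d/query-log.xml";  /// hypothetical resource name
    std::replace(name_replaced.begin(), name_replaced.end(), '/', '_');
    std::replace(name_replaced.begin(), name_replaced.end(), '-', '_');
    std::replace(name_replaced.begin(), name_replaced.end(), '.', '_');
    boost::replace_all(name_replaced, "+", "_PLUS_");
    std::cout << "_binary_" << name_replaced << "_start\n";  /// ELF platforms
    std::cout << "binary_"  << name_replaced << "_start\n";  /// Darwin: no leading underscore
}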

View File

@ -25,6 +25,10 @@ uint64_t getThreadId()
current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid
#elif defined(OS_FREEBSD)
current_tid = pthread_getthreadid_np();
#elif defined(OS_SUNOS)
// On Solaris-derived systems, this returns the ID of the LWP, analogous
// to a thread.
current_tid = static_cast<uint64_t>(pthread_self());
#else
if (0 != pthread_threadid_np(nullptr, &current_tid))
throw std::logic_error("pthread_threadid_np returned error");

View File

@ -30,9 +30,8 @@
#include <cstddef>
#include <cstring>
#include <type_traits>
#include <common/extended_types.h>
using int128_t = __int128;
using uint128_t = unsigned __int128;
namespace impl
{
@ -106,7 +105,7 @@ using UnsignedOfSize = typename SelectType
uint16_t,
uint32_t,
uint64_t,
uint128_t
__uint128_t
>::Result;
/// Holds the result of dividing an unsigned N-byte variable by 10^N resulting in
@ -313,7 +312,8 @@ namespace convert
}
}
static inline int digits10(uint128_t x)
template <typename T>
static inline int digits10(T x)
{
if (x < 10ULL)
return 1;
@ -346,8 +346,11 @@ static inline int digits10(uint128_t x)
return 12 + digits10(x / 1000000000000ULL);
}
static inline char * writeUIntText(uint128_t x, char * p)
template <typename T>
static inline char * writeUIntText(T x, char * p)
{
static_assert(is_unsigned_v<T>);
int len = digits10(x);
auto pp = p + len;
while (x >= 100)
@ -370,14 +373,28 @@ static inline char * writeLeadingMinus(char * pos)
return pos + 1;
}
static inline char * writeSIntText(int128_t x, char * pos)
template <typename T>
static inline char * writeSIntText(T x, char * pos)
{
static constexpr int128_t min_int128 = uint128_t(1) << 127;
static_assert(std::is_same_v<T, Int128> || std::is_same_v<T, Int256>);
if (unlikely(x == min_int128))
using UnsignedT = make_unsigned_t<T>;
static constexpr T min_int = UnsignedT(1) << (sizeof(T) * 8 - 1);
if (unlikely(x == min_int))
{
memcpy(pos, "-170141183460469231731687303715884105728", 40);
return pos + 40;
if constexpr (std::is_same_v<T, Int128>)
{
const char * res = "-170141183460469231731687303715884105728";
memcpy(pos, res, strlen(res));
return pos + strlen(res);
}
else if constexpr (std::is_same_v<T, Int256>)
{
const char * res = "-57896044618658097711785492504343953926634992332820282019728792003956564819968";
memcpy(pos, res, strlen(res));
return pos + strlen(res);
}
}
if (x < 0)
@ -385,7 +402,7 @@ static inline char * writeSIntText(int128_t x, char * pos)
x = -x;
pos = writeLeadingMinus(pos);
}
return writeUIntText(static_cast<uint128_t>(x), pos);
return writeUIntText(UnsignedT(x), pos);
}
}
@ -403,13 +420,25 @@ inline char * itoa(char8_t i, char * p)
}
template <>
inline char * itoa<uint128_t>(uint128_t i, char * p)
inline char * itoa(UInt128 i, char * p)
{
return impl::writeUIntText(i, p);
}
template <>
inline char * itoa<int128_t>(int128_t i, char * p)
inline char * itoa(Int128 i, char * p)
{
return impl::writeSIntText(i, p);
}
template <>
inline char * itoa(UInt256 i, char * p)
{
return impl::writeUIntText(i, p);
}
template <>
inline char * itoa(Int256 i, char * p)
{
return impl::writeSIntText(i, p);
}
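
A hedged usage sketch (assuming the header is included as <common/itoa.h>): itoa() returns the past-the-end pointer, so the caller terminates the buffer.

#include <common/itoa.h>
#include <cstdio>

int main()
{
    char buf[64];
    Int128 x{-1};
    *itoa(x, buf) = '\0';
    std::printf("%s\n", buf);  /// prints -1
}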

52
base/common/map.h Normal file
View File

@ -0,0 +1,52 @@
#pragma once
#include <type_traits>
#include <boost/iterator/transform_iterator.hpp>
namespace collections
{
/// \brief Strips the top-level reference and cv-qualifiers from a type, allowing storage in containers
template <typename T>
using unqualified_t = std::remove_cv_t<std::remove_reference_t<T>>;
/** \brief Returns collection of the same container-type as the input collection,
* with each element transformed by the application of `mapper`.
*/
template <template <typename...> class Collection, typename... Params, typename Mapper>
auto map(const Collection<Params...> & collection, Mapper && mapper)
{
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return Collection<value_type>(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
}
/** \brief Returns collection of specified container-type,
* with each element transformed by the application of `mapper`.
* Allows conversion between different container-types, e.g. std::vector to std::list
*/
template <template <typename...> class ResultCollection, typename Collection, typename Mapper>
auto map(const Collection & collection, Mapper && mapper)
{
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return ResultCollection<value_type>(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
}
/** \brief Returns collection of specified type,
* with each element transformed by the application of `mapper`.
* Allows leveraging implicit conversion between the result of applying `mapper` and R::value_type.
*/
template <typename ResultCollection, typename Collection, typename Mapper>
auto map(const Collection & collection, Mapper && mapper)
{
return ResultCollection(
boost::make_transform_iterator(std::begin(collection), std::forward<Mapper>(mapper)),
boost::make_transform_iterator(std::end(collection), std::forward<Mapper>(mapper)));
}
}
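
A hedged usage sketch for collections::map() above (assuming the header is included as <common/map.h>, per the path shown in this diff):

#include <common/map.h>
#include <iostream>
#include <list>
#include <vector>

int main()
{
    std::vector<int> xs{1, 2, 3};

    auto doubled = collections::map(xs, [](int x) { return x * 2; });            /// std::vector<int>{2, 4, 6}
    auto shifted = collections::map<std::list>(xs, [](int x) { return x + 1; }); /// std::list<int>{2, 3, 4}

    for (int v : doubled) std::cout << v << ' ';
    for (int v : shifted) std::cout << v << ' ';
    std::cout << '\n';
}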

View File

@ -15,11 +15,11 @@
#endif
#define __msan_unpoison(X, Y) // NOLINT
#if defined(__has_feature)
# if __has_feature(memory_sanitizer)
# undef __msan_unpoison
# include <sanitizer/msan_interface.h>
# endif
#if defined(ch_has_feature)
# if ch_has_feature(memory_sanitizer)
# undef __msan_unpoison
# include <sanitizer/msan_interface.h>
# endif
#endif
#include <link.h>

View File

@ -4,9 +4,9 @@
#include <boost/range/adaptor/transformed.hpp>
#include <type_traits>
namespace ext
namespace collections
{
namespace internal
{
template <typename ResultType, typename CountingType, typename BeginType, typename EndType>
@ -24,11 +24,11 @@ namespace internal
/// For-loop adaptor used to iterate through the half-open interval [begin, end).
/// The parameters `begin` and `end` can have any integral or enum types.
template <typename BeginType,
typename EndType,
typename = std::enable_if_t<
(std::is_integral_v<BeginType> || std::is_enum_v<BeginType>) &&
(std::is_integral_v<EndType> || std::is_enum_v<EndType>) &&
(!std::is_enum_v<BeginType> || !std::is_enum_v<EndType> || std::is_same_v<BeginType, EndType>), void>>
typename EndType,
typename = std::enable_if_t<
(std::is_integral_v<BeginType> || std::is_enum_v<BeginType>) &&
(std::is_integral_v<EndType> || std::is_enum_v<EndType>) &&
(!std::is_enum_v<BeginType> || !std::is_enum_v<EndType> || std::is_same_v<BeginType, EndType>), void>>
inline auto range(BeginType begin, EndType end)
{
if constexpr (std::is_integral_v<BeginType> && std::is_integral_v<EndType>)
@ -51,7 +51,7 @@ inline auto range(BeginType begin, EndType end)
/// The parameter `end` can have any integral or enum type.
/// The same as range(0, end).
template <typename Type,
typename = std::enable_if_t<std::is_integral_v<Type> || std::is_enum_v<Type>, void>>
typename = std::enable_if_t<std::is_integral_v<Type> || std::is_enum_v<Type>, void>>
inline auto range(Type end)
{
if constexpr (std::is_integral_v<Type>)
@ -59,4 +59,5 @@ inline auto range(Type end)
else
return internal::rangeImpl<Type, std::underlying_type_t<Type>>(0, end);
}
}
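
A hedged usage sketch for collections::range() above (assuming the header is included as <common/range.h>):

#include <common/range.h>
#include <iostream>

enum class Color { Red, Green, Blue, Count };

int main()
{
    for (auto i : collections::range(0, 5))  /// half-open: prints 0 1 2 3 4
        std::cout << i << ' ';

    for (auto c : collections::range(Color::Count))  /// Red, Green, Blue; Count is excluded
        std::cout << static_cast<int>(c) << ' ';

    std::cout << '\n';
}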

View File

@ -4,9 +4,6 @@
#include <memory>
#include <utility>
namespace ext
{
template <class F>
class [[nodiscard]] basic_scope_guard
{
@ -105,10 +102,9 @@ using scope_guard = basic_scope_guard<std::function<void(void)>>;
template <class F>
inline basic_scope_guard<F> make_scope_guard(F && function_) { return std::forward<F>(function_); }
}
#define SCOPE_EXIT_CONCAT(n, ...) \
const auto scope_exit##n = ext::make_scope_guard([&] { __VA_ARGS__; })
const auto scope_exit##n = make_scope_guard([&] { __VA_ARGS__; })
#define SCOPE_EXIT_FWD(n, ...) SCOPE_EXIT_CONCAT(n, __VA_ARGS__)
#define SCOPE_EXIT(...) SCOPE_EXIT_FWD(__LINE__, __VA_ARGS__)
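
A minimal usage sketch for SCOPE_EXIT() above (the include path <common/scope_guard.h> matches the one used by scope_guard_safe.h below):

#include <common/scope_guard.h>
#include <cstdio>

int main()
{
    std::puts("acquire");
    SCOPE_EXIT(std::puts("release"));  /// runs when main's scope unwinds
    std::puts("work");
}   /// output: acquire, work, release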

View File

@ -0,0 +1,68 @@
#pragma once
#include <common/scope_guard.h>
#include <common/logger_useful.h>
#include <Common/MemoryTracker.h>
/// Same as SCOPE_EXIT() but blocks MEMORY_LIMIT_EXCEEDED errors.
///
/// A typical example of SCOPE_EXIT_MEMORY() usage is when the code under it
/// may do some tiny allocations that can fail under high memory pressure
/// and/or a low max_memory_usage (and related limits).
///
/// NOTE: it should be used with caution.
#define SCOPE_EXIT_MEMORY(...) SCOPE_EXIT( \
MemoryTracker::LockExceptionInThread \
lock_memory_tracker(VariableContext::Global); \
__VA_ARGS__; \
)
/// Same as SCOPE_EXIT() but try/catch/tryLogCurrentException any exceptions.
///
/// SCOPE_EXIT_SAFE() should be used when an exception thrown by the code
/// under SCOPE_EXIT() is not "that fatal" and an error message in the log is enough.
///
/// A good example is calling CurrentThread::detachQueryIfNotDetached().
///
/// An anti-pattern is calling WriteBuffer::finalize() under SCOPE_EXIT_SAFE()
/// (since finalize() can do a final write, and it is better to fail abnormally
/// than to ignore a write error).
///
/// NOTE: it should be used with double caution.
#define SCOPE_EXIT_SAFE(...) SCOPE_EXIT( \
try \
{ \
__VA_ARGS__; \
} \
catch (...) \
{ \
tryLogCurrentException(__PRETTY_FUNCTION__); \
} \
)
/// Same as SCOPE_EXIT() but:
/// - block the MEMORY_LIMIT_EXCEEDED errors,
/// - try/catch/tryLogCurrentException any exceptions.
///
/// SCOPE_EXIT_MEMORY_SAFE() can be used when the error can be ignored, and in
/// addition to SCOPE_EXIT_SAFE() it will also lock MEMORY_LIMIT_EXCEEDED to
/// avoid such exceptions.
///
/// It exists as a separate helper, since you do not always need to lock
/// MEMORY_LIMIT_EXCEEDED (there are cases when the code under SCOPE_EXIT does
/// not do any allocations, while LockExceptionInThread increments an atomic
/// variable).
///
/// NOTE: it should be used with triple caution.
#define SCOPE_EXIT_MEMORY_SAFE(...) SCOPE_EXIT( \
try \
{ \
MemoryTracker::LockExceptionInThread \
lock_memory_tracker(VariableContext::Global); \
__VA_ARGS__; \
} \
catch (...) \
{ \
tryLogCurrentException(__PRETTY_FUNCTION__); \
} \
)
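
A hedged stand-alone analogue of what SCOPE_EXIT_SAFE() does (the real macro logs via tryLogCurrentException): a throwing cleanup is caught and reported instead of escaping the destructor, which would call std::terminate.

#include <cstdio>
#include <functional>

struct SafeGuard
{
    std::function<void()> cleanup;
    ~SafeGuard()
    {
        try { cleanup(); }
        catch (...) { std::fputs("cleanup failed: logged, not rethrown\n", stderr); }
    }
};

int main()
{
    SafeGuard guard{[] { throw 42; }};  /// unsafe cleanup, demoted to a log line
    std::puts("work");
}   /// prints "work", then the log line, and exits normally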

View File

@ -1,45 +1,28 @@
// https://stackoverflow.com/questions/1413445/reading-a-password-from-stdcin
#include <common/setTerminalEcho.h>
#include <common/errnoToString.h>
#include <stdexcept>
#include <cstring>
#include <string>
#ifdef WIN32
#include <windows.h>
#else
#include <termios.h>
#include <unistd.h>
#include <errno.h>
#endif
void setTerminalEcho(bool enable)
{
#ifdef WIN32
auto handle = GetStdHandle(STD_INPUT_HANDLE);
DWORD mode;
if (!GetConsoleMode(handle, &mode))
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + std::to_string(GetLastError()));
/// Obtain terminal attributes,
/// toggle the ECHO flag
/// and set them back.
if (!enable)
mode &= ~ENABLE_ECHO_INPUT;
else
mode |= ENABLE_ECHO_INPUT;
struct termios tty{};
if (!SetConsoleMode(handle, mode))
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + std::to_string(GetLastError()));
#else
struct termios tty;
if (tcgetattr(STDIN_FILENO, &tty))
if (0 != tcgetattr(STDIN_FILENO, &tty))
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString(errno));
if (!enable)
tty.c_lflag &= ~ECHO;
else
tty.c_lflag |= ECHO;
auto ret = tcsetattr(STDIN_FILENO, TCSANOW, &tty);
if (ret)
if (enable)
tty.c_lflag |= ECHO;
else
tty.c_lflag &= ~ECHO;
if (0 != tcsetattr(STDIN_FILENO, TCSANOW, &tty))
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString(errno));
#endif
}
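
A minimal usage sketch for setTerminalEcho() above, the password-prompt case the StackOverflow link describes:

#include <common/setTerminalEcho.h>
#include <iostream>
#include <string>

int main()
{
    std::cout << "Password: " << std::flush;
    setTerminalEcho(false);              /// keystrokes are no longer echoed
    std::string password;
    std::getline(std::cin, password);
    setTerminalEcho(true);               /// restore normal echo
    std::cout << "\nread " << password.size() << " characters\n";
}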

View File

@ -2,8 +2,6 @@
#include <memory>
namespace ext
{
/** Allows making std::shared_ptr from a T with a protected constructor.
*
@ -36,4 +34,3 @@ struct is_shared_ptr<std::shared_ptr<T>>
template <typename T>
inline constexpr bool is_shared_ptr_v = is_shared_ptr<T>::value;
}

View File

@ -4,7 +4,8 @@
#include <type_traits>
#include <utility>
template <class T, class Tag>
template <typename T, typename Tag>
struct StrongTypedef
{
private:
@ -12,6 +13,7 @@ private:
T t;
public:
using UnderlyingType = T;
template <class Enable = typename std::is_copy_constructible<T>::type>
explicit StrongTypedef(const T & t_) : t(t_) {}
template <class Enable = typename std::is_move_constructible<T>::type>
@ -37,14 +39,16 @@ public:
bool operator==(const Self & rhs) const { return t == rhs.t; }
bool operator<(const Self & rhs) const { return t < rhs.t; }
bool operator>(const Self & rhs) const { return t > rhs.t; }
T & toUnderType() { return t; }
const T & toUnderType() const { return t; }
};
namespace std
{
template <class T, class Tag>
template <typename T, typename Tag>
struct hash<StrongTypedef<T, Tag>>
{
size_t operator()(const StrongTypedef<T, Tag> & x) const

View File

@ -1,25 +1,2 @@
include (${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake)
add_executable (date_lut2 date_lut2.cpp)
add_executable (date_lut3 date_lut3.cpp)
add_executable (date_lut_default_timezone date_lut_default_timezone.cpp)
add_executable (local_date_time_comparison local_date_time_comparison.cpp)
add_executable (realloc-perf allocator.cpp)
set(PLATFORM_LIBS ${CMAKE_DL_LIBS})
target_link_libraries (date_lut2 PRIVATE common ${PLATFORM_LIBS})
target_link_libraries (date_lut3 PRIVATE common ${PLATFORM_LIBS})
target_link_libraries (date_lut_default_timezone PRIVATE common ${PLATFORM_LIBS})
target_link_libraries (local_date_time_comparison PRIVATE common)
target_link_libraries (realloc-perf PRIVATE common)
add_check(local_date_time_comparison)
if(USE_GTEST)
add_executable(unit_tests_libcommon gtest_json_test.cpp gtest_strong_typedef.cpp gtest_find_symbols.cpp)
target_link_libraries(unit_tests_libcommon PRIVATE common ${GTEST_MAIN_LIBRARIES} ${GTEST_LIBRARIES})
add_check(unit_tests_libcommon)
endif()
add_executable (dump_variable dump_variable.cpp)
target_link_libraries (dump_variable PRIVATE clickhouse_common_io)

View File

@ -1,47 +0,0 @@
#include <cstdlib>
#include <cstring>
#include <vector>
#include <thread>
void thread_func()
{
for (size_t i = 0; i < 100; ++i)
{
size_t size = 4096;
void * buf = malloc(size);
if (!buf)
abort();
memset(buf, 0, size);
while (size < 1048576)
{
size_t next_size = size * 4;
void * new_buf = realloc(buf, next_size);
if (!new_buf)
abort();
buf = new_buf;
memset(reinterpret_cast<char*>(buf) + size, 0, next_size - size);
size = next_size;
}
free(buf);
}
}
int main(int, char **)
{
std::vector<std::thread> threads(16);
for (size_t i = 0; i < 1000; ++i)
{
for (auto & thread : threads)
thread = std::thread(thread_func);
for (auto & thread : threads)
thread.join();
}
return 0;
}

View File

@ -1,53 +0,0 @@
#include <iostream>
#include <cstring>
#include <common/DateLUT.h>
static std::string toString(time_t Value)
{
struct tm tm;
char buf[96];
localtime_r(&Value, &tm);
snprintf(buf, sizeof(buf), "%04d-%02d-%02d %02d:%02d:%02d",
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
return buf;
}
static time_t orderedIdentifierToDate(unsigned value)
{
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_year = value / 10000 - 1900;
tm.tm_mon = (value % 10000) / 100 - 1;
tm.tm_mday = value % 100;
tm.tm_isdst = -1;
return mktime(&tm);
}
void loop(time_t begin, time_t end, int step)
{
const auto & date_lut = DateLUT::instance();
for (time_t t = begin; t < end; t += step)
std::cout << toString(t)
<< ", " << toString(date_lut.toTime(t))
<< ", " << date_lut.toHour(t)
<< std::endl;
}
int main(int, char **)
{
loop(orderedIdentifierToDate(20101031), orderedIdentifierToDate(20101101), 15 * 60);
loop(orderedIdentifierToDate(20100328), orderedIdentifierToDate(20100330), 15 * 60);
loop(orderedIdentifierToDate(20141020), orderedIdentifierToDate(20141106), 15 * 60);
return 0;
}

View File

@ -1,62 +0,0 @@
#include <iostream>
#include <cstring>
#include <Poco/Exception.h>
#include <common/DateLUT.h>
static std::string toString(time_t Value)
{
struct tm tm;
char buf[96];
localtime_r(&Value, &tm);
snprintf(buf, sizeof(buf), "%04d-%02d-%02d %02d:%02d:%02d",
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
return buf;
}
static time_t orderedIdentifierToDate(unsigned value)
{
struct tm tm;
memset(&tm, 0, sizeof(tm));
tm.tm_year = value / 10000 - 1900;
tm.tm_mon = (value % 10000) / 100 - 1;
tm.tm_mday = value % 100;
tm.tm_isdst = -1;
return mktime(&tm);
}
void loop(time_t begin, time_t end, int step)
{
const auto & date_lut = DateLUT::instance();
for (time_t t = begin; t < end; t += step)
{
time_t t2 = date_lut.makeDateTime(date_lut.toYear(t), date_lut.toMonth(t), date_lut.toDayOfMonth(t),
date_lut.toHour(t), date_lut.toMinute(t), date_lut.toSecond(t));
std::string s1 = toString(t);
std::string s2 = toString(t2);
std::cerr << s1 << ", " << s2 << std::endl;
if (s1 != s2)
throw Poco::Exception("Test failed.");
}
}
int main(int, char **)
{
loop(orderedIdentifierToDate(20101031), orderedIdentifierToDate(20101101), 15 * 60);
loop(orderedIdentifierToDate(20100328), orderedIdentifierToDate(20100330), 15 * 60);
return 0;
}

View File

@ -1,31 +0,0 @@
#include <iostream>
#include <common/DateLUT.h>
#include <Poco/Exception.h>
int main(int, char **)
{
try
{
const auto & date_lut = DateLUT::instance();
std::cout << "Detected default timezone: `" << date_lut.getTimeZone() << "'" << std::endl;
time_t now = time(nullptr);
std::cout << "Current time: " << date_lut.timeToString(now)
<< ", UTC: " << DateLUT::instance("UTC").timeToString(now) << std::endl;
}
catch (const Poco::Exception & e)
{
std::cerr << e.displayText() << std::endl;
return 1;
}
catch (std::exception & e)
{
std::cerr << "std::exception: " << e.what() << std::endl;
return 2;
}
catch (...)
{
std::cerr << "Some exception" << std::endl;
return 3;
}
return 0;
}

View File

@ -1,656 +0,0 @@
#include <vector>
#include <string>
#include <exception>
#include <common/JSON.h>
#include <boost/range/irange.hpp>
using namespace std::literals::string_literals;
#include <gtest/gtest.h>
enum class ResultType
{
Return,
Throw
};
struct GetStringTestRecord
{
const char * input;
ResultType result_type;
const char * result;
};
TEST(JSONSuite, SimpleTest)
{
std::vector<GetStringTestRecord> test_data =
{
{ R"("name")", ResultType::Return, "name" },
{ R"("Вафельница Vitek WX-1102 FL")", ResultType::Return, "Вафельница Vitek WX-1102 FL" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("184509")", ResultType::Return, "184509" },
{ R"("category")", ResultType::Return, "category" },
{ R"("Все для детей/Детская техника/Vitek")", ResultType::Return, "Все для детей/Детская техника/Vitek" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("В наличии")", ResultType::Return, "В наличии" },
{ R"("price")", ResultType::Return, "price" },
{ R"("2390.00")", ResultType::Return, "2390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("Карточка")", ResultType::Return, "Карточка" },
{ R"("position")", ResultType::Return, "position" },
{ R"("detail")", ResultType::Return, "detail" },
{ R"("actionField")", ResultType::Return, "actionField" },
{ R"("list")", ResultType::Return, "list" },
{ R"("http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc")", ResultType::Return, "http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc" },
{ R"("action")", ResultType::Return, "action" },
{ R"("detail")", ResultType::Return, "detail" },
{ R"("products")", ResultType::Return, "products" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Вафельница Vitek WX-1102 FL")", ResultType::Return, "Вафельница Vitek WX-1102 FL" },
{ R"("id")", ResultType::Return, "id" },
{ R"("184509")", ResultType::Return, "184509" },
{ R"("price")", ResultType::Return, "price" },
{ R"("2390.00")", ResultType::Return, "2390.00" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("Vitek")", ResultType::Return, "Vitek" },
{ R"("category")", ResultType::Return, "category" },
{ R"("Все для детей/Детская техника/Vitek")", ResultType::Return, "Все для детей/Детская техника/Vitek" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("В наличии")", ResultType::Return, "В наличии" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("isAuthorized")", ResultType::Return, "isAuthorized" },
{ R"("isSubscriber")", ResultType::Return, "isSubscriber" },
{ R"("postType")", ResultType::Return, "postType" },
{ R"("Новости")", ResultType::Return, "Новости" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("Электроплита GEFEST Брест ЭПНД 5140-01 0001")", ResultType::Return, "Электроплита GEFEST Брест ЭПНД 5140-01 0001" },
{ R"("price")", ResultType::Return, "price" },
{ R"("currencyCode")", ResultType::Return, "currencyCode" },
{ R"("RUB")", ResultType::Return, "RUB" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("trash_login")", ResultType::Return, "trash_login" },
{ R"("novikoff")", ResultType::Return, "novikoff" },
{ R"("trash_cat_link")", ResultType::Return, "trash_cat_link" },
{ R"("progs")", ResultType::Return, "progs" },
{ R"("trash_parent_link")", ResultType::Return, "trash_parent_link" },
{ R"("content")", ResultType::Return, "content" },
{ R"("trash_posted_parent")", ResultType::Return, "trash_posted_parent" },
{ R"("content.01.2016")", ResultType::Return, "content.01.2016" },
{ R"("trash_posted_cat")", ResultType::Return, "trash_posted_cat" },
{ R"("progs.01.2016")", ResultType::Return, "progs.01.2016" },
{ R"("trash_virus_count")", ResultType::Return, "trash_virus_count" },
{ R"("trash_is_android")", ResultType::Return, "trash_is_android" },
{ R"("trash_is_wp8")", ResultType::Return, "trash_is_wp8" },
{ R"("trash_is_ios")", ResultType::Return, "trash_is_ios" },
{ R"("trash_posted")", ResultType::Return, "trash_posted" },
{ R"("01.2016")", ResultType::Return, "01.2016" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("merchantId")", ResultType::Return, "merchantId" },
{ R"("13694_49246")", ResultType::Return, "13694_49246" },
{ R"("cps-source")", ResultType::Return, "cps-source" },
{ R"("wargaming")", ResultType::Return, "wargaming" },
{ R"("cps_provider")", ResultType::Return, "cps_provider" },
{ R"("default")", ResultType::Return, "default" },
{ R"("errorReason")", ResultType::Return, "errorReason" },
{ R"("no errors")", ResultType::Return, "no errors" },
{ R"("scid")", ResultType::Return, "scid" },
{ R"("isAuthPayment")", ResultType::Return, "isAuthPayment" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("rubric")", ResultType::Return, "rubric" },
{ R"("")", ResultType::Return, "" },
{ R"("rubric")", ResultType::Return, "rubric" },
{ R"("Мир")", ResultType::Return, "Мир" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("__ym")", ResultType::Return, "__ym" },
{ R"("ecommerce")", ResultType::Return, "ecommerce" },
{ R"("impressions")", ResultType::Return, "impressions" },
{ R"("id")", ResultType::Return, "id" },
{ R"("863813")", ResultType::Return, "863813" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Happy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Happy, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("863839")", ResultType::Return, "863839" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("863847")", ResultType::Return, "863847" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911480")", ResultType::Return, "911480" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Puppy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Puppy, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911484")", ResultType::Return, "911484" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Little bears, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Little bears, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911489")", ResultType::Return, "911489" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж")", ResultType::Return, "Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911496")", ResultType::Return, "911496" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Pretty, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Pretty, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911504")", ResultType::Return, "911504" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911508")", ResultType::Return, "911508" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Kittens, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Kittens, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911512")", ResultType::Return, "911512" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911516")", ResultType::Return, "911516" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911520")", ResultType::Return, "911520" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911524")", ResultType::Return, "911524" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("911528")", ResultType::Return, "911528" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Футболка детская 3D Turtle, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Turtle, возраст 1-2 года, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("390.00")", ResultType::Return, "390.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("888616")", ResultType::Return, "888616" },
{ R"("name")", ResultType::Return, "name" },
{ "\"3Д Футболка мужская \\\"Collorista\\\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж\"", ResultType::Return, "3Д Футболка мужская \"Collorista\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Одежда и обувь/Мужская одежда/Футболки/")", ResultType::Return, "/Одежда и обувь/Мужская одежда/Футболки/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("406.60")", ResultType::Return, "406.60" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("913361")", ResultType::Return, "913361" },
{ R"("name")", ResultType::Return, "name" },
{ R"("3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("470.00")", ResultType::Return, "470.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("913364")", ResultType::Return, "913364" },
{ R"("name")", ResultType::Return, "name" },
{ R"("3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("470.00")", ResultType::Return, "470.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("913367")", ResultType::Return, "913367" },
{ R"("name")", ResultType::Return, "name" },
{ R"("3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("470.00")", ResultType::Return, "470.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("913385")", ResultType::Return, "913385" },
{ R"("name")", ResultType::Return, "name" },
{ R"("3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("470.00")", ResultType::Return, "470.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("id")", ResultType::Return, "id" },
{ R"("913391")", ResultType::Return, "913391" },
{ R"("name")", ResultType::Return, "name" },
{ R"("3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж" },
{ R"("category")", ResultType::Return, "category" },
{ R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("")", ResultType::Return, "" },
{ R"("price")", ResultType::Return, "price" },
{ R"("470.00")", ResultType::Return, "470.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
{ R"("position")", ResultType::Return, "position" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
{ R"("usertype")", ResultType::Return, "usertype" },
{ R"("visitor")", ResultType::Return, "visitor" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("__ym")", ResultType::Return, "__ym" },
{ R"("ecommerce")", ResultType::Return, "ecommerce" },
{ R"("impressions")", ResultType::Return, "impressions" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("experiments")", ResultType::Return, "experiments" },
{ R"("lang")", ResultType::Return, "lang" },
{ R"("ru")", ResultType::Return, "ru" },
{ R"("los_portal")", ResultType::Return, "los_portal" },
{ R"("los_level")", ResultType::Return, "los_level" },
{ R"("none")", ResultType::Return, "none" },
{ R"("__ym")", ResultType::Return, "__ym" },
{ R"("ecommerce")", ResultType::Return, "ecommerce" },
{ R"("currencyCode")", ResultType::Return, "currencyCode" },
{ R"("RUR")", ResultType::Return, "RUR" },
{ R"("impressions")", ResultType::Return, "impressions" },
{ R"("name")", ResultType::Return, "name" },
{ R"("Чайник электрический Mystery MEK-1627, белый")", ResultType::Return, "Чайник электрический Mystery MEK-1627, белый" },
{ R"("brand")", ResultType::Return, "brand" },
{ R"("Mystery")", ResultType::Return, "Mystery" },
{ R"("id")", ResultType::Return, "id" },
{ R"("187180")", ResultType::Return, "187180" },
{ R"("category")", ResultType::Return, "category" },
{ R"("Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery")", ResultType::Return, "Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery" },
{ R"("variant")", ResultType::Return, "variant" },
{ R"("В наличии")", ResultType::Return, "В наличии" },
{ R"("price")", ResultType::Return, "price" },
{ R"("1630.00")", ResultType::Return, "1630.00" },
{ R"("list")", ResultType::Return, "list" },
{ R"("Карточка")", ResultType::Return, "Карточка" },
{ R"("position")", ResultType::Return, "position" },
{ R"("detail")", ResultType::Return, "detail" },
{ R"("actionField")", ResultType::Return, "actionField" },
{ R"("list")", ResultType::Return, "list" },
{ "\0\"", ResultType::Throw, "JSON: expected \", got \0" },
{ "\"/igrushki/konstruktory\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Творчество/Рисование/Инструменты и кра\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\0t", ResultType::Throw, "JSON: expected \", got \0" },
{ "\"/Хозтовары/Хранение вещей и организа\xD1\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Хозтовары/Товары для стир\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"li\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/734859/samolet-radioupravlyaemyy-istrebitel-rabotaet-o\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/kosmetika-i-parfyum/parfyumeriya/mu\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/ko\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "", ResultType::Throw, "JSON: begin >= end." },
{ "\"/stroitelstvo-i-remont/stroit\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/stroitelstvo-i-remont/stroitelnyy-instrument/av\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/s\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Строительство и ремонт/Строительный инструмент/Изм\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/avto/soputstvuy\0l", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/str\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Отвертка 2 в 1 \\\"TUNDRA basic\\\" 5х75 мм (+,-) \0\xFF", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/stroitelstvo-i-remont/stroitelnyy-instrument/avtoinstrumen\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Мелкая бытовая техника/Мелки\xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Пряжа \\\"Бамбук стрейч\\0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Карандаш чёрнографитны\xD0\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0l", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/1071547/karandash-chernografitnyy-volshebstvo-nv-kruglyy-d-7-2mm-dl-176mm-plast-tuba/\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"ca\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"ca\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/1165424/chipbord-vyrubnoy-dlya-skrapbukinga-malyshi-mikki-maus-disney-bebi\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/posuda/kuhonnye-prinadlezhnosti-i-i\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Канцтовары/Ежедневники и блокн\xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/kanctovary/ezhednevniki-i-blok\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Стакан \xD0\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Набор бумаги для скрапбукинга \\\"Мои первый годик\\\": Микки Маус, Дисней бэби, 12 листов 29.5 х 29.5 см, 160\0\x80", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"c\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Органайзер для хранения аксессуаров, \0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"quantity\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Сменный блок для тетрадей на кольцах А5, 160 листов клетка, офсет \xE2\x84\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Сувениры/Ф\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"\0\"", ResultType::Return, "\0" },
{ "\"\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"va\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"ca\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"В \0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/letnie-tovary/z\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Посудомоечная машина Ha\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Крупная бытов\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Полочная акустическая система Magnat Needl\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"brand\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"pos\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"c\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"var\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Телевизоры и видеотехника/Всё для домашних кинотеатр\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Флеш-диск Transcend JetFlash 620 8GB (TS8GJF62\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Табурет Мег\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"variant\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Катал\xD0\0\"", ResultType::Return, "Катал\xD0\0" },
{ "\"К\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Полочная акустическая система Magnat Needl\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"brand\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"pos\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"c\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"17\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/igrushki/razvivayusc\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Ключница \\\"\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Игр\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Игрушки/Игрушки для девочек/Игровые модули дл\xD1\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Крупная бытовая техника/Стиральные машины/С фронт\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\0 ", ResultType::Throw, "JSON: expected \", got \0" },
{ "\"Светодиодная лента SMD3528, 5 м. IP33, 60LED, зеленый, 4,8W/мет\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Сантехника/Мебель для ванных комнат/Стол\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\0o", ResultType::Throw, "JSON: expected \", got \0" },
{ "\"/igrushki/konstruktory\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/posuda/kuhonnye-prinadlezhnosti-i-instrumenty/kuhonnye-pr\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Творчество/Рисование/Инструменты и кра\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\0 ", ResultType::Throw, "JSON: expected \", got \0" },
{ "\"/Хозтовары/Хранение вещей и организа\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Хозтовары/Товары для стир\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"li\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/igrushki/igrus\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/734859/samolet-radioupravlyaemyy-istrebitel-rabotaet-o\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/kosmetika-i-parfyum/parfyumeriya/mu\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/ko\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/avto/avtomobilnyy\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/stroitelstvo-i-remont/stroit\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/stroitelstvo-i-remont/stroitelnyy-instrument/av\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/s\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Строительство и ремонт/Строительный инструмент/Изм\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/avto/soputstvuy\0\"", ResultType::Return, "/avto/soputstvuy\0" },
{ "\"/str\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Отвертка 2 в 1 \\\"TUNDRA basic\\\" 5х75 мм (+,-) \0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/stroitelstvo-i-remont/stroitelnyy-instrument/avtoinstrumen\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Чайник электрический Vitesse\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Мелкая бытовая техника/Мелки\xD0\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Пряжа \\\"Бамбук стрейч\\0о", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Карандаш чёрнографитны\xD0\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0\"", ResultType::Return, "/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0" },
{ "\"/1071547/karandash-chernografitnyy-volshebstvo-nv-kruglyy-d-7-2mm-dl-176mm-plast-tuba/\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"ca\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Подаро\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Средство для прочис\xD1\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"i\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/p\0\"", ResultType::Return, "/p\0" },
{ "\"/Сувениры/Магниты, н\xD0\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Дерев\xD0\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/prazdniki/svadba/svadebnaya-c\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Канцт\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Праздники/То\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"v\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Косметика \xD0\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Спорт и отдых/Настольные игры/Покер, руле\xD1\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"categ\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/retailr\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/retailrocket\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Ежедневник недат А5 140л кл,ляссе,обл пв\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/432809/ezhednevnik-organayzer-sredniy-s-remeshkom-na-knopke-v-oblozhke-kalkulyator-kalendar-do-\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/1165424/chipbord-vyrubnoy-dlya-skrapbukinga-malyshi-mikki-maus-disney-bebi\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/posuda/kuhonnye-prinadlezhnosti-i-i\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/Канцтовары/Ежедневники и блокн\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"/kanctovary/ezhednevniki-i-blok\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Стакан \xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"Набор бумаги для скрапбукинга \\\"Мои первый годик\\\": Микки Маус, Дисней бэби, 12 листов 29.5 х 29.5 см, 160\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
{ "\"c\0\"", ResultType::Return, "c\0" },
};
for (auto i : boost::irange(0, 1/*00000*/)) /// One pass here; the commented-out zeros suggest a 100000-iteration stress variant.
{
static_cast<void>(i);
for (auto & r : test_data)
{
try
{
JSON j(r.input, r.input + strlen(r.input));
ASSERT_EQ(j.getString(), r.result);
ASSERT_TRUE(r.result_type == ResultType::Return);
}
catch (JSONException & e)
{
ASSERT_TRUE(r.result_type == ResultType::Throw);
ASSERT_EQ(e.message(), r.result);
}
}
}
}
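For reference, a minimal sketch of what each table row exercises (assuming the same JSON and JSONException classes this test includes):

/// Hypothetical standalone use of the parser under test.
const char * input = "\"name\"";
JSON json(input, input + strlen(input));
/// getString() returns the unescaped string body; on malformed input it
/// throws JSONException with messages like those in the Throw rows above.
std::string value = json.getString(); /// "name"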

View File

@ -1,29 +0,0 @@
#include <iostream>
#include <stdexcept>
#include <common/LocalDateTime.h>
void fillStackWithGarbage()
{
volatile uint64_t a = 0xAABBCCDDEEFF0011ULL;
volatile uint64_t b = 0x2233445566778899ULL;
std::cout << a + b << '\n';
}
void checkComparison()
{
LocalDateTime a("2018-07-18 01:02:03");
LocalDateTime b("2018-07-18 01:02:03");
if (a != b)
throw std::runtime_error("Test failed");
}
int main(int, char **)
{
fillStackWithGarbage();
checkComparison();
return 0;
}

View File

@ -1,13 +1,15 @@
#pragma once
#include <stdexcept>
/// Throw DB::Exception-like exception before its definition.
/// DB::Exception derived from Poco::Exception derived from std::exception.
/// DB::Exception generally cought as Poco::Exception. std::exception generally has other catch blocks and could lead to other outcomes.
/// DB::Exception generally caught as Poco::Exception. std::exception generally has other catch blocks and could lead to other outcomes.
/// DB::Exception is not defined yet. It'd be better to throw Poco::Exception, but we do not want to include any big header here, even <string>.
/// So we throw a std::exception instead, in the hope that its catch block is the same as the DB::Exception one.
template <typename T>
inline void throwError(const T & err)
[[noreturn]] inline void throwError(const T & err)
{
throw std::runtime_error(err);
}
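A hypothetical call site (a sketch, not part of this diff) showing the intended use: headers that cannot yet see DB::Exception can still raise a catchable fatal error.

#include "throwError.h"

inline int parseDigit(char c)
{
    if (c < '0' || c > '9')
        throwError("Not a digit"); /// [[noreturn]] silences fall-through warnings here
    return c - '0';
}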

View File

@ -2,7 +2,7 @@
#include <time.h>
#if defined (OS_DARWIN)
#if defined (OS_DARWIN) || defined (OS_SUNOS)
# define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC
#elif defined (OS_FREEBSD)
# define CLOCK_MONOTONIC_COARSE CLOCK_MONOTONIC_FAST
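With the macro above in place, callers can request the coarse clock unconditionally; a sketch (clock_gettime is POSIX):

#include <time.h>

timespec ts;
/// Resolves to CLOCK_MONOTONIC on Darwin/SunOS and CLOCK_MONOTONIC_FAST on FreeBSD.
clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);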

View File

@ -13,7 +13,12 @@ using char8_t = unsigned char;
#endif
/// This is needed for stricter aliasing. https://godbolt.org/z/xpJBSb https://stackoverflow.com/a/57453713
#if !defined(PVS_STUDIO) /// But PVS-Studio does not treat it correctly.
using UInt8 = char8_t;
#else
using UInt8 = uint8_t;
#endif
using UInt16 = uint16_t;
using UInt32 = uint32_t;
using UInt64 = uint64_t;
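A sketch (illustrative, not from the diff) of why the char8_t choice matters: under C++20, char8_t does not get the aliasing exemption that unsigned char has, so the compiler may keep x in a register across the byte store.

UInt32 f(UInt32 x, UInt8 * bytes)
{
    bytes[0] = 0; /// When UInt8 is char8_t, this store cannot alias x.
    return x + x; /// So x need not be reloaded from memory.
}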

View File

@ -58,9 +58,11 @@ public:
using signed_base_type = int64_t;
// ctors
constexpr integer() noexcept;
constexpr integer() noexcept = default;
template <typename T>
constexpr integer(T rhs) noexcept;
template <typename T>
constexpr integer(std::initializer_list<T> il) noexcept;
@ -107,10 +109,7 @@ public:
constexpr explicit operator bool() const noexcept;
template <class T>
using __integral_not_wide_integer_class = typename std::enable_if<std::is_arithmetic<T>::value, T>::type;
template <class T, class = __integral_not_wide_integer_class<T>>
template <typename T, typename = std::enable_if_t<std::is_arithmetic_v<T>, T>>
constexpr operator T() const noexcept;
constexpr operator long double() const noexcept;
@ -119,25 +118,27 @@ public:
struct _impl;
base_type items[_impl::item_count];
private:
template <size_t Bits2, typename Signed2>
friend class integer;
friend class std::numeric_limits<integer<Bits, signed>>;
friend class std::numeric_limits<integer<Bits, unsigned>>;
base_type items[_impl::item_count];
};
template <typename T>
static constexpr bool ArithmeticConcept() noexcept;
template <class T1, class T2>
using __only_arithmetic = typename std::enable_if<ArithmeticConcept<T1>() && ArithmeticConcept<T2>()>::type;
using _only_arithmetic = typename std::enable_if<ArithmeticConcept<T1>() && ArithmeticConcept<T2>()>::type;
template <typename T>
static constexpr bool IntegralConcept() noexcept;
template <class T, class T2>
using __only_integer = typename std::enable_if<IntegralConcept<T>() && IntegralConcept<T2>()>::type;
using _only_integer = typename std::enable_if<IntegralConcept<T>() && IntegralConcept<T2>()>::type;
// Unary operators
template <size_t Bits, typename Signed>
@ -153,54 +154,55 @@ constexpr integer<Bits, Signed> operator+(const integer<Bits, Signed> & lhs) noe
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
operator*(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
std::common_type_t<Arithmetic, Arithmetic2> constexpr operator*(const Arithmetic & rhs, const Arithmetic2 & lhs);
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
operator/(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
std::common_type_t<Arithmetic, Arithmetic2> constexpr operator/(const Arithmetic & rhs, const Arithmetic2 & lhs);
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
operator+(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
std::common_type_t<Arithmetic, Arithmetic2> constexpr operator+(const Arithmetic & rhs, const Arithmetic2 & lhs);
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
operator-(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
std::common_type_t<Arithmetic, Arithmetic2> constexpr operator-(const Arithmetic & rhs, const Arithmetic2 & lhs);
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
operator%(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Integral, typename Integral2, class = __only_integer<Integral, Integral2>>
template <typename Integral, typename Integral2, class = _only_integer<Integral, Integral2>>
std::common_type_t<Integral, Integral2> constexpr operator%(const Integral & rhs, const Integral2 & lhs);
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
operator&(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Integral, typename Integral2, class = __only_integer<Integral, Integral2>>
template <typename Integral, typename Integral2, class = _only_integer<Integral, Integral2>>
std::common_type_t<Integral, Integral2> constexpr operator&(const Integral & rhs, const Integral2 & lhs);
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
operator|(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Integral, typename Integral2, class = __only_integer<Integral, Integral2>>
template <typename Integral, typename Integral2, class = _only_integer<Integral, Integral2>>
std::common_type_t<Integral, Integral2> constexpr operator|(const Integral & rhs, const Integral2 & lhs);
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>> constexpr
operator^(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Integral, typename Integral2, class = __only_integer<Integral, Integral2>>
template <typename Integral, typename Integral2, class = _only_integer<Integral, Integral2>>
std::common_type_t<Integral, Integral2> constexpr operator^(const Integral & rhs, const Integral2 & lhs);
// TODO: Integral
template <size_t Bits, typename Signed>
constexpr integer<Bits, Signed> operator<<(const integer<Bits, Signed> & lhs, int n) noexcept;
template <size_t Bits, typename Signed>
constexpr integer<Bits, Signed> operator>>(const integer<Bits, Signed> & lhs, int n) noexcept;
@ -217,32 +219,32 @@ constexpr integer<Bits, Signed> operator>>(const integer<Bits, Signed> & lhs, In
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
constexpr bool operator<(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
constexpr bool operator<(const Arithmetic & rhs, const Arithmetic2 & lhs);
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
constexpr bool operator>(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
constexpr bool operator>(const Arithmetic & rhs, const Arithmetic2 & lhs);
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
constexpr bool operator<=(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
constexpr bool operator<=(const Arithmetic & rhs, const Arithmetic2 & lhs);
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
constexpr bool operator>=(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
constexpr bool operator>=(const Arithmetic & rhs, const Arithmetic2 & lhs);
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
constexpr bool operator==(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
constexpr bool operator==(const Arithmetic & rhs, const Arithmetic2 & lhs);
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
constexpr bool operator!=(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs);
template <typename Arithmetic, typename Arithmetic2, class = __only_arithmetic<Arithmetic, Arithmetic2>>
template <typename Arithmetic, typename Arithmetic2, class = _only_arithmetic<Arithmetic, Arithmetic2>>
constexpr bool operator!=(const Arithmetic & rhs, const Arithmetic2 & lhs);
}
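Taken together, these declarations make integer<Bits, Signed> usable much like a builtin; a hypothetical example (ClickHouse exposes aliases such as UInt256 elsewhere):

using UInt256 = wide::integer<256, unsigned>;

UInt256 a = 1;
UInt256 b = (a << 200) + 42; /// operator<< and the heterogeneous operator+ above
bool greater = b > a;        /// dispatches to _impl::operator_greater
UInt256 q = b / 3;           /// operator/ over the common type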

View File

@ -5,6 +5,7 @@
/// (See at http://www.boost.org/LICENSE_1_0.txt)
#include "throwError.h"
#include <cmath>
#include <cfloat>
#include <cassert>
@ -81,7 +82,7 @@ public:
res.items[T::_impl::big(0)] = std::numeric_limits<typename wide::integer<Bits, Signed>::signed_base_type>::min();
return res;
}
return 0;
return wide::integer<Bits, Signed>(0);
}
static constexpr wide::integer<Bits, Signed> max() noexcept
@ -176,7 +177,7 @@ struct integer<Bits, Signed>::_impl
constexpr static bool is_negative(const integer<B, T> & n) noexcept
{
if constexpr (std::is_same_v<T, signed>)
return static_cast<signed_base_type>(n.items[big(0)]) < 0;
return static_cast<signed_base_type>(n.items[integer<B, T>::_impl::big(0)]) < 0;
else
return false;
}
@ -193,40 +194,36 @@ struct integer<Bits, Signed>::_impl
template <size_t B, class S>
constexpr static integer<B, S> make_positive(const integer<B, S> & n) noexcept
{
return is_negative(n) ? operator_unary_minus(n) : n;
return is_negative(n) ? integer<B, S>(operator_unary_minus(n)) : n;
}
template <typename T>
__attribute__((no_sanitize("undefined"))) constexpr static auto to_Integral(T f) noexcept
{
if constexpr (std::is_same_v<T, __int128>)
return f;
else if constexpr (std::is_signed_v<T>)
if constexpr (std::is_signed_v<T>)
return static_cast<int64_t>(f);
else
return static_cast<uint64_t>(f);
}
template <typename Integral>
constexpr static void wide_integer_from_bultin(integer<Bits, Signed> & self, Integral rhs) noexcept
constexpr static void wide_integer_from_builtin(integer<Bits, Signed> & self, Integral rhs) noexcept
{
self.items[0] = _impl::to_Integral(rhs);
if constexpr (std::is_same_v<Integral, __int128>)
self.items[1] = rhs >> base_bits;
static_assert(sizeof(Integral) <= sizeof(base_type));
constexpr const unsigned start = (sizeof(Integral) == 16) ? 2 : 1;
self.items[0] = _impl::to_Integral(rhs);
if constexpr (std::is_signed_v<Integral>)
{
if (rhs < 0)
{
for (unsigned i = start; i < item_count; ++i)
for (size_t i = 1; i < item_count; ++i)
self.items[i] = -1;
return;
}
}
for (unsigned i = start; i < item_count; ++i)
for (size_t i = 1; i < item_count; ++i)
self.items[i] = 0;
}
@ -239,7 +236,8 @@ struct integer<Bits, Signed>::_impl
* a_(n - 1) = a_n * max_int + b2, a_n <= max_int <- base case.
*/
template <class T>
constexpr static void set_multiplier(integer<Bits, Signed> & self, T t) noexcept {
constexpr static void set_multiplier(integer<Bits, Signed> & self, T t) noexcept
{
constexpr uint64_t max_int = std::numeric_limits<uint64_t>::max();
/// Implementation-specific behaviour on overflow (if we don't check here, a stack overflow will be triggered in bigint_cast).
@ -249,20 +247,21 @@ struct integer<Bits, Signed>::_impl
return;
}
const T alpha = t / max_int;
const T alpha = t / static_cast<T>(max_int);
if (alpha <= max_int)
if (alpha <= static_cast<T>(max_int))
self = static_cast<uint64_t>(alpha);
else // max(double) / 2^64 will surely contain less than 52 precision bits, so speed up computations.
set_multiplier<double>(self, alpha);
self *= max_int;
self += static_cast<uint64_t>(t - alpha * max_int); // += b_i
self += static_cast<uint64_t>(t - floor(alpha) * static_cast<T>(max_int)); // += b_i
}
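/// Worked example of the recurrence above (illustrative, not from the diff):
/// for t ≈ 3.5 * 2^64, alpha = t / max_int ≈ 3.5 <= max_int, so the truncating
/// cast gives self = 3; then self *= max_int, and the final += adds back the
/// low digit t - floor(alpha) * max_int ≈ 2^63.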
constexpr static void wide_integer_from_bultin(integer<Bits, Signed>& self, double rhs) noexcept {
constexpr static void wide_integer_from_builtin(integer<Bits, Signed> & self, double rhs) noexcept
{
constexpr int64_t max_int = std::numeric_limits<int64_t>::max();
constexpr int64_t min_int = std::numeric_limits<int64_t>::min();
constexpr int64_t min_int = std::numeric_limits<int64_t>::lowest();
/// There are values in int64 that have more than 53 significant bits (in terms of double
/// representation). Such values, being promoted to double, are rounded up or down. If they are rounded up,
@ -271,11 +270,15 @@ struct integer<Bits, Signed>::_impl
/// As to_Integral does a static_cast to int64_t, it may result in UB.
/// The necessary check here is that long double has enough significant (mantissa) bits to store the
/// int64_t max value precisely.
static_assert(LDBL_MANT_DIG >= 64,
"On your system long double has less than 64 precision bits,"
"which may result in UB when initializing double from int64_t");
if ((rhs > 0 && rhs < max_int) || (rhs < 0 && rhs > min_int))
// TODO Be compatible with Apple aarch64
#if not (defined(__APPLE__) && defined(__aarch64__))
static_assert(LDBL_MANT_DIG >= 64,
"On your system long double has less than 64 precision bits, "
"which may result in UB when initializing double from int64_t");
#endif
if (rhs > static_cast<long double>(min_int) && rhs < static_cast<long double>(max_int))
{
self = static_cast<int64_t>(rhs);
return;
@ -379,13 +382,13 @@ struct integer<Bits, Signed>::_impl
if (bit_shift)
lhs.items[big(items_shift)] |= std::numeric_limits<base_type>::max() << (base_bits - bit_shift);
for (unsigned i = item_count - items_shift; i < items_shift; ++i)
lhs.items[little(i)] = std::numeric_limits<base_type>::max();
for (unsigned i = 0; i < items_shift; ++i)
lhs.items[big(i)] = std::numeric_limits<base_type>::max();
}
else
{
for (unsigned i = item_count - items_shift; i < items_shift; ++i)
lhs.items[little(i)] = 0;
for (unsigned i = 0; i < items_shift; ++i)
lhs.items[big(i)] = 0;
}
return lhs;
@ -393,23 +396,23 @@ struct integer<Bits, Signed>::_impl
private:
template <typename T>
constexpr static base_type get_item(const T & x, unsigned number)
constexpr static base_type get_item(const T & x, unsigned idx)
{
if constexpr (IsWideInteger<T>::value)
{
if (number < T::_impl::item_count)
return x.items[number];
if (idx < T::_impl::item_count)
return x.items[idx];
return 0;
}
else
{
if constexpr (sizeof(T) <= sizeof(base_type))
{
if (!number)
if (0 == idx)
return x;
}
else if (number * sizeof(base_type) < sizeof(T))
return x >> (number * base_bits); // & std::numeric_limits<base_type>::max()
else if (idx * sizeof(base_type) < sizeof(T))
return x >> (idx * base_bits); // & std::numeric_limits<base_type>::max()
return 0;
}
}
@ -435,7 +438,7 @@ private:
for (unsigned i = 1; i < item_count; ++i)
{
if (underflows[i-1])
if (underflows[i - 1])
{
base_type & res_item = res.items[little(i)];
if (res_item == 0)
@ -468,7 +471,7 @@ private:
for (unsigned i = 1; i < item_count; ++i)
{
if (overflows[i-1])
if (overflows[i - 1])
{
base_type & res_item = res.items[little(i)];
++res_item;
@ -528,6 +531,17 @@ private:
res.items[little(2)] = r12 >> 64;
return res;
}
else if constexpr (Bits == 128 && sizeof(base_type) == 8)
{
using CompilerUInt128 = unsigned __int128;
CompilerUInt128 a = (CompilerUInt128(lhs.items[1]) << 64) + lhs.items[0];
CompilerUInt128 b = (CompilerUInt128(rhs.items[1]) << 64) + rhs.items[0];
CompilerUInt128 c = a * b;
integer<Bits, Signed> res;
res.items[0] = c;
res.items[1] = c >> 64;
return res;
}
else
{
integer<Bits, Signed> res{};
@ -653,7 +667,7 @@ public:
}
template <typename T>
constexpr static bool operator_more(const integer<Bits, Signed> & lhs, const T & rhs) noexcept
constexpr static bool operator_greater(const integer<Bits, Signed> & lhs, const T & rhs) noexcept
{
if constexpr (should_keep_size<T>())
{
@ -673,7 +687,7 @@ public:
else
{
static_assert(IsWideInteger<T>::value);
return std::common_type_t<integer<Bits, Signed>, T>::_impl::operator_more(T(lhs), rhs);
return std::common_type_t<integer<Bits, Signed>, T>::_impl::operator_greater(T(lhs), rhs);
}
}
@ -760,7 +774,6 @@ public:
}
}
private:
template <typename T>
constexpr static bool is_zero(const T & x)
{
@ -777,46 +790,65 @@ private:
}
/// Returns the quotient as the result and leaves the remainder in `numerator`.
template <typename T>
constexpr static T divide(T & numerator, T && denominator)
template <size_t Bits2>
constexpr static integer<Bits2, unsigned> divide(integer<Bits2, unsigned> & numerator, integer<Bits2, unsigned> denominator)
{
if (is_zero(denominator))
throwError("divide by zero");
static_assert(std::is_unsigned_v<Signed>);
T & n = numerator;
T & d = denominator;
T x = 1;
T quotient = 0;
while (!operator_more(d, n) && operator_eq(operator_amp(shift_right(d, base_bits * item_count - 1), 1), 0))
if constexpr (Bits == 128 && sizeof(base_type) == 8)
{
x = shift_left(x, 1);
d = shift_left(d, 1);
using CompilerUInt128 = unsigned __int128;
CompilerUInt128 a = (CompilerUInt128(numerator.items[1]) << 64) + numerator.items[0];
CompilerUInt128 b = (CompilerUInt128(denominator.items[1]) << 64) + denominator.items[0];
CompilerUInt128 c = a / b;
integer<Bits, Signed> res;
res.items[0] = c;
res.items[1] = c >> 64;
CompilerUInt128 remainder = a - b * c;
numerator.items[0] = remainder;
numerator.items[1] = remainder >> 64;
return res;
}
while (!operator_eq(x, 0))
if (is_zero(denominator))
throwError("Division by zero");
integer<Bits2, unsigned> x = 1;
integer<Bits2, unsigned> quotient = 0;
while (!operator_greater(denominator, numerator) && is_zero(operator_amp(shift_right(denominator, Bits2 - 1), 1)))
{
if (!operator_more(d, n))
x = shift_left(x, 1);
denominator = shift_left(denominator, 1);
}
while (!is_zero(x))
{
if (!operator_greater(denominator, numerator))
{
n = operator_minus(n, d);
numerator = operator_minus(numerator, denominator);
quotient = operator_pipe(quotient, x);
}
x = shift_right(x, 1);
d = shift_right(d, 1);
denominator = shift_right(denominator, 1);
}
return quotient;
}
public:
template <typename T>
constexpr static auto operator_slash(const integer<Bits, Signed> & lhs, const T & rhs)
{
if constexpr (should_keep_size<T>())
{
integer<Bits, Signed> numerator = make_positive(lhs);
integer<Bits, Signed> quotient = divide(numerator, make_positive(integer<Bits, Signed>(rhs)));
integer<Bits, unsigned> numerator = make_positive(lhs);
integer<Bits, unsigned> denominator = make_positive(integer<Bits, Signed>(rhs));
integer<Bits, unsigned> quotient = integer<Bits, unsigned>::_impl::divide(numerator, std::move(denominator));
if (std::is_same_v<Signed, signed> && is_negative(rhs) != is_negative(lhs))
quotient = operator_unary_minus(quotient);
@ -834,8 +866,9 @@ public:
{
if constexpr (should_keep_size<T>())
{
integer<Bits, Signed> remainder = make_positive(lhs);
divide(remainder, make_positive(integer<Bits, Signed>(rhs)));
integer<Bits, unsigned> remainder = make_positive(lhs);
integer<Bits, unsigned> denominator = make_positive(integer<Bits, Signed>(rhs));
integer<Bits, unsigned>::_impl::divide(remainder, std::move(denominator));
if (std::is_same_v<Signed, signed> && is_negative(lhs))
remainder = operator_unary_minus(remainder);
@ -901,7 +934,7 @@ public:
++c;
}
else
throwError("invalid char from");
throwError("Invalid char from");
}
}
else
@ -909,7 +942,7 @@ public:
while (*c)
{
if (*c < '0' || *c > '9')
throwError("invalid char from");
throwError("Invalid char from");
res = multiply(res, 10U);
res = plus(res, *c - '0');
@ -926,11 +959,6 @@ public:
// Members
template <size_t Bits, typename Signed>
constexpr integer<Bits, Signed>::integer() noexcept
: items{}
{}
template <size_t Bits, typename Signed>
template <typename T>
constexpr integer<Bits, Signed>::integer(T rhs) noexcept
@ -939,7 +967,7 @@ constexpr integer<Bits, Signed>::integer(T rhs) noexcept
if constexpr (IsWideInteger<T>::value)
_impl::wide_integer_from_wide_integer(*this, rhs);
else
_impl::wide_integer_from_bultin(*this, rhs);
_impl::wide_integer_from_builtin(*this, rhs);
}
template <size_t Bits, typename Signed>
@ -952,10 +980,19 @@ constexpr integer<Bits, Signed>::integer(std::initializer_list<T> il) noexcept
if constexpr (IsWideInteger<T>::value)
_impl::wide_integer_from_wide_integer(*this, *il.begin());
else
_impl::wide_integer_from_bultin(*this, *il.begin());
_impl::wide_integer_from_builtin(*this, *il.begin());
}
else if (il.size() == 0)
{
_impl::wide_integer_from_builtin(*this, 0);
}
else
_impl::wide_integer_from_bultin(*this, 0);
{
auto it = il.begin();
for (size_t i = 0; i < _impl::item_count; ++i)
if (it < il.end())
items[i] = *it;
}
}
template <size_t Bits, typename Signed>
@ -970,7 +1007,7 @@ template <size_t Bits, typename Signed>
template <typename T>
constexpr integer<Bits, Signed> & integer<Bits, Signed>::operator=(T rhs) noexcept
{
_impl::wide_integer_from_bultin(*this, rhs);
_impl::wide_integer_from_builtin(*this, rhs);
return *this;
}
@ -1053,7 +1090,7 @@ constexpr integer<Bits, Signed> & integer<Bits, Signed>::operator>>=(int n) noex
{
if (static_cast<size_t>(n) >= Bits)
{
if (is_negative(*this))
if (_impl::is_negative(*this))
*this = -1;
else
*this = 0;
@ -1103,16 +1140,17 @@ template <size_t Bits, typename Signed>
template <class T, class>
constexpr integer<Bits, Signed>::operator T() const noexcept
{
if constexpr (std::is_same_v<T, __int128>)
{
static_assert(Bits >= 128);
return (__int128(items[1]) << 64) | items[0];
}
else
{
static_assert(std::numeric_limits<T>::is_integer);
return items[0];
}
static_assert(std::numeric_limits<T>::is_integer);
/// NOTE: memcpy will suffice, but unfortunately, this function is constexpr.
using UnsignedT = std::make_unsigned_t<T>;
UnsignedT res{};
for (unsigned i = 0; i < _impl::item_count && i < (sizeof(T) + sizeof(base_type) - 1) / sizeof(base_type); ++i)
res += UnsignedT(items[i]) << (sizeof(base_type) * 8 * i);
return res;
}
template <size_t Bits, typename Signed>
@ -1276,7 +1314,7 @@ template <size_t Bits, typename Signed>
constexpr integer<Bits, Signed> operator<<(const integer<Bits, Signed> & lhs, int n) noexcept
{
if (static_cast<size_t>(n) >= Bits)
return 0;
return integer<Bits, Signed>(0);
if (n <= 0)
return lhs;
return integer<Bits, Signed>::_impl::shift_left(lhs, n);
@ -1285,7 +1323,7 @@ template <size_t Bits, typename Signed>
constexpr integer<Bits, Signed> operator>>(const integer<Bits, Signed> & lhs, int n) noexcept
{
if (static_cast<size_t>(n) >= Bits)
return 0;
return integer<Bits, Signed>(0);
if (n <= 0)
return lhs;
return integer<Bits, Signed>::_impl::shift_right(lhs, n);
@ -1305,7 +1343,7 @@ constexpr bool operator<(const Arithmetic & lhs, const Arithmetic2 & rhs)
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
constexpr bool operator>(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs)
{
return std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>>::_impl::operator_more(lhs, rhs);
return std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>>::_impl::operator_greater(lhs, rhs);
}
template <typename Arithmetic, typename Arithmetic2, class>
constexpr bool operator>(const Arithmetic & lhs, const Arithmetic2 & rhs)
@ -1328,7 +1366,7 @@ constexpr bool operator<=(const Arithmetic & lhs, const Arithmetic2 & rhs)
template <size_t Bits, typename Signed, size_t Bits2, typename Signed2>
constexpr bool operator>=(const integer<Bits, Signed> & lhs, const integer<Bits2, Signed2> & rhs)
{
return std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>>::_impl::operator_more(lhs, rhs)
return std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>>::_impl::operator_greater(lhs, rhs)
|| std::common_type_t<integer<Bits, Signed>, integer<Bits2, Signed2>>::_impl::operator_eq(lhs, rhs);
}
template <typename Arithmetic, typename Arithmetic2, class>
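The new 128-bit fast paths above hand the work to the compiler's unsigned __int128 (a GCC/Clang extension); a standalone sketch of the pack/divide/unpack pattern:

using CompilerUInt128 = unsigned __int128;

uint64_t items[2] = {7, 1}; /// little-endian items: value = 2^64 + 7
CompilerUInt128 a = (CompilerUInt128(items[1]) << 64) + items[0];
CompilerUInt128 q = a / 10; /// one hardware-assisted division
uint64_t quotient_lo = static_cast<uint64_t>(q);
uint64_t quotient_hi = static_cast<uint64_t>(q >> 64);
uint64_t remainder = static_cast<uint64_t>(a - q * 10);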

View File

@ -1,9 +1,12 @@
#pragma once
#include <string>
#include <ostream>
#include <fmt/format.h>
#include "wide_integer.h"
namespace wide
{
@ -33,3 +36,34 @@ inline std::string to_string(const integer<Bits, Signed> & n)
}
}
template <size_t Bits, typename Signed>
std::ostream & operator<<(std::ostream & out, const wide::integer<Bits, Signed> & value)
{
return out << to_string(value);
}
/// See https://fmt.dev/latest/api.html#formatting-user-defined-types
template <size_t Bits, typename Signed>
struct fmt::formatter<wide::integer<Bits, Signed>>
{
constexpr auto parse(format_parse_context & ctx)
{
auto it = ctx.begin();
auto end = ctx.end();
/// Only support {}.
if (it != end && *it != '}')
throw format_error("invalid format");
return it;
}
template <typename FormatContext>
auto format(const wide::integer<Bits, Signed> & value, FormatContext & ctx)
{
return format_to(ctx.out(), "{}", to_string(value));
}
};
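With the stream operator and fmt::formatter above in scope, callers can print wide integers directly; a brief sketch:

wide::integer<128, signed> value = -42;
std::cout << value << '\n';               /// via operator<< and wide::to_string
std::string s = fmt::format("{}", value); /// via the formatter; only "{}" is accepted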

View File

@ -35,7 +35,7 @@ PEERDIR(
CFLAGS(-g0)
SRCS(
<? find . -name '*.cpp' | grep -v -F tests/ | grep -v -F Replxx | grep -v -F Readline | sed 's/^\.\// /' | sort ?>
<? find . -name '*.cpp' | grep -v -F tests/ | grep -v -F examples | grep -v -F Replxx | grep -v -F Readline | sed 's/^\.\// /' | sort ?>
)
END()

View File

@ -21,13 +21,11 @@
#include <fstream>
#include <sstream>
#include <memory>
#include <ext/scope_guard.h>
#include <common/scope_guard.h>
#include <Poco/Observer.h>
#include <Poco/AutoPtr.h>
#include <Poco/PatternFormatter.h>
#include <Poco/File.h>
#include <Poco/Path.h>
#include <Poco/Message.h>
#include <Poco/Util/Application.h>
#include <Poco/Exception.h>
@ -59,6 +57,7 @@
#include <Common/getExecutablePath.h>
#include <Common/getHashOfLoadedBinary.h>
#include <Common/Elf.h>
#include <filesystem>
#if !defined(ARCADIA_BUILD)
# include <Common/config_version.h>
@ -70,6 +69,7 @@
#endif
#include <ucontext.h>
namespace fs = std::filesystem;
DB::PipeFDs signal_pipe;
@ -437,11 +437,11 @@ static void sanitizerDeathCallback()
static std::string createDirectory(const std::string & file)
{
auto path = Poco::Path(file).makeParent();
if (path.toString().empty())
fs::path path = fs::path(file).parent_path();
if (path.empty())
return "";
Poco::File(path).createDirectories();
return path.toString();
fs::create_directories(path);
return path;
};
@ -449,7 +449,7 @@ static bool tryCreateDirectories(Poco::Logger * logger, const std::string & path
{
try
{
Poco::File(path).createDirectories();
fs::create_directories(path);
return true;
}
catch (...)
@ -468,9 +468,9 @@ void BaseDaemon::reloadConfiguration()
* instead of using files specified in config.xml.
* (It's convenient to log to the console when you start the server without any command-line parameters.)
*/
config_path = config().getString("config-file", "config.xml");
config_path = config().getString("config-file", getDefaultConfigFileName());
DB::ConfigProcessor config_processor(config_path, false, true);
config_processor.setConfigPath(Poco::Path(config_path).makeParent().toString());
config_processor.setConfigPath(fs::path(config_path).parent_path());
loaded_config = config_processor.loadConfig(/* allow_zk_includes = */ true);
if (last_configuration != nullptr)
@ -516,21 +516,28 @@ std::string BaseDaemon::getDefaultCorePath() const
return "/opt/cores/";
}
std::string BaseDaemon::getDefaultConfigFileName() const
{
return "config.xml";
}
void BaseDaemon::closeFDs()
{
#if defined(OS_FREEBSD) || defined(OS_DARWIN)
Poco::File proc_path{"/dev/fd"};
fs::path proc_path{"/dev/fd"};
#else
Poco::File proc_path{"/proc/self/fd"};
fs::path proc_path{"/proc/self/fd"};
#endif
if (proc_path.isDirectory()) /// Hooray, proc exists
if (fs::is_directory(proc_path)) /// Hooray, proc exists
{
std::vector<std::string> fds;
/// in /proc/self/fd directory filenames are numeric file descriptors
proc_path.list(fds);
for (const auto & fd_str : fds)
/// in /proc/self/fd directory filenames are numeric file descriptors.
/// Iterate directory separately from closing fds to avoid closing iterated directory fd.
std::vector<int> fds;
for (const auto & path : fs::directory_iterator(proc_path))
fds.push_back(DB::parse<int>(path.path().filename()));
for (const auto & fd : fds)
{
int fd = DB::parse<int>(fd_str);
if (fd > 2 && fd != signal_pipe.fds_rw[0] && fd != signal_pipe.fds_rw[1])
::close(fd);
}
@ -592,7 +599,7 @@ void BaseDaemon::initialize(Application & self)
{
/** When creating the pid file and looking for the config, paths will be searched relative to the working path of the program at startup.
*/
std::string path = Poco::Path(config().getString("application.path")).setFileName("").toString();
std::string path = fs::path(config().getString("application.path")).replace_filename("");
if (0 != chdir(path.c_str()))
throw Poco::Exception("Cannot change directory to " + path);
}
@ -640,7 +647,7 @@ void BaseDaemon::initialize(Application & self)
std::string log_path = config().getString("logger.log", "");
if (!log_path.empty())
log_path = Poco::Path(log_path).setFileName("").toString();
log_path = fs::path(log_path).replace_filename("");
/** Redirect stdout, stderr to separate files in the log directory (or in the specified file).
* Some libraries write to stderr in case of errors in debug mode,
@ -703,8 +710,7 @@ void BaseDaemon::initialize(Application & self)
tryCreateDirectories(&logger(), core_path);
Poco::File cores = core_path;
if (!(cores.exists() && cores.isDirectory()))
if (!(fs::exists(core_path) && fs::is_directory(core_path)))
{
core_path = !log_path.empty() ? log_path : "/opt/";
tryCreateDirectories(&logger(), core_path);

View File

@ -149,6 +149,8 @@ protected:
virtual std::string getDefaultCorePath() const;
virtual std::string getDefaultConfigFileName() const;
std::optional<DB::StatusFile> pid_file;
std::atomic_bool is_cancelled{false};

View File

@ -5,6 +5,11 @@ add_library (daemon
)
target_include_directories (daemon PUBLIC ..)
if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
target_link_libraries (daemon PUBLIC -Wl,-undefined,dynamic_lookup)
endif()
target_link_libraries (daemon PUBLIC loggers PRIVATE clickhouse_common_io clickhouse_common_config common ${EXECINFO_LIBRARIES})
if (USE_SENTRY)

View File

@ -1,6 +1,5 @@
#include <daemon/SentryWriter.h>
#include <Poco/File.h>
#include <Poco/Util/Application.h>
#include <Poco/Util/LayeredConfiguration.h>
@ -9,6 +8,7 @@
#include <common/getMemoryAmount.h>
#include <common/logger_useful.h>
#include <Common/formatReadable.h>
#include <Common/SymbolIndex.h>
#include <Common/StackTrace.h>
#include <Common/getNumberOfPhysicalCPUCores.h>
@ -24,6 +24,7 @@
# include <stdio.h>
# include <filesystem>
namespace fs = std::filesystem;
namespace
{
@ -52,8 +53,7 @@ void setExtras()
sentry_set_extra("physical_cpu_cores", sentry_value_new_int32(getNumberOfPhysicalCPUCores()));
if (!server_data_path.empty())
sentry_set_extra("disk_free_space", sentry_value_new_string(formatReadableSizeWithBinarySuffix(
Poco::File(server_data_path).freeSpace()).c_str()));
sentry_set_extra("disk_free_space", sentry_value_new_string(formatReadableSizeWithBinarySuffix(fs::space(server_data_path).free).c_str()));
}
void sentry_logger(sentry_level_e level, const char * message, va_list args, void *)
@ -101,7 +101,7 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
auto * logger = &Poco::Logger::get("SentryWriter");
if (config.getBool("send_crash_reports.enabled", false))
{
if (debug || (strlen(VERSION_OFFICIAL) > 0))
if (debug || (strlen(VERSION_OFFICIAL) > 0)) //-V560
{
enabled = true;
}
@ -109,12 +109,12 @@ void SentryWriter::initialize(Poco::Util::LayeredConfiguration & config)
if (enabled)
{
server_data_path = config.getString("path", "");
const std::filesystem::path & default_tmp_path = std::filesystem::path(config.getString("tmp_path", Poco::Path::temp())) / "sentry";
const std::filesystem::path & default_tmp_path = fs::path(config.getString("tmp_path", fs::temp_directory_path())) / "sentry";
const std::string & endpoint
= config.getString("send_crash_reports.endpoint");
const std::string & temp_folder_path
= config.getString("send_crash_reports.tmp_path", default_tmp_path);
Poco::File(temp_folder_path).createDirectories();
fs::create_directories(temp_folder_path);
sentry_options_t * options = sentry_options_new(); /// will be freed by sentry_init or sentry_shutdown
sentry_options_set_release(options, VERSION_STRING_SHORT);

View File

@ -1,30 +0,0 @@
#pragma once
#include <string.h>
#include <algorithm>
#include <type_traits>
namespace ext
{
/** \brief Returns value `from` converted to type `To` while retaining bit representation.
* `To` and `From` must satisfy `CopyConstructible`.
*/
template <typename To, typename From>
std::decay_t<To> bit_cast(const From & from)
{
To res {};
memcpy(static_cast<void*>(&res), &from, std::min(sizeof(res), sizeof(from)));
return res;
}
/** \brief Returns value `from` converted to type `To` while retaining bit representation.
* `To` and `From` must satisfy `CopyConstructible`.
*/
template <typename To, typename From>
std::decay_t<To> safe_bit_cast(const From & from)
{
static_assert(sizeof(To) == sizeof(From), "bit cast on types of different width");
return bit_cast<To, From>(from);
}
}
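A sketch of how the removed helpers were used: type punning (e.g. reading a float's bits as an integer) without undefined behaviour:

float f = 1.5f;
auto bits = ext::bit_cast<uint32_t>(f);      /// 0x3FC00000
auto back = ext::safe_bit_cast<float>(bits); /// static_asserts that the sizes match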

View File

@ -1,49 +0,0 @@
#pragma once
#include <chrono>
#include <string>
#include <sstream>
#include <cctz/time_zone.h>
namespace ext
{
inline std::string to_string(const std::time_t & time)
{
return cctz::format("%Y-%m-%d %H:%M:%S", std::chrono::system_clock::from_time_t(time), cctz::local_time_zone());
}
template <typename Clock, typename Duration = typename Clock::duration>
std::string to_string(const std::chrono::time_point<Clock, Duration> & tp)
{
// Don't use DateLUT because it shows weird characters for
// TimePoint::max(). I wish we could use C++20 format, but it's not
// there yet.
// return DateLUT::instance().timeToString(std::chrono::system_clock::to_time_t(tp));
auto in_time_t = std::chrono::system_clock::to_time_t(tp);
return to_string(in_time_t);
}
template <typename Rep, typename Period = std::ratio<1>>
std::string to_string(const std::chrono::duration<Rep, Period> & duration)
{
auto seconds_as_int = std::chrono::duration_cast<std::chrono::seconds>(duration);
if (seconds_as_int == duration)
return std::to_string(seconds_as_int.count()) + "s";
auto seconds_as_double = std::chrono::duration_cast<std::chrono::duration<double>>(duration);
return std::to_string(seconds_as_double.count()) + "s";
}
template <typename Clock, typename Duration = typename Clock::duration>
std::ostream & operator<<(std::ostream & o, const std::chrono::time_point<Clock, Duration> & tp)
{
return o << to_string(tp);
}
template <typename Rep, typename Period = std::ratio<1>>
std::ostream & operator<<(std::ostream & o, const std::chrono::duration<Rep, Period> & duration)
{
return o << to_string(duration);
}
}
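
A usage sketch for the removed chrono helpers (assuming the old ext/ include layout and cctz availability):

```
#include <chrono>
#include <iostream>
#include <ext/chrono_io.h>

int main()
{
    using namespace std::chrono;
    std::cout << ext::to_string(seconds(90)) << '\n';         /// "90s" - a whole number of seconds
    std::cout << ext::to_string(milliseconds(1500)) << '\n';  /// "1.500000s" - falls back to a double
    std::cout << ext::to_string(system_clock::now()) << '\n'; /// "YYYY-MM-DD HH:MM:SS" in the local time zone
}
```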


@ -1,24 +0,0 @@
#pragma once
#include <iterator>
namespace ext
{
/** \brief Returns collection of specified container-type.
* Retains stored value_type, constructs resulting collection using iterator range. */
template <template <typename...> class ResultCollection, typename Collection>
auto collection_cast(const Collection & collection)
{
using value_type = typename Collection::value_type;
return ResultCollection<value_type>(std::begin(collection), std::end(collection));
}
/** \brief Returns collection of specified type.
* Performs implicit conversion between source and result value_type, if available and required. */
template <typename ResultCollection, typename Collection>
auto collection_cast(const Collection & collection)
{
return ResultCollection(std::begin(collection), std::end(collection));
}
}
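
A usage sketch of both removed collection_cast overloads (assuming the old ext/ include layout):

```
#include <set>
#include <vector>
#include <ext/collection_cast.h>

int main()
{
    std::vector<int> v{3, 1, 2, 3};
    auto s = ext::collection_cast<std::set>(v);            /// std::set<int>{1, 2, 3}: value_type retained
    auto d = ext::collection_cast<std::vector<double>>(v); /// implicit int -> double conversion
    return s.size() == 3 && d.size() == 4 ? 0 : 1;
}
```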


@ -1,60 +0,0 @@
#pragma once
#include <ext/size.h>
#include <type_traits>
#include <utility>
#include <iterator>
/** \brief Provides a wrapper view around a container, allowing iteration over its elements and indices.
* Allows writing code like that shown below:
*
* std::vector<T> v = getVector();
* for (const std::pair<const std::size_t, T &> index_and_value : ext::enumerate(v))
* std::cout << "element " << index_and_value.first << " is " << index_and_value.second << std::endl;
*/
namespace ext
{
template <typename It> struct enumerate_iterator
{
using traits = typename std::iterator_traits<It>;
using iterator_category = typename traits::iterator_category;
using value_type = std::pair<const std::size_t, typename traits::value_type>;
using difference_type = typename traits::difference_type;
using reference = std::pair<const std::size_t, typename traits::reference>;
std::size_t idx;
It it;
enumerate_iterator(const std::size_t idx_, It it_) : idx{idx_}, it{it_} {}
auto operator*() const { return reference(idx, *it); }
bool operator!=(const enumerate_iterator & other) const { return it != other.it; }
enumerate_iterator & operator++() { return ++idx, ++it, *this; }
};
template <typename Collection> struct enumerate_wrapper
{
using underlying_iterator = decltype(std::begin(std::declval<Collection &>()));
using iterator = enumerate_iterator<underlying_iterator>;
Collection & collection;
enumerate_wrapper(Collection & collection_) : collection(collection_) {}
auto begin() { return iterator(0, std::begin(collection)); }
auto end() { return iterator(ext::size(collection), std::end(collection)); }
};
template <typename Collection> auto enumerate(Collection & collection)
{
return enumerate_wrapper<Collection>{collection};
}
template <typename Collection> auto enumerate(const Collection & collection)
{
return enumerate_wrapper<const Collection>{collection};
}
}
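
The removed ext::enumerate in use, compiled as a standalone sketch (assuming the old ext/ include layout):

```
#include <iostream>
#include <string>
#include <vector>
#include <ext/enumerate.h>

int main()
{
    std::vector<std::string> v{"a", "b", "c"};
    for (auto index_and_value : ext::enumerate(v)) /// pairs of (index, reference to element)
        std::cout << "element " << index_and_value.first << " is " << index_and_value.second << '\n';
}
```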


@ -1,24 +0,0 @@
#pragma once
#include <utility>
namespace ext
{
/// \brief Identity function for use with other algorithms as a pass-through.
class identity
{
/** \brief Function pointer type template for converting identity to a function pointer.
* Presumably useless, provided for completeness. */
template <typename T> using function_ptr_t = T &&(*)(T &&);
/** \brief Implementation of identity as a non-instance member function for taking function pointer. */
template <typename T> static T && invoke(T && t) { return std::forward<T>(t); }
public:
/** \brief Returns the value passed as a sole argument using perfect forwarding. */
template <typename T> T && operator()(T && t) const { return std::forward<T>(t); }
/** \brief Allows conversion of identity instance to a function pointer. */
template <typename T> operator function_ptr_t<T>() const { return &invoke; };
};
}
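
A usage sketch of the removed ext::identity as an algorithm pass-through (assuming the old ext/ include layout):

```
#include <algorithm>
#include <vector>
#include <ext/identity.h>

int main()
{
    std::vector<int> v{3, 1, 2};
    std::vector<int> out(v.size());
    /// Pass-through transform: copies the elements unchanged.
    std::transform(v.begin(), v.end(), out.begin(), ext::identity{});
    return out == v ? 0 : 1;
}
```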


@ -1,43 +0,0 @@
#pragma once
#include <utility>
#include <type_traits>
#include <array>
/** \brief Produces std::array of specified size, containing copies of provided object.
* Copy is performed N-1 times, and the last element is moved.
* This helper allows initializing std::array in place.
*/
namespace ext
{
namespace detail
{
template<std::size_t size, typename T, std::size_t... indexes>
constexpr auto make_array_n_impl(T && value, std::index_sequence<indexes...>)
{
/// Comma is used to make N-1 copies of value
return std::array<std::decay_t<T>, size>{ (static_cast<void>(indexes), value)..., std::forward<T>(value) };
}
}
template<typename T>
constexpr auto make_array_n(std::integral_constant<std::size_t, 0>, T &&)
{
return std::array<std::decay_t<T>, 0>{};
}
template<std::size_t size, typename T>
constexpr auto make_array_n(std::integral_constant<std::size_t, size>, T && value)
{
return detail::make_array_n_impl<size>(std::forward<T>(value), std::make_index_sequence<size - 1>{});
}
template<std::size_t size, typename T>
constexpr auto make_array_n(T && value)
{
return make_array_n(std::integral_constant<std::size_t, size>{}, std::forward<T>(value));
}
}
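
A usage sketch of the removed ext::make_array_n (assuming the old ext/ include layout):

```
#include <array>
#include <string>
#include <type_traits>
#include <ext/make_array_n.h>

int main()
{
    /// Five equal elements; the first four are copies, the last one is moved.
    auto arr = ext::make_array_n<5>(std::string("x"));
    static_assert(std::is_same_v<decltype(arr), std::array<std::string, 5>>);
    return arr[4] == "x" ? 0 : 1;
}
```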


@ -1,51 +0,0 @@
#pragma once
#include <type_traits>
#include <boost/iterator/transform_iterator.hpp>
namespace ext
{
/// \brief Strips the type of its top-level reference and cv-qualifiers, thus allowing storage in containers
template <typename T>
using unqualified_t = std::remove_cv_t<std::remove_reference_t<T>>;
/** \brief Returns collection of the same container-type as the input collection,
* with each element transformed by the application of `mapper`.
*/
template <template <typename...> class Collection, typename... Params, typename Mapper>
auto map(const Collection<Params...> & collection, const Mapper mapper)
{
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return Collection<value_type>(
boost::make_transform_iterator(std::begin(collection), mapper),
boost::make_transform_iterator(std::end(collection), mapper));
}
/** \brief Returns collection of specified container-type,
* with each element transformed by the application of `mapper`.
* Allows conversion between different container-types, e.g. std::vector to std::list
*/
template <template <typename...> class ResultCollection, typename Collection, typename Mapper>
auto map(const Collection & collection, const Mapper mapper)
{
using value_type = unqualified_t<decltype(mapper(*std::begin(collection)))>;
return ResultCollection<value_type>(
boost::make_transform_iterator(std::begin(collection), mapper),
boost::make_transform_iterator(std::end(collection), mapper));
}
/** \brief Returns collection of specified type,
* with each element transformed by the application of `mapper`.
* Allows leveraging implicit conversion between the result of applying `mapper` and R::value_type.
*/
template <typename ResultCollection, typename Collection, typename Mapper>
auto map(const Collection & collection, const Mapper mapper)
{
return ResultCollection(
boost::make_transform_iterator(std::begin(collection), mapper),
boost::make_transform_iterator(std::end(collection), mapper));
}
}
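
The removed ext::map overloads in use, as a sketch (assuming the old ext/ include layout and boost availability):

```
#include <list>
#include <string>
#include <vector>
#include <ext/map.h>

int main()
{
    std::vector<int> v{1, 2, 3};
    /// Same container type; the element type is deduced from the mapper.
    auto strings = ext::map(v, [](int x) { return std::to_string(x); }); /// std::vector<std::string>
    /// Different container type, specified explicitly.
    auto doubled = ext::map<std::list>(v, [](int x) { return x * 2; });  /// std::list<int>
    return strings.size() == 3 && doubled.size() == 3 ? 0 : 1;
}
```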


@ -1,25 +0,0 @@
#pragma once
#include <vector>
namespace ext
{
/// Moves all arguments starting from the second to the end of the vector.
/// For example, `push_back(vec, a1, a2, a3)` is a more compact way to write
/// `vec.push_back(a1); vec.push_back(a2); vec.push_back(a3);`
/// This function is like boost::range::push_back() but works for noncopyable types too.
template <typename T>
void push_back(std::vector<T> &)
{
}
template <typename T, typename FirstArg, typename... OtherArgs>
void push_back(std::vector<T> & vec, FirstArg && first, OtherArgs &&... other)
{
vec.reserve(vec.size() + sizeof...(other) + 1);
vec.emplace_back(std::move(first));
push_back(vec, std::move(other)...);
}
}
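
A usage sketch of the removed ext::push_back with a noncopyable element type (assuming the old ext/ include layout):

```
#include <memory>
#include <vector>
#include <ext/push_back.h>

int main()
{
    std::vector<std::unique_ptr<int>> v;
    /// Works for noncopyable types because every argument is moved in.
    ext::push_back(v, std::make_unique<int>(1), std::make_unique<int>(2), std::make_unique<int>(3));
    return v.size() == 3 ? 0 : 1;
}
```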


@ -1,14 +0,0 @@
#pragma once
#include <cstdlib>
namespace ext
{
/** \brief Returns number of elements in an automatic array. */
template <typename T, std::size_t N>
constexpr std::size_t size(const T (&)[N]) noexcept { return N; }
/** \brief Returns number of elements in a container providing a size() member function. */
template <typename T> constexpr auto size(const T & t) { return t.size(); }
}


@ -1,27 +0,0 @@
#pragma once
namespace ext
{
template <typename T>
class unlock_guard
{
public:
unlock_guard(T & mutex_) : mutex(mutex_)
{
mutex.unlock();
}
~unlock_guard()
{
mutex.lock();
}
unlock_guard(const unlock_guard &) = delete;
unlock_guard & operator=(const unlock_guard &) = delete;
private:
T & mutex;
};
}
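
The removed ext::unlock_guard inverts the usual lock_guard: it releases a held mutex and re-acquires it on scope exit. A usage sketch (assuming the old ext/ include layout; waitForSomethingSlow is a hypothetical blocking call):

```
#include <mutex>
#include <ext/unlock_guard.h>

std::mutex mutex;

void waitForSomethingSlow() { /* a blocking call that must not hold the mutex */ }

void process()
{
    std::lock_guard lock(mutex);
    /// ... work under the lock ...
    {
        ext::unlock_guard unlock(mutex); /// releases the mutex, re-acquires it on scope exit
        waitForSomethingSlow();
    }
    /// ... the mutex is held again here ...
}
```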


@ -1,5 +1,8 @@
if (GLIBC_COMPATIBILITY)
set (ENABLE_FASTMEMCPY ON)
add_subdirectory(memcpy)
if(TARGET memcpy)
set(MEMCPY_LIBRARY memcpy)
endif()
enable_language(ASM)
include(CheckIncludeFile)
@ -12,7 +15,7 @@ if (GLIBC_COMPATIBILITY)
add_headers_and_sources(glibc_compatibility .)
add_headers_and_sources(glibc_compatibility musl)
if (ARCH_ARM)
if (ARCH_AARCH64)
list (APPEND glibc_compatibility_sources musl/aarch64/syscall.s musl/aarch64/longjmp.s)
set (musl_arch_include_dir musl/aarch64)
elseif (ARCH_AMD64)
@ -27,13 +30,6 @@ if (GLIBC_COMPATIBILITY)
list(APPEND glibc_compatibility_sources musl/getentropy.c)
endif()
if (NOT ARCH_ARM)
# clickhouse_memcpy doesn't support ARCH_ARM, see https://github.com/ClickHouse/ClickHouse/issues/18951
add_library (clickhouse_memcpy OBJECT
${ClickHouse_SOURCE_DIR}/contrib/FastMemcpy/memcpy_wrapper.c
)
endif()
# Need to omit frame pointers to match the performance of glibc
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer")
@ -51,15 +47,16 @@ if (GLIBC_COMPATIBILITY)
target_compile_options(glibc-compatibility PRIVATE -fPIC)
endif ()
target_link_libraries(global-libs INTERFACE glibc-compatibility)
target_link_libraries(global-libs INTERFACE glibc-compatibility ${MEMCPY_LIBRARY})
install(
TARGETS glibc-compatibility
TARGETS glibc-compatibility ${MEMCPY_LIBRARY}
EXPORT global
ARCHIVE DESTINATION lib
)
message (STATUS "Some symbols from glibc will be replaced for compatibility")
elseif (YANDEX_OFFICIAL_BUILD)
message (WARNING "Option GLIBC_COMPATIBILITY must be turned on for production builds.")
endif ()


@ -8,13 +8,6 @@
extern "C" {
#endif
#include <pthread.h>
size_t __pthread_get_minstack(const pthread_attr_t * attr)
{
return 1048576; /// This is a guess. Not sure it is correct.
}
#include <signal.h>
#include <unistd.h>
#include <string.h>
@ -141,6 +134,8 @@ int __open_2(const char *path, int oflag)
}
#include <pthread.h>
/// No-ops.
int pthread_setname_np(pthread_t thread, const char *name) { return 0; }
int pthread_getname_np(pthread_t thread, char *name, size_t len) { name[0] = '\0'; return 0; };


@ -0,0 +1,8 @@
if (ARCH_AMD64)
add_library(memcpy STATIC memcpy.cpp)
# We allow including memcpy.h from user code for better inlining.
target_include_directories(memcpy PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>)
target_compile_options(memcpy PRIVATE -fno-builtin-memcpy)
endif ()


@ -0,0 +1,6 @@
#include "memcpy.h"
extern "C" void * memcpy(void * __restrict dst, const void * __restrict src, size_t size)
{
return inline_memcpy(dst, src, size);
}


@ -0,0 +1,217 @@
#include <cstddef>
#include <emmintrin.h>
/** Custom memcpy implementation for ClickHouse.
* It has the following benefits over using glibc's implementation:
* 1. Avoiding dependency on specific version of glibc's symbol, like memcpy@@GLIBC_2.14 for portability.
* 2. Avoiding an indirect call via the PLT due to shared linking, which can be less efficient.
* 3. It's possible to include this header and call inline_memcpy directly for better inlining or interprocedural analysis.
* 4. Better results on our performance tests on current CPUs: up to 25% on some queries and up to 0.7%..1% on average across all queries.
*
* Writing our own memcpy is extremely difficult for the following reasons:
* 1. The optimal variant depends on the specific CPU model.
* 2. The optimal variant depends on the distribution of size arguments.
* 3. It depends on the number of threads copying data concurrently.
* 4. It also depends on how the calling code is using the copied data and how the different memcpy calls are related to each other.
* The vast range of scenarios makes proper testing especially difficult.
* When writing our own memcpy there is a risk of overoptimizing it
* on non-representative microbenchmarks while making real-world use cases actually worse.
*
* Most of the benchmarks for memcpy on the internet are wrong.
*
* Let's look at the details:
*
* For small sizes, the order of branches in code is important.
* There are variants with specific order of branches (like here or in glibc)
* or with jump table (in asm code see example from Cosmopolitan libc:
* https://github.com/jart/cosmopolitan/blob/de09bec215675e9b0beb722df89c6f794da74f3f/libc/nexgen32e/memcpy.S#L61)
* or with Duff device in C (see https://github.com/skywind3000/FastMemcpy/)
*
* It's also important how to copy uneven sizes.
* Almost every implementation, including this one, uses two overlapping movs.
*
* It is important to disable -ftree-loop-distribute-patterns when compiling the memcpy implementation,
* otherwise the compiler can replace internal loops with a call to memcpy, which will lead to infinite recursion.
*
* For larger sizes it's important to choose the instructions used:
* - SSE or AVX or AVX-512;
* - rep movsb;
* Performance will depend on the size threshold, on the CPU model, on the "erms" flag
* ("Enhanced Rep MovS" - it indicates that performance of "rep movsb" is decent for large sizes)
* https://stackoverflow.com/questions/43343231/enhanced-rep-movsb-for-memcpy
*
* Using AVX-512 can be bad due to throttling.
* Using AVX can be bad if most code is using SSE due to switching penalty
* (it also depends on the usage of "vzeroupper" instruction).
* But in some cases AVX gives a win.
*
* It also depends on how many times the loop will be unrolled.
* We are unrolling the loop 8 times (by the number of available registers), but it is not always the best.
*
* It also depends on the usage of aligned or unaligned loads/stores.
* We are using unaligned loads and aligned stores.
*
* It also depends on the usage of prefetch instructions. It makes sense on some Intel CPUs but can slow down performance on AMD.
* Setting up correct offset for prefetching is non-obvious.
*
* Non-temporal (cache-bypassing) stores can be used for very large sizes (more than a half of L3 cache).
* But the exact threshold is unclear - when doing memcpy from multiple threads the optimal threshold can be lower,
* because L3 cache is shared (and L2 cache is partially shared).
*
* Very large size of memcpy typically indicates suboptimal (not cache friendly) algorithms in code or unrealistic scenarios,
* so we don't pay attention to using non-temporal stores.
*
* On recent Intel CPUs, the presence of "erms" makes "rep movsb" the most beneficial,
* even compared to non-temporal aligned unrolled stores, even with the widest registers.
*
* memcpy can be written in asm, C or C++. The latter can also use inline asm.
* An asm implementation can be better for making sure that the compiler won't make the code worse,
* to ensure the order of branches, the code layout, the usage of all required registers.
* But if it is located in a separate translation unit, inlining will not be possible
* (inline asm can be used to overcome this limitation).
* Sometimes C or C++ code can be further optimized by the compiler.
* For example, clang is capable of replacing SSE intrinsics with AVX code if -mavx is used.
*
* Please note that the compiler can replace plain code with memcpy and vice versa:
* - memcpy with a compile-time known small size is replaced with simple instructions without a call to memcpy;
* it is controlled by -fbuiltin-memcpy and can be manually ensured by calling __builtin_memcpy.
* This is often used to implement unaligned load/store without undefined behaviour in C++.
* - a loop copying bytes can be recognized and replaced with a call to memcpy;
* it is controlled by -ftree-loop-distribute-patterns.
* - also note that a loop copying bytes can be unrolled, peeled and vectorized, which will give you
* inline code somewhat similar to a decent implementation of memcpy.
*
* This description is up to date as of Mar 2021.
*
* How to test the memcpy implementation for performance:
* 1. Test on real production workload.
* 2. For a synthetic test, see utils/memcpy-bench, but make sure you do your best to cover the wide range of scenarios.
*
* TODO: Add self-tuning memcpy with bayesian bandits algorithm for large sizes.
* See https://habr.com/en/company/yandex/blog/457612/
*/
static inline void * inline_memcpy(void * __restrict dst_, const void * __restrict src_, size_t size)
{
/// We will use pointer arithmetic, so char pointer will be used.
/// Note that __restrict makes sense (otherwise the compiler will reload data from memory
/// instead of using the values already in registers due to possible aliasing).
char * __restrict dst = reinterpret_cast<char * __restrict>(dst_);
const char * __restrict src = reinterpret_cast<const char * __restrict>(src_);
/// Standard memcpy returns the original value of dst. It is rarely used but we have to do it.
/// If you use memcpy with small but non-constant sizes, you can call inline_memcpy directly
/// for inlining and removing this single instruction.
void * ret = dst;
tail:
/// Small sizes and tails after the loop for large sizes.
/// The order of branches is important but in fact the optimal order depends on the distribution of sizes in your application.
/// This order of branches is from the disassembly of glibc's code.
/// We copy chunks of possibly uneven size with two overlapping movs.
/// Example: to copy 5 bytes [0, 1, 2, 3, 4] we will copy tail [1, 2, 3, 4] first and then head [0, 1, 2, 3].
if (size <= 16)
{
if (size >= 8)
{
/// Chunks of 8..16 bytes.
__builtin_memcpy(dst + size - 8, src + size - 8, 8);
__builtin_memcpy(dst, src, 8);
}
else if (size >= 4)
{
/// Chunks of 4..7 bytes.
__builtin_memcpy(dst + size - 4, src + size - 4, 4);
__builtin_memcpy(dst, src, 4);
}
else if (size >= 2)
{
/// Chunks of 2..3 bytes.
__builtin_memcpy(dst + size - 2, src + size - 2, 2);
__builtin_memcpy(dst, src, 2);
}
else if (size >= 1)
{
/// A single byte.
*dst = *src;
}
/// No bytes remaining.
}
else
{
/// Medium and large sizes.
if (size <= 128)
{
/// Medium size, not enough for full loop unrolling.
/// We will copy the last 16 bytes.
_mm_storeu_si128(reinterpret_cast<__m128i *>(dst + size - 16), _mm_loadu_si128(reinterpret_cast<const __m128i *>(src + size - 16)));
/// Then we will copy every 16 bytes from the beginning in a loop.
/// The last loop iteration will possibly overwrite some part of already copied last 16 bytes.
/// This is Ok, similar to the code for small sizes above.
while (size > 16)
{
_mm_storeu_si128(reinterpret_cast<__m128i *>(dst), _mm_loadu_si128(reinterpret_cast<const __m128i *>(src)));
dst += 16;
src += 16;
size -= 16;
}
}
else
{
/// Large size with fully unrolled loop.
/// Align the destination to a 16-byte boundary.
size_t padding = (16 - (reinterpret_cast<size_t>(dst) & 15)) & 15;
/// If not aligned - we will copy first 16 bytes with unaligned stores.
if (padding > 0)
{
__m128i head = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src));
_mm_storeu_si128(reinterpret_cast<__m128i*>(dst), head);
dst += padding;
src += padding;
size -= padding;
}
/// Aligned unrolled copy. We will use half of available SSE registers.
/// It's not possible to have both src and dst aligned.
/// So, we will use aligned stores and unaligned loads.
__m128i c0, c1, c2, c3, c4, c5, c6, c7;
while (size >= 128)
{
c0 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 0);
c1 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 1);
c2 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 2);
c3 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 3);
c4 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 4);
c5 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 5);
c6 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 6);
c7 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src) + 7);
src += 128;
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 0), c0);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 1), c1);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 2), c2);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 3), c3);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 4), c4);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 5), c5);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 6), c6);
_mm_store_si128((reinterpret_cast<__m128i*>(dst) + 7), c7);
dst += 128;
size -= 128;
}
/// The last remaining 0..127 bytes will be processed as usual.
goto tail;
}
}
return ret;
}
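
As the comment at the top of inline_memcpy notes, calling it directly lets the compiler inline small, non-constant-size copies instead of emitting a call. A sketch of such a caller (copyColumnData is a hypothetical name):

```
#include "memcpy.h"

/// Hypothetical caller: the size is known only at run time.
/// A direct call to inline_memcpy avoids the function-call overhead and lets
/// the compiler see through the copy.
void copyColumnData(char * dst, const char * src, size_t size)
{
    inline_memcpy(dst, src, size);
}
```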


@ -78,6 +78,9 @@
*
*/
// Disable warnings by PVS-Studio
//-V::GA
static const double
pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */
a0 = 7.72156649015328655494e-02, /* 0x3FB3C467, 0xE37DB0C8 */


@ -85,6 +85,9 @@
*
*/
// Disable warnings by PVS-Studio
//-V::GA
#include <stdint.h>
#include <math.h>
#include "libm.h"


@ -155,7 +155,7 @@ static inline long double fp_barrierl(long double x)
static inline void fp_force_evalf(float x)
{
volatile float y;
y = x;
y = x; //-V1001
}
#endif
@ -164,7 +164,7 @@ static inline void fp_force_evalf(float x)
static inline void fp_force_eval(double x)
{
volatile double y;
y = x;
y = x; //-V1001
}
#endif
@ -173,7 +173,7 @@ static inline void fp_force_eval(double x)
static inline void fp_force_evall(long double x)
{
volatile long double y;
y = x;
y = x; //-V1001
}
#endif


@ -3,6 +3,9 @@
* SPDX-License-Identifier: MIT
*/
// Disable warnings by PVS-Studio
//-V::GA
#include <math.h>
#include <stdint.h>
#include "libm.h"


@ -1,4 +1,4 @@
include(${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake)
include("${ClickHouse_SOURCE_DIR}/cmake/dbms_glob_sources.cmake")
add_headers_and_sources(loggers .)
add_library(loggers ${loggers_sources} ${loggers_headers})
target_link_libraries(loggers PRIVATE dbms clickhouse_common_io)


@ -6,10 +6,11 @@
#include "OwnFormattingChannel.h"
#include "OwnPatternFormatter.h"
#include <Poco/ConsoleChannel.h>
#include <Poco/File.h>
#include <Poco/Logger.h>
#include <Poco/Net/RemoteSyslogChannel.h>
#include <Poco/Path.h>
#include <filesystem>
namespace fs = std::filesystem;
namespace DB
{
@ -20,11 +21,11 @@ namespace DB
// TODO: move to libcommon
static std::string createDirectory(const std::string & file)
{
auto path = Poco::Path(file).makeParent();
if (path.toString().empty())
auto path = fs::path(file).parent_path();
if (path.empty())
return "";
Poco::File(path).createDirectories();
return path.toString();
fs::create_directories(path);
return path;
};
void Loggers::setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority)
@ -40,7 +41,7 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
split->addTextLog(log, text_log_max_priority);
auto current_logger = config.getString("logger", "");
if (config_logger == current_logger)
if (config_logger == current_logger) //-V1051
return;
config_logger = current_logger;
@ -51,16 +52,26 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
/// Use extended interface of Channel for more comprehensive logging.
split = new DB::OwnSplitChannel();
auto log_level = config.getString("logger.level", "trace");
auto log_level_string = config.getString("logger.level", "trace");
/// different channels (log, console, syslog) may have different loglevels configured
/// The maximum (the most verbose) of those will be used as default for Poco loggers
int max_log_level = 0;
const auto log_path = config.getString("logger.log", "");
if (!log_path.empty())
{
createDirectory(log_path);
std::cerr << "Logging " << log_level << " to " << log_path << std::endl;
std::cerr << "Logging " << log_level_string << " to " << log_path << std::endl;
auto log_level = Poco::Logger::parseLevel(log_level_string);
if (log_level > max_log_level)
{
max_log_level = log_level;
}
// Set up two channel chains.
log_file = new Poco::FileChannel;
log_file->setProperty(Poco::FileChannel::PROP_PATH, Poco::Path(log_path).absolute().toString());
log_file->setProperty(Poco::FileChannel::PROP_PATH, fs::weakly_canonical(log_path));
log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M"));
log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number");
log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true"));
@ -69,9 +80,10 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false"));
log_file->open();
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter(this);
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter;
Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, log_file);
log->setLevel(log_level);
split->addChannel(log);
}
@ -79,10 +91,19 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
if (!errorlog_path.empty())
{
createDirectory(errorlog_path);
// NOTE: we don't use notice & critical in the code, so in practice error log collects fatal & error & warning.
// (!) Warnings are important, they require attention and should never be silenced / ignored.
auto errorlog_level = Poco::Logger::parseLevel(config.getString("logger.errorlog_level", "notice"));
if (errorlog_level > max_log_level)
{
max_log_level = errorlog_level;
}
std::cerr << "Logging errors to " << errorlog_path << std::endl;
error_log_file = new Poco::FileChannel;
error_log_file->setProperty(Poco::FileChannel::PROP_PATH, Poco::Path(errorlog_path).absolute().toString());
error_log_file->setProperty(Poco::FileChannel::PROP_PATH, fs::weakly_canonical(errorlog_path));
error_log_file->setProperty(Poco::FileChannel::PROP_ROTATION, config.getRawString("logger.size", "100M"));
error_log_file->setProperty(Poco::FileChannel::PROP_ARCHIVE, "number");
error_log_file->setProperty(Poco::FileChannel::PROP_COMPRESS, config.getRawString("logger.compress", "true"));
@ -90,20 +111,22 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
error_log_file->setProperty(Poco::FileChannel::PROP_FLUSH, config.getRawString("logger.flush", "true"));
error_log_file->setProperty(Poco::FileChannel::PROP_ROTATEONOPEN, config.getRawString("logger.rotateOnOpen", "false"));
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter(this);
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter;
Poco::AutoPtr<DB::OwnFormattingChannel> errorlog = new DB::OwnFormattingChannel(pf, error_log_file);
errorlog->setLevel(Poco::Message::PRIO_NOTICE);
errorlog->setLevel(errorlog_level);
errorlog->open();
split->addChannel(errorlog);
}
/// "dynamic_layer_selection" is needed only for Yandex.Metrika, which shares part of the ClickHouse code.
/// We don't need this configuration parameter.
if (config.getBool("logger.use_syslog", false) || config.getBool("dynamic_layer_selection", false))
if (config.getBool("logger.use_syslog", false))
{
//const std::string & cmd_name = commandName();
auto syslog_level = Poco::Logger::parseLevel(config.getString("logger.syslog_level", log_level_string));
if (syslog_level > max_log_level)
{
max_log_level = syslog_level;
}
if (config.has("logger.syslog.address"))
{
@ -127,9 +150,11 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
}
syslog_channel->open();
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter(this, OwnPatternFormatter::ADD_LAYER_TAG);
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter;
Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, syslog_channel);
log->setLevel(syslog_level);
split->addChannel(log);
}
@ -141,9 +166,17 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
{
bool color_enabled = config.getBool("logger.color_terminal", color_logs_by_default);
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter(this, OwnPatternFormatter::ADD_NOTHING, color_enabled);
auto console_log_level_string = config.getString("logger.console_log_level", log_level_string);
auto console_log_level = Poco::Logger::parseLevel(console_log_level_string);
if (console_log_level > max_log_level)
{
max_log_level = console_log_level;
}
Poco::AutoPtr<OwnPatternFormatter> pf = new OwnPatternFormatter(color_enabled);
Poco::AutoPtr<DB::OwnFormattingChannel> log = new DB::OwnFormattingChannel(pf, new Poco::ConsoleChannel);
logger.warning("Logging " + log_level + " to console");
logger.warning("Logging " + console_log_level_string + " to console");
log->setLevel(console_log_level);
split->addChannel(log);
}
@ -152,17 +185,17 @@ void Loggers::buildLoggers(Poco::Util::AbstractConfiguration & config, Poco::Log
logger.setChannel(split);
// Global logging level (it can be overridden for specific loggers).
logger.setLevel(log_level);
logger.setLevel(max_log_level);
// Set level to all already created loggers
std::vector<std::string> names;
//logger_root = Logger::root();
logger.root().names(names);
for (const auto & name : names)
logger.root().get(name).setLevel(log_level);
logger.root().get(name).setLevel(max_log_level);
// Attach to the root logger.
logger.root().setLevel(log_level);
logger.root().setLevel(max_log_level);
logger.root().setChannel(logger.getChannel());
// Explicitly specified log levels for specific loggers.
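
The level handling introduced above reduces to one pattern: parse each channel's configured level and use the maximum (most verbose) as the default for the root logger. A condensed sketch of that pattern (the function name is illustrative):

```
#include <string>
#include <Poco/Logger.h>

int computeMaxLogLevel(const std::string & file_level, const std::string & console_level)
{
    int max_log_level = 0;
    for (const auto & level_string : {file_level, console_level})
    {
        int level = Poco::Logger::parseLevel(level_string); /// throws on unknown level names
        if (level > max_log_level)
            max_log_level = level;
    }
    return max_log_level; /// the most verbose of the configured channels
}
```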


@ -8,6 +8,7 @@
#include <Interpreters/TextLog.h>
#include "OwnSplitChannel.h"
namespace Poco::Util
{
class AbstractConfiguration;
@ -21,16 +22,8 @@ public:
/// Close log files. On next log write files will be reopened.
void closeLogs(Poco::Logger & logger);
std::optional<size_t> getLayer() const
{
return layer; /// layer set in inheritor class BaseDaemonApplication.
}
void setTextLog(std::shared_ptr<DB::TextLog> log, int max_priority);
protected:
std::optional<size_t> layer;
private:
Poco::AutoPtr<Poco::FileChannel> log_file;
Poco::AutoPtr<Poco::FileChannel> error_log_file;


@ -22,6 +22,9 @@ public:
void setLevel(Poco::Message::Priority priority_) { priority = priority_; }
// Poco::Logger::parseLevel returns ints
void setLevel(int level) { priority = static_cast<Poco::Message::Priority>(level); }
void open() override
{
if (pChannel)


@ -13,31 +13,18 @@
#include "Loggers.h"
OwnPatternFormatter::OwnPatternFormatter(const Loggers * loggers_, OwnPatternFormatter::Options options_, bool color_)
: Poco::PatternFormatter(""), loggers(loggers_), options(options_), color(color_)
OwnPatternFormatter::OwnPatternFormatter(bool color_)
: Poco::PatternFormatter(""), color(color_)
{
}
void OwnPatternFormatter::formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text)
void OwnPatternFormatter::formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) const
{
DB::WriteBufferFromString wb(text);
const Poco::Message & msg = msg_ext.base;
/// For syslog: tag must be before message and first whitespace.
/// This code is only used in Yandex.Metrika and unneeded in ClickHouse.
if ((options & ADD_LAYER_TAG) && loggers)
{
auto layer = loggers->getLayer();
if (layer)
{
writeCString("layer[", wb);
DB::writeIntText(*layer, wb);
writeCString("]: ", wb);
}
}
/// Change delimiters in date for compatibility with old logs.
DB::writeDateTimeText<'.', ':'>(msg_ext.time_seconds, wb);


@ -24,20 +24,11 @@ class Loggers;
class OwnPatternFormatter : public Poco::PatternFormatter
{
public:
/// ADD_LAYER_TAG is needed only for Yandex.Metrika, which shares part of the ClickHouse code.
enum Options
{
ADD_NOTHING = 0,
ADD_LAYER_TAG = 1 << 0
};
OwnPatternFormatter(const Loggers * loggers_, Options options_ = ADD_NOTHING, bool color_ = false);
OwnPatternFormatter(bool color_ = false);
void format(const Poco::Message & msg, std::string & text) override;
void formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text);
void formatExtended(const DB::ExtendedLogMessage & msg_ext, std::string & text) const;
private:
const Loggers * loggers;
Options options;
bool color;
};


@ -4,12 +4,14 @@
#include <Core/Block.h>
#include <Interpreters/InternalTextLogsQueue.h>
#include <Interpreters/TextLog.h>
#include <IO/WriteBufferFromFileDescriptor.h>
#include <sys/time.h>
#include <Poco/Message.h>
#include <Common/CurrentThread.h>
#include <Common/DNSResolver.h>
#include <common/getThreadId.h>
#include <Common/SensitiveDataMasker.h>
#include <Common/IO.h>
namespace DB
{
@ -26,16 +28,48 @@ void OwnSplitChannel::log(const Poco::Message & msg)
auto matches = masker->wipeSensitiveData(message_text);
if (matches > 0)
{
logSplit({msg, message_text}); // we will continue with the copy of original message with text modified
tryLogSplit({msg, message_text}); // we will continue with the copy of original message with text modified
return;
}
}
logSplit(msg);
tryLogSplit(msg);
}
void OwnSplitChannel::tryLogSplit(const Poco::Message & msg)
{
try
{
logSplit(msg);
}
/// It is better to catch the errors here in order to avoid
/// breaking some functionality because of unexpected "File not
/// found" (or similar) error.
///
/// For example StorageDistributedDirectoryMonitor will mark batch
/// as broken, some MergeTree code can also be affected.
///
/// Also note, that we cannot log the exception here, since this
/// will lead to recursion, using regular tryLogCurrentException().
/// but let's log it into the stderr at least.
catch (...)
{
MemoryTracker::LockExceptionInThread lock_memory_tracker(VariableContext::Global);
const std::string & exception_message = getCurrentExceptionMessage(true);
const std::string & message = msg.getText();
/// NOTE: errors are ignored, since nothing can be done.
writeRetry(STDERR_FILENO, "Cannot add message to the log: ");
writeRetry(STDERR_FILENO, message.data(), message.size());
writeRetry(STDERR_FILENO, "\n");
writeRetry(STDERR_FILENO, exception_message.data(), exception_message.size());
writeRetry(STDERR_FILENO, "\n");
}
}
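
writeRetry comes from Common/IO.h; a minimal sketch of what such an EINTR-safe, error-ignoring helper can look like (an illustration of the idea, not the actual implementation):

```
#include <cstring>
#include <cerrno>
#include <unistd.h>

/// Keep writing until all bytes are written or a non-retryable error occurs.
/// Errors are deliberately swallowed: if logging itself fails, nothing more can be done.
inline void writeRetry(int fd, const char * data, size_t size)
{
    while (size > 0)
    {
        ssize_t written = ::write(fd, data, size);
        if (written < 0)
        {
            if (errno == EINTR)
                continue; /// interrupted by a signal - retry
            return;       /// non-retryable error - give up silently
        }
        data += written;
        size -= static_cast<size_t>(written);
    }
}

inline void writeRetry(int fd, const char * str) { writeRetry(fd, str, strlen(str)); }
```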
void OwnSplitChannel::logSplit(const Poco::Message & msg)
{
ExtendedLogMessage msg_ext = ExtendedLogMessage::getFrom(msg);


@ -24,6 +24,7 @@ public:
private:
void logSplit(const Poco::Message & msg);
void tryLogSplit(const Poco::Message & msg);
using ChannelPtr = Poco::AutoPtr<Poco::Channel>;
/// Handler and its pointer casted to extended interface


@ -14,8 +14,8 @@ add_library (mysqlxx
target_include_directories (mysqlxx PUBLIC ..)
if (USE_INTERNAL_MYSQL_LIBRARY)
target_include_directories (mysqlxx PUBLIC ${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/include)
target_include_directories (mysqlxx PUBLIC ${ClickHouse_BINARY_DIR}/contrib/mariadb-connector-c/include)
target_include_directories (mysqlxx PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/mariadb-connector-c/include")
target_include_directories (mysqlxx PUBLIC "${ClickHouse_BINARY_DIR}/contrib/mariadb-connector-c/include")
else ()
set(PLATFORM_LIBRARIES ${CMAKE_DL_LIBS})


@ -51,10 +51,11 @@ Connection::Connection(
const char* ssl_key,
unsigned timeout,
unsigned rw_timeout,
bool enable_local_infile)
bool enable_local_infile,
bool opt_reconnect)
: Connection()
{
connect(db, server, user, password, port, socket, ssl_ca, ssl_cert, ssl_key, timeout, rw_timeout, enable_local_infile);
connect(db, server, user, password, port, socket, ssl_ca, ssl_cert, ssl_key, timeout, rw_timeout, enable_local_infile, opt_reconnect);
}
Connection::Connection(const std::string & config_name)
@ -80,7 +81,8 @@ void Connection::connect(const char* db,
const char * ssl_key,
unsigned timeout,
unsigned rw_timeout,
bool enable_local_infile)
bool enable_local_infile,
bool opt_reconnect)
{
if (is_connected)
disconnect();
@ -104,9 +106,8 @@ void Connection::connect(const char* db,
if (mysql_options(driver.get(), MYSQL_OPT_LOCAL_INFILE, &enable_local_infile_arg))
throw ConnectionFailed(errorMessage(driver.get()), mysql_errno(driver.get()));
/// Enables auto-reconnect.
bool reconnect = true;
if (mysql_options(driver.get(), MYSQL_OPT_RECONNECT, reinterpret_cast<const char *>(&reconnect)))
/// See C API Developer Guide: Automatic Reconnection Control
if (mysql_options(driver.get(), MYSQL_OPT_RECONNECT, reinterpret_cast<const char *>(&opt_reconnect)))
throw ConnectionFailed(errorMessage(driver.get()), mysql_errno(driver.get()));
/// Specifies a particular ssl key and certificate if needed.


@ -14,6 +14,8 @@
/// Disable LOAD DATA LOCAL INFILE because it is insecure
#define MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE false
/// See https://dev.mysql.com/doc/c-api/5.7/en/c-api-auto-reconnect.html
#define MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT true
namespace mysqlxx
@ -76,7 +78,8 @@ public:
const char * ssl_key = "",
unsigned timeout = MYSQLXX_DEFAULT_TIMEOUT,
unsigned rw_timeout = MYSQLXX_DEFAULT_RW_TIMEOUT,
bool enable_local_infile = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
bool enable_local_infile = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE,
bool opt_reconnect = MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT);
/// Creates connection. Can be used if Poco::Util::Application is being used.
/// All settings will be taken from the config_name section of the configuration.
@ -96,7 +99,8 @@ public:
const char* ssl_key,
unsigned timeout = MYSQLXX_DEFAULT_TIMEOUT,
unsigned rw_timeout = MYSQLXX_DEFAULT_RW_TIMEOUT,
bool enable_local_infile = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
bool enable_local_infile = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE,
bool opt_reconnect = MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT);
void connect(const std::string & config_name)
{
@ -112,6 +116,7 @@ public:
std::string ssl_cert = cfg.getString(config_name + ".ssl_cert", "");
std::string ssl_key = cfg.getString(config_name + ".ssl_key", "");
bool enable_local_infile = cfg.getBool(config_name + ".enable_local_infile", MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
bool opt_reconnect = cfg.getBool(config_name + ".opt_reconnect", MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT);
unsigned timeout =
cfg.getInt(config_name + ".connect_timeout",
@ -135,7 +140,8 @@ public:
ssl_key.c_str(),
timeout,
rw_timeout,
enable_local_infile);
enable_local_infile,
opt_reconnect);
}
/// If MySQL connection was established.


@ -26,6 +26,15 @@ struct ConnectionFailed : public Exception
};
/// Connection to MySQL server was lost
struct ConnectionLost : public Exception
{
ConnectionLost(const std::string & msg, int code = 0) : Exception(msg, code) {}
const char * name() const throw() override { return "mysqlxx::ConnectionLost"; }
const char * className() const throw() override { return "mysqlxx::ConnectionLost"; }
};
/// Erroneous query.
struct BadQuery : public Exception
{


@ -10,7 +10,6 @@
#include <common/sleep.h>
#include <Poco/Util/Application.h>
#include <Poco/Util/LayeredConfiguration.h>
@ -41,7 +40,9 @@ void Pool::Entry::decrementRefCount()
Pool::Pool(const Poco::Util::AbstractConfiguration & cfg, const std::string & config_name,
unsigned default_connections_, unsigned max_connections_,
const char * parent_config_name_)
: default_connections(default_connections_), max_connections(max_connections_)
: logger(Poco::Logger::get("mysqlxx::Pool"))
, default_connections(default_connections_)
, max_connections(max_connections_)
{
server = cfg.getString(config_name + ".host");
@ -78,6 +79,9 @@ Pool::Pool(const Poco::Util::AbstractConfiguration & cfg, const std::string & co
enable_local_infile = cfg.getBool(config_name + ".enable_local_infile",
cfg.getBool(parent_config_name + ".enable_local_infile", MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE));
opt_reconnect = cfg.getBool(config_name + ".opt_reconnect",
cfg.getBool(parent_config_name + ".opt_reconnect", MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT));
}
else
{
@ -96,6 +100,8 @@ Pool::Pool(const Poco::Util::AbstractConfiguration & cfg, const std::string & co
enable_local_infile = cfg.getBool(
config_name + ".enable_local_infile", MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
opt_reconnect = cfg.getBool(config_name + ".opt_reconnect", MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT);
}
connect_timeout = cfg.getInt(config_name + ".connect_timeout",
@ -125,20 +131,30 @@ Pool::Entry Pool::get()
initialize();
for (;;)
{
logger.trace("(%s): Iterating through existing MySQL connections", getDescription());
for (auto & connection : connections)
{
if (connection->ref_count == 0)
return Entry(connection, this);
}
logger.trace("(%s): Trying to allocate a new connection.", getDescription());
if (connections.size() < static_cast<size_t>(max_connections))
{
Connection * conn = allocConnection();
if (conn)
return Entry(conn, this);
logger.trace("(%s): Unable to create a new connection: Allocation failed.", getDescription());
}
else
{
logger.trace("(%s): Unable to create a new connection: Max number of connections has been reached.", getDescription());
}
lock.unlock();
logger.trace("(%s): Sleeping for %d seconds.", getDescription(), MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
sleepForSeconds(MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
lock.lock();
}
@ -158,12 +174,13 @@ Pool::Entry Pool::tryGet()
/// Fixme: There is a race condition here b/c we do not synchronize with Pool::Entry's copy-assignment operator
if (connection_ptr->ref_count == 0)
{
Entry res(connection_ptr, this);
if (res.tryForceConnected()) /// Tries to reestablish connection as well
return res;
{
Entry res(connection_ptr, this);
if (res.tryForceConnected()) /// Tries to reestablish connection as well
return res;
}
auto & logger = Poco::Util::Application::instance().logger();
logger.information("Idle connection to mysql server cannot be recovered, dropping it.");
logger.debug("(%s): Idle connection to MySQL server cannot be recovered, dropping it.", getDescription());
/// This one is disconnected, cannot be reestablished and so needs to be disposed of.
connection_it = connections.erase(connection_it);
@ -186,6 +203,8 @@ Pool::Entry Pool::tryGet()
void Pool::removeConnection(Connection* connection)
{
logger.trace("(%s): Removing connection.", getDescription());
std::lock_guard<std::mutex> lock(mutex);
if (connection)
{
@ -210,8 +229,6 @@ void Pool::Entry::forceConnected() const
if (data == nullptr)
throw Poco::RuntimeException("Tried to access NULL database connection.");
Poco::Util::Application & app = Poco::Util::Application::instance();
bool first = true;
while (!tryForceConnected())
{
@ -220,7 +237,7 @@ void Pool::Entry::forceConnected() const
else
sleepForSeconds(MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
app.logger().information("MYSQL: Reconnecting to " + pool->description);
pool->logger.debug("Entry: Reconnecting to MySQL server %s", pool->description);
data->conn.connect(
pool->db.c_str(),
pool->server.c_str(),
@ -233,7 +250,8 @@ void Pool::Entry::forceConnected() const
pool->ssl_key.c_str(),
pool->connect_timeout,
pool->rw_timeout,
pool->enable_local_infile);
pool->enable_local_infile,
pool->opt_reconnect);
}
}
@ -242,18 +260,22 @@ bool Pool::Entry::tryForceConnected() const
{
auto * const mysql_driver = data->conn.getDriver();
const auto prev_connection_id = mysql_thread_id(mysql_driver);
pool->logger.trace("Entry(connection %lu): sending PING to check if it is alive.", prev_connection_id);
if (data->conn.ping()) /// Attempts to reestablish lost connection
{
const auto current_connection_id = mysql_thread_id(mysql_driver);
if (prev_connection_id != current_connection_id)
{
auto & logger = Poco::Util::Application::instance().logger();
logger.information("Connection to mysql server has been reestablished. Connection id changed: %lu -> %lu",
prev_connection_id, current_connection_id);
pool->logger.debug("Entry(connection %lu): Reconnected to MySQL server. Connection id changed: %lu -> %lu",
current_connection_id, prev_connection_id, current_connection_id);
}
pool->logger.trace("Entry(connection %lu): PING ok.", current_connection_id);
return true;
}
pool->logger.trace("Entry(connection %lu): PING failed.", prev_connection_id);
return false;
}
@ -274,15 +296,13 @@ void Pool::initialize()
Pool::Connection * Pool::allocConnection(bool dont_throw_if_failed_first_time)
{
Poco::Util::Application & app = Poco::Util::Application::instance();
std::unique_ptr<Connection> conn(new Connection);
std::unique_ptr<Connection> conn_ptr{new Connection};
try
{
app.logger().information("MYSQL: Connecting to " + description);
logger.debug("Connecting to %s", description);
conn->conn.connect(
conn_ptr->conn.connect(
db.c_str(),
server.c_str(),
user.c_str(),
@ -294,29 +314,29 @@ Pool::Connection * Pool::allocConnection(bool dont_throw_if_failed_first_time)
ssl_key.c_str(),
connect_timeout,
rw_timeout,
enable_local_infile);
enable_local_infile,
opt_reconnect);
}
catch (mysqlxx::ConnectionFailed & e)
{
logger.error(e.what());
if ((!was_successful && !dont_throw_if_failed_first_time)
|| e.errnum() == ER_ACCESS_DENIED_ERROR
|| e.errnum() == ER_DBACCESS_DENIED_ERROR
|| e.errnum() == ER_BAD_DB_ERROR)
{
app.logger().error(e.what());
throw;
}
else
{
app.logger().error(e.what());
return nullptr;
}
}
connections.push_back(conn_ptr.get());
was_successful = true;
auto * connection = conn.release();
connections.push_back(connection);
return connection;
return conn_ptr.release();
}
}


@ -6,6 +6,8 @@
#include <atomic>
#include <Poco/Exception.h>
#include <Poco/Logger.h>
#include <mysqlxx/Connection.h>
@ -157,27 +159,29 @@ public:
*/
Pool(const std::string & db_,
const std::string & server_,
const std::string & user_ = "",
const std::string & password_ = "",
unsigned port_ = 0,
const std::string & user_,
const std::string & password_,
unsigned port_,
const std::string & socket_ = "",
unsigned connect_timeout_ = MYSQLXX_DEFAULT_TIMEOUT,
unsigned rw_timeout_ = MYSQLXX_DEFAULT_RW_TIMEOUT,
unsigned default_connections_ = MYSQLXX_POOL_DEFAULT_START_CONNECTIONS,
unsigned max_connections_ = MYSQLXX_POOL_DEFAULT_MAX_CONNECTIONS,
unsigned enable_local_infile_ = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE)
: default_connections(default_connections_), max_connections(max_connections_),
db(db_), server(server_), user(user_), password(password_), port(port_), socket(socket_),
connect_timeout(connect_timeout_), rw_timeout(rw_timeout_), enable_local_infile(enable_local_infile_) {}
unsigned enable_local_infile_ = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE,
bool opt_reconnect_ = MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT)
: logger(Poco::Logger::get("mysqlxx::Pool")), default_connections(default_connections_),
max_connections(max_connections_), db(db_), server(server_), user(user_), password(password_), port(port_), socket(socket_),
connect_timeout(connect_timeout_), rw_timeout(rw_timeout_), enable_local_infile(enable_local_infile_),
opt_reconnect(opt_reconnect_) {}
Pool(const Pool & other)
: default_connections{other.default_connections},
: logger(other.logger), default_connections{other.default_connections},
max_connections{other.max_connections},
db{other.db}, server{other.server},
user{other.user}, password{other.password},
port{other.port}, socket{other.socket},
connect_timeout{other.connect_timeout}, rw_timeout{other.rw_timeout},
enable_local_infile{other.enable_local_infile}
enable_local_infile{other.enable_local_infile}, opt_reconnect(other.opt_reconnect)
{}
Pool & operator=(const Pool &) = delete;
@ -201,6 +205,8 @@ public:
void removeConnection(Connection * connection);
protected:
Poco::Logger & logger;
/// Number of MySQL connections which are created at launch.
unsigned default_connections;
/// Maximum possible number of connections
@ -231,6 +237,7 @@ private:
std::string ssl_cert;
std::string ssl_key;
bool enable_local_infile;
bool opt_reconnect;
/// True if connection was established at least once.
bool was_successful{false};


@ -1,3 +1,7 @@
#include <algorithm>
#include <ctime>
#include <random>
#include <thread>
#include <mysqlxx/PoolWithFailover.h>
@ -10,9 +14,12 @@ static bool startsWith(const std::string & s, const char * prefix)
using namespace mysqlxx;
PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & config_,
const std::string & config_name_, const unsigned default_connections_,
const unsigned max_connections_, const size_t max_tries_)
PoolWithFailover::PoolWithFailover(
const Poco::Util::AbstractConfiguration & config_,
const std::string & config_name_,
const unsigned default_connections_,
const unsigned max_connections_,
const size_t max_tries_)
: max_tries(max_tries_)
{
shareable = config_.getBool(config_name_ + ".share_connection", false);
@ -33,6 +40,19 @@ PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & con
std::make_shared<Pool>(config_, replica_name, default_connections_, max_connections_, config_name_.c_str()));
}
}
/// PoolWithFailover objects are stored in a cache inside PoolFactory.
/// This cache is reset by ExternalDictionariesLoader after every SYSTEM RELOAD DICTIONAR{Y|IES}
/// which triggers massive re-construction of connection pools.
/// The state of PRNGs like std::mt19937 is considered to be quite heavy,
/// thus here we attempt to optimize its construction.
static thread_local std::mt19937 rnd_generator(
std::hash<std::thread::id>{}(std::this_thread::get_id()) + std::clock());
for (auto & [_, replicas] : replicas_by_priority)
{
if (replicas.size() > 1)
std::shuffle(replicas.begin(), replicas.end(), rnd_generator);
}
}
else
{
@ -41,16 +61,46 @@ PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & con
}
}
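
The thread_local generator above exists because std::mt19937 carries roughly 2.5 KB of state that would otherwise be re-initialized on every pool construction. The pattern in isolation, as a sketch with an illustrative function name:

```
#include <algorithm>
#include <ctime>
#include <random>
#include <thread>
#include <vector>

void shuffleReplicas(std::vector<int> & replicas)
{
    /// Constructed once per thread and then reused; the seed mixes the thread id
    /// with the clock so that different threads shuffle differently.
    static thread_local std::mt19937 rnd_generator(
        std::hash<std::thread::id>{}(std::this_thread::get_id()) + std::clock());

    if (replicas.size() > 1)
        std::shuffle(replicas.begin(), replicas.end(), rnd_generator);
}
```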
PoolWithFailover::PoolWithFailover(const std::string & config_name_, const unsigned default_connections_,
const unsigned max_connections_, const size_t max_tries_)
: PoolWithFailover{
Poco::Util::Application::instance().config(), config_name_,
default_connections_, max_connections_, max_tries_}
PoolWithFailover::PoolWithFailover(
const std::string & config_name_,
const unsigned default_connections_,
const unsigned max_connections_,
const size_t max_tries_)
: PoolWithFailover{Poco::Util::Application::instance().config(),
config_name_, default_connections_, max_connections_, max_tries_}
{
}
PoolWithFailover::PoolWithFailover(
const std::string & database,
const RemoteDescription & addresses,
const std::string & user,
const std::string & password,
unsigned default_connections_,
unsigned max_connections_,
size_t max_tries_)
: max_tries(max_tries_)
, shareable(false)
{
/// Replicas have the same priority, but traversed replicas are moved to the end of the queue.
for (const auto & [host, port] : addresses)
{
replicas_by_priority[0].emplace_back(std::make_shared<Pool>(database,
host, user, password, port,
/* socket_ = */ "",
MYSQLXX_DEFAULT_TIMEOUT,
MYSQLXX_DEFAULT_RW_TIMEOUT,
default_connections_,
max_connections_));
}
}
PoolWithFailover::PoolWithFailover(const PoolWithFailover & other)
: max_tries{other.max_tries}, shareable{other.shareable}
: max_tries{other.max_tries}
, shareable{other.shareable}
{
if (shareable)
{


@ -11,6 +11,8 @@
namespace mysqlxx
{
/** MySQL connection pool with support of failover.
*
* For dictionary source:
* Have information about replicas and their priorities.
* Tries to connect to replicas in order of priority. When priorities are equal, chooses the replica with the maximum time without connections.
*
@ -68,42 +70,60 @@ namespace mysqlxx
using PoolPtr = std::shared_ptr<Pool>;
using Replicas = std::vector<PoolPtr>;
/// [priority][index] -> replica.
/// [priority][index] -> replica. Highest priority is 0.
using ReplicasByPriority = std::map<int, Replicas>;
ReplicasByPriority replicas_by_priority;
/// Number of connection tries.
size_t max_tries;
/// Mutex for set of replicas.
std::mutex mutex;
/// Can the Pool be shared
bool shareable;
public:
using Entry = Pool::Entry;
using RemoteDescription = std::vector<std::pair<std::string, uint16_t>>;
/**
* config_name Name of parameter in configuration file.
* * Mysql dictionary source related params:
* config_name Name of parameter in configuration file for dictionary source.
*
* * Mysql storage related parameters:
* replicas_description
*
* * Mutual parameters:
* default_connections Number of connections in the pool to each replica at start.
* max_connections Maximum number of connections in pool to each replica.
* max_tries_ Max number of connection tries.
*/
PoolWithFailover(const std::string & config_name_,
PoolWithFailover(
const std::string & config_name_,
unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
PoolWithFailover(const Poco::Util::AbstractConfiguration & config_,
PoolWithFailover(
const Poco::Util::AbstractConfiguration & config_,
const std::string & config_name_,
unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
PoolWithFailover(
const std::string & database,
const RemoteDescription & addresses,
const std::string & user,
const std::string & password,
unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
PoolWithFailover(const PoolWithFailover & other);
/** Allocates a connection to use. */
Entry get();
};
using PoolWithFailoverPtr = std::shared_ptr<PoolWithFailover>;
}


@ -1,11 +1,16 @@
#if __has_include(<mysql.h>)
#include <errmsg.h>
#include <mysql.h>
#else
#include <mysql/errmsg.h> //Y_IGNORE
#include <mysql/mysql.h>
#endif
#include <Poco/Logger.h>
#include <mysqlxx/Connection.h>
#include <mysqlxx/Query.h>
#include <mysqlxx/Types.h>
namespace mysqlxx
@ -57,8 +62,24 @@ void Query::reset()
void Query::executeImpl()
{
std::string query_string = query_buf.str();
if (mysql_real_query(conn->getDriver(), query_string.data(), query_string.size()))
throw BadQuery(errorMessage(conn->getDriver()), mysql_errno(conn->getDriver()));
MYSQL* mysql_driver = conn->getDriver();
auto & logger = Poco::Logger::get("mysqlxx::Query");
logger.trace("Running MySQL query using connection %lu", mysql_thread_id(mysql_driver));
if (mysql_real_query(mysql_driver, query_string.data(), query_string.size()))
{
const auto err_no = mysql_errno(mysql_driver);
switch (err_no)
{
case CR_SERVER_GONE_ERROR:
[[fallthrough]];
case CR_SERVER_LOST:
throw ConnectionLost(errorMessage(mysql_driver), err_no);
default:
throw BadQuery(errorMessage(mysql_driver), err_no);
}
}
}
UseQueryResult Query::use()
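
Splitting ConnectionLost out of BadQuery lets call sites retry transient server-side disconnects while still failing fast on genuinely bad queries. A hedged sketch of such a call site (executeWithRetry is an illustrative name, not part of mysqlxx):

```
#include <cstddef>
#include <mysqlxx/Exception.h>
#include <mysqlxx/Query.h>

void executeWithRetry(mysqlxx::Query & query, size_t max_tries)
{
    for (size_t i = 0; i < max_tries; ++i)
    {
        try
        {
            query.execute();
            return;
        }
        catch (const mysqlxx::ConnectionLost &)
        {
            /// CR_SERVER_GONE_ERROR / CR_SERVER_LOST: transient, worth another try.
            if (i + 1 == max_tries)
                throw;
        }
        /// mysqlxx::BadQuery is deliberately not caught: retrying a malformed query won't help.
    }
}
```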


@ -1,5 +1,2 @@
add_executable (mysqlxx_test mysqlxx_test.cpp)
target_link_libraries (mysqlxx_test PRIVATE mysqlxx)
add_executable (mysqlxx_pool_test mysqlxx_pool_test.cpp)
target_link_libraries (mysqlxx_pool_test PRIVATE mysqlxx)


@ -1,21 +0,0 @@
<?xml version = '1.0' encoding = 'utf-8'?>
<yandex>
<mysql_goals>
<port>3306</port>
<user>root</user>
<db>Metrica</db>
<password>qwerty</password>
<replica>
<host>example02t</host>
<priority>0</priority>
</replica>
<replica>
<host>example02t</host>
<port>3306</port>
<user>root</user>
<password>qwerty</password>
<db>Metrica</db>
<priority>1</priority>
</replica>
</mysql_goals>
</yandex>

Some files were not shown because too many files have changed in this diff.