diff --git a/.gitattributes b/.gitattributes
index bcc7d57b904..efb059f169a 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,2 +1,4 @@
contrib/* linguist-vendored
*.h linguist-language=C++
+# to avoid frequent conflicts
+tests/queries/0_stateless/arcadia_skip_list.txt text merge=union
diff --git a/.gitmodules b/.gitmodules
index 7a2c5600e65..f7dcf5f4ac1 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -93,7 +93,7 @@
url = https://github.com/ClickHouse-Extras/libunwind.git
[submodule "contrib/simdjson"]
path = contrib/simdjson
- url = https://github.com/ClickHouse-Extras/simdjson.git
+ url = https://github.com/simdjson/simdjson.git
[submodule "contrib/rapidjson"]
path = contrib/rapidjson
url = https://github.com/ClickHouse-Extras/rapidjson
@@ -133,7 +133,7 @@
url = https://github.com/unicode-org/icu.git
[submodule "contrib/flatbuffers"]
path = contrib/flatbuffers
- url = https://github.com/google/flatbuffers.git
+ url = https://github.com/ClickHouse-Extras/flatbuffers.git
[submodule "contrib/libc-headers"]
path = contrib/libc-headers
url = https://github.com/ClickHouse-Extras/libc-headers.git
@@ -221,3 +221,9 @@
[submodule "contrib/NuRaft"]
path = contrib/NuRaft
url = https://github.com/ClickHouse-Extras/NuRaft.git
+[submodule "contrib/nanodbc"]
+ path = contrib/nanodbc
+ url = https://github.com/ClickHouse-Extras/nanodbc.git
+[submodule "contrib/datasketches-cpp"]
+ path = contrib/datasketches-cpp
+ url = https://github.com/ClickHouse-Extras/datasketches-cpp.git
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e2c777b3bcf..cc1ec835a7b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,312 @@
+## ClickHouse release 21.4
+
+### ClickHouse release 21.4.1 2021-04-12
+
+#### Backward Incompatible Change
+
+* The `toStartOfInterval` function will align hour intervals to midnight (in previous versions they were aligned to the start of the Unix epoch). For example, `toStartOfInterval(x, INTERVAL 11 HOUR)` will split every day into three intervals: `00:00:00..10:59:59`, `11:00:00..21:59:59` and `22:00:00..23:59:59` (see the sketch after this list). This behaviour is better suited for practical needs. This closes [#9510](https://github.com/ClickHouse/ClickHouse/issues/9510). [#22060](https://github.com/ClickHouse/ClickHouse/pull/22060) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* `Age` and `Precision` in graphite rollup configs should increase from retention to retention. This is now checked, and a wrong config raises an exception. [#21496](https://github.com/ClickHouse/ClickHouse/pull/21496) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Fix `cutToFirstSignificantSubdomainCustom()`/`firstSignificantSubdomainCustom()` returning wrong result for 3+ level domains present in custom top-level domain list. For input domains matching these custom top-level domains, the third-level domain was considered to be the first significant one. This is now fixed. This change may introduce incompatibility if the function is used in e.g. the sharding key. [#21946](https://github.com/ClickHouse/ClickHouse/pull/21946) ([Azat Khuzhin](https://github.com/azat)).
+* Column `keys` in table `system.dictionaries` was replaced by columns `key.names` and `key.types`. The columns `key.names`, `key.types`, `attribute.names` and `attribute.types` of the `system.dictionaries` table do not require the dictionary to be loaded. [#21884](https://github.com/ClickHouse/ClickHouse/pull/21884) ([Maksim Kita](https://github.com/kitaisreal)).
+* Now replicas that are processing the `ALTER TABLE ATTACH PART[ITION]` command search in their `detached/` folders before fetching the data from other replicas. As an implementation detail, a new command `ATTACH_PART` is introduced in the replicated log. Parts are searched and compared by their checksums. [#18978](https://github.com/ClickHouse/ClickHouse/pull/18978) ([Mike Kot](https://github.com/myrrc)). **Note**:
+ * `ATTACH PART[ITION]` queries may not work during cluster upgrade.
+ * It's not possible to roll back to an older ClickHouse version after executing an `ALTER ... ATTACH` query in the new version, as the old servers would fail to process the `ATTACH_PART` entry in the replicated log.
+* In this version, an empty `<remote_url_allow_hosts>` element will block all access to remote hosts, while in previous versions it did nothing. If you want to keep the old behaviour and you have an empty `remote_url_allow_hosts` element in the configuration file, remove it. [#20058](https://github.com/ClickHouse/ClickHouse/pull/20058) ([Vladimir Chebotarev](https://github.com/excitoon)).
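+
+A minimal sketch of the new `toStartOfInterval` alignment referenced in the first entry (the timestamp is illustrative):
+
+```sql
+-- With 11-hour intervals, every day is split at 00:00, 11:00 and 22:00.
+SELECT toStartOfInterval(toDateTime('2021-04-01 23:30:00'), INTERVAL 11 HOUR);
+-- 21.4: returns 2021-04-01 22:00:00 (aligned to midnight of the same day);
+-- previous versions aligned the intervals to the start of the Unix epoch instead.
+```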
+
+
+#### New Feature
+
+* Extended range of `DateTime64` to support dates from year 1925 to 2283. Improved support of `DateTime` around the zero date (`1970-01-01`). [#9404](https://github.com/ClickHouse/ClickHouse/pull/9404) ([alexey-milovidov](https://github.com/alexey-milovidov), [Vasily Nemkov](https://github.com/Enmk)). Not all time and date functions work for the extended range of dates.
+* Added support of Kerberos authentication for preconfigured users and HTTP requests (GSS-SPNEGO). [#14995](https://github.com/ClickHouse/ClickHouse/pull/14995) ([Denis Glazachev](https://github.com/traceon)).
+* Add `prefer_column_name_to_alias` setting to use original column names instead of aliases. It is needed for better compatibility with common databases' aliasing rules. This is for [#9715](https://github.com/ClickHouse/ClickHouse/issues/9715) and [#9887](https://github.com/ClickHouse/ClickHouse/issues/9887). [#22044](https://github.com/ClickHouse/ClickHouse/pull/22044) ([Amos Bird](https://github.com/amosbird)).
+* Added functions `dictGetChildren(dictionary, key)` and `dictGetDescendants(dictionary, key, level)`. Function `dictGetChildren` returns all children as an array of indexes. It is an inverse transformation for `dictGetHierarchy`. Function `dictGetDescendants` returns all descendants as if `dictGetChildren` was applied `level` times recursively. Zero `level` value is equivalent to infinity. Closes [#14656](https://github.com/ClickHouse/ClickHouse/issues/14656). [#22096](https://github.com/ClickHouse/ClickHouse/pull/22096) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added `executable_pool` dictionary source. Closes [#14528](https://github.com/ClickHouse/ClickHouse/issues/14528). [#21321](https://github.com/ClickHouse/ClickHouse/pull/21321) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added table function `dictionary`. It works the same way as `Dictionary` engine. Closes [#21560](https://github.com/ClickHouse/ClickHouse/issues/21560). [#21910](https://github.com/ClickHouse/ClickHouse/pull/21910) ([Maksim Kita](https://github.com/kitaisreal)).
+* Support `Nullable` type for `PolygonDictionary` attribute. [#21890](https://github.com/ClickHouse/ClickHouse/pull/21890) ([Maksim Kita](https://github.com/kitaisreal)).
+* Functions `dictGet`, `dictHas` use current database name if it is not specified for dictionaries created with DDL. Closes [#21632](https://github.com/ClickHouse/ClickHouse/issues/21632). [#21859](https://github.com/ClickHouse/ClickHouse/pull/21859) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added function `dictGetOrNull`. It works like `dictGet`, but returns `Null` if the key was not found in the dictionary. Closes [#22375](https://github.com/ClickHouse/ClickHouse/issues/22375). [#22413](https://github.com/ClickHouse/ClickHouse/pull/22413) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added async update in `ComplexKeyCache`, `SSDCache`, `SSDComplexKeyCache` dictionaries. Added support for `Nullable` type in `Cache`, `ComplexKeyCache`, `SSDCache`, `SSDComplexKeyCache` dictionaries. Added support for multiple attributes fetch with `dictGet`, `dictGetOrDefault` functions. Fixes [#21517](https://github.com/ClickHouse/ClickHouse/issues/21517). [#20595](https://github.com/ClickHouse/ClickHouse/pull/20595) ([Maksim Kita](https://github.com/kitaisreal)).
+* Support `dictHas` function for `RangeHashedDictionary`. Fixes [#6680](https://github.com/ClickHouse/ClickHouse/issues/6680). [#19816](https://github.com/ClickHouse/ClickHouse/pull/19816) ([Maksim Kita](https://github.com/kitaisreal)).
+* Add function `timezoneOf` that returns the timezone name of `DateTime` or `DateTime64` data types. This does not close [#9959](https://github.com/ClickHouse/ClickHouse/issues/9959). Fix inconsistencies in function names: add aliases `timezone` and `timeZone` as well as `toTimezone` and `toTimeZone` and `timezoneOf` and `timeZoneOf`. [#22001](https://github.com/ClickHouse/ClickHouse/pull/22001) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add new optional clause `GRANTEES` for `CREATE/ALTER USER` commands. It specifies users or roles which are allowed to receive grants from this user on condition this user has also all required access granted with grant option. By default `GRANTEES ANY` is used which means a user with grant option can grant to anyone. Syntax: `CREATE USER ... GRANTEES {user | role | ANY | NONE} [,...] [EXCEPT {user | role} [,...]]`. [#21641](https://github.com/ClickHouse/ClickHouse/pull/21641) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Add new column `slowdowns_count` to `system.clusters`. When using hedged requests, it shows how many times we switched to another replica because this replica was responding slowly. Also show actual value of `errors_count` in `system.clusters`. [#21480](https://github.com/ClickHouse/ClickHouse/pull/21480) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add `_partition_id` virtual column for `MergeTree*` engines. Allow to prune partitions by `_partition_id`. Add `partitionID()` function to calculate partition id string. [#21401](https://github.com/ClickHouse/ClickHouse/pull/21401) ([Amos Bird](https://github.com/amosbird)).
+* Add function `isIPAddressInRange` to test if an IPv4 or IPv6 address is contained in a given CIDR network prefix (see the example after this list). [#21329](https://github.com/ClickHouse/ClickHouse/pull/21329) ([PHO](https://github.com/depressed-pho)).
+* Added new SQL command `ALTER TABLE 'table_name' UNFREEZE [PARTITION 'part_expr'] WITH NAME 'backup_name'`. This command is needed to properly remove 'frozen' partitions from all disks. [#21142](https://github.com/ClickHouse/ClickHouse/pull/21142) ([Pavel Kovalenko](https://github.com/Jokser)).
+* Support implicit key type conversion for `JOIN`. [#19885](https://github.com/ClickHouse/ClickHouse/pull/19885) ([Vladimir](https://github.com/vdimir)).
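+
+A quick example of the new `isIPAddressInRange` function from the list above (the addresses are illustrative):
+
+```sql
+SELECT isIPAddressInRange('127.0.0.1', '127.0.0.0/8');      -- 1
+SELECT isIPAddressInRange('2001:db8::1', '2001:db8::/32');  -- 1
+SELECT isIPAddressInRange('192.168.1.1', '10.0.0.0/8');     -- 0
+```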
+
+#### Experimental Feature
+
+* Support `RANGE OFFSET` frame (for window functions) for floating point types. Implement `lagInFrame`/`leadInFrame` window functions, which are analogous to `lag`/`lead`, but respect the window frame. They are identical when the frame is `between unbounded preceding and unbounded following` (see the sketch after this list). This closes [#5485](https://github.com/ClickHouse/ClickHouse/issues/5485). [#21895](https://github.com/ClickHouse/ClickHouse/pull/21895) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Zero-copy replication for `ReplicatedMergeTree` over S3 storage. [#16240](https://github.com/ClickHouse/ClickHouse/pull/16240) ([ianton-ru](https://github.com/ianton-ru)).
+* Added possibility to migrate existing S3 disk to the schema with backup-restore capabilities. [#22070](https://github.com/ClickHouse/ClickHouse/pull/22070) ([Pavel Kovalenko](https://github.com/Jokser)).
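+
+A minimal sketch of `lagInFrame` from the entry above (output values are illustrative; the experimental setting must be enabled):
+
+```sql
+SET allow_experimental_window_functions = 1;
+SELECT
+    number,
+    lagInFrame(number, 1) OVER (ORDER BY number ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) AS prev
+FROM numbers(5);
+-- `prev` is 0 (the default value) for the first row, then the previous `number` within the frame.
+```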
+
+#### Performance Improvement
+
+* Supported parallel formatting in `clickhouse-local` and everywhere else. [#21630](https://github.com/ClickHouse/ClickHouse/pull/21630) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Support parallel parsing for `CSVWithNames` and `TSVWithNames` formats. This closes [#21085](https://github.com/ClickHouse/ClickHouse/issues/21085). [#21149](https://github.com/ClickHouse/ClickHouse/pull/21149) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Enable reading with mmap IO for file ranges from 64 MiB (the setting `min_bytes_to_use_mmap_io`). It may lead to a moderate performance improvement (see the sketch after this list). [#22326](https://github.com/ClickHouse/ClickHouse/pull/22326) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add cache for files read with `min_bytes_to_use_mmap_io` setting. It gives a significant (2x and more) performance improvement when the value of the setting is small, by avoiding frequent mmap/munmap calls and the consequent page faults. Note that mmap IO has major drawbacks that make it less reliable in production (e.g. hangs or SIGBUS on faulty disks; less controllable memory usage). Nevertheless it is good in benchmarks. [#22206](https://github.com/ClickHouse/ClickHouse/pull/22206) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Avoid unnecessary data copy when using codec `NONE`. Please note that codec `NONE` is mostly useless - it's recommended to always use compression (`LZ4` is the default). Despite the common belief, disabling compression may not improve performance (the opposite effect is possible). The `NONE` codec is useful in some cases: - when data is incompressible; - for synthetic benchmarks. [#22145](https://github.com/ClickHouse/ClickHouse/pull/22145) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Faster `GROUP BY` with small `max_rows_to_group_by` and `group_by_overflow_mode='any'`. [#21856](https://github.com/ClickHouse/ClickHouse/pull/21856) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Optimize performance of queries like `SELECT ... FINAL ... WHERE`. Now in queries with `FINAL`, columns that are part of the sorting key are allowed to move to `PREWHERE`. [#21830](https://github.com/ClickHouse/ClickHouse/pull/21830) ([foolchi](https://github.com/foolchi)).
+* Improved performance by replacing `memcpy` with another implementation. This closes [#18583](https://github.com/ClickHouse/ClickHouse/issues/18583). [#21520](https://github.com/ClickHouse/ClickHouse/pull/21520) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Improve performance of aggregation in order of sorting key (with enabled setting `optimize_aggregation_in_order`). [#19401](https://github.com/ClickHouse/ClickHouse/pull/19401) ([Anton Popov](https://github.com/CurtizJ)).
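+
+A sketch of tuning the mmap IO threshold mentioned above (the table name is hypothetical):
+
+```sql
+SET min_bytes_to_use_mmap_io = 67108864; -- use mmap for reads of 64 MiB and more; 0 disables it
+SELECT count() FROM big_table WHERE x > 0;
+```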
+
+#### Improvement
+
+* Add connection pool for PostgreSQL table/database engine and dictionary source. Should fix [#21444](https://github.com/ClickHouse/ClickHouse/issues/21444). [#21839](https://github.com/ClickHouse/ClickHouse/pull/21839) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Support non-default table schema for postgres storage/table-function. Closes [#21701](https://github.com/ClickHouse/ClickHouse/issues/21701). [#21711](https://github.com/ClickHouse/ClickHouse/pull/21711) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Support replicas priority for postgres dictionary source. [#21710](https://github.com/ClickHouse/ClickHouse/pull/21710) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Introduce a new merge tree setting `min_bytes_to_rebalance_partition_over_jbod` which allows assigning new parts to different disks of a JBOD volume in a balanced way. [#16481](https://github.com/ClickHouse/ClickHouse/pull/16481) ([Amos Bird](https://github.com/amosbird)).
+* Added `Grant`, `Revoke` and `System` values of `query_kind` column for corresponding queries in `system.query_log`. [#21102](https://github.com/ClickHouse/ClickHouse/pull/21102) ([Vasily Nemkov](https://github.com/Enmk)).
+* Allow customizing timeouts for HTTP connections used for replication independently from other HTTP timeouts. [#20088](https://github.com/ClickHouse/ClickHouse/pull/20088) ([nvartolomei](https://github.com/nvartolomei)).
+* Better exception message in client in case of an exception while the server is writing blocks. In previous versions the client may get a misleading message like `Data compressed with different methods`. [#22427](https://github.com/ClickHouse/ClickHouse/pull/22427) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix error `Directory tmp_fetch_XXX already exists` which could happen after a failed part fetch. Delete the temporary fetch directory if it already exists. Fixes [#14197](https://github.com/ClickHouse/ClickHouse/issues/14197). [#22411](https://github.com/ClickHouse/ClickHouse/pull/22411) ([nvartolomei](https://github.com/nvartolomei)).
+* Fix MSan report for function `range` with `UInt256` argument (support for large integers is experimental). This closes [#22157](https://github.com/ClickHouse/ClickHouse/issues/22157). [#22387](https://github.com/ClickHouse/ClickHouse/pull/22387) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add `current_database` column to `system.processes` table. It contains the current database of the query. [#22365](https://github.com/ClickHouse/ClickHouse/pull/22365) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Add case-insensitive history search/navigation and subword movement features to `clickhouse-client`. [#22105](https://github.com/ClickHouse/ClickHouse/pull/22105) ([Amos Bird](https://github.com/amosbird)).
+* If a tuple of NULLs, e.g. `(NULL, NULL)`, is on the left-hand side of the `IN` operator with tuples of non-NULLs on the right-hand side, e.g. `SELECT (NULL, NULL) IN ((0, 0), (3, 1))`, return 0 instead of throwing an exception about incompatible types. The expression may also appear due to optimization of something like `SELECT (NULL, NULL) = (8, 0) OR (NULL, NULL) = (3, 2) OR (NULL, NULL) = (0, 0) OR (NULL, NULL) = (3, 1)`. This closes [#22017](https://github.com/ClickHouse/ClickHouse/issues/22017). [#22063](https://github.com/ClickHouse/ClickHouse/pull/22063) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Update used version of simdjson to 0.9.1. This fixes [#21984](https://github.com/ClickHouse/ClickHouse/issues/21984). [#22057](https://github.com/ClickHouse/ClickHouse/pull/22057) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Added case-insensitive aliases for `CONNECTION_ID()` and `VERSION()` functions. This fixes [#22028](https://github.com/ClickHouse/ClickHouse/issues/22028). [#22042](https://github.com/ClickHouse/ClickHouse/pull/22042) ([Eugene Klimov](https://github.com/Slach)).
+* Add option `strict_increase` to `windowFunnel` function to calculate each event once (resolve [#21835](https://github.com/ClickHouse/ClickHouse/issues/21835)). [#22025](https://github.com/ClickHouse/ClickHouse/pull/22025) ([Vladimir](https://github.com/vdimir)).
+* If the partition key of a `MergeTree` table does not include `Date` or `DateTime` columns but includes exactly one `DateTime64` column, expose its values in the `min_time` and `max_time` columns in `system.parts` and `system.parts_columns` tables. Add `min_time` and `max_time` columns to the `system.parts_columns` table (this was inconsistent with the `system.parts` table). This closes [#18244](https://github.com/ClickHouse/ClickHouse/issues/18244). [#22011](https://github.com/ClickHouse/ClickHouse/pull/22011) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Supported `replication_alter_partitions_sync=1` setting in `clickhouse-copier` for moving partitions from the helper table to the destination. Decreased default timeouts. Fixes [#21911](https://github.com/ClickHouse/ClickHouse/issues/21911). [#21912](https://github.com/ClickHouse/ClickHouse/pull/21912) ([turbo jason](https://github.com/songenjie)).
+* Show path to data directory of `EmbeddedRocksDB` tables in system tables. [#21903](https://github.com/ClickHouse/ClickHouse/pull/21903) ([tavplubix](https://github.com/tavplubix)).
+* Add profile event `HedgedRequestsChangeReplica`, change read data timeout from sec to ms. [#21886](https://github.com/ClickHouse/ClickHouse/pull/21886) ([Kruglov Pavel](https://github.com/Avogar)).
+* DiskS3 (experimental feature under development). Fixed a bug that made it impossible to move a directory if the destination is not empty and a cache disk is used. [#21837](https://github.com/ClickHouse/ClickHouse/pull/21837) ([Pavel Kovalenko](https://github.com/Jokser)).
+* Better formatting for `Array` and `Map` data types in Web UI. [#21798](https://github.com/ClickHouse/ClickHouse/pull/21798) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Update clusters only if their configurations were updated. [#21685](https://github.com/ClickHouse/ClickHouse/pull/21685) ([Kruglov Pavel](https://github.com/Avogar)).
+* Propagate query and session settings for distributed DDL queries. Set `distributed_ddl_entry_format_version` to 2 to enable this. Added `distributed_ddl_output_mode` setting. Supported modes: `none`, `throw` (default), `null_status_on_timeout` and `never_throw`. Miscellaneous fixes and improvements for `Replicated` database engine. [#21535](https://github.com/ClickHouse/ClickHouse/pull/21535) ([tavplubix](https://github.com/tavplubix)).
+* If `PODArray` was instantiated with an element size that is neither a fraction nor a multiple of 16, a buffer overflow was possible. No bugs exist in current releases. [#21533](https://github.com/ClickHouse/ClickHouse/pull/21533) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add `last_error_time`/`last_error_message`/`last_error_stacktrace`/`remote` columns for `system.errors`. [#21529](https://github.com/ClickHouse/ClickHouse/pull/21529) ([Azat Khuzhin](https://github.com/azat)).
+* Add aliases `simpleJSONHas`/`simpleJSONExtract{UInt, Int, Bool, Float, Raw, String}` for `visitParamHas`/`visitParamExtract{UInt, Int, Bool, Float, Raw, String}` (see the example after this list). Fixes #21383. [#21519](https://github.com/ClickHouse/ClickHouse/pull/21519) ([fastio](https://github.com/fastio)).
+* Add setting `optimize_skip_unused_shards_limit` to limit the number of sharding key values for `optimize_skip_unused_shards`. [#21512](https://github.com/ClickHouse/ClickHouse/pull/21512) ([Azat Khuzhin](https://github.com/azat)).
+* Improve `clickhouse-format` to not throw an exception when there are extra spaces or a comment after the last query, and to throw an exception early with a readable message when formatting `ASTInsertQuery` with data. [#21311](https://github.com/ClickHouse/ClickHouse/pull/21311) ([flynn](https://github.com/ucasFL)).
+* Improve support of integer keys in data type `Map`. [#21157](https://github.com/ClickHouse/ClickHouse/pull/21157) ([Anton Popov](https://github.com/CurtizJ)).
+* MaterializeMySQL: attempt to reconnect to MySQL if the connection is lost. [#20961](https://github.com/ClickHouse/ClickHouse/pull/20961) ([Håvard Kvålen](https://github.com/havardk)).
+* Support more cases to rewrite `CROSS JOIN` to `INNER JOIN`. [#20392](https://github.com/ClickHouse/ClickHouse/pull/20392) ([Vladimir](https://github.com/vdimir)).
+* Do not create empty parts on INSERT when the `optimize_on_insert` setting is enabled. Fixes [#20304](https://github.com/ClickHouse/ClickHouse/issues/20304). [#20387](https://github.com/ClickHouse/ClickHouse/pull/20387) ([Kruglov Pavel](https://github.com/Avogar)).
+* `MaterializeMySQL`: add minmax skipping index for `_version` column. [#20382](https://github.com/ClickHouse/ClickHouse/pull/20382) ([Stig Bakken](https://github.com/stigsb)).
+* Add option `--backslash` for `clickhouse-format`, which can add a backslash at the end of each line of the formatted query. [#21494](https://github.com/ClickHouse/ClickHouse/pull/21494) ([flynn](https://github.com/ucasFL)).
+* Now ClickHouse will not throw a `LOGICAL_ERROR` exception when we try to mutate an already-covered part. Fixes [#22013](https://github.com/ClickHouse/ClickHouse/issues/22013). [#22291](https://github.com/ClickHouse/ClickHouse/pull/22291) ([alesapin](https://github.com/alesapin)).
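+
+An example of the `simpleJSON*` aliases mentioned above (the JSON literal is illustrative):
+
+```sql
+SELECT simpleJSONHas('{"a":123}', 'a');          -- 1
+SELECT simpleJSONExtractUInt('{"a":123}', 'a');  -- 123, same as visitParamExtractUInt
+```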
+
+#### Bug Fix
+
+* Remove socket from epoll before cancelling packet receiver in `HedgedConnections` to prevent possible race. Fixes [#22161](https://github.com/ClickHouse/ClickHouse/issues/22161). [#22443](https://github.com/ClickHouse/ClickHouse/pull/22443) ([Kruglov Pavel](https://github.com/Avogar)).
+* Add (missing) memory accounting in parallel parsing routines. In previous versions OOM was possible when the resultset contains very large blocks of data. This closes [#22008](https://github.com/ClickHouse/ClickHouse/issues/22008). [#22425](https://github.com/ClickHouse/ClickHouse/pull/22425) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix exception which may happen when a `SELECT` has a constant `WHERE` condition and the source table has columns whose names are digits. [#22270](https://github.com/ClickHouse/ClickHouse/pull/22270) ([LiuNeng](https://github.com/liuneng1994)).
+* Fix query cancellation with `use_hedged_requests=0` and `async_socket_for_remote=1`. [#22183](https://github.com/ClickHouse/ClickHouse/pull/22183) ([Azat Khuzhin](https://github.com/azat)).
+* Fix uncaught exception in `InterserverIOHTTPHandler`. [#22146](https://github.com/ClickHouse/ClickHouse/pull/22146) ([Azat Khuzhin](https://github.com/azat)).
+* Fix docker entrypoint in case `http_port` is not in the config. [#22132](https://github.com/ClickHouse/ClickHouse/pull/22132) ([Ewout](https://github.com/devwout)).
+* Fix error `Invalid number of rows in Chunk` in `JOIN` with `TOTALS` and `arrayJoin`. Closes [#19303](https://github.com/ClickHouse/ClickHouse/issues/19303). [#22129](https://github.com/ClickHouse/ClickHouse/pull/22129) ([Vladimir](https://github.com/vdimir)).
+* Fix the name of the background thread pool used to poll messages from Kafka. A Kafka engine with the broken thread pool would not consume messages from the message queue. [#22122](https://github.com/ClickHouse/ClickHouse/pull/22122) ([fastio](https://github.com/fastio)).
+* Fix waiting for `OPTIMIZE` and `ALTER` queries for `ReplicatedMergeTree` table engines. Now the query will not hang when the table was detached or restarted. [#22118](https://github.com/ClickHouse/ClickHouse/pull/22118) ([alesapin](https://github.com/alesapin)).
+* Disable `async_socket_for_remote`/`use_hedged_requests` for buggy Linux kernels. [#22109](https://github.com/ClickHouse/ClickHouse/pull/22109) ([Azat Khuzhin](https://github.com/azat)).
+* Docker entrypoint: avoid chown of `.` in case when `LOG_PATH` is empty. Closes [#22100](https://github.com/ClickHouse/ClickHouse/issues/22100). [#22102](https://github.com/ClickHouse/ClickHouse/pull/22102) ([filimonov](https://github.com/filimonov)).
+* The function `decrypt` was lacking a check for the minimal size of data encrypted in `AEAD` mode. This closes [#21897](https://github.com/ClickHouse/ClickHouse/issues/21897). [#22064](https://github.com/ClickHouse/ClickHouse/pull/22064) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* In a rare case, a merge for `CollapsingMergeTree` may create a granule with `index_granularity + 1` rows. Because of this, the internal check added in [#18928](https://github.com/ClickHouse/ClickHouse/issues/18928) (affects 21.2 and 21.3) may fail with the error `Incomplete granules are not allowed while blocks are granules size`. This error did not allow parts to merge. [#21976](https://github.com/ClickHouse/ClickHouse/pull/21976) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Reverted [#15454](https://github.com/ClickHouse/ClickHouse/issues/15454) that may cause significant increase in memory usage while loading external dictionaries of hashed type. This closes [#21935](https://github.com/ClickHouse/ClickHouse/issues/21935). [#21948](https://github.com/ClickHouse/ClickHouse/pull/21948) ([Maksim Kita](https://github.com/kitaisreal)).
+* Prevent hedged connections overlaps (`Unknown packet 9 from server` error). [#21941](https://github.com/ClickHouse/ClickHouse/pull/21941) ([Azat Khuzhin](https://github.com/azat)).
+* Fix reading the HTTP POST request with "multipart/form-data" content type in some cases. [#21936](https://github.com/ClickHouse/ClickHouse/pull/21936) ([Ivan](https://github.com/abyss7)).
+* Fix wrong `ORDER BY` results when a query contains window functions, and optimization for reading in primary key order is applied. Fixes [#21828](https://github.com/ClickHouse/ClickHouse/issues/21828). [#21915](https://github.com/ClickHouse/ClickHouse/pull/21915) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Fix deadlock in first catboost model execution. Closes [#13832](https://github.com/ClickHouse/ClickHouse/issues/13832). [#21844](https://github.com/ClickHouse/ClickHouse/pull/21844) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fix incorrect query result (and possible crash) which could happen when `WHERE` or `HAVING` condition is pushed before `GROUP BY`. Fixes [#21773](https://github.com/ClickHouse/ClickHouse/issues/21773). [#21841](https://github.com/ClickHouse/ClickHouse/pull/21841) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Better error handling and logging in `WriteBufferFromS3`. [#21836](https://github.com/ClickHouse/ClickHouse/pull/21836) ([Pavel Kovalenko](https://github.com/Jokser)).
+* Fix possible crashes in aggregate functions with combinator `Distinct` while using two-level aggregation. This is a follow-up fix of [#18365](https://github.com/ClickHouse/ClickHouse/pull/18365). It can only be reproduced in a production environment. [#21818](https://github.com/ClickHouse/ClickHouse/pull/21818) ([Amos Bird](https://github.com/amosbird)).
+* Fix scalar subquery index analysis. This fixes [#21717](https://github.com/ClickHouse/ClickHouse/issues/21717), which was introduced in [#18896](https://github.com/ClickHouse/ClickHouse/pull/18896). [#21766](https://github.com/ClickHouse/ClickHouse/pull/21766) ([Amos Bird](https://github.com/amosbird)).
+* Fix bug for `ReplicatedMergeTree` table engines when an `ALTER MODIFY COLUMN` query doesn't change the type of a `Decimal` column if its size (32 bit or 64 bit) doesn't change. [#21728](https://github.com/ClickHouse/ClickHouse/pull/21728) ([alesapin](https://github.com/alesapin)).
+* Fix possible infinite waiting when concurrent `OPTIMIZE` and `DROP` are run for `ReplicatedMergeTree`. [#21716](https://github.com/ClickHouse/ClickHouse/pull/21716) ([Azat Khuzhin](https://github.com/azat)).
+* Fix function `arrayElement` with type `Map` for constant integer arguments. [#21699](https://github.com/ClickHouse/ClickHouse/pull/21699) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix SIGSEGV on not existing attributes from `ip_trie` with `access_to_key_from_attributes`. [#21692](https://github.com/ClickHouse/ClickHouse/pull/21692) ([Azat Khuzhin](https://github.com/azat)).
+* The server now starts accepting connections only after `DDLWorker` and dictionaries initialization. [#21676](https://github.com/ClickHouse/ClickHouse/pull/21676) ([Azat Khuzhin](https://github.com/azat)).
+* Add type conversion for keys of tables of type `Join` (previously led to SIGSEGV). [#21646](https://github.com/ClickHouse/ClickHouse/pull/21646) ([Azat Khuzhin](https://github.com/azat)).
+* Fix cancellation of distributed requests (for example a simple select from multiple shards with a limit, i.e. `select * from remote('127.{2,3}', system.numbers) limit 100`) with `async_socket_for_remote=1`. [#21643](https://github.com/ClickHouse/ClickHouse/pull/21643) ([Azat Khuzhin](https://github.com/azat)).
+* Fix `fsync_part_directory` for horizontal merge. [#21642](https://github.com/ClickHouse/ClickHouse/pull/21642) ([Azat Khuzhin](https://github.com/azat)).
+* Remove unknown columns from joined table in `WHERE` for queries to external database engines (MySQL, PostgreSQL). close [#14614](https://github.com/ClickHouse/ClickHouse/issues/14614), close [#19288](https://github.com/ClickHouse/ClickHouse/issues/19288) (dup), close [#19645](https://github.com/ClickHouse/ClickHouse/issues/19645) (dup). [#21640](https://github.com/ClickHouse/ClickHouse/pull/21640) ([Vladimir](https://github.com/vdimir)).
+* `std::terminate` was called if there was an error writing data into S3. [#21624](https://github.com/ClickHouse/ClickHouse/pull/21624) ([Vladimir](https://github.com/vdimir)).
+* Fix possible error `Cannot find column` when `optimize_skip_unused_shards` is enabled and zero shards are used. [#21579](https://github.com/ClickHouse/ClickHouse/pull/21579) ([Azat Khuzhin](https://github.com/azat)).
+* If a query has a constant `WHERE` condition and the setting `optimize_skip_unused_shards` is enabled, all shards may be skipped and the query could return an incorrect empty result. [#21550](https://github.com/ClickHouse/ClickHouse/pull/21550) ([Amos Bird](https://github.com/amosbird)).
+* Fix table function `clusterAllReplicas` returning wrong `_shard_num`. Closes [#21481](https://github.com/ClickHouse/ClickHouse/issues/21481). [#21498](https://github.com/ClickHouse/ClickHouse/pull/21498) ([flynn](https://github.com/ucasFL)).
+* Fix S3 table holding old credentials after config update. [#21457](https://github.com/ClickHouse/ClickHouse/pull/21457) ([Grigory Pervakov](https://github.com/GrigoryPervakov)).
+* Fixed race on SSL object inside `SecureSocket` in Poco. [#21456](https://github.com/ClickHouse/ClickHouse/pull/21456) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fix `Avro` format parsing for `Kafka`. Fixes [#21437](https://github.com/ClickHouse/ClickHouse/issues/21437). [#21438](https://github.com/ClickHouse/ClickHouse/pull/21438) ([Ilya Golshtein](https://github.com/ilejn)).
+* Fix receive and send timeouts and non-blocking read in secure socket. [#21429](https://github.com/ClickHouse/ClickHouse/pull/21429) ([Kruglov Pavel](https://github.com/Avogar)).
+* `force_drop_table` flag didn't work for `MATERIALIZED VIEW`, it's fixed. Fixes [#18943](https://github.com/ClickHouse/ClickHouse/issues/18943). [#20626](https://github.com/ClickHouse/ClickHouse/pull/20626) ([tavplubix](https://github.com/tavplubix)).
+* Fix name clashes in `PredicateRewriteVisitor`. It caused incorrect `WHERE` filtration after full join. Close [#20497](https://github.com/ClickHouse/ClickHouse/issues/20497). [#20622](https://github.com/ClickHouse/ClickHouse/pull/20622) ([Vladimir](https://github.com/vdimir)).
+
+#### Build/Testing/Packaging Improvement
+
+* Add [Jepsen](https://github.com/jepsen-io/jepsen) tests for ClickHouse Keeper. [#21677](https://github.com/ClickHouse/ClickHouse/pull/21677) ([alesapin](https://github.com/alesapin)).
+* Run stateless tests in parallel in CI. Depends on [#22181](https://github.com/ClickHouse/ClickHouse/issues/22181). [#22300](https://github.com/ClickHouse/ClickHouse/pull/22300) ([alesapin](https://github.com/alesapin)).
+* Enable status check for [SQLancer](https://github.com/sqlancer/sqlancer) CI run. [#22015](https://github.com/ClickHouse/ClickHouse/pull/22015) ([Ilya Yatsishin](https://github.com/qoega)).
+* Multiple preparations for PowerPC builds: Enable the bundled openldap on `ppc64le`. [#22487](https://github.com/ClickHouse/ClickHouse/pull/22487) ([Kfir Itzhak](https://github.com/mastertheknife)). Enable compiling on `ppc64le` with Clang. [#22476](https://github.com/ClickHouse/ClickHouse/pull/22476) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix compiling boost on `ppc64le`. [#22474](https://github.com/ClickHouse/ClickHouse/pull/22474) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix CMake error about internal CMake variable `CMAKE_ASM_COMPILE_OBJECT` not set on `ppc64le`. [#22469](https://github.com/ClickHouse/ClickHouse/pull/22469) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix Fedora/RHEL/CentOS not finding `libclang_rt.builtins` on `ppc64le`. [#22458](https://github.com/ClickHouse/ClickHouse/pull/22458) ([Kfir Itzhak](https://github.com/mastertheknife)). Enable building with `jemalloc` on `ppc64le`. [#22447](https://github.com/ClickHouse/ClickHouse/pull/22447) ([Kfir Itzhak](https://github.com/mastertheknife)). Fix ClickHouse's config embedding and cctz's timezone embedding on `ppc64le`. [#22445](https://github.com/ClickHouse/ClickHouse/pull/22445) ([Kfir Itzhak](https://github.com/mastertheknife)). Fixed compiling on `ppc64le` and use the correct instruction pointer register on `ppc64le`. [#22430](https://github.com/ClickHouse/ClickHouse/pull/22430) ([Kfir Itzhak](https://github.com/mastertheknife)).
+* Re-enable the S3 (AWS) library on `aarch64`. [#22484](https://github.com/ClickHouse/ClickHouse/pull/22484) ([Kfir Itzhak](https://github.com/mastertheknife)).
+* Add `tzdata` to Docker containers because reading `ORC` formats requires it. This closes [#14156](https://github.com/ClickHouse/ClickHouse/issues/14156). [#22000](https://github.com/ClickHouse/ClickHouse/pull/22000) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Introduce 2 arguments for `clickhouse-server` image Dockerfile: `deb_location` & `single_binary_location`. [#21977](https://github.com/ClickHouse/ClickHouse/pull/21977) ([filimonov](https://github.com/filimonov)).
+* Allow using clang-tidy with release builds by enabling assertions when it is used. [#21914](https://github.com/ClickHouse/ClickHouse/pull/21914) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add llvm-12 binaries name to search in cmake scripts. Implicit constants conversions to mute clang warnings. Updated submodules to build with CMake 3.19. Mute recursion in macro expansion in `readpassphrase` library. Deprecated `-fuse-ld` changed to `--ld-path` for clang. [#21597](https://github.com/ClickHouse/ClickHouse/pull/21597) ([Ilya Yatsishin](https://github.com/qoega)).
+* Updating `docker/test/testflows/runner/dockerd-entrypoint.sh` to use Yandex dockerhub-proxy, because Docker Hub has enabled very restrictive rate limits. [#21551](https://github.com/ClickHouse/ClickHouse/pull/21551) ([vzakaznikov](https://github.com/vzakaznikov)).
+* Fix macOS shared lib build. [#20184](https://github.com/ClickHouse/ClickHouse/pull/20184) ([nvartolomei](https://github.com/nvartolomei)).
+* Add `ctime` option to `zookeeper-dump-tree`. It allows dumping node creation time. [#21842](https://github.com/ClickHouse/ClickHouse/pull/21842) ([Ilya](https://github.com/HumanUser)).
+
+
+## ClickHouse release 21.3 (LTS)
+
+### ClickHouse release v21.3, 2021-03-12
+
+#### Backward Incompatible Change
+
+* Now it's not allowed to create MergeTree tables in the old syntax with table `TTL` because it was just ignored. Attaching old tables is still possible. [#20282](https://github.com/ClickHouse/ClickHouse/pull/20282) ([alesapin](https://github.com/alesapin)).
+* Now all case-insensitive function names will be rewritten to their canonical representations. This is needed for projection query routing (the upcoming feature). [#20174](https://github.com/ClickHouse/ClickHouse/pull/20174) ([Amos Bird](https://github.com/amosbird)).
+* Fix creation of `TTL` in cases when its expression is a function and is the same as the `ORDER BY` key. Now it's allowed to set custom aggregation for primary key columns in `TTL` with `GROUP BY` (see the sketch after this list). Backward incompatible: for primary key columns which are not in `GROUP BY` and aren't set explicitly, the function `any` is now applied instead of `max` when TTL expires. Also, if you use TTL with `WHERE` or `GROUP BY`, you can see exceptions at merges while making a rolling update. [#15450](https://github.com/ClickHouse/ClickHouse/pull/15450) ([Anton Popov](https://github.com/CurtizJ)).
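+
+A hypothetical table illustrating the `TTL ... GROUP BY` behaviour described above:
+
+```sql
+CREATE TABLE t
+(
+    key UInt64,
+    ts DateTime,
+    value UInt64
+)
+ENGINE = MergeTree
+ORDER BY (key, ts)
+TTL ts + INTERVAL 1 MONTH GROUP BY key SET value = max(value);
+-- On expiry, `value` is aggregated with max(); for `ts` (in the primary key, but not in
+-- GROUP BY and not set explicitly) the function `any` is now applied instead of `max`.
+```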
+
+#### New Feature
+
+* Add file engine settings: `engine_file_empty_if_not_exists` and `engine_file_truncate_on_insert`. [#20620](https://github.com/ClickHouse/ClickHouse/pull/20620) ([M0r64n](https://github.com/M0r64n)).
+* Add aggregate function `deltaSum` for summing the differences between consecutive rows (see the example after this list). [#20057](https://github.com/ClickHouse/ClickHouse/pull/20057) ([Russ Frank](https://github.com/rf)).
+* New `event_time_microseconds` column in `system.part_log` table. [#20027](https://github.com/ClickHouse/ClickHouse/pull/20027) ([Bharat Nallan](https://github.com/bharatnc)).
+* Added `timezoneOffset(datetime)` function which returns the offset from UTC in seconds. This closes [#19850](https://github.com/ClickHouse/ClickHouse/issues/19850). [#19962](https://github.com/ClickHouse/ClickHouse/pull/19962) ([keenwolf](https://github.com/keen-wolf)).
+* Add setting `insert_shard_id` to support inserting data into a specific shard of a distributed table. [#19961](https://github.com/ClickHouse/ClickHouse/pull/19961) ([flynn](https://github.com/ucasFL)).
+* Function `reinterpretAs` updated to support big integers. Fixes [#19691](https://github.com/ClickHouse/ClickHouse/issues/19691). [#19858](https://github.com/ClickHouse/ClickHouse/pull/19858) ([Maksim Kita](https://github.com/kitaisreal)).
+* Added Server Side Encryption Customer Keys (the `x-amz-server-side-encryption-customer-(key/md5)` header) support in S3 client. See [the link](https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). Closes [#19428](https://github.com/ClickHouse/ClickHouse/issues/19428). [#19748](https://github.com/ClickHouse/ClickHouse/pull/19748) ([Vladimir Chebotarev](https://github.com/excitoon)).
+* Added `implicit_key` option for `executable` dictionary source. It allows avoiding printing the key for every record if records come in the same order as the input keys. Implements [#14527](https://github.com/ClickHouse/ClickHouse/issues/14527). [#19677](https://github.com/ClickHouse/ClickHouse/pull/19677) ([Maksim Kita](https://github.com/kitaisreal)).
+* Add quota types `query_selects` and `query_inserts`. [#19603](https://github.com/ClickHouse/ClickHouse/pull/19603) ([JackyWoo](https://github.com/JackyWoo)).
+* Add function `extractTextFromHTML` [#19600](https://github.com/ClickHouse/ClickHouse/pull/19600) ([zlx19950903](https://github.com/zlx19950903)), ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Tables with `MergeTree*` engine now have two new table-level settings for query concurrency control. Setting `max_concurrent_queries` limits the number of concurrently executed queries which are related to this table. Setting `min_marks_to_honor_max_concurrent_queries` tells to apply previous setting only if query reads at least this number of marks. [#19544](https://github.com/ClickHouse/ClickHouse/pull/19544) ([Amos Bird](https://github.com/amosbird)).
+* Added `file` function to read a file from the `user_files` directory as a String. This is different from the `file` table function. This implements [#18851](https://github.com/ClickHouse/ClickHouse/issues/18851). [#19204](https://github.com/ClickHouse/ClickHouse/pull/19204) ([keenwolf](https://github.com/keen-wolf)).
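+
+A small example of `deltaSum` from the list above (assuming rows arrive in the listed order):
+
+```sql
+SELECT deltaSum(x) FROM (SELECT arrayJoin([1, 2, 3, 5]) AS x);
+-- 4, i.e. (2 - 1) + (3 - 2) + (5 - 3)
+```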
+
+#### Experimental feature
+
+* Add experimental `Replicated` database engine. It replicates DDL queries across multiple hosts. [#16193](https://github.com/ClickHouse/ClickHouse/pull/16193) ([tavplubix](https://github.com/tavplubix)).
+* Introduce experimental support for window functions, enabled with `allow_experimental_window_functions = 1`. This is a preliminary, alpha-quality implementation that is not suitable for production use and will change in backward-incompatible ways in future releases. Please see [the documentation](https://github.com/ClickHouse/ClickHouse/blob/master/docs/en/sql-reference/window-functions/index.md#experimental-window-functions) for the list of supported features. [#20337](https://github.com/ClickHouse/ClickHouse/pull/20337) ([Alexander Kuzmenkov](https://github.com/akuzm)).
+* Add the ability to backup/restore metadata files for DiskS3. [#18377](https://github.com/ClickHouse/ClickHouse/pull/18377) ([Pavel Kovalenko](https://github.com/Jokser)).
+
+#### Performance Improvement
+
+* Hedged requests for remote queries. When the setting `use_hedged_requests` is enabled (off by default), allow establishing many connections with different replicas for a query. A new connection is opened in case existing connection(s) with replica(s) were not established within `hedged_connection_timeout` or no data was received within `receive_data_timeout`. The query uses the first connection which sends a non-empty progress packet (or a data packet, if `allow_changing_replica_until_first_data_packet` is set); other connections are cancelled. Queries with `max_parallel_replicas > 1` are supported. [#19291](https://github.com/ClickHouse/ClickHouse/pull/19291) ([Kruglov Pavel](https://github.com/Avogar)). This significantly reduces tail latencies on very large clusters.
+* Added support for `PREWHERE` (and enable the corresponding optimization) when tables have row-level security expressions specified. [#19576](https://github.com/ClickHouse/ClickHouse/pull/19576) ([Denis Glazachev](https://github.com/traceon)).
+* The setting `distributed_aggregation_memory_efficient` is enabled by default. It will lower memory usage and improve performance of distributed queries. [#20599](https://github.com/ClickHouse/ClickHouse/pull/20599) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Improve performance of GROUP BY multiple fixed size keys. [#20472](https://github.com/ClickHouse/ClickHouse/pull/20472) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Improve performance of aggregate functions by more strict aliasing. [#19946](https://github.com/ClickHouse/ClickHouse/pull/19946) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Speed up reading from `Memory` tables in extreme cases (when the reading speed is on the order of 50 GB/sec) by simplification of the pipeline and (consequently) less lock contention in pipeline scheduling. [#20468](https://github.com/ClickHouse/ClickHouse/pull/20468) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Partially reimplement HTTP server to make fewer copies of incoming and outgoing data. It gives up to 1.5x performance improvement on inserting long records over HTTP. [#19516](https://github.com/ClickHouse/ClickHouse/pull/19516) ([Ivan](https://github.com/abyss7)).
+* Add `compress` setting for `Memory` tables. If it's enabled the table will use less RAM. On some machines and datasets it can also work faster on SELECT, but it is not always the case. This closes [#20093](https://github.com/ClickHouse/ClickHouse/issues/20093). Note: there are reasons why Memory tables can work slower than MergeTree: (1) lack of compression (2) static size of blocks (3) lack of indices and prewhere... [#20168](https://github.com/ClickHouse/ClickHouse/pull/20168) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Slightly better code in aggregation. [#20978](https://github.com/ClickHouse/ClickHouse/pull/20978) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add back `intDiv`/`modulo` specializations for better performance. This fixes [#21293](https://github.com/ClickHouse/ClickHouse/issues/21293). The regression was introduced in https://github.com/ClickHouse/ClickHouse/pull/18145. [#21307](https://github.com/ClickHouse/ClickHouse/pull/21307) ([Amos Bird](https://github.com/amosbird)).
+* Do not squash blocks too much on INSERT SELECT if inserting into Memory table. In previous versions inefficient data representation was created in Memory table after INSERT SELECT. This closes [#13052](https://github.com/ClickHouse/ClickHouse/issues/13052). [#20169](https://github.com/ClickHouse/ClickHouse/pull/20169) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix at least one case when DataType parser may have exponential complexity (found by fuzzer). This closes [#20096](https://github.com/ClickHouse/ClickHouse/issues/20096). [#20132](https://github.com/ClickHouse/ClickHouse/pull/20132) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Parallelize `SELECT` with `FINAL` for a single part with level > 0 when the `do_not_merge_across_partitions_select_final` setting is 1 (see the sketch after this list). [#19375](https://github.com/ClickHouse/ClickHouse/pull/19375) ([Kruglov Pavel](https://github.com/Avogar)).
+* Fill only requested columns when querying `system.parts` and `system.parts_columns`. Closes [#19570](https://github.com/ClickHouse/ClickHouse/issues/19570). [#21035](https://github.com/ClickHouse/ClickHouse/pull/21035) ([Anmol Arora](https://github.com/anmolarora)).
+* Perform algebraic optimizations of arithmetic expressions inside the `avg` aggregate function. Closes [#20092](https://github.com/ClickHouse/ClickHouse/issues/20092). [#20183](https://github.com/ClickHouse/ClickHouse/pull/20183) ([flynn](https://github.com/ucasFL)).
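+
+A sketch of the `FINAL` parallelization mentioned above (`events` is a hypothetical `ReplacingMergeTree` table):
+
+```sql
+SET do_not_merge_across_partitions_select_final = 1;
+SELECT * FROM events FINAL WHERE key = 42;
+```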
+
+#### Improvement
+
+* Case-insensitive compression methods for table functions. Also fixed the LZMA compression method, which was checked in upper case. [#21416](https://github.com/ClickHouse/ClickHouse/pull/21416) ([Vladimir Chebotarev](https://github.com/excitoon)).
+* Add two settings to delay or throw error during insertion when there are too many inactive parts. This is useful when server fails to clean up parts quickly enough. [#20178](https://github.com/ClickHouse/ClickHouse/pull/20178) ([Amos Bird](https://github.com/amosbird)).
+* Provide better compatibility for MySQL clients: 1. mysql jdbc, 2. mycli. [#21367](https://github.com/ClickHouse/ClickHouse/pull/21367) ([Amos Bird](https://github.com/amosbird)).
+* Forbid dropping a column if it's referenced by a materialized view. Closes [#21164](https://github.com/ClickHouse/ClickHouse/issues/21164). [#21303](https://github.com/ClickHouse/ClickHouse/pull/21303) ([flynn](https://github.com/ucasFL)).
+* MySQL dictionary source will now retry unexpected connection failures (`Lost connection to MySQL server during query`) which sometimes happen on SSL/TLS connections. [#21237](https://github.com/ClickHouse/ClickHouse/pull/21237) ([Alexander Kazakov](https://github.com/Akazz)).
+* Usability improvement: more consistent `DateTime64` parsing: recognize the case when unix timestamp with subsecond resolution is specified as scaled integer (like `1111111111222` instead of `1111111111.222`). This closes [#13194](https://github.com/ClickHouse/ClickHouse/issues/13194). [#21053](https://github.com/ClickHouse/ClickHouse/pull/21053) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Do only merging of sorted blocks on the initiator with `distributed_group_by_no_merge`. [#20882](https://github.com/ClickHouse/ClickHouse/pull/20882) ([Azat Khuzhin](https://github.com/azat)).
+* When loading config for a MySQL source, ClickHouse will now randomize the list of replicas with the same priority to ensure the round-robin logic of picking the MySQL endpoint. This closes [#20629](https://github.com/ClickHouse/ClickHouse/issues/20629). [#20632](https://github.com/ClickHouse/ClickHouse/pull/20632) ([Alexander Kazakov](https://github.com/Akazz)).
+* Function `reinterpretAs(x, Type)` was renamed to `reinterpret(x, Type)`. [#20611](https://github.com/ClickHouse/ClickHouse/pull/20611) ([Maksim Kita](https://github.com/kitaisreal)).
+* Support vhost for RabbitMQ engine [#20576](https://github.com/ClickHouse/ClickHouse/issues/20576). [#20596](https://github.com/ClickHouse/ClickHouse/pull/20596) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Improved serialization for data types combined of Arrays and Tuples. Improved matching enum data types to protobuf enum type. Fixed serialization of the `Map` data type. Omitted values are now set by default. [#20506](https://github.com/ClickHouse/ClickHouse/pull/20506) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fixed race between execution of distributed DDL tasks and cleanup of DDL queue. Now DDL task cannot be removed from ZooKeeper if there are active workers. Fixes [#20016](https://github.com/ClickHouse/ClickHouse/issues/20016). [#20448](https://github.com/ClickHouse/ClickHouse/pull/20448) ([tavplubix](https://github.com/tavplubix)).
+* Make FQDN and other DNS related functions work correctly in alpine images. [#20336](https://github.com/ClickHouse/ClickHouse/pull/20336) ([filimonov](https://github.com/filimonov)).
+* Do not allow early constant folding of explicitly forbidden functions. [#20303](https://github.com/ClickHouse/ClickHouse/pull/20303) ([Azat Khuzhin](https://github.com/azat)).
+* Implicit conversion from integer to `Decimal` type might succeed if the integer value does not fit into the `Decimal` type. Now it throws `ARGUMENT_OUT_OF_BOUND`. [#20232](https://github.com/ClickHouse/ClickHouse/pull/20232) ([tavplubix](https://github.com/tavplubix)).
+* Lockless `SYSTEM FLUSH DISTRIBUTED`. [#20215](https://github.com/ClickHouse/ClickHouse/pull/20215) ([Azat Khuzhin](https://github.com/azat)).
+* Normalize `count(constant)` and `sum(1)` to `count()`. This is needed for projection query routing. [#20175](https://github.com/ClickHouse/ClickHouse/pull/20175) ([Amos Bird](https://github.com/amosbird)).
+* Support all native integer types in bitmap functions. [#20171](https://github.com/ClickHouse/ClickHouse/pull/20171) ([Amos Bird](https://github.com/amosbird)).
+* Updated `CacheDictionary`, `ComplexCacheDictionary`, `SSDCacheDictionary`, `SSDComplexKeyDictionary` to use LRUHashMap as underlying index. [#20164](https://github.com/ClickHouse/ClickHouse/pull/20164) ([Maksim Kita](https://github.com/kitaisreal)).
+* The setting `access_management` is now configurable on startup by providing `CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT`, defaults to disabled (`0`) which was the prior value. [#20139](https://github.com/ClickHouse/ClickHouse/pull/20139) ([Marquitos](https://github.com/sonirico)).
+* Fix `toDateTime64(toDate()/toDateTime())` for `DateTime64`: implement `DateTime64` clamping to match `DateTime` behaviour. [#20131](https://github.com/ClickHouse/ClickHouse/pull/20131) ([Azat Khuzhin](https://github.com/azat)).
+* Quota improvements: SHOW TABLES is now considered as one query in the quota calculations, not two queries. SYSTEM queries now consume quota. Fix calculation of interval's end in quota consumption. [#20106](https://github.com/ClickHouse/ClickHouse/pull/20106) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Support `path IN (set)` expressions for the `system.zookeeper` table (see the example after this list). [#20105](https://github.com/ClickHouse/ClickHouse/pull/20105) ([小路](https://github.com/nicelulu)).
+* Show full details of `MaterializeMySQL` tables in `system.tables`. [#20051](https://github.com/ClickHouse/ClickHouse/pull/20051) ([Stig Bakken](https://github.com/stigsb)).
+* Fix data race in executable dictionary that was possible only on misuse (when the script returns data ignoring its input). [#20045](https://github.com/ClickHouse/ClickHouse/pull/20045) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* The value of the MYSQL_OPT_RECONNECT option can now be controlled by the `opt_reconnect` parameter in the config section of a MySQL replica. [#19998](https://github.com/ClickHouse/ClickHouse/pull/19998) ([Alexander Kazakov](https://github.com/Akazz)).
+* If the user calls `JSONExtract` function with `Float32` type requested, allow inaccurate conversion to the result type. For example, the number `0.1` in JSON is double precision and is not representable in `Float32`, but the user still wants to get it. Previous versions returned 0 for non-Nullable type and NULL for Nullable type to indicate that the conversion is imprecise. The logic was 100% correct, but it was surprising to users and led to questions. This closes [#13962](https://github.com/ClickHouse/ClickHouse/issues/13962). [#19960](https://github.com/ClickHouse/ClickHouse/pull/19960) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add conversion of block structure for INSERT into Distributed tables if it does not match. [#19947](https://github.com/ClickHouse/ClickHouse/pull/19947) ([Azat Khuzhin](https://github.com/azat)).
+* Improvement for the `system.distributed_ddl_queue` table. Initialize MaxDDLEntryID to the last value after restarting. Before this PR, MaxDDLEntryID would remain zero until a new DDLTask is processed. [#19924](https://github.com/ClickHouse/ClickHouse/pull/19924) ([Amos Bird](https://github.com/amosbird)).
+* Show `MaterializeMySQL` tables in `system.parts`. [#19770](https://github.com/ClickHouse/ClickHouse/pull/19770) ([Stig Bakken](https://github.com/stigsb)).
+* Add separate config directive for `Buffer` profile. [#19721](https://github.com/ClickHouse/ClickHouse/pull/19721) ([Azat Khuzhin](https://github.com/azat)).
+* Move conditions that are not related to JOIN to WHERE clause. [#18720](https://github.com/ClickHouse/ClickHouse/issues/18720). [#19685](https://github.com/ClickHouse/ClickHouse/pull/19685) ([hexiaoting](https://github.com/hexiaoting)).
+* Add ability to throttle INSERT into Distributed based on the amount of pending bytes for async send (`bytes_to_delay_insert`/`max_delay_to_insert` and `bytes_to_throw_insert` settings for the `Distributed` engine have been added). [#19673](https://github.com/ClickHouse/ClickHouse/pull/19673) ([Azat Khuzhin](https://github.com/azat)).
+* Fix some rare cases when write errors can be ignored in destructors. [#19451](https://github.com/ClickHouse/ClickHouse/pull/19451) ([Azat Khuzhin](https://github.com/azat)).
+* Print inline frames in stack traces for fatal errors. [#19317](https://github.com/ClickHouse/ClickHouse/pull/19317) ([Ivan](https://github.com/abyss7)).
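+
+An example of the new `path IN (set)` support for `system.zookeeper` (the paths are illustrative):
+
+```sql
+SELECT name, path
+FROM system.zookeeper
+WHERE path IN ('/clickhouse', '/clickhouse/tables');
+```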
+
+#### Bug Fix
+
+* Fix redundant reconnects to ZooKeeper and the possibility of two active sessions for a single clickhouse server. Both problems were introduced in #14678. [#21264](https://github.com/ClickHouse/ClickHouse/pull/21264) ([alesapin](https://github.com/alesapin)).
+* Fix error `Bad cast from type ... to DB::ColumnLowCardinality` while inserting into a table with a `LowCardinality` column from `Values` format. Fixes #21140. [#21357](https://github.com/ClickHouse/ClickHouse/pull/21357) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix a deadlock in `ALTER DELETE` mutations for non-replicated MergeTree table engines when the predicate contains the table itself. Fixes [#20558](https://github.com/ClickHouse/ClickHouse/issues/20558). [#21477](https://github.com/ClickHouse/ClickHouse/pull/21477) ([alesapin](https://github.com/alesapin)).
+* Fix SIGSEGV for distributed queries on failures. [#21434](https://github.com/ClickHouse/ClickHouse/pull/21434) ([Azat Khuzhin](https://github.com/azat)).
+* Now `ALTER MODIFY COLUMN` queries will correctly affect changes in partition key, skip indices, TTLs, and so on. Fixes [#13675](https://github.com/ClickHouse/ClickHouse/issues/13675). [#21334](https://github.com/ClickHouse/ClickHouse/pull/21334) ([alesapin](https://github.com/alesapin)).
+* Fix bug with `join_use_nulls` and joining `TOTALS` from subqueries. This closes [#19362](https://github.com/ClickHouse/ClickHouse/issues/19362) and [#21137](https://github.com/ClickHouse/ClickHouse/issues/21137). [#21248](https://github.com/ClickHouse/ClickHouse/pull/21248) ([vdimir](https://github.com/vdimir)).
+* Fix crash in `EXPLAIN` for query with `UNION`. Fixes [#20876](https://github.com/ClickHouse/ClickHouse/issues/20876), [#21170](https://github.com/ClickHouse/ClickHouse/issues/21170). [#21246](https://github.com/ClickHouse/ClickHouse/pull/21246) ([flynn](https://github.com/ucasFL)).
+* Now mutations are allowed only for table engines that support them (the MergeTree family, Memory, MaterializedView). Other engines will report a clearer error. Fixes [#21168](https://github.com/ClickHouse/ClickHouse/issues/21168). [#21183](https://github.com/ClickHouse/ClickHouse/pull/21183) ([alesapin](https://github.com/alesapin)).
+* Fixes [#21112](https://github.com/ClickHouse/ClickHouse/issues/21112). Fixed a bug that could cause duplicates with an insert query (if one of the callbacks came a little too late). [#21138](https://github.com/ClickHouse/ClickHouse/pull/21138) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix `input_format_null_as_default` not taking effect when types are nullable. This fixes [#21116](https://github.com/ClickHouse/ClickHouse/issues/21116). [#21121](https://github.com/ClickHouse/ClickHouse/pull/21121) ([Amos Bird](https://github.com/amosbird)).
+* Fix a bug related to casting Tuple to Map. Closes [#21029](https://github.com/ClickHouse/ClickHouse/issues/21029). [#21120](https://github.com/ClickHouse/ClickHouse/pull/21120) ([hexiaoting](https://github.com/hexiaoting)).
+* Fix a metadata leak when a Replicated*MergeTree table with a custom (non-default) ZooKeeper cluster is dropped. [#21119](https://github.com/ClickHouse/ClickHouse/pull/21119) ([fastio](https://github.com/fastio)).
+* Fix a type mismatch issue when using LowCardinality keys in joinGet. This fixes [#21114](https://github.com/ClickHouse/ClickHouse/issues/21114). [#21117](https://github.com/ClickHouse/ClickHouse/pull/21117) ([Amos Bird](https://github.com/amosbird)).
+* Fix `default_replica_path` and `default_replica_name` values being ignored for the Replicated(*)MergeTree engine when the engine requires other parameters to be specified. [#21060](https://github.com/ClickHouse/ClickHouse/pull/21060) ([mxzlxy](https://github.com/mxzlxy)).
+* Out-of-bound memory access was possible when formatting a specifically crafted out-of-range value of type `DateTime64`. This closes [#20494](https://github.com/ClickHouse/ClickHouse/issues/20494). This closes [#20543](https://github.com/ClickHouse/ClickHouse/issues/20543). [#21023](https://github.com/ClickHouse/ClickHouse/pull/21023) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Block parallel insertions into storage join. [#21009](https://github.com/ClickHouse/ClickHouse/pull/21009) ([vdimir](https://github.com/vdimir)).
+* Fixed behaviour when `ALTER MODIFY COLUMN` created a mutation that would knowingly fail. [#21007](https://github.com/ClickHouse/ClickHouse/pull/21007) ([Anton Popov](https://github.com/CurtizJ)).
+* Closes [#9969](https://github.com/ClickHouse/ClickHouse/issues/9969). Fixed a Brotli HTTP compression error that reproduced for large data sizes with a slightly complicated structure and the JSON output format. Updated Brotli to the latest version to include the fix for "rare access to uninitialized data in ring-buffer". [#20991](https://github.com/ClickHouse/ClickHouse/pull/20991) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Fix 'Empty task was returned from async task queue' on query cancellation. [#20881](https://github.com/ClickHouse/ClickHouse/pull/20881) ([Azat Khuzhin](https://github.com/azat)).
+* `USE database;` query did not work when using MySQL 5.7 client to connect to ClickHouse server, it's fixed. Fixes [#18926](https://github.com/ClickHouse/ClickHouse/issues/18926). [#20878](https://github.com/ClickHouse/ClickHouse/pull/20878) ([tavplubix](https://github.com/tavplubix)).
+* Fix usage of `-Distinct` combinator with `-State` combinator in aggregate functions. [#20866](https://github.com/ClickHouse/ClickHouse/pull/20866) ([Anton Popov](https://github.com/CurtizJ)).
+* Fix subquery with UNION DISTINCT and LIMIT clause. Closes [#20597](https://github.com/ClickHouse/ClickHouse/issues/20597). [#20610](https://github.com/ClickHouse/ClickHouse/pull/20610) ([flynn](https://github.com/ucasFL)).
+* Fixed inconsistent behavior of dictionaries for queries that look up absent keys in a dictionary. [#20578](https://github.com/ClickHouse/ClickHouse/pull/20578) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fix the number of threads for scalar subqueries and subqueries for index (after [#19007](https://github.com/ClickHouse/ClickHouse/issues/19007) single thread was always used). Fixes [#20457](https://github.com/ClickHouse/ClickHouse/issues/20457), [#20512](https://github.com/ClickHouse/ClickHouse/issues/20512). [#20550](https://github.com/ClickHouse/ClickHouse/pull/20550) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Fix a crash which could happen if an unknown packet was received from a remote query (was introduced in [#17868](https://github.com/ClickHouse/ClickHouse/issues/17868)). [#20547](https://github.com/ClickHouse/ClickHouse/pull/20547) ([Azat Khuzhin](https://github.com/azat)).
+* Add proper checks while parsing directory names for async INSERT (fixes SIGSEGV). [#20498](https://github.com/ClickHouse/ClickHouse/pull/20498) ([Azat Khuzhin](https://github.com/azat)).
+* Fix function `transform` not working properly for floating point keys. Closes [#20460](https://github.com/ClickHouse/ClickHouse/issues/20460). [#20479](https://github.com/ClickHouse/ClickHouse/pull/20479) ([flynn](https://github.com/ucasFL)).
+* Fix infinite loop when propagating WITH aliases to subqueries. This fixes [#20388](https://github.com/ClickHouse/ClickHouse/issues/20388). [#20476](https://github.com/ClickHouse/ClickHouse/pull/20476) ([Amos Bird](https://github.com/amosbird)).
+* Fix abnormal server termination when http client goes away. [#20464](https://github.com/ClickHouse/ClickHouse/pull/20464) ([Azat Khuzhin](https://github.com/azat)).
+* Fix `LOGICAL_ERROR` for `join_use_nulls=1` when JOIN contains const from SELECT. [#20461](https://github.com/ClickHouse/ClickHouse/pull/20461) ([Azat Khuzhin](https://github.com/azat)).
+* Check if table function `view` is used in expression list and throw an error. This fixes [#20342](https://github.com/ClickHouse/ClickHouse/issues/20342). [#20350](https://github.com/ClickHouse/ClickHouse/pull/20350) ([Amos Bird](https://github.com/amosbird)).
+* Avoid invalid dereference in RANGE_HASHED() dictionary. [#20345](https://github.com/ClickHouse/ClickHouse/pull/20345) ([Azat Khuzhin](https://github.com/azat)).
+* Fix null dereference with `join_use_nulls=1`. [#20344](https://github.com/ClickHouse/ClickHouse/pull/20344) ([Azat Khuzhin](https://github.com/azat)).
+* Fix incorrect result of binary operations between two constant decimals of different scale. Fixes [#20283](https://github.com/ClickHouse/ClickHouse/issues/20283). [#20339](https://github.com/ClickHouse/ClickHouse/pull/20339) ([Maksim Kita](https://github.com/kitaisreal)).
+* Fix too often retries of failed background tasks for `ReplicatedMergeTree` table engines family. This could lead to too verbose logging and increased CPU load. Fixes [#20203](https://github.com/ClickHouse/ClickHouse/issues/20203). [#20335](https://github.com/ClickHouse/ClickHouse/pull/20335) ([alesapin](https://github.com/alesapin)).
+* Forbid `DROP` and `RENAME` of the version column of `*CollapsingMergeTree` and `ReplacingMergeTree` table engines. [#20300](https://github.com/ClickHouse/ClickHouse/pull/20300) ([alesapin](https://github.com/alesapin)).
+* Fixed the behavior when, in case of broken JSON, we tried to read the whole file into memory, which led to an exception from the allocator. Fixes [#19719](https://github.com/ClickHouse/ClickHouse/issues/19719). [#20286](https://github.com/ClickHouse/ClickHouse/pull/20286) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
+* Fix an exception during vertical merge for the `MergeTree` family of table engines that don't allow vertical merges. Fixes [#20259](https://github.com/ClickHouse/ClickHouse/issues/20259). [#20279](https://github.com/ClickHouse/ClickHouse/pull/20279) ([alesapin](https://github.com/alesapin)).
+* Fix rare server crash on config reload during the shutdown. Fixes [#19689](https://github.com/ClickHouse/ClickHouse/issues/19689). [#20224](https://github.com/ClickHouse/ClickHouse/pull/20224) ([alesapin](https://github.com/alesapin)).
+* Fix CTE usage in INSERT SELECT. This fixes [#20187](https://github.com/ClickHouse/ClickHouse/issues/20187), fixes [#20195](https://github.com/ClickHouse/ClickHouse/issues/20195). [#20211](https://github.com/ClickHouse/ClickHouse/pull/20211) ([Amos Bird](https://github.com/amosbird)).
+* Fixes [#19314](https://github.com/ClickHouse/ClickHouse/issues/19314). [#20156](https://github.com/ClickHouse/ClickHouse/pull/20156) ([Ivan](https://github.com/abyss7)).
+* Fix the `toMinute` function to handle special timezones correctly. [#20149](https://github.com/ClickHouse/ClickHouse/pull/20149) ([keenwolf](https://github.com/keen-wolf)).
+* Fix a server crash after a query where the `if` function's then/else branches produce a `Tuple` result containing an `Array` or another complex type. Fixes [#18356](https://github.com/ClickHouse/ClickHouse/issues/18356). [#20133](https://github.com/ClickHouse/ClickHouse/pull/20133) ([alesapin](https://github.com/alesapin)).
+* The `MongoDB` table engine now establishes connection only when it's going to read data. `ATTACH TABLE` won't try to connect anymore. [#20110](https://github.com/ClickHouse/ClickHouse/pull/20110) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix a bug in `StorageJoin`. [#20079](https://github.com/ClickHouse/ClickHouse/pull/20079) ([vdimir](https://github.com/vdimir)).
+* Fix the case when, calculating the modulo of a negative number divided by a small divisor, the resulting data type was not large enough to accommodate the negative result (see the short sketch after this list). This closes [#20052](https://github.com/ClickHouse/ClickHouse/issues/20052). [#20067](https://github.com/ClickHouse/ClickHouse/pull/20067) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* MaterializeMySQL: Fix replication for statements that update several tables. [#20066](https://github.com/ClickHouse/ClickHouse/pull/20066) ([Håvard Kvålen](https://github.com/havardk)).
+* Prevent "Connection refused" in docker during initialization script execution. [#20012](https://github.com/ClickHouse/ClickHouse/pull/20012) ([filimonov](https://github.com/filimonov)).
+* `EmbeddedRocksDB` is an experimental storage. Fix the issue with lack of proper type checking. Simplified code. This closes [#19967](https://github.com/ClickHouse/ClickHouse/issues/19967). [#19972](https://github.com/ClickHouse/ClickHouse/pull/19972) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix a segfault in function `fromModifiedJulianDay` when the argument type is `Nullable(T)` for any integral types other than Int32. [#19959](https://github.com/ClickHouse/ClickHouse/pull/19959) ([PHO](https://github.com/depressed-pho)).
+* BloomFilter index crash fix. Fixes [#19757](https://github.com/ClickHouse/ClickHouse/issues/19757). [#19884](https://github.com/ClickHouse/ClickHouse/pull/19884) ([Maksim Kita](https://github.com/kitaisreal)).
+* Deadlock was possible if system.text_log is enabled. This fixes [#19874](https://github.com/ClickHouse/ClickHouse/issues/19874). [#19875](https://github.com/ClickHouse/ClickHouse/pull/19875) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix starting the server with tables having default expressions containing dictGet(). Allow getting return type of dictGet() without loading dictionary. [#19805](https://github.com/ClickHouse/ClickHouse/pull/19805) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Fix a clickhouse-client abort exception while executing only `select`. [#19790](https://github.com/ClickHouse/ClickHouse/pull/19790) ([taiyang-li](https://github.com/taiyang-li)).
+* Fix a bug where moving pieces to the destination table could fail when multiple clickhouse-copiers were launched. [#19743](https://github.com/ClickHouse/ClickHouse/pull/19743) ([madianjun](https://github.com/mdianjun)).
+* A background thread which executes `ON CLUSTER` queries might hang waiting for a dropped replicated table to do something. It's fixed. [#19684](https://github.com/ClickHouse/ClickHouse/pull/19684) ([yiguolei](https://github.com/yiguolei)).
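+
+A minimal C++ sketch of the modulo point mentioned above (an illustration, not code from the PR): in C++ the remainder takes the sign of the dividend, so the result type must be signed and wide enough even when the divisor is a small type:
+
+```cpp
+#include <cstdint>
+#include <cstdio>
+
+int main()
+{
+    int32_t dividend = -199;
+    int8_t divisor = 100;
+    // -199 % 100 == -99: the result is negative, so an unsigned
+    // or too-narrow result type would mangle the value.
+    int32_t remainder = dividend % divisor;
+    printf("%d\n", remainder);
+}
+```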
+
+#### Build/Testing/Packaging Improvement
+
+* Allow building ClickHouse with AVX-2 enabled globally. It gives slight performance benefits on modern CPUs. Not recommended for production; it will not be supported as an official build for now. [#20180](https://github.com/ClickHouse/ClickHouse/pull/20180) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Fix some of the issues found by Coverity. See [#19964](https://github.com/ClickHouse/ClickHouse/issues/19964). [#20010](https://github.com/ClickHouse/ClickHouse/pull/20010) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Allow starting up with a modified binary under gdb. In previous versions, if you set a breakpoint in gdb before start, the server would refuse to start up due to a failed integrity check. [#21258](https://github.com/ClickHouse/ClickHouse/pull/21258) ([alexey-milovidov](https://github.com/alexey-milovidov)).
+* Add a test for different compression methods in Kafka. [#21111](https://github.com/ClickHouse/ClickHouse/pull/21111) ([filimonov](https://github.com/filimonov)).
+* Fixed a port clash in the test_storage_kerberized_hdfs test. [#19974](https://github.com/ClickHouse/ClickHouse/pull/19974) ([Ilya Yatsishin](https://github.com/qoega)).
+* Print `stdout` and `stderr` to the log when failing to start docker in integration tests. Before this PR there was a very short error message in this case, which didn't help to investigate problems. [#20631](https://github.com/ClickHouse/ClickHouse/pull/20631) ([Vitaly Baranov](https://github.com/vitlibar)).
+
+
## ClickHouse release 21.2
### ClickHouse release v21.2.2.8-stable, 2021-02-07
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9002f1df140..1423f3a0bc2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -39,6 +39,8 @@ else()
set(RECONFIGURE_MESSAGE_LEVEL STATUS)
endif()
+enable_language(C CXX ASM)
+
include (cmake/arch.cmake)
include (cmake/target.cmake)
include (cmake/tools.cmake)
@@ -66,17 +68,30 @@ endif ()
include (cmake/find/ccache.cmake)
-option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling" OFF)
+# Take care to add prlimit in the command line before ccache, or else ccache thinks that
+# prlimit is the compiler, and clang++ is its input file, and refuses to work with
+# multiple inputs, e.g. in the ccache log:
+# [2021-03-31T18:06:32.655327 36900] Command line: /usr/bin/ccache prlimit --as=10000000000 --data=5000000000 --cpu=600 /usr/bin/clang++-11 - ...... std=gnu++2a -MD -MT src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o -MF src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o.d -o src/CMakeFiles/dbms.dir/Storages/MergeTree/IMergeTreeDataPart.cpp.o -c ../src/Storages/MergeTree/IMergeTreeDataPart.cpp
+#
+# [2021-03-31T18:06:32.656704 36900] Multiple input files: /usr/bin/clang++-11 and ../src/Storages/MergeTree/IMergeTreeDataPart.cpp
+#
+# Another way would be to pass the --ccache-skip option before clang++-11 to make
+# ccache ignore it.
+option(ENABLE_CHECK_HEAVY_BUILDS "Don't allow C++ translation units to compile too long or to take too much memory while compiling." OFF)
if (ENABLE_CHECK_HEAVY_BUILDS)
# set DATA (since RSS does not work since 2.6.x+) to 5G
set (RLIMIT_DATA 5000000000)
# set VIRT (RLIMIT_AS) to 10G (DATA*10)
set (RLIMIT_AS 10000000000)
+ # set CPU time limit to 600 seconds
+ set (RLIMIT_CPU 600)
+
# gcc10/clang -fsanitize=memory is too heavy
if (SANITIZE STREQUAL "memory" OR COMPILER_GCC)
set (RLIMIT_DATA 10000000000)
endif()
- set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=600)
+
+ set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=${RLIMIT_CPU} ${CMAKE_CXX_COMPILER_LAUNCHER})
endif ()
if (NOT CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "None")
@@ -155,7 +170,6 @@ option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests"
if (OS_LINUX AND NOT UNBUNDLED AND MAKE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND CMAKE_VERSION VERSION_GREATER "3.9.0")
# Only for Linux, x86_64.
- # Implies ${ENABLE_FASTMEMCPY}
option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
elseif(GLIBC_COMPATIBILITY)
message (${RECONFIGURE_MESSAGE_LEVEL} "Glibc compatibility cannot be enabled in current configuration")
@@ -169,7 +183,7 @@ endif ()
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic")
if (OS_LINUX)
- find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-11" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy")
+ find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-12" "llvm-objcopy-11" "llvm-objcopy-10" "llvm-objcopy-9" "llvm-objcopy-8" "objcopy")
if (OBJCOPY_PATH)
message(STATUS "Using objcopy: ${OBJCOPY_PATH}.")
@@ -241,9 +255,7 @@ else()
message(STATUS "Disabling compiler -pipe option (have only ${AVAILABLE_PHYSICAL_MEMORY} mb of memory)")
endif()
-if(NOT DISABLE_CPU_OPTIMIZE)
- include(cmake/cpu_features.cmake)
-endif()
+include(cmake/cpu_features.cmake)
option(ARCH_NATIVE "Add -march=native compiler flag")
@@ -251,25 +263,39 @@ if (ARCH_NATIVE)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
endif ()
-if (COMPILER_GCC OR COMPILER_CLANG)
- # to make numeric_limits<__int128> works with GCC
- set (_CXX_STANDARD "gnu++2a")
-else()
- set (_CXX_STANDARD "c++2a")
-endif()
+if (${CMAKE_VERSION} VERSION_LESS "3.12.4")
+ # CMake < 3.12 doesn't support setting 20 as a C++ standard version.
+ # We will add C++ standard controlling flag in CMAKE_CXX_FLAGS manually for now.
-# cmake < 3.12 doesn't support 20. We'll set CMAKE_CXX_FLAGS for now
-# set (CMAKE_CXX_STANDARD 20)
-set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}")
+ if (COMPILER_GCC OR COMPILER_CLANG)
+ # to make numeric_limits<__int128> works with GCC
+ set (_CXX_STANDARD "gnu++2a")
+ else ()
+ set (_CXX_STANDARD "c++2a")
+ endif ()
-set (CMAKE_CXX_EXTENSIONS 0) # https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html#prop_tgt:CXX_EXTENSIONS
-set (CMAKE_CXX_STANDARD_REQUIRED ON)
+ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=${_CXX_STANDARD}")
+else ()
+ set (CMAKE_CXX_STANDARD 20)
+ set (CMAKE_CXX_EXTENSIONS ON) # Same as gnu++2a (ON) vs c++2a (OFF): https://cmake.org/cmake/help/latest/prop_tgt/CXX_EXTENSIONS.html
+ set (CMAKE_CXX_STANDARD_REQUIRED ON)
+endif ()
+
+set (CMAKE_C_STANDARD 11)
+set (CMAKE_C_EXTENSIONS ON)
+set (CMAKE_C_STANDARD_REQUIRED ON)
if (COMPILER_GCC OR COMPILER_CLANG)
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
endif ()
+# -falign-functions=32 prevents random performance regressions when the code changes, thus providing more
+# stable benchmarks.
+if (COMPILER_GCC OR COMPILER_CLANG)
+ set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
+endif ()
+
# Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
@@ -331,7 +357,7 @@ if (COMPILER_CLANG)
endif ()
# Always prefer llvm tools when using clang. For instance, we cannot use GNU ar when llvm LTO is enabled
- find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
+ find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-12" "llvm-ar-11" "llvm-ar-10" "llvm-ar-9" "llvm-ar-8")
if (LLVM_AR_PATH)
message(STATUS "Using llvm-ar: ${LLVM_AR_PATH}.")
@@ -340,7 +366,7 @@ if (COMPILER_CLANG)
message(WARNING "Cannot find llvm-ar. System ar will be used instead. It does not work with ThinLTO.")
endif ()
- find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9" "llvm-ranlib-8")
+ find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-12" "llvm-ranlib-11" "llvm-ranlib-10" "llvm-ranlib-9" "llvm-ranlib-8")
if (LLVM_RANLIB_PATH)
message(STATUS "Using llvm-ranlib: ${LLVM_RANLIB_PATH}.")
@@ -457,6 +483,7 @@ find_contrib_lib(double-conversion) # Must be before parquet
include (cmake/find/ssl.cmake)
include (cmake/find/ldap.cmake) # after ssl
include (cmake/find/icu.cmake)
+include (cmake/find/xz.cmake)
include (cmake/find/zlib.cmake)
include (cmake/find/zstd.cmake)
include (cmake/find/ltdl.cmake) # for odbc
@@ -489,6 +516,7 @@ include (cmake/find/fast_float.cmake)
include (cmake/find/rapidjson.cmake)
include (cmake/find/fastops.cmake)
include (cmake/find/odbc.cmake)
+include (cmake/find/nanodbc.cmake)
include (cmake/find/rocksdb.cmake)
include (cmake/find/libpqxx.cmake)
include (cmake/find/nuraft.cmake)
@@ -504,6 +532,7 @@ include (cmake/find/msgpack.cmake)
include (cmake/find/cassandra.cmake)
include (cmake/find/sentry.cmake)
include (cmake/find/stats.cmake)
+include (cmake/find/datasketches.cmake)
set (USE_INTERNAL_CITYHASH_LIBRARY ON CACHE INTERNAL "")
find_contrib_lib(cityhash)
@@ -536,7 +565,7 @@ macro (add_executable target)
# explicitly acquire and interpose malloc symbols by clickhouse_malloc
# if GLIBC_COMPATIBILITY is ON and ENABLE_THINLTO is on then provide the memcpy symbol explicitly to neutralize thinlto's libcall generation.
if (GLIBC_COMPATIBILITY AND ENABLE_THINLTO)
- _add_executable (${ARGV} $ $)
+ _add_executable (${ARGV} $ $)
else ()
_add_executable (${ARGV} $)
endif ()
diff --git a/README.md b/README.md
index 3329a98877f..ea9f365a3c6 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ ClickHouse® is an open-source column-oriented database management system that a
* [Tutorial](https://clickhouse.tech/docs/en/getting_started/tutorial/) shows how to set up and query small ClickHouse cluster.
* [Documentation](https://clickhouse.tech/docs/en/) provides more in-depth information.
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
-* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-ly9m4w1x-6j7x5Ts_pQZqrctAbRZ3cg) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
+* [Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-nwwakmk4-xOJ6cdy0sJC3It8j348~IA) and [Telegram](https://telegram.me/clickhouse_en) allow to chat with ClickHouse users in real-time.
* [Blog](https://clickhouse.yandex/blog/en/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Code Browser](https://clickhouse.tech/codebrowser/html_report/ClickHouse/index.html) with syntax highlight and navigation.
* [Contacts](https://clickhouse.tech/#contacts) can help to get your questions answered if there are any.
diff --git a/base/CMakeLists.txt b/base/CMakeLists.txt
index 46bd57eda12..023dcaaccae 100644
--- a/base/CMakeLists.txt
+++ b/base/CMakeLists.txt
@@ -8,6 +8,7 @@ add_subdirectory (loggers)
add_subdirectory (pcg-random)
add_subdirectory (widechar_width)
add_subdirectory (readpassphrase)
+add_subdirectory (bridge)
if (USE_MYSQL)
add_subdirectory (mysqlxx)
diff --git a/base/bridge/CMakeLists.txt b/base/bridge/CMakeLists.txt
new file mode 100644
index 00000000000..20b0b651677
--- /dev/null
+++ b/base/bridge/CMakeLists.txt
@@ -0,0 +1,7 @@
+add_library (bridge
+ IBridge.cpp
+)
+
+target_include_directories (daemon PUBLIC ..)
+target_link_libraries (bridge PRIVATE daemon dbms Poco::Data Poco::Data::ODBC)
+
diff --git a/base/bridge/IBridge.cpp b/base/bridge/IBridge.cpp
new file mode 100644
index 00000000000..b1f71315fef
--- /dev/null
+++ b/base/bridge/IBridge.cpp
@@ -0,0 +1,238 @@
+#include "IBridge.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#if USE_ODBC
+# include <Poco/Data/ODBC/Connector.h>
+#endif
+
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+ extern const int ARGUMENT_OUT_OF_BOUND;
+}
+
+namespace
+{
+ Poco::Net::SocketAddress makeSocketAddress(const std::string & host, UInt16 port, Poco::Logger * log)
+ {
+ Poco::Net::SocketAddress socket_address;
+ try
+ {
+ socket_address = Poco::Net::SocketAddress(host, port);
+ }
+ catch (const Poco::Net::DNSException & e)
+ {
+ const auto code = e.code();
+ if (code == EAI_FAMILY
+#if defined(EAI_ADDRFAMILY)
+ || code == EAI_ADDRFAMILY
+#endif
+ )
+ {
+ LOG_ERROR(log, "Cannot resolve listen_host ({}), error {}: {}. If it is an IPv6 address and your host has disabled IPv6, then consider to specify IPv4 address to listen in <listen_host> element of configuration file. Example: 0.0.0.0", host, e.code(), e.message());
+ }
+
+ throw;
+ }
+ return socket_address;
+ }
+
+ Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, Poco::Logger * log)
+ {
+ auto address = makeSocketAddress(host, port, log);
+#if POCO_VERSION < 0x01080000
+ socket.bind(address, /* reuseAddress = */ true);
+#else
+ socket.bind(address, /* reuseAddress = */ true, /* reusePort = */ false);
+#endif
+
+ socket.listen(/* backlog = */ 64);
+
+ return address;
+ }
+}
+
+
+void IBridge::handleHelp(const std::string &, const std::string &)
+{
+ Poco::Util::HelpFormatter help_formatter(options());
+ help_formatter.setCommand(commandName());
+ help_formatter.setHeader("HTTP-proxy for odbc requests");
+ help_formatter.setUsage("--http-port <port>");
+ help_formatter.format(std::cerr);
+
+ stopOptionsProcessing();
+}
+
+
+void IBridge::defineOptions(Poco::Util::OptionSet & options)
+{
+ options.addOption(
+ Poco::Util::Option("http-port", "", "port to listen").argument("http-port", true) .binding("http-port"));
+
+ options.addOption(
+ Poco::Util::Option("listen-host", "", "hostname or address to listen, default 127.0.0.1").argument("listen-host").binding("listen-host"));
+
+ options.addOption(
+ Poco::Util::Option("http-timeout", "", "http timeout for socket, default 1800").argument("http-timeout").binding("http-timeout"));
+
+ options.addOption(
+ Poco::Util::Option("max-server-connections", "", "max connections to server, default 1024").argument("max-server-connections").binding("max-server-connections"));
+
+ options.addOption(
+ Poco::Util::Option("keep-alive-timeout", "", "keepalive timeout, default 10").argument("keep-alive-timeout").binding("keep-alive-timeout"));
+
+ options.addOption(
+ Poco::Util::Option("log-level", "", "sets log level, default info") .argument("log-level").binding("logger.level"));
+
+ options.addOption(
+ Poco::Util::Option("log-path", "", "log path for all logs, default console").argument("log-path").binding("logger.log"));
+
+ options.addOption(
+ Poco::Util::Option("err-log-path", "", "err log path for all logs, default no").argument("err-log-path").binding("logger.errorlog"));
+
+ options.addOption(
+ Poco::Util::Option("stdout-path", "", "stdout log path, default console").argument("stdout-path").binding("logger.stdout"));
+
+ options.addOption(
+ Poco::Util::Option("stderr-path", "", "stderr log path, default console").argument("stderr-path").binding("logger.stderr"));
+
+ using Me = std::decay_t<decltype(*this)>;
+
+ options.addOption(
+ Poco::Util::Option("help", "", "produce this help message").binding("help").callback(Poco::Util::OptionCallback(this, &Me::handleHelp)));
+
+ ServerApplication::defineOptions(options); // NOLINT Don't need complex BaseDaemon's .xml config
+}
+
+
+void IBridge::initialize(Application & self)
+{
+ BaseDaemon::closeFDs();
+ is_help = config().has("help");
+
+ if (is_help)
+ return;
+
+ config().setString("logger", bridgeName());
+
+ /// Redirect stdout, stderr to specified files.
+ /// Some libraries and sanitizers write to stderr in case of errors.
+ const auto stdout_path = config().getString("logger.stdout", "");
+ if (!stdout_path.empty())
+ {
+ if (!freopen(stdout_path.c_str(), "a+", stdout))
+ throw Poco::OpenFileException("Cannot attach stdout to " + stdout_path);
+
+ /// Disable buffering for stdout.
+ setbuf(stdout, nullptr);
+ }
+ const auto stderr_path = config().getString("logger.stderr", "");
+ if (!stderr_path.empty())
+ {
+ if (!freopen(stderr_path.c_str(), "a+", stderr))
+ throw Poco::OpenFileException("Cannot attach stderr to " + stderr_path);
+
+ /// Disable buffering for stderr.
+ setbuf(stderr, nullptr);
+ }
+
+ buildLoggers(config(), logger(), self.commandName());
+
+ BaseDaemon::logRevision();
+
+ log = &logger();
+ hostname = config().getString("listen-host", "127.0.0.1");
+ port = config().getUInt("http-port");
+ if (port > 0xFFFF)
+ throw Exception("Out of range 'http-port': " + std::to_string(port), ErrorCodes::ARGUMENT_OUT_OF_BOUND);
+
+ http_timeout = config().getUInt("http-timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT);
+ max_server_connections = config().getUInt("max-server-connections", 1024);
+ keep_alive_timeout = config().getUInt("keep-alive-timeout", 10);
+
+ initializeTerminationAndSignalProcessing();
+
+#if USE_ODBC
+ if (bridgeName() == "ODBCBridge")
+ Poco::Data::ODBC::Connector::registerConnector();
+#endif
+
+ ServerApplication::initialize(self); // NOLINT
+}
+
+
+void IBridge::uninitialize()
+{
+ BaseDaemon::uninitialize();
+}
+
+
+int IBridge::main(const std::vector<std::string> & /*args*/)
+{
+ if (is_help)
+ return Application::EXIT_OK;
+
+ registerFormats();
+ LOG_INFO(log, "Starting up {} on host: {}, port: {}", bridgeName(), hostname, port);
+
+ Poco::Net::ServerSocket socket;
+ auto address = socketBindListen(socket, hostname, port, log);
+ socket.setReceiveTimeout(http_timeout);
+ socket.setSendTimeout(http_timeout);
+
+ Poco::ThreadPool server_pool(3, max_server_connections);
+
+ Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
+ http_params->setTimeout(http_timeout);
+ http_params->setKeepAliveTimeout(keep_alive_timeout);
+
+ auto shared_context = Context::createShared();
+ auto context = Context::createGlobal(shared_context.get());
+ context->makeGlobalContext();
+
+ if (config().has("query_masking_rules"))
+ SensitiveDataMasker::setInstance(std::make_unique<SensitiveDataMasker>(config(), "query_masking_rules"));
+
+ auto server = HTTPServer(
+ context,
+ getHandlerFactoryPtr(context),
+ server_pool,
+ socket,
+ http_params);
+
+ SCOPE_EXIT({
+ LOG_DEBUG(log, "Received termination signal.");
+ LOG_DEBUG(log, "Waiting for current connections to close.");
+
+ server.stop();
+
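+ /// Wait up to 5 seconds (five one-second attempts) for current connections to finish.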
+ for (size_t count : ext::range(1, 6))
+ {
+ if (server.currentConnections() == 0)
+ break;
+ LOG_DEBUG(log, "Waiting for {} connections, try {}", server.currentConnections(), count);
+ std::this_thread::sleep_for(std::chrono::milliseconds(1000));
+ }
+ });
+
+ server.start();
+ LOG_INFO(log, "Listening http://{}", address.toString());
+
+ waitForTerminationRequest();
+ return Application::EXIT_OK;
+}
+
+}
diff --git a/base/bridge/IBridge.h b/base/bridge/IBridge.h
new file mode 100644
index 00000000000..c64003d9959
--- /dev/null
+++ b/base/bridge/IBridge.h
@@ -0,0 +1,51 @@
+#pragma once
+
+#include
+#include
+#include
+
+#include
+#include
+
+
+namespace DB
+{
+
+/// Class represents a base for clickhouse-odbc-bridge and clickhouse-library-bridge servers.
+/// Listens for incoming HTTP POST and GET requests on the specified port and host.
+/// Has two handlers: '/' for all incoming POST requests and '/ping' for GET requests about the service status.
+class IBridge : public BaseDaemon
+{
+
+public:
+ /// Define command line arguments
+ void defineOptions(Poco::Util::OptionSet & options) override;
+
+protected:
+ using HandlerFactoryPtr = std::shared_ptr<HTTPRequestHandlerFactory>;
+
+ void initialize(Application & self) override;
+
+ void uninitialize() override;
+
+ int main(const std::vector<std::string> & args) override;
+
+ virtual std::string bridgeName() const = 0;
+
+ virtual HandlerFactoryPtr getHandlerFactoryPtr(ContextPtr context) const = 0;
+
+ size_t keep_alive_timeout;
+
+private:
+ void handleHelp(const std::string &, const std::string &);
+
+ bool is_help;
+ std::string hostname;
+ size_t port;
+ std::string log_level;
+ size_t max_server_connections;
+ size_t http_timeout;
+
+ Poco::Logger * log;
+};
+}
diff --git a/base/common/BorrowedObjectPool.h b/base/common/BorrowedObjectPool.h
new file mode 100644
index 00000000000..6a90a7e7122
--- /dev/null
+++ b/base/common/BorrowedObjectPool.h
@@ -0,0 +1,156 @@
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+/** Pool for a limited number of objects that cannot be used from different threads simultaneously.
+ * The main use case is to have a fixed number of objects that can be reused in different threads during their lifetime
+ * and have to be initialized on demand.
+ * The two main properties of the pool are the allocated objects size and the borrowed objects size.
+ * The allocated objects size is the number of objects that are currently allocated by the pool.
+ * The borrowed objects size is the number of objects that are currently borrowed by clients.
+ * If max_size == 0 then the pool has unlimited size and objects will be allocated without limit.
+ *
+ * The pool provides the following strategy for borrowing an object:
+ * 1. If the pool has objects that can be borrowed, increase the borrowed objects size and return one of them.
+ * 2. If the pool's allocated objects size is lower than the max size, or the pool has unlimited size,
+ * allocate a new object, increase the borrowed objects size and return it.
+ * 3. If the pool is full, wait on a condition variable (with or without a timeout) until some object
+ * is returned to the pool.
+ */
+template <typename T>
+class BorrowedObjectPool final
+{
+public:
+ explicit BorrowedObjectPool(size_t max_size_) : max_size(max_size_) {}
+
+ /// Borrow an object from the pool. If the pool is full and all objects were borrowed
+ /// then the calling thread will wait until some object is returned to the pool.
+ template <typename FactoryFunc>
+ void borrowObject(T & dest, FactoryFunc && func)
+ {
+ std::unique_lock lock(objects_mutex);
+
+ if (!objects.empty())
+ {
+ dest = borrowFromObjects(lock);
+ return;
+ }
+
+ bool has_unlimited_size = (max_size == 0);
+
+ if (unlikely(has_unlimited_size) || allocated_objects_size < max_size)
+ {
+ dest = allocateObjectForBorrowing(lock, std::forward<FactoryFunc>(func));
+ return;
+ }
+
+ condition_variable.wait(lock, [this] { return !objects.empty(); });
+ dest = borrowFromObjects(lock);
+ }
+
+ /// Same as the borrowObject function, but waits with a timeout.
+ /// Returns true if an object was borrowed within the timeout.
+ template <typename FactoryFunc>
+ bool tryBorrowObject(T & dest, FactoryFunc && func, size_t timeout_in_milliseconds = 0)
+ {
+ std::unique_lock lock(objects_mutex);
+
+ if (!objects.empty())
+ {
+ dest = borrowFromObjects(lock);
+ return true;
+ }
+
+ bool has_unlimited_size = (max_size == 0);
+
+ if (unlikely(has_unlimited_size) || allocated_objects_size < max_size)
+ {
+ dest = allocateObjectForBorrowing(lock, std::forward<FactoryFunc>(func));
+ return true;
+ }
+
+ bool wait_result = condition_variable.wait_for(lock, std::chrono::milliseconds(timeout_in_milliseconds), [this] { return !objects.empty(); });
+
+ if (wait_result)
+ dest = borrowFromObjects(lock);
+
+ return wait_result;
+ }
+
+ /// Return an object to the pool. The client must return the same object that was borrowed.
+ inline void returnObject(T && object_to_return)
+ {
+ std::unique_lock lck(objects_mutex);
+
+ objects.emplace_back(std::move(object_to_return));
+ --borrowed_objects_size;
+
+ condition_variable.notify_one();
+ }
+
+ /// Max pool size
+ inline size_t maxSize() const
+ {
+ return max_size;
+ }
+
+ /// Allocated objects size. If allocatedObjectsSize == maxSize then the pool is full.
+ inline size_t allocatedObjectsSize() const
+ {
+ std::unique_lock lock(objects_mutex);
+ return allocated_objects_size;
+ }
+
+ /// Returns allocatedObjectsSize == maxSize
+ inline bool isFull() const
+ {
+ std::unique_lock lock(objects_mutex);
+ return allocated_objects_size == max_size;
+ }
+
+ /// Borrowed objects size. If borrowedObjectsSize == allocatedObjectsSize and the pool is full,
+ /// then a client will wait during a borrowObject function call.
+ inline size_t borrowedObjectsSize() const
+ {
+ std::unique_lock lock(objects_mutex);
+ return borrowed_objects_size;
+ }
+
+private:
+
+ template <typename FactoryFunc>
+ inline T allocateObjectForBorrowing(const std::unique_lock &, FactoryFunc && func)
+ {
+ ++allocated_objects_size;
+ ++borrowed_objects_size;
+
+ return std::forward<FactoryFunc>(func)();
+ }
+
+ inline T borrowFromObjects(const std::unique_lock &)
+ {
+ T dst;
+ detail::moveOrCopyIfThrow(std::move(objects.back()), dst);
+ objects.pop_back();
+
+ ++borrowed_objects_size;
+
+ return dst;
+ }
+
+ size_t max_size;
+
+ mutable std::mutex objects_mutex;
+ std::condition_variable condition_variable;
+ size_t allocated_objects_size = 0;
+ size_t borrowed_objects_size = 0;
+ std::vector<T> objects;
+};
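+
+/// Usage sketch (an illustration, not part of the pool itself); `Connection` and
+/// `createConnection()` are hypothetical names:
+///
+///     BorrowedObjectPool<std::shared_ptr<Connection>> pool(/* max_size */ 4);
+///
+///     std::shared_ptr<Connection> connection;
+///     pool.borrowObject(connection, [] { return createConnection(); }); /// allocated on first use
+///     /// ... use `connection` from this thread only ...
+///     pool.returnObject(std::move(connection)); /// wakes one waiting thread, if any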
diff --git a/base/common/CMakeLists.txt b/base/common/CMakeLists.txt
index cea52b443dd..7dfb9bc10c0 100644
--- a/base/common/CMakeLists.txt
+++ b/base/common/CMakeLists.txt
@@ -47,6 +47,10 @@ endif()
target_include_directories(common PUBLIC .. ${CMAKE_CURRENT_BINARY_DIR}/..)
+if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
+ target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
+endif()
+
# Allow explicit fallback to readline
if (NOT ENABLE_REPLXX AND ENABLE_READLINE)
message (STATUS "Attempt to fallback to readline explicitly")
@@ -74,7 +78,6 @@ target_link_libraries (common
${CITYHASH_LIBRARIES}
boost::headers_only
boost::system
- FastMemcpy
Poco::Net
Poco::Net::SSL
Poco::Util
diff --git a/base/common/DateLUT.cpp b/base/common/DateLUT.cpp
index 6ff0884701c..d14b63cd70a 100644
--- a/base/common/DateLUT.cpp
+++ b/base/common/DateLUT.cpp
@@ -152,7 +152,7 @@ const DateLUTImpl & DateLUT::getImplementation(const std::string & time_zone) co
auto it = impls.emplace(time_zone, nullptr).first;
if (!it->second)
- it->second = std::make_unique(time_zone);
+ it->second = std::unique_ptr(new DateLUTImpl(time_zone));
return *it->second;
}
diff --git a/base/common/DateLUT.h b/base/common/DateLUT.h
index 93c6cb403e2..378b4360f3b 100644
--- a/base/common/DateLUT.h
+++ b/base/common/DateLUT.h
@@ -32,7 +32,6 @@ public:
return date_lut.getImplementation(time_zone);
}
-
static void setDefaultTimezone(const std::string & time_zone)
{
auto & date_lut = getInstance();
diff --git a/base/common/DateLUTImpl.cpp b/base/common/DateLUTImpl.cpp
index 50620e21b8f..e7faeb63760 100644
--- a/base/common/DateLUTImpl.cpp
+++ b/base/common/DateLUTImpl.cpp
@@ -46,24 +46,41 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
if (&inside_main)
assert(inside_main);
- size_t i = 0;
- time_t start_of_day = 0;
-
cctz::time_zone cctz_time_zone;
if (!cctz::load_time_zone(time_zone, &cctz_time_zone))
throw Poco::Exception("Cannot load time zone " + time_zone_);
- cctz::time_zone::absolute_lookup start_of_epoch_lookup = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(start_of_day));
- offset_at_start_of_epoch = start_of_epoch_lookup.offset;
- offset_is_whole_number_of_hours_everytime = true;
+ constexpr cctz::civil_day epoch{1970, 1, 1};
+ constexpr cctz::civil_day lut_start{DATE_LUT_MIN_YEAR, 1, 1};
+ time_t start_of_day;
- cctz::civil_day date{1970, 1, 1};
+ /// Note: it's validated against all timezones in the system.
+ static_assert((epoch - lut_start) == daynum_offset_epoch);
+ offset_at_start_of_epoch = cctz_time_zone.lookup(cctz_time_zone.lookup(epoch).pre).offset;
+ offset_at_start_of_lut = cctz_time_zone.lookup(cctz_time_zone.lookup(lut_start).pre).offset;
+ offset_is_whole_number_of_hours_during_epoch = true;
+
+ cctz::civil_day date = lut_start;
+
+ UInt32 i = 0;
do
{
cctz::time_zone::civil_lookup lookup = cctz_time_zone.lookup(date);
- start_of_day = std::chrono::system_clock::to_time_t(lookup.pre); /// Ambiguity is possible.
+ /// Ambiguity is possible if time was changed backwards at midnight,
+ /// or if after midnight the time was changed back to midnight (for example, one hour backwards at 01:00),
+ /// or if after midnight the time was changed to the previous day (for example, two hours backwards at 01:00).
+ /// Then midnight appears twice. Usually the time change happens exactly at 00:00 or 01:00.
+
+ /// If transition did not involve previous day, we should use the first midnight as the start of the day,
+ /// otherwise it's better to use the second midnight.
+
+ std::chrono::time_point start_of_day_time_point = lookup.trans < lookup.post
+ ? lookup.post /* Second midnight appears after transition, so there was a piece of previous day after transition */
+ : lookup.pre;
+
+ start_of_day = std::chrono::system_clock::to_time_t(start_of_day_time_point);
Values & values = lut[i];
values.year = date.year();
@@ -72,7 +89,7 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
values.day_of_week = getDayOfWeek(date);
values.date = start_of_day;
- assert(values.year >= DATE_LUT_MIN_YEAR && values.year <= DATE_LUT_MAX_YEAR);
+ assert(values.year >= DATE_LUT_MIN_YEAR && values.year <= DATE_LUT_MAX_YEAR + 1);
assert(values.month >= 1 && values.month <= 12);
assert(values.day_of_month >= 1 && values.day_of_month <= 31);
assert(values.day_of_week >= 1 && values.day_of_week <= 7);
@@ -85,50 +102,42 @@ DateLUTImpl::DateLUTImpl(const std::string & time_zone_)
else
values.days_in_month = i != 0 ? lut[i - 1].days_in_month : 31;
- values.time_at_offset_change = 0;
- values.amount_of_offset_change = 0;
+ values.time_at_offset_change_value = 0;
+ values.amount_of_offset_change_value = 0;
- if (start_of_day % 3600)
- offset_is_whole_number_of_hours_everytime = false;
+ if (offset_is_whole_number_of_hours_during_epoch && start_of_day > 0 && start_of_day % 3600)
+ offset_is_whole_number_of_hours_during_epoch = false;
- /// If UTC offset was changed in previous day.
- if (i != 0)
+ /// If UTC offset was changed this day.
+ /// Change in time zone without transition is possible, e.g. Moscow 1991 Sun, 31 Mar, 02:00 MSK to EEST
+ cctz::time_zone::civil_transition transition{};
+ if (cctz_time_zone.next_transition(start_of_day_time_point - std::chrono::seconds(1), &transition)
+ && (cctz::civil_day(transition.from) == date || cctz::civil_day(transition.to) == date)
+ && transition.from != transition.to)
{
- auto amount_of_offset_change_at_prev_day = 86400 - (lut[i].date - lut[i - 1].date);
- if (amount_of_offset_change_at_prev_day)
- {
- lut[i - 1].amount_of_offset_change = amount_of_offset_change_at_prev_day;
+ values.time_at_offset_change_value = (transition.from - cctz::civil_second(date)) / Values::OffsetChangeFactor;
+ values.amount_of_offset_change_value = (transition.to - transition.from) / Values::OffsetChangeFactor;
- const auto utc_offset_at_beginning_of_day = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(lut[i - 1].date)).offset;
+// std::cerr << time_zone << ", " << date << ": change from " << transition.from << " to " << transition.to << "\n";
+// std::cerr << time_zone << ", " << date << ": change at " << values.time_at_offset_change() << " with " << values.amount_of_offset_change() << "\n";
- /// Find a time (timestamp offset from beginning of day),
- /// when UTC offset was changed. Search is performed with 15-minute granularity, assuming it is enough.
+ /// We don't support too large changes.
+ if (values.amount_of_offset_change_value > 24 * 4)
+ values.amount_of_offset_change_value = 24 * 4;
+ else if (values.amount_of_offset_change_value < -24 * 4)
+ values.amount_of_offset_change_value = -24 * 4;
- time_t time_at_offset_change = 900;
- while (time_at_offset_change < 86400)
- {
- auto utc_offset_at_current_time = cctz_time_zone.lookup(std::chrono::system_clock::from_time_t(
- lut[i - 1].date + time_at_offset_change)).offset;
-
- if (utc_offset_at_current_time != utc_offset_at_beginning_of_day)
- break;
-
- time_at_offset_change += 900;
- }
-
- lut[i - 1].time_at_offset_change = time_at_offset_change;
-
- /// We doesn't support cases when time change results in switching to previous day.
- if (static_cast(lut[i - 1].time_at_offset_change) + static_cast(lut[i - 1].amount_of_offset_change) < 0)
- lut[i - 1].time_at_offset_change = -lut[i - 1].amount_of_offset_change;
- }
+ /// We don't support cases when time change results in switching to previous day.
+ /// Shift the point of time change later.
+ if (values.time_at_offset_change_value + values.amount_of_offset_change_value < 0)
+ values.time_at_offset_change_value = -values.amount_of_offset_change_value;
}
/// Going to next day.
++date;
++i;
}
- while (start_of_day <= DATE_LUT_MAX && i <= DATE_LUT_MAX_DAY_NUM);
+ while (i < DATE_LUT_SIZE && lut[i - 1].year <= DATE_LUT_MAX_YEAR);
/// Fill excessive part of lookup table. This is needed only to simplify handling of overflow cases.
while (i < DATE_LUT_SIZE)
diff --git a/base/common/DateLUTImpl.h b/base/common/DateLUTImpl.h
index 064787fb64e..9e60181e802 100644
--- a/base/common/DateLUTImpl.h
+++ b/base/common/DateLUTImpl.h
@@ -5,23 +5,32 @@
#include "types.h"
#include
+#include
#include
+#include
-#define DATE_LUT_MAX (0xFFFFFFFFU - 86400)
-#define DATE_LUT_MAX_DAY_NUM (0xFFFFFFFFU / 86400)
-/// Table size is bigger than DATE_LUT_MAX_DAY_NUM to fill all indices within UInt16 range: this allows to remove extra check.
-#define DATE_LUT_SIZE 0x10000
-#define DATE_LUT_MIN_YEAR 1970
-#define DATE_LUT_MAX_YEAR 2106 /// Last supported year (incomplete)
+#define DATE_LUT_MIN_YEAR 1925 /// 1925 since the vast majority of timezones changed to 15-minute aligned offsets somewhere in 1924 or earlier.
+#define DATE_LUT_MAX_YEAR 2283 /// Last supported year (complete)
#define DATE_LUT_YEARS (1 + DATE_LUT_MAX_YEAR - DATE_LUT_MIN_YEAR) /// Number of years in lookup table
+#define DATE_LUT_SIZE 0x20000
+
+#define DATE_LUT_MAX (0xFFFFFFFFU - 86400)
+#define DATE_LUT_MAX_DAY_NUM 0xFFFF
+
+/// A constant to add to time_t so every supported time point becomes non-negative and still has the same remainder of division by 3600.
+/// If we treat "remainder of division" operation in the sense of modular arithmetic (not like in C++).
+#define DATE_LUT_ADD ((1970 - DATE_LUT_MIN_YEAR) * 366 * 86400)
+
+
#if defined(__PPC__)
-#if !__clang__
+#if !defined(__clang__)
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
#endif
+
/// Flags for toYearWeek() function.
enum class WeekModeFlag : UInt8
{
using YearWeek = std::pair<UInt16, UInt8>;
*/
class DateLUTImpl
{
-public:
+private:
+ friend class DateLUT;
explicit DateLUTImpl(const std::string & time_zone);
DateLUTImpl(const DateLUTImpl &) = delete;
@@ -45,14 +55,75 @@ public:
DateLUTImpl(const DateLUTImpl &&) = delete;
DateLUTImpl & operator=(const DateLUTImpl &&) = delete;
+ // Normalized and bounds-checked index of an element in the lut;
+ // it has to be a separate type to support overloading.
+ // TODO: make sure that any arithmetic on LUTIndex actually results in valid LUTIndex.
+ STRONG_TYPEDEF(UInt32, LUTIndex)
+
+ template <typename T>
+ friend inline LUTIndex operator+(const LUTIndex & index, const T v)
+ {
+ return LUTIndex{(index.toUnderType() + UInt32(v)) & date_lut_mask};
+ }
+
+ template <typename T>
+ friend inline LUTIndex operator+(const T v, const LUTIndex & index)
+ {
+ return LUTIndex{(v + index.toUnderType()) & date_lut_mask};
+ }
+
+ friend inline LUTIndex operator+(const LUTIndex & index, const LUTIndex & v)
+ {
+ return LUTIndex{(index.toUnderType() + v.toUnderType()) & date_lut_mask};
+ }
+
+ template <typename T>
+ friend inline LUTIndex operator-(const LUTIndex & index, const T v)
+ {
+ return LUTIndex{(index.toUnderType() - UInt32(v)) & date_lut_mask};
+ }
+
+ template <typename T>
+ friend inline LUTIndex operator-(const T v, const LUTIndex & index)
+ {
+ return LUTIndex{(v - index.toUnderType()) & date_lut_mask};
+ }
+
+ friend inline LUTIndex operator-(const LUTIndex & index, const LUTIndex & v)
+ {
+ return LUTIndex{(index.toUnderType() - v.toUnderType()) & date_lut_mask};
+ }
+
+ template <typename T>
+ friend inline LUTIndex operator*(const LUTIndex & index, const T v)
+ {
+ return LUTIndex{(index.toUnderType() * UInt32(v)) & date_lut_mask};
+ }
+
+ template <typename T>
+ friend inline LUTIndex operator*(const T v, const LUTIndex & index)
+ {
+ return LUTIndex{(v * index.toUnderType()) & date_lut_mask};
+ }
+
+ template <typename T>
+ friend inline LUTIndex operator/(const LUTIndex & index, const T v)
+ {
+ return LUTIndex{(index.toUnderType() / UInt32(v)) & date_lut_mask};
+ }
+
+ template <typename T>
+ friend inline LUTIndex operator/(const T v, const LUTIndex & index)
+ {
+ return LUTIndex{(UInt32(v) / index.toUnderType()) & date_lut_mask};
+ }
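+
+ /// Example: with date_lut_mask = 0x1ffff, the masked arithmetic above wraps LUTIndex(0) - 1
+ /// to LUTIndex(0x1ffff) (the last element of the table) instead of producing an out-of-range index.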
+
public:
/// The order of fields matters for alignment and sizeof.
struct Values
{
- /// Least significat 32 bits from time_t at beginning of the day.
- /// If the unix timestamp of beginning of the day is negative (example: 1970-01-01 MSK, where time_t == -10800), then value will overflow.
- /// Change to time_t; change constants above; and recompile the sources if you need to support time after 2105 year.
- UInt32 date;
+ /// time_t at beginning of the day.
+ Int64 date;
/// Properties of the day.
UInt16 year;
@@ -65,107 +136,189 @@ public:
UInt8 days_in_month;
/// For days, when offset from UTC was changed due to daylight saving time or permanent change, following values could be non zero.
- Int16 amount_of_offset_change; /// Usually -3600 or 3600, but look at Lord Howe Island.
- UInt32 time_at_offset_change; /// In seconds from beginning of the day.
+ /// All in OffsetChangeFactor (15 minute) intervals.
+ Int8 amount_of_offset_change_value; /// Usually -4 or 4, but look at Lord Howe Island. Multiply by OffsetChangeFactor
+ UInt8 time_at_offset_change_value; /// In seconds from beginning of the day. Multiply by OffsetChangeFactor
+
+ inline Int32 amount_of_offset_change() const
+ {
+ return static_cast(amount_of_offset_change_value) * OffsetChangeFactor;
+ }
+
+ inline UInt32 time_at_offset_change() const
+ {
+ return static_cast(time_at_offset_change_value) * OffsetChangeFactor;
+ }
+
+ /// Since most of the modern timezones have a DST change aligned to 15 minutes, to save as much space as possible inside Value,
+ /// we are dividing any offset change related value by this factor before setting it to Value,
+ /// hence it has to be explicitly multiplied back by this factor before being used.
+ static constexpr UInt16 OffsetChangeFactor = 900;
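+
+ /// Example: a typical one-hour DST shift is stored as amount_of_offset_change_value = 4
+ /// (4 * 900 = 3600 seconds), and a change at 02:00 local time as time_at_offset_change_value = 8
+ /// (8 * 900 = 7200 seconds).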
};
static_assert(sizeof(Values) == 16);
private:
- /// Lookup table is indexed by DayNum.
+
+ /// Mask is all-ones to allow efficient protection against overflow.
+ static constexpr UInt32 date_lut_mask = 0x1ffff;
+ static_assert(date_lut_mask == DATE_LUT_SIZE - 1);
+
+ /// Offset to epoch in days (ExtendedDayNum) of the first day in LUT.
+ /// "epoch" is the Unix Epoch (starts at unix timestamp zero)
+ static constexpr UInt32 daynum_offset_epoch = 16436;
+ static_assert(daynum_offset_epoch == (1970 - DATE_LUT_MIN_YEAR) * 365 + (1970 - DATE_LUT_MIN_YEAR / 4 * 4) / 4);
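+ /// Worked out for DATE_LUT_MIN_YEAR = 1925: (1970 - 1925) * 365 = 16425 regular days,
+ /// plus (1970 - 1924) / 4 = 11 leap days (1928..1968), giving 16436.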
+
+ /// Lookup table is indexed by LUTIndex.
/// Day nums are the same in all time zones. 1970-01-01 is 0 and so on.
/// Table is relatively large, so better not to place the object on stack.
/// In comparison to std::vector, plain array is cheaper by one indirection.
- Values lut[DATE_LUT_SIZE];
+ Values lut[DATE_LUT_SIZE + 1];
- /// Year number after DATE_LUT_MIN_YEAR -> day num for start of year.
- DayNum years_lut[DATE_LUT_YEARS];
+ /// Year number after DATE_LUT_MIN_YEAR -> LUTIndex in lut for start of year.
+ LUTIndex years_lut[DATE_LUT_YEARS];
/// Year number after DATE_LUT_MIN_YEAR * month number starting at zero -> day num for first day of month
- DayNum years_months_lut[DATE_LUT_YEARS * 12];
+ LUTIndex years_months_lut[DATE_LUT_YEARS * 12];
/// UTC offset at beginning of the Unix epoch. The same as unix timestamp of 1970-01-01 00:00:00 local time.
time_t offset_at_start_of_epoch;
- bool offset_is_whole_number_of_hours_everytime;
+ /// UTC offset at the beginning of the first supported year.
+ time_t offset_at_start_of_lut;
+ bool offset_is_whole_number_of_hours_during_epoch;
/// Time zone name.
std::string time_zone;
-
- /// We can correctly process only timestamps that less DATE_LUT_MAX (i.e. up to 2105 year inclusively)
- /// We don't care about overflow.
- inline DayNum findIndex(time_t t) const
+ inline LUTIndex findIndex(time_t t) const
{
/// First guess.
- DayNum guess(t / 86400);
+ Int64 guess = (t / 86400) + daynum_offset_epoch;
+
+ /// For negative time_t the integer division was rounded up, so the guess is offset by one.
+ if (unlikely(t < 0))
+ --guess;
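+
+ /// Example: for t = -1 (1969-12-31 23:59:59 UTC), -1 / 86400 == 0 in C++ (truncation toward zero),
+ /// so without this correction the guess would point at 1970-01-01 instead of 1969-12-31.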
+
+ if (guess < 0)
+ return LUTIndex(0);
+ if (guess >= DATE_LUT_SIZE)
+ return LUTIndex(DATE_LUT_SIZE - 1);
/// UTC offset is from -12 to +14 in all known time zones. This requires checking only three indices.
- if ((guess == 0 || t >= lut[guess].date) && t < lut[DayNum(guess + 1)].date)
- return guess;
+ if (t >= lut[guess].date)
+ {
+ if (guess + 1 >= DATE_LUT_SIZE || t < lut[guess + 1].date)
+ return LUTIndex(guess);
- /// Time zones that have offset 0 from UTC do daylight saving time change (if any) towards increasing UTC offset (example: British Standard Time).
- if (t >= lut[DayNum(guess + 1)].date)
- return DayNum(guess + 1);
+ return LUTIndex(guess + 1);
+ }
- return DayNum(guess - 1);
+ return LUTIndex(guess ? guess - 1 : 0);
}
- inline const Values & find(time_t t) const
+ inline LUTIndex toLUTIndex(DayNum d) const
{
- return lut[findIndex(t)];
+ return LUTIndex{(d + daynum_offset_epoch) & date_lut_mask};
+ }
+
+ inline LUTIndex toLUTIndex(ExtendedDayNum d) const
+ {
+ return LUTIndex{static_cast(d + daynum_offset_epoch) & date_lut_mask};
+ }
+
+ inline LUTIndex toLUTIndex(time_t t) const
+ {
+ return findIndex(t);
+ }
+
+ inline LUTIndex toLUTIndex(LUTIndex i) const
+ {
+ return i;
+ }
+
+ template <typename DateOrTime>
+ inline const Values & find(DateOrTime v) const
+ {
+ return lut[toLUTIndex(v)];
+ }
+
+ template <typename T, typename Divisor>
+ static inline T roundDown(T x, Divisor divisor)
+ {
+ static_assert(std::is_integral_v && std::is_integral_v);
+ assert(divisor > 0);
+
+ if (likely(x >= 0))
+ return x / divisor * divisor;
+
+ /// Integer division for negative numbers rounds them towards zero (up).
+ /// We will shift the number so it will be rounded towards -inf (down).
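+ /// Example: roundDown(-1, 86400) = (-1 + 1 - 86400) / 86400 * 86400 = -86400,
+ /// while plain integer division would give 0.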
+
+ return (x + 1 - divisor) / divisor * divisor;
}
public:
const std::string & getTimeZone() const { return time_zone; }
+ // Methods only for unit-testing; it makes very little sense to use them from user code.
+ auto getOffsetAtStartOfEpoch() const { return offset_at_start_of_epoch; }
+ auto getTimeOffsetAtStartOfLUT() const { return offset_at_start_of_lut; }
+
/// All functions below are thread-safe; arguments are not checked.
- inline time_t toDate(time_t t) const { return find(t).date; }
- inline unsigned toMonth(time_t t) const { return find(t).month; }
- inline unsigned toQuarter(time_t t) const { return (find(t).month - 1) / 3 + 1; }
- inline unsigned toYear(time_t t) const { return find(t).year; }
- inline unsigned toDayOfWeek(time_t t) const { return find(t).day_of_week; }
- inline unsigned toDayOfMonth(time_t t) const { return find(t).day_of_month; }
+ inline ExtendedDayNum toDayNum(ExtendedDayNum d) const
+ {
+ return d;
+ }
+
+ template <typename DateOrTime>
+ inline ExtendedDayNum toDayNum(DateOrTime v) const
+ {
+ return ExtendedDayNum{static_cast(toLUTIndex(v).toUnderType() - daynum_offset_epoch)};
+ }
/// Round down to start of monday.
- inline time_t toFirstDayOfWeek(time_t t) const
+ template <typename DateOrTime>
+ inline time_t toFirstDayOfWeek(DateOrTime v) const
{
- DayNum index = findIndex(t);
- return lut[DayNum(index - (lut[index].day_of_week - 1))].date;
+ const LUTIndex i = toLUTIndex(v);
+ return lut[i - (lut[i].day_of_week - 1)].date;
}
- inline DayNum toFirstDayNumOfWeek(DayNum d) const
+ template <typename DateOrTime>
+ inline ExtendedDayNum toFirstDayNumOfWeek(DateOrTime v) const
{
- return DayNum(d - (lut[d].day_of_week - 1));
- }
-
- inline DayNum toFirstDayNumOfWeek(time_t t) const
- {
- return toFirstDayNumOfWeek(toDayNum(t));
+ const LUTIndex i = toLUTIndex(v);
+ return toDayNum(i - (lut[i].day_of_week - 1));
}
/// Round down to start of month.
- inline time_t toFirstDayOfMonth(time_t t) const
+ template <typename DateOrTime>
+ inline time_t toFirstDayOfMonth(DateOrTime v) const
{
- DayNum index = findIndex(t);
- return lut[index - (lut[index].day_of_month - 1)].date;
+ const LUTIndex i = toLUTIndex(v);
+ return lut[i - (lut[i].day_of_month - 1)].date;
}
- inline DayNum toFirstDayNumOfMonth(DayNum d) const
+ template <typename DateOrTime>
+ inline ExtendedDayNum toFirstDayNumOfMonth(DateOrTime v) const
{
- return DayNum(d - (lut[d].day_of_month - 1));
- }
-
- inline DayNum toFirstDayNumOfMonth(time_t t) const
- {
- return toFirstDayNumOfMonth(toDayNum(t));
+ const LUTIndex i = toLUTIndex(v);
+ return toDayNum(i - (lut[i].day_of_month - 1));
}
/// Round down to start of quarter.
- inline DayNum toFirstDayNumOfQuarter(DayNum d) const
+ template <typename DateOrTime>
+ inline ExtendedDayNum toFirstDayNumOfQuarter(DateOrTime v) const
{
- DayNum index = d;
+ return toDayNum(toFirstDayOfQuarterIndex(v));
+ }
+
+ template <typename DateOrTime>
+ inline LUTIndex toFirstDayOfQuarterIndex(DateOrTime v) const
+ {
+ LUTIndex index = toLUTIndex(v);
size_t month_inside_quarter = (lut[index].month - 1) % 3;
index -= lut[index].day_of_month;
@@ -175,17 +328,13 @@ public:
--month_inside_quarter;
}
- return DayNum(index + 1);
+ return index + 1;
}
- inline DayNum toFirstDayNumOfQuarter(time_t t) const
+ template <typename DateOrTime>
+ inline time_t toFirstDayOfQuarter(DateOrTime v) const
{
- return toFirstDayNumOfQuarter(toDayNum(t));
- }
-
- inline time_t toFirstDayOfQuarter(time_t t) const
- {
- return fromDayNum(toFirstDayNumOfQuarter(t));
+ return toDate(toFirstDayOfQuarterIndex(v));
}
/// Round down to start of year.
@@ -194,48 +343,47 @@ public:
return lut[years_lut[lut[findIndex(t)].year - DATE_LUT_MIN_YEAR]].date;
}
- inline DayNum toFirstDayNumOfYear(DayNum d) const
+ template <typename DateOrTime>
+ inline LUTIndex toFirstDayNumOfYearIndex(DateOrTime v) const
{
- return years_lut[lut[d].year - DATE_LUT_MIN_YEAR];
+ return years_lut[lut[toLUTIndex(v)].year - DATE_LUT_MIN_YEAR];
}
- inline DayNum toFirstDayNumOfYear(time_t t) const
+ template <typename DateOrTime>
+ inline ExtendedDayNum toFirstDayNumOfYear(DateOrTime v) const
{
- return toFirstDayNumOfYear(toDayNum(t));
+ return toDayNum(toFirstDayNumOfYearIndex(v));
}
inline time_t toFirstDayOfNextMonth(time_t t) const
{
- DayNum index = findIndex(t);
+ LUTIndex index = findIndex(t);
index += 32 - lut[index].day_of_month;
return lut[index - (lut[index].day_of_month - 1)].date;
}
inline time_t toFirstDayOfPrevMonth(time_t t) const
{
- DayNum index = findIndex(t);
+ LUTIndex index = findIndex(t);
index -= lut[index].day_of_month;
return lut[index - (lut[index].day_of_month - 1)].date;
}
- inline UInt8 daysInMonth(DayNum d) const
+ template <typename DateOrTime>
+ inline UInt8 daysInMonth(DateOrTime value) const
{
- return lut[d].days_in_month;
+ const LUTIndex i = toLUTIndex(value);
+ return lut[i].days_in_month;
}
- inline UInt8 daysInMonth(time_t t) const
- {
- return find(t).days_in_month;
- }
-
- inline UInt8 daysInMonth(UInt16 year, UInt8 month) const
+ inline UInt8 daysInMonth(Int16 year, UInt8 month) const
{
UInt16 idx = year - DATE_LUT_MIN_YEAR;
if (unlikely(idx >= DATE_LUT_YEARS))
return 31; /// Implementation specific behaviour on overflow.
/// 32 makes the arithmetic simpler.
- DayNum any_day_of_month = DayNum(years_lut[idx] + 32 * (month - 1));
+ const auto any_day_of_month = years_lut[year - DATE_LUT_MIN_YEAR] + 32 * (month - 1);
return lut[any_day_of_month].days_in_month;
}
@@ -243,101 +391,111 @@ public:
*/
inline time_t toDateAndShift(time_t t, Int32 days) const
{
- return lut[DayNum(findIndex(t) + days)].date;
+ return lut[findIndex(t) + days].date;
}
inline time_t toTime(time_t t) const
{
- DayNum index = findIndex(t);
-
- if (unlikely(index == 0 || index > DATE_LUT_MAX_DAY_NUM))
- return t + offset_at_start_of_epoch;
+ const LUTIndex index = findIndex(t);
time_t res = t - lut[index].date;
- if (res >= lut[index].time_at_offset_change)
- res += lut[index].amount_of_offset_change;
+ if (res >= lut[index].time_at_offset_change())
+ res += lut[index].amount_of_offset_change();
return res - offset_at_start_of_epoch; /// Starting at 1970-01-01 00:00:00 local time.
}
inline unsigned toHour(time_t t) const
{
- DayNum index = findIndex(t);
-
- /// If it is overflow case,
- /// then limit number of hours to avoid insane results like 1970-01-01 89:28:15
- if (unlikely(index == 0 || index > DATE_LUT_MAX_DAY_NUM))
- return static_cast<unsigned>((t + offset_at_start_of_epoch) / 3600) % 24;
+ const LUTIndex index = findIndex(t);
time_t time = t - lut[index].date;
- if (time >= lut[index].time_at_offset_change)
- time += lut[index].amount_of_offset_change;
+ if (time >= lut[index].time_at_offset_change())
+ time += lut[index].amount_of_offset_change();
unsigned res = time / 3600;
- return res <= 23 ? res : 0;
+
+ /// In case time was changed backwards at the start of the next day, we repeat the hour 23.
+ return res <= 23 ? res : 23;
}
/** Calculating offset from UTC in seconds.
- * which means Using the same literal time of "t" to get the corresponding timestamp in UTC,
- * then subtract the former from the latter to get the offset result.
- * The boundaries when meets DST(daylight saving time) change should be handled very carefully.
- */
+ * which means: use the same literal time of "t" to get the corresponding timestamp in UTC,
+ * then subtract the former from the latter to get the offset.
+ * The boundaries at DST (daylight saving time) changes should be handled very carefully.
+ */
inline time_t timezoneOffset(time_t t) const
{
- DayNum index = findIndex(t);
+ const LUTIndex index = findIndex(t);
/// Calculate the daylight saving offset first.
/// Because "amount_of_offset_change" only exists in the LUT entry for the day of the change, it would be costly to scan from the very beginning;
/// instead, the accumulated offset from 1970-01-01 to that day follows from the difference between lut[].date values,
/// and subtracting whole multiples of 86400 yields the real DST offset (leap seconds are not considered here).
- time_t res = (lut[index].date - lut[0].date) % 86400;
+ time_t res = (lut[index].date - lut[daynum_offset_epoch].date) % 86400;
+
/// As far as is known, the maximal DST offset cannot exceed 2 hours, so after the modulo operation the remainder
/// sits within [-offset --> 0 --> offset], corresponding respectively to moving the clock forward or backward.
res = res > 43200 ? (86400 - res) : (0 - res);
/// Check if there is an offset change during this day, and add the change once that point is crossed.
- if (lut[index].amount_of_offset_change != 0 && t >= lut[index].date + lut[index].time_at_offset_change)
- res += lut[index].amount_of_offset_change;
+ if (lut[index].amount_of_offset_change() != 0 && t >= lut[index].date + lut[index].time_at_offset_change())
+ res += lut[index].amount_of_offset_change();
return res + offset_at_start_of_epoch;
}
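
A worked illustration of the calculation above, for a hypothetical zone with base offset UTC+05:30 (19800 seconds) and a one-hour DST shift:

    /// In the fixed part of the year, every lut[i].date is an exact multiple of
    /// 86400 away from lut[daynum_offset_epoch].date, so res == 0
    /// and the function returns 0 + offset_at_start_of_epoch == 19800.
    ///
    /// After a +3600 DST shift, local midnight occurs 3600 seconds earlier in UTC,
    /// so the difference is 86400 * k - 3600 and the modulo yields 82800.
    /// Since 82800 > 43200, res = 86400 - 82800 = 3600,
    /// and the result is 19800 + 3600 = 23400: base offset plus DST, as expected.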
- /** Only for time zones with/when offset from UTC is multiple of five minutes.
- * This is true for all time zones: right now, all time zones have an offset that is multiple of 15 minutes.
- *
- * "By 1929, most major countries had adopted hourly time zones. Nepal was the last
- * country to adopt a standard offset, shifting slightly to UTC+5:45 in 1986."
- * - https://en.wikipedia.org/wiki/Time_zone#Offsets_from_UTC
- *
- * Also please note, that unix timestamp doesn't count "leap seconds":
- * each minute, with added or subtracted leap second, spans exactly 60 unix timestamps.
- */
- inline unsigned toSecond(time_t t) const { return UInt32(t) % 60; }
+ inline unsigned toSecond(time_t t) const
+ {
+ auto res = t % 60;
+ if (likely(res >= 0))
+ return res;
+ return res + 60;
+ }
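
The extra branch exists because the C++ % operator keeps the sign of the dividend; a small self-contained check:

    #include <cassert>

    int main()
    {
        /// C++ % keeps the sign of the dividend, so plain t % 60 is wrong for t < 0:
        assert(-1 % 60 == -1);

        /// The correction in toSecond(): 1969-12-31 23:59:59 has second == 59.
        long long t = -1;
        long long res = t % 60;
        if (res < 0)
            res += 60;
        assert(res == 59);
    }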
inline unsigned toMinute(time_t t) const
{
- if (offset_is_whole_number_of_hours_everytime)
- return (UInt32(t) / 60) % 60;
+ if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
+ return (t / 60) % 60;
- UInt32 date = find(t).date;
- return (UInt32(t) - date) / 60 % 60;
+ /// Take DST changes within this day into account,
+ /// as well as special time zones with a non-whole-hour offset, such as 'Australia/Lord_Howe'.
+
+ LUTIndex index = findIndex(t);
+ UInt32 time = t - lut[index].date;
+
+ if (time >= lut[index].time_at_offset_change())
+ time += lut[index].amount_of_offset_change();
+
+ return time / 60 % 60;
}
- inline time_t toStartOfMinute(time_t t) const { return t / 60 * 60; }
- inline time_t toStartOfFiveMinute(time_t t) const { return t / 300 * 300; }
- inline time_t toStartOfFifteenMinutes(time_t t) const { return t / 900 * 900; }
- inline time_t toStartOfTenMinutes(time_t t) const { return t / 600 * 600; }
+ /// NOTE: Assuming timezone offset is a multiple of 15 minutes.
+ inline time_t toStartOfMinute(time_t t) const { return roundDown(t, 60); }
+ inline time_t toStartOfFiveMinute(time_t t) const { return roundDown(t, 300); }
+ inline time_t toStartOfFifteenMinutes(time_t t) const { return roundDown(t, 900); }
+ inline time_t toStartOfTenMinutes(time_t t) const
+ {
+ if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
+ return t / 600 * 600;
+
+ /// More complex logic is for Nepal - it has offset 05:45. Australia/Eucla is also unfortunate.
+ Int64 date = find(t).date;
+ return date + (t - date) / 600 * 600;
+ }
+
+ /// NOTE: Assuming timezone transitions are multiple of hours. Lord Howe Island in Australia is a notable exception.
inline time_t toStartOfHour(time_t t) const
{
- if (offset_is_whole_number_of_hours_everytime)
+ if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
return t / 3600 * 3600;
- UInt32 date = find(t).date;
- return date + (UInt32(t) - date) / 3600 * 3600;
+ Int64 date = find(t).date;
+ return date + (t - date) / 3600 * 3600;
}
/** Number of calendar day since the beginning of UNIX epoch (1970-01-01 is zero)
@@ -348,80 +506,89 @@ public:
* because the same calendar day starts/ends at different timestamps in different time zones)
*/
- inline DayNum toDayNum(time_t t) const { return findIndex(t); }
- inline time_t fromDayNum(DayNum d) const { return lut[d].date; }
+ inline time_t fromDayNum(DayNum d) const { return lut[toLUTIndex(d)].date; }
+ inline time_t fromDayNum(ExtendedDayNum d) const { return lut[toLUTIndex(d)].date; }
- inline time_t toDate(DayNum d) const { return lut[d].date; }
- inline unsigned toMonth(DayNum d) const { return lut[d].month; }
- inline unsigned toQuarter(DayNum d) const { return (lut[d].month - 1) / 3 + 1; }
- inline unsigned toYear(DayNum d) const { return lut[d].year; }
- inline unsigned toDayOfWeek(DayNum d) const { return lut[d].day_of_week; }
- inline unsigned toDayOfMonth(DayNum d) const { return lut[d].day_of_month; }
- inline unsigned toDayOfYear(DayNum d) const { return d + 1 - toFirstDayNumOfYear(d); }
+ template <typename DateOrTime>
+ inline time_t toDate(DateOrTime v) const { return lut[toLUTIndex(v)].date; }
- inline unsigned toDayOfYear(time_t t) const { return toDayOfYear(toDayNum(t)); }
+ template <typename DateOrTime>
+ inline unsigned toMonth(DateOrTime v) const { return lut[toLUTIndex(v)].month; }
+
+ template <typename DateOrTime>
+ inline unsigned toQuarter(DateOrTime v) const { return (lut[toLUTIndex(v)].month - 1) / 3 + 1; }
+
+ template <typename DateOrTime>
+ inline Int16 toYear(DateOrTime v) const { return lut[toLUTIndex(v)].year; }
+
+ template <typename DateOrTime>
+ inline unsigned toDayOfWeek(DateOrTime v) const { return lut[toLUTIndex(v)].day_of_week; }
+
+ template <typename DateOrTime>
+ inline unsigned toDayOfMonth(DateOrTime v) const { return lut[toLUTIndex(v)].day_of_month; }
+
+ template <typename DateOrTime>
+ inline unsigned toDayOfYear(DateOrTime v) const
+ {
+ // TODO: different overload for ExtendedDayNum
+ const LUTIndex i = toLUTIndex(v);
+ return i + 1 - toFirstDayNumOfYearIndex(i);
+ }
/// Number of week from some fixed moment in the past. Week begins at monday.
/// (round down to monday and divide DayNum by 7; we made an assumption,
/// that in domain of the function there was no weeks with any other number of days than 7)
- inline unsigned toRelativeWeekNum(DayNum d) const
+ template <typename DateOrTime>
+ inline unsigned toRelativeWeekNum(DateOrTime v) const
{
+ const LUTIndex i = toLUTIndex(v);
/// We add 8 to avoid underflow at beginning of unix epoch.
- return (d + 8 - toDayOfWeek(d)) / 7;
- }
-
- inline unsigned toRelativeWeekNum(time_t t) const
- {
- return toRelativeWeekNum(toDayNum(t));
+ return toDayNum(i + 8 - toDayOfWeek(i)) / 7;
}
/// Get year that contains most of the current week. Week begins at monday.
- inline unsigned toISOYear(DayNum d) const
+ template <typename DateOrTime>
+ inline unsigned toISOYear(DateOrTime v) const
{
+ const LUTIndex i = toLUTIndex(v);
/// That's effectively the year of thursday of current week.
- return toYear(DayNum(d + 4 - toDayOfWeek(d)));
- }
-
- inline unsigned toISOYear(time_t t) const
- {
- return toISOYear(toDayNum(t));
+ return toYear(toLUTIndex(i + 4 - toDayOfWeek(i)));
}
/// ISO year begins with a monday of the week that is contained more than by half in the corresponding calendar year.
/// Example: ISO year 2019 begins at 2018-12-31. And ISO year 2017 begins at 2017-01-02.
/// https://en.wikipedia.org/wiki/ISO_week_date
- inline DayNum toFirstDayNumOfISOYear(DayNum d) const
+ template <typename DateOrTime>
+ inline LUTIndex toFirstDayNumOfISOYearIndex(DateOrTime v) const
{
- auto iso_year = toISOYear(d);
+ const LUTIndex i = toLUTIndex(v);
+ auto iso_year = toISOYear(i);
- DayNum first_day_of_year = years_lut[iso_year - DATE_LUT_MIN_YEAR];
+ const auto first_day_of_year = years_lut[iso_year - DATE_LUT_MIN_YEAR];
auto first_day_of_week_of_year = lut[first_day_of_year].day_of_week;
- return DayNum(first_day_of_week_of_year <= 4
+ return LUTIndex{first_day_of_week_of_year <= 4
? first_day_of_year + 1 - first_day_of_week_of_year
- : first_day_of_year + 8 - first_day_of_week_of_year);
+ : first_day_of_year + 8 - first_day_of_week_of_year};
}
- inline DayNum toFirstDayNumOfISOYear(time_t t) const
+ template <typename DateOrTime>
+ inline ExtendedDayNum toFirstDayNumOfISOYear(DateOrTime v) const
{
- return toFirstDayNumOfISOYear(toDayNum(t));
+ return toDayNum(toFirstDayNumOfISOYearIndex(v));
}
inline time_t toFirstDayOfISOYear(time_t t) const
{
- return fromDayNum(toFirstDayNumOfISOYear(t));
+ return lut[toFirstDayNumOfISOYearIndex(t)].date;
}
/// ISO 8601 week number. Week begins at monday.
/// The week number 1 is the first week in year that contains 4 or more days (that's more than half).
- inline unsigned toISOWeek(DayNum d) const
+ template <typename DateOrTime>
+ inline unsigned toISOWeek(DateOrTime v) const
{
- return 1 + DayNum(toFirstDayNumOfWeek(d) - toFirstDayNumOfISOYear(d)) / 7;
- }
-
- inline unsigned toISOWeek(time_t t) const
- {
- return toISOWeek(toDayNum(t));
+ return 1 + (toFirstDayNumOfWeek(v) - toFirstDayNumOfISOYear(v)) / 7;
}
/*
@@ -457,30 +624,33 @@ public:
Otherwise it is the last week of the previous year, and the
next week is week 1.
*/
- inline YearWeek toYearWeek(DayNum d, UInt8 week_mode) const
+ template <typename DateOrTime>
+ inline YearWeek toYearWeek(DateOrTime v, UInt8 week_mode) const
{
- bool newyear_day_mode = week_mode & static_cast<UInt8>(WeekModeFlag::NEWYEAR_DAY);
+ const bool newyear_day_mode = week_mode & static_cast<UInt8>(WeekModeFlag::NEWYEAR_DAY);
week_mode = check_week_mode(week_mode);
- bool monday_first_mode = week_mode & static_cast<UInt8>(WeekModeFlag::MONDAY_FIRST);
+ const bool monday_first_mode = week_mode & static_cast<UInt8>(WeekModeFlag::MONDAY_FIRST);
bool week_year_mode = week_mode & static_cast<UInt8>(WeekModeFlag::YEAR);
- bool first_weekday_mode = week_mode & static_cast<UInt8>(WeekModeFlag::FIRST_WEEKDAY);
+ const bool first_weekday_mode = week_mode & static_cast<UInt8>(WeekModeFlag::FIRST_WEEKDAY);
+
+ const LUTIndex i = toLUTIndex(v);
// Calculate week number of WeekModeFlag::NEWYEAR_DAY mode
if (newyear_day_mode)
{
- return toYearWeekOfNewyearMode(d, monday_first_mode);
+ return toYearWeekOfNewyearMode(i, monday_first_mode);
}
- YearWeek yw(toYear(d), 0);
+ YearWeek yw(toYear(i), 0);
UInt16 days = 0;
- UInt16 daynr = makeDayNum(yw.first, toMonth(d), toDayOfMonth(d));
- UInt16 first_daynr = makeDayNum(yw.first, 1, 1);
+ const auto daynr = makeDayNum(yw.first, toMonth(i), toDayOfMonth(i));
+ auto first_daynr = makeDayNum(yw.first, 1, 1);
// 0 for monday, 1 for tuesday ...
// get weekday from first day in year.
- UInt16 weekday = calc_weekday(DayNum(first_daynr), !monday_first_mode);
+ UInt16 weekday = calc_weekday(first_daynr, !monday_first_mode);
- if (toMonth(d) == 1 && toDayOfMonth(d) <= static_cast<UInt32>(7 - weekday))
+ if (toMonth(i) == 1 && toDayOfMonth(i) <= static_cast<UInt32>(7 - weekday))
{
if (!week_year_mode && ((first_weekday_mode && weekday != 0) || (!first_weekday_mode && weekday >= 4)))
return yw;
@@ -511,48 +681,51 @@ public:
/// Calculate week number of WeekModeFlag::NEWYEAR_DAY mode
/// The week number 1 is the first week in year that contains January 1,
- inline YearWeek toYearWeekOfNewyearMode(DayNum d, bool monday_first_mode) const
+ template <typename DateOrTime>
+ inline YearWeek toYearWeekOfNewyearMode(DateOrTime v, bool monday_first_mode) const
{
YearWeek yw(0, 0);
UInt16 offset_day = monday_first_mode ? 0U : 1U;
+ const LUTIndex i = LUTIndex(v);
+
// Checking the week across the year
- yw.first = toYear(DayNum(d + 7 - toDayOfWeek(DayNum(d + offset_day))));
+ yw.first = toYear(i + 7 - toDayOfWeek(i + offset_day));
- DayNum first_day = makeDayNum(yw.first, 1, 1);
- DayNum this_day = d;
+ auto first_day = makeLUTIndex(yw.first, 1, 1);
+ auto this_day = i;
+ // TODO: do not perform calculations in terms of DayNum, since that would under/overflow for extended range.
if (monday_first_mode)
{
// Rounds down a date to the nearest Monday.
first_day = toFirstDayNumOfWeek(first_day);
- this_day = toFirstDayNumOfWeek(d);
+ this_day = toFirstDayNumOfWeek(i);
}
else
{
// Rounds down a date to the nearest Sunday.
if (toDayOfWeek(first_day) != 7)
- first_day = DayNum(first_day - toDayOfWeek(first_day));
- if (toDayOfWeek(d) != 7)
- this_day = DayNum(d - toDayOfWeek(d));
+ first_day = ExtendedDayNum(first_day - toDayOfWeek(first_day));
+ if (toDayOfWeek(i) != 7)
+ this_day = ExtendedDayNum(i - toDayOfWeek(i));
}
yw.second = (this_day - first_day) / 7 + 1;
return yw;
}
- /**
- * get first day of week with week_mode, return Sunday or Monday
- */
- inline DayNum toFirstDayNumOfWeek(DayNum d, UInt8 week_mode) const
+ /// Get first day of week with week_mode, return Sunday or Monday
+ template <typename DateOrTime>
+ inline ExtendedDayNum toFirstDayNumOfWeek(DateOrTime v, UInt8 week_mode) const
{
bool monday_first_mode = week_mode & static_cast<UInt8>(WeekModeFlag::MONDAY_FIRST);
if (monday_first_mode)
{
- return toFirstDayNumOfWeek(d);
+ return toFirstDayNumOfWeek(v);
}
else
{
- return (toDayOfWeek(d) != 7) ? DayNum(d - toDayOfWeek(d)) : d;
+ return (toDayOfWeek(v) != 7) ? ExtendedDayNum(v - toDayOfWeek(v)) : toDayNum(v);
}
}
@@ -568,192 +741,231 @@ public:
/** Calculate weekday from d.
* Returns 0 for monday, 1 for tuesday...
*/
- inline unsigned calc_weekday(DayNum d, bool sunday_first_day_of_week) const
+ template <typename DateOrTime>
+ inline unsigned calc_weekday(DateOrTime v, bool sunday_first_day_of_week) const
{
+ const LUTIndex i = toLUTIndex(v);
if (!sunday_first_day_of_week)
- return toDayOfWeek(d) - 1;
+ return toDayOfWeek(i) - 1;
else
- return toDayOfWeek(DayNum(d + 1)) - 1;
+ return toDayOfWeek(i + 1) - 1;
}
/// Calculate days in one year.
- inline unsigned calc_days_in_year(UInt16 year) const
+ inline unsigned calc_days_in_year(Int32 year) const
{
return ((year & 3) == 0 && (year % 100 || (year % 400 == 0 && year)) ? 366 : 365);
}
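
The bit-twiddled condition above is the Gregorian leap-year rule; a sketch spelling it out (ignoring the extra guard for year 0):

    #include <cassert>

    /// Divisible by 4, except centuries, except centuries divisible by 400.
    bool isLeap(int year)
    {
        return (year & 3) == 0 && (year % 100 != 0 || year % 400 == 0);
    }

    int main()
    {
        assert(isLeap(2000));   /// divisible by 400
        assert(!isLeap(1900));  /// a century not divisible by 400
        assert(isLeap(2024));
        assert(!isLeap(2023));
    }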
/// Number of month from some fixed moment in the past (year * 12 + month)
- inline unsigned toRelativeMonthNum(DayNum d) const
+ template <typename DateOrTime>
+ inline unsigned toRelativeMonthNum(DateOrTime v) const
{
- return lut[d].year * 12 + lut[d].month;
+ const LUTIndex i = toLUTIndex(v);
+ return lut[i].year * 12 + lut[i].month;
}
- inline unsigned toRelativeMonthNum(time_t t) const
+ template <typename DateOrTime>
+ inline unsigned toRelativeQuarterNum(DateOrTime v) const
{
- return toRelativeMonthNum(toDayNum(t));
- }
-
- inline unsigned toRelativeQuarterNum(DayNum d) const
- {
- return lut[d].year * 4 + (lut[d].month - 1) / 3;
- }
-
- inline unsigned toRelativeQuarterNum(time_t t) const
- {
- return toRelativeQuarterNum(toDayNum(t));
+ const LUTIndex i = toLUTIndex(v);
+ return lut[i].year * 4 + (lut[i].month - 1) / 3;
}
/// We count all hour-length intervals, unrelated to offset changes.
inline time_t toRelativeHourNum(time_t t) const
{
- if (offset_is_whole_number_of_hours_everytime)
+ if (t >= 0 && offset_is_whole_number_of_hours_during_epoch)
return t / 3600;
/// Assume that if offset was fractional, then the fraction is the same as at the beginning of epoch.
/// NOTE This assumption is false for "Pacific/Pitcairn" and "Pacific/Kiritimati" time zones.
- return (t + 86400 - offset_at_start_of_epoch) / 3600;
+ return (t + DATE_LUT_ADD + 86400 - offset_at_start_of_epoch) / 3600 - (DATE_LUT_ADD / 3600);
}
- inline time_t toRelativeHourNum(DayNum d) const
+ template <typename DateOrTime>
+ inline time_t toRelativeHourNum(DateOrTime v) const
{
- return toRelativeHourNum(lut[d].date);
+ return toRelativeHourNum(lut[toLUTIndex(v)].date);
}
inline time_t toRelativeMinuteNum(time_t t) const
{
- return t / 60;
+ return (t + DATE_LUT_ADD) / 60 - (DATE_LUT_ADD / 60);
}
- inline time_t toRelativeMinuteNum(DayNum d) const
+ template <typename DateOrTime>
+ inline time_t toRelativeMinuteNum(DateOrTime v) const
{
- return toRelativeMinuteNum(lut[d].date);
+ return toRelativeMinuteNum(lut[toLUTIndex(v)].date);
}
- inline DayNum toStartOfYearInterval(DayNum d, UInt64 years) const
+ template <typename DateOrTime>
+ inline ExtendedDayNum toStartOfYearInterval(DateOrTime v, UInt64 years) const
{
if (years == 1)
- return toFirstDayNumOfYear(d);
- return years_lut[(lut[d].year - DATE_LUT_MIN_YEAR) / years * years];
+ return toFirstDayNumOfYear(v);
+
+ const LUTIndex i = toLUTIndex(v);
+
+ UInt16 year = lut[i].year / years * years;
+
+ /// For example, rounding 1925 down to 100 years gives 1900, which is less than the minimum supported year.
+ if (unlikely(year < DATE_LUT_MIN_YEAR))
+ year = DATE_LUT_MIN_YEAR;
+
+ return toDayNum(years_lut[year - DATE_LUT_MIN_YEAR]);
}
- inline DayNum toStartOfQuarterInterval(DayNum d, UInt64 quarters) const
+ inline ExtendedDayNum toStartOfQuarterInterval(ExtendedDayNum d, UInt64 quarters) const
{
if (quarters == 1)
return toFirstDayNumOfQuarter(d);
return toStartOfMonthInterval(d, quarters * 3);
}
- inline DayNum toStartOfMonthInterval(DayNum d, UInt64 months) const
+ inline ExtendedDayNum toStartOfMonthInterval(ExtendedDayNum d, UInt64 months) const
{
if (months == 1)
return toFirstDayNumOfMonth(d);
- const auto & date = lut[d];
- UInt32 month_total_index = (date.year - DATE_LUT_MIN_YEAR) * 12 + date.month - 1;
- return years_months_lut[month_total_index / months * months];
+ const Values & values = lut[toLUTIndex(d)];
+ UInt32 month_total_index = (values.year - DATE_LUT_MIN_YEAR) * 12 + values.month - 1;
+ return toDayNum(years_months_lut[month_total_index / months * months]);
}
- inline DayNum toStartOfWeekInterval(DayNum d, UInt64 weeks) const
+ inline ExtendedDayNum toStartOfWeekInterval(ExtendedDayNum d, UInt64 weeks) const
{
if (weeks == 1)
return toFirstDayNumOfWeek(d);
UInt64 days = weeks * 7;
// January 1st 1970 was Thursday so we need this 4-days offset to make weeks start on Monday.
- return DayNum(4 + (d - 4) / days * days);
+ return ExtendedDayNum(4 + (d - 4) / days * days);
}
- inline time_t toStartOfDayInterval(DayNum d, UInt64 days) const
+ inline time_t toStartOfDayInterval(ExtendedDayNum d, UInt64 days) const
{
if (days == 1)
return toDate(d);
- return lut[d / days * days].date;
+ return lut[toLUTIndex(ExtendedDayNum(d / days * days))].date;
}
inline time_t toStartOfHourInterval(time_t t, UInt64 hours) const
{
if (hours == 1)
return toStartOfHour(t);
+
+ /** We round the hour number counted since midnight.
+ * This may split the day into intervals of unequal length.
+ * For example, rounding to an 11-hour interval
+ * splits the day into 00:00:00..10:59:59, 11:00:00..21:59:59 and 22:00:00..23:59:59.
+ * In case of daylight saving time or other transitions,
+ * the intervals can be shortened or prolonged by the amount of the transition.
+ */
+
UInt64 seconds = hours * 3600;
- t = t / seconds * seconds;
- if (offset_is_whole_number_of_hours_everytime)
- return t;
- return toStartOfHour(t);
+
+ const LUTIndex index = findIndex(t);
+ const Values & values = lut[index];
+
+ time_t time = t - values.date;
+ if (time >= values.time_at_offset_change())
+ {
+ /// Align to new hour numbers before rounding.
+ time += values.amount_of_offset_change();
+ time = time / seconds * seconds;
+
+ /// Should subtract the shift back but only if rounded time is not before shift.
+ if (time >= values.time_at_offset_change())
+ {
+ time -= values.amount_of_offset_change();
+
+ /// With cutoff at the time of the shift. Otherwise we may end up with something like 23:00 previous day.
+ if (time < values.time_at_offset_change())
+ time = values.time_at_offset_change();
+ }
+ }
+ else
+ {
+ time = time / seconds * seconds;
+ }
+
+ return values.date + time;
}
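
A worked trace of the transition handling above, under an assumed zone where clocks move forward one hour at two hours past local midnight, i.e. time_at_offset_change() == 7200 and amount_of_offset_change() == +3600, rounding to 2-hour intervals (seconds == 7200):

    /// time = 9000 is past the change, so renumber:   time += 3600  -> 12600
    /// round down:                                    12600 / 7200 * 7200 = 7200
    /// 7200 >= 7200, so subtract the shift back:      7200 - 3600 = 3600
    /// 3600 is now before the change, so clamp:       time = 7200
    /// result: values.date + 7200, the moment of the transition itself,
    /// rather than a timestamp on the previous calendar day.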
inline time_t toStartOfMinuteInterval(time_t t, UInt64 minutes) const
{
if (minutes == 1)
return toStartOfMinute(t);
+
+ /** In contrast to the "toStartOfHourInterval" function above,
+ * minute intervals are not aligned to midnight.
+ * You will get unexpected results if, for example, you round down to a 60-minute interval
+ * and there was a 30-minute time shift.
+ *
+ * But this is not specified in the docs and can be changed in the future.
+ */
+
UInt64 seconds = 60 * minutes;
- return t / seconds * seconds;
+ return roundDown(t, seconds);
}
inline time_t toStartOfSecondInterval(time_t t, UInt64 seconds) const
{
if (seconds == 1)
return t;
- return t / seconds * seconds;
+
+ return roundDown(t, seconds);
+ }
+
+ inline LUTIndex makeLUTIndex(Int16 year, UInt8 month, UInt8 day_of_month) const
+ {
+ if (unlikely(year < DATE_LUT_MIN_YEAR || year > DATE_LUT_MAX_YEAR || month < 1 || month > 12 || day_of_month < 1 || day_of_month > 31))
+ return LUTIndex(0);
+
+ return LUTIndex{years_months_lut[(year - DATE_LUT_MIN_YEAR) * 12 + month - 1] + day_of_month - 1};
}
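
The lookup relies on years_months_lut holding the LUT index of the first day of every (year, month) pair. For illustration, taking DATE_LUT_MIN_YEAR == 1925 as in the year-interval comment above:

    /// Index of 1970-03-15:
    ///     years_months_lut[(1970 - 1925) * 12 + (3 - 1)] + (15 - 1)
    /// i.e. the start of March 1970 plus 14 days. Any out-of-range component
    /// short-circuits to LUTIndex(0) instead of indexing past the table.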
/// Create DayNum from year, month, day of month.
- inline DayNum makeDayNum(UInt16 year, UInt8 month, UInt8 day_of_month) const
+ inline ExtendedDayNum makeDayNum(Int16 year, UInt8 month, UInt8 day_of_month) const
{
if (unlikely(year < DATE_LUT_MIN_YEAR || year > DATE_LUT_MAX_YEAR || month < 1 || month > 12 || day_of_month < 1 || day_of_month > 31))
- return DayNum(0); // TODO (nemkov, DateTime64 phase 2): implement creating real date for year outside of LUT range.
+ return ExtendedDayNum(0);
- // The day after 2106-02-07 will not stored fully as struct Values, so just overflow it as 0
- if (unlikely(year == DATE_LUT_MAX_YEAR && (month > 2 || (month == 2 && day_of_month > 7))))
- return DayNum(0);
-
- return DayNum(years_months_lut[(year - DATE_LUT_MIN_YEAR) * 12 + month - 1] + day_of_month - 1);
+ return toDayNum(makeLUTIndex(year, month, day_of_month));
}
- inline time_t makeDate(UInt16 year, UInt8 month, UInt8 day_of_month) const
+ inline time_t makeDate(Int16 year, UInt8 month, UInt8 day_of_month) const
{
- return lut[makeDayNum(year, month, day_of_month)].date;
+ return lut[makeLUTIndex(year, month, day_of_month)].date;
}
/** Does not accept daylight saving time as argument: in case of ambiguity, it choose greater timestamp.
*/
- inline time_t makeDateTime(UInt16 year, UInt8 month, UInt8 day_of_month, UInt8 hour, UInt8 minute, UInt8 second) const
+ inline time_t makeDateTime(Int16 year, UInt8 month, UInt8 day_of_month, UInt8 hour, UInt8 minute, UInt8 second) const
{
- size_t index = makeDayNum(year, month, day_of_month);
+ size_t index = makeLUTIndex(year, month, day_of_month);
UInt32 time_offset = hour * 3600 + minute * 60 + second;
- if (time_offset >= lut[index].time_at_offset_change)
- time_offset -= lut[index].amount_of_offset_change;
+ if (time_offset >= lut[index].time_at_offset_change())
+ time_offset -= lut[index].amount_of_offset_change();
- UInt32 res = lut[index].date + time_offset;
-
- if (unlikely(res > DATE_LUT_MAX))
- return 0;
-
- return res;
+ return lut[index].date + time_offset;
}
- inline const Values & getValues(DayNum d) const { return lut[d]; }
- inline const Values & getValues(time_t t) const { return lut[findIndex(t)]; }
+ template <typename DateOrTime>
+ inline const Values & getValues(DateOrTime v) const { return lut[toLUTIndex(v)]; }
- inline UInt32 toNumYYYYMM(time_t t) const
+ template <typename DateOrTime>
+ inline UInt32 toNumYYYYMM(DateOrTime v) const
{
- const Values & values = find(t);
+ const Values & values = getValues(v);
return values.year * 100 + values.month;
}
- inline UInt32 toNumYYYYMM(DayNum d) const
+ template <typename DateOrTime>
+ inline UInt32 toNumYYYYMMDD(DateOrTime v) const
{
- const Values & values = lut[d];
- return values.year * 100 + values.month;
- }
-
- inline UInt32 toNumYYYYMMDD(time_t t) const
- {
- const Values & values = find(t);
- return values.year * 10000 + values.month * 100 + values.day_of_month;
- }
-
- inline UInt32 toNumYYYYMMDD(DayNum d) const
- {
- const Values & values = lut[d];
+ const Values & values = getValues(v);
return values.year * 10000 + values.month * 100 + values.day_of_month;
}
@@ -762,22 +974,85 @@ public:
return makeDate(num / 10000, num / 100 % 100, num % 100);
}
- inline DayNum YYYYMMDDToDayNum(UInt32 num) const
+ inline ExtendedDayNum YYYYMMDDToDayNum(UInt32 num) const
{
return makeDayNum(num / 10000, num / 100 % 100, num % 100);
}
+ struct DateComponents
+ {
+ uint16_t year;
+ uint8_t month;
+ uint8_t day;
+ };
+
+ struct TimeComponents
+ {
+ uint8_t hour;
+ uint8_t minute;
+ uint8_t second;
+ };
+
+ struct DateTimeComponents
+ {
+ DateComponents date;
+ TimeComponents time;
+ };
+
+ inline DateComponents toDateComponents(time_t t) const
+ {
+ const Values & values = getValues(t);
+ return { values.year, values.month, values.day_of_month };
+ }
+
+ inline DateTimeComponents toDateTimeComponents(time_t t) const
+ {
+ const LUTIndex index = findIndex(t);
+ const Values & values = lut[index];
+
+ DateTimeComponents res;
+
+ res.date.year = values.year;
+ res.date.month = values.month;
+ res.date.day = values.day_of_month;
+
+ time_t time = t - values.date;
+ if (time >= values.time_at_offset_change())
+ time += values.amount_of_offset_change();
+
+ if (unlikely(time < 0))
+ {
+ res.time.second = 0;
+ res.time.minute = 0;
+ res.time.hour = 0;
+ }
+ else
+ {
+ res.time.second = time % 60;
+ res.time.minute = time / 60 % 60;
+ res.time.hour = time / 3600;
+ }
+
+ /// In case time was changed backwards at the start of the next day, we repeat the hour 23.
+ if (unlikely(res.time.hour > 23))
+ res.time.hour = 23;
+
+ return res;
+ }
+
+
inline UInt64 toNumYYYYMMDDhhmmss(time_t t) const
{
- const Values & values = find(t);
+ DateTimeComponents components = toDateTimeComponents(t);
+
return
- toSecond(t)
- + toMinute(t) * 100
- + toHour(t) * 10000
- + UInt64(values.day_of_month) * 1000000
- + UInt64(values.month) * 100000000
- + UInt64(values.year) * 10000000000;
+ components.time.second
+ + components.time.minute * 100
+ + components.time.hour * 10000
+ + UInt64(components.date.day) * 1000000
+ + UInt64(components.date.month) * 100000000
+ + UInt64(components.date.year) * 10000000000;
}
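
The return value packs the components into a decimal number, two digits per field; a self-contained check of the arithmetic:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        /// Packing 2021-04-12 10:30:05 the same way as toNumYYYYMMDDhhmmss():
        uint64_t packed = 5
            + 30 * 100
            + 10 * 10000
            + uint64_t(12) * 1000000
            + uint64_t(4) * 100000000
            + uint64_t(2021) * 10000000000ULL;
        assert(packed == 20210412103005ULL);
    }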
inline time_t YYYYMMDDhhmmssToTime(UInt64 num) const
@@ -796,15 +1071,19 @@ public:
inline NO_SANITIZE_UNDEFINED time_t addDays(time_t t, Int64 delta) const
{
- DayNum index = findIndex(t);
- time_t time_offset = toHour(t) * 3600 + toMinute(t) * 60 + toSecond(t);
+ const LUTIndex index = findIndex(t);
+ const Values & values = lut[index];
- index += delta;
+ time_t time = t - values.date;
+ if (time >= values.time_at_offset_change())
+ time += values.amount_of_offset_change();
- if (time_offset >= lut[index].time_at_offset_change)
- time_offset -= lut[index].amount_of_offset_change;
+ const LUTIndex new_index = index + delta;
- return lut[index].date + time_offset;
+ if (time >= lut[new_index].time_at_offset_change())
+ time -= lut[new_index].amount_of_offset_change();
+
+ return lut[new_index].date + time;
}
inline NO_SANITIZE_UNDEFINED time_t addWeeks(time_t t, Int64 delta) const
@@ -812,7 +1091,7 @@ public:
return addDays(t, delta * 7);
}
- inline UInt8 saturateDayOfMonth(UInt16 year, UInt8 month, UInt8 day_of_month) const
+ inline UInt8 saturateDayOfMonth(Int16 year, UInt8 month, UInt8 day_of_month) const
{
if (likely(day_of_month <= 28))
return day_of_month;
@@ -825,25 +1104,12 @@ public:
return day_of_month;
}
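
Hypothetical usage of saturateDayOfMonth() via the add* functions below, assuming a DateLUTImpl instance `lut` for some zone:

    /// lut.addMonths(lut.makeDate(2021, 8, 31), 1)  -> 2021-09-30 (September has 30 days)
    /// lut.addYears(lut.makeDate(2020, 2, 29), 1)   -> 2021-02-28 (non-leap year)
    /// The day-of-month is clamped, never wrapped into the following month.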
- /// If resulting month has less deys than source month, then saturation can happen.
- /// Example: 31 Aug + 1 month = 30 Sep.
- inline time_t addMonths(time_t t, Int64 delta) const
+ template <typename DateOrTime>
+ inline LUTIndex NO_SANITIZE_UNDEFINED addMonthsIndex(DateOrTime v, Int64 delta) const
{
- DayNum result_day = addMonths(toDayNum(t), delta);
+ const Values & values = lut[toLUTIndex(v)];
- time_t time_offset = toHour(t) * 3600 + toMinute(t) * 60 + toSecond(t);
-
- if (time_offset >= lut[result_day].time_at_offset_change)
- time_offset -= lut[result_day].amount_of_offset_change;
-
- return lut[result_day].date + time_offset;
- }
-
- inline NO_SANITIZE_UNDEFINED DayNum addMonths(DayNum d, Int64 delta) const
- {
- const Values & values = lut[d];
-
- Int64 month = static_cast(values.month) + delta;
+ Int64 month = values.month + delta;
if (month > 0)
{
@@ -851,7 +1117,7 @@ public:
month = ((month - 1) % 12) + 1;
auto day_of_month = saturateDayOfMonth(year, month, values.day_of_month);
- return makeDayNum(year, month, day_of_month);
+ return makeLUTIndex(year, month, day_of_month);
}
else
{
@@ -859,36 +1125,48 @@ public:
month = 12 - (-month % 12);
auto day_of_month = saturateDayOfMonth(year, month, values.day_of_month);
- return makeDayNum(year, month, day_of_month);
+ return makeLUTIndex(year, month, day_of_month);
}
}
- inline NO_SANITIZE_UNDEFINED time_t addQuarters(time_t t, Int64 delta) const
+ /// If the resulting month has fewer days than the source month, then saturation can happen.
+ /// Example: 31 Aug + 1 month = 30 Sep.
+ inline time_t NO_SANITIZE_UNDEFINED addMonths(time_t t, Int64 delta) const
+ {
+ const auto result_day = addMonthsIndex(t, delta);
+
+ const LUTIndex index = findIndex(t);
+ const Values & values = lut[index];
+
+ time_t time = t - values.date;
+ if (time >= values.time_at_offset_change())
+ time += values.amount_of_offset_change();
+
+ if (time >= lut[result_day].time_at_offset_change())
+ time -= lut[result_day].amount_of_offset_change();
+
+ return lut[result_day].date + time;
+ }
+
+ inline ExtendedDayNum NO_SANITIZE_UNDEFINED addMonths(ExtendedDayNum d, Int64 delta) const
+ {
+ return toDayNum(addMonthsIndex(d, delta));
+ }
+
+ inline time_t NO_SANITIZE_UNDEFINED addQuarters(time_t t, Int64 delta) const
{
return addMonths(t, delta * 3);
}
- inline NO_SANITIZE_UNDEFINED DayNum addQuarters(DayNum d, Int64 delta) const
+ inline ExtendedDayNum addQuarters(ExtendedDayNum d, Int64 delta) const
{
return addMonths(d, delta * 3);
}
- /// Saturation can occur if 29 Feb is mapped to non-leap year.
- inline NO_SANITIZE_UNDEFINED time_t addYears(time_t t, Int64 delta) const
+ template <typename DateOrTime>
+ inline LUTIndex NO_SANITIZE_UNDEFINED addYearsIndex(DateOrTime v, Int64 delta) const
{
- DayNum result_day = addYears(toDayNum(t), delta);
-
- time_t time_offset = toHour(t) * 3600 + toMinute(t) * 60 + toSecond(t);
-
- if (time_offset >= lut[result_day].time_at_offset_change)
- time_offset -= lut[result_day].amount_of_offset_change;
-
- return lut[result_day].date + time_offset;
- }
-
- inline NO_SANITIZE_UNDEFINED DayNum addYears(DayNum d, Int64 delta) const
- {
- const Values & values = lut[d];
+ const Values & values = lut[toLUTIndex(v)];
auto year = values.year + delta;
auto month = values.month;
@@ -898,42 +1176,61 @@ public:
if (unlikely(day_of_month == 29 && month == 2))
day_of_month = saturateDayOfMonth(year, month, day_of_month);
- return makeDayNum(year, month, day_of_month);
+ return makeLUTIndex(year, month, day_of_month);
+ }
+
+ /// Saturation can occur if 29 Feb is mapped to non-leap year.
+ inline time_t addYears(time_t t, Int64 delta) const
+ {
+ auto result_day = addYearsIndex(t, delta);
+
+ const LUTIndex index = findIndex(t);
+ const Values & values = lut[index];
+
+ time_t time = t - values.date;
+ if (time >= values.time_at_offset_change())
+ time += values.amount_of_offset_change();
+
+ if (time >= lut[result_day].time_at_offset_change())
+ time -= lut[result_day].amount_of_offset_change();
+
+ return lut[result_day].date + time;
+ }
+
+ inline ExtendedDayNum addYears(ExtendedDayNum d, Int64 delta) const
+ {
+ return toDayNum(addYearsIndex(d, delta));
}
inline std::string timeToString(time_t t) const
{
- const Values & values = find(t);
+ DateTimeComponents components = toDateTimeComponents(t);
std::string s {"0000-00-00 00:00:00"};
- s[0] += values.year / 1000;
- s[1] += (values.year / 100) % 10;
- s[2] += (values.year / 10) % 10;
- s[3] += values.year % 10;
- s[5] += values.month / 10;
- s[6] += values.month % 10;
- s[8] += values.day_of_month / 10;
- s[9] += values.day_of_month % 10;
+ s[0] += components.date.year / 1000;
+ s[1] += (components.date.year / 100) % 10;
+ s[2] += (components.date.year / 10) % 10;
+ s[3] += components.date.year % 10;
+ s[5] += components.date.month / 10;
+ s[6] += components.date.month % 10;
+ s[8] += components.date.day / 10;
+ s[9] += components.date.day % 10;
- auto hour = toHour(t);
- auto minute = toMinute(t);
- auto second = toSecond(t);
-
- s[11] += hour / 10;
- s[12] += hour % 10;
- s[14] += minute / 10;
- s[15] += minute % 10;
- s[17] += second / 10;
- s[18] += second % 10;
+ s[11] += components.time.hour / 10;
+ s[12] += components.time.hour % 10;
+ s[14] += components.time.minute / 10;
+ s[15] += components.time.minute % 10;
+ s[17] += components.time.second / 10;
+ s[18] += components.time.second % 10;
return s;
}
inline std::string dateToString(time_t t) const
{
- const Values & values = find(t);
+ const Values & values = getValues(t);
std::string s {"0000-00-00"};
@@ -949,9 +1246,9 @@ public:
return s;
}
- inline std::string dateToString(DayNum d) const
+ inline std::string dateToString(ExtendedDayNum d) const
{
- const Values & values = lut[d];
+ const Values & values = getValues(d);
std::string s {"0000-00-00"};
@@ -969,7 +1266,7 @@ public:
};
#if defined(__PPC__)
-#if !__clang__
+#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif
#endif
diff --git a/base/common/DayNum.h b/base/common/DayNum.h
index a4ef0c43b69..5cf4d4635c8 100644
--- a/base/common/DayNum.h
+++ b/base/common/DayNum.h
@@ -7,3 +7,8 @@
* See DateLUTImpl for usage examples.
*/
STRONG_TYPEDEF(UInt16, DayNum)
+
+/** Represents the number of days since 1970-01-01, but in an extended range,
+ * covering dates before 1970-01-01 and after 2105.
+ */
+STRONG_TYPEDEF(Int32, ExtendedDayNum)
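
The strong typedef matters because DayNum (UInt16) and ExtendedDayNum (Int32) must not mix silently; a simplified sketch of the idea (the real STRONG_TYPEDEF macro in base/common/strong_typedef.h also generates assignment, comparisons and toUnderType(), shown elsewhere in this diff):

    #include <cstdint>

    /// Simplified: an explicit wrapper blocks accidental construction from a bare
    /// integer or from the other day-number type.
    struct ExtendedDayNumSketch
    {
        using UnderlyingType = int32_t;
        explicit ExtendedDayNumSketch(int32_t v) : t(v) {}
        int32_t toUnderType() const { return t; }
    private:
        int32_t t;
    };

    int main()
    {
        ExtendedDayNumSketch d{-365};  /// a day in 1969, impossible in 16-bit DayNum
        return d.toUnderType() == -365 ? 0 : 1;
    }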
diff --git a/base/common/LocalDate.h b/base/common/LocalDate.h
index e5ebe877bc5..b1e6eeb907c 100644
--- a/base/common/LocalDate.h
+++ b/base/common/LocalDate.h
@@ -92,20 +92,10 @@ public:
LocalDate(const LocalDate &) noexcept = default;
LocalDate & operator= (const LocalDate &) noexcept = default;
- LocalDate & operator= (time_t time)
- {
- init(time);
- return *this;
- }
-
- operator time_t() const
- {
- return DateLUT::instance().makeDate(m_year, m_month, m_day);
- }
-
DayNum getDayNum() const
{
- return DateLUT::instance().makeDayNum(m_year, m_month, m_day);
+ const auto & lut = DateLUT::instance();
+ return DayNum(lut.makeDayNum(m_year, m_month, m_day).toUnderType());
}
operator DayNum() const
@@ -166,12 +156,3 @@ public:
};
static_assert(sizeof(LocalDate) == 4);
-
-
-namespace std
-{
-inline string to_string(const LocalDate & date)
-{
- return date.toString();
-}
-}
diff --git a/base/common/LocalDateTime.h b/base/common/LocalDateTime.h
index 0e237789bd1..dde283e5ebb 100644
--- a/base/common/LocalDateTime.h
+++ b/base/common/LocalDateTime.h
@@ -29,29 +29,16 @@ private:
/// NOTE We may use attribute packed instead, but it is less portable.
unsigned char pad = 0;
- void init(time_t time)
+ void init(time_t time, const DateLUTImpl & time_zone)
{
- if (unlikely(time > DATE_LUT_MAX || time == 0))
- {
- m_year = 0;
- m_month = 0;
- m_day = 0;
- m_hour = 0;
- m_minute = 0;
- m_second = 0;
+ DateLUTImpl::DateTimeComponents components = time_zone.toDateTimeComponents(time);
- return;
- }
-
- const auto & date_lut = DateLUT::instance();
- const auto & values = date_lut.getValues(time);
-
- m_year = values.year;
- m_month = values.month;
- m_day = values.day_of_month;
- m_hour = date_lut.toHour(time);
- m_minute = date_lut.toMinute(time);
- m_second = date_lut.toSecond(time);
+ m_year = components.date.year;
+ m_month = components.date.month;
+ m_day = components.date.day;
+ m_hour = components.time.hour;
+ m_minute = components.time.minute;
+ m_second = components.time.second;
(void)pad; /// Suppress unused private field warning.
}
@@ -73,9 +60,9 @@ private:
}
public:
- explicit LocalDateTime(time_t time)
+ explicit LocalDateTime(time_t time, const DateLUTImpl & time_zone = DateLUT::instance())
{
- init(time);
+ init(time, time_zone);
}
LocalDateTime(unsigned short year_, unsigned char month_, unsigned char day_,
@@ -104,19 +91,6 @@ public:
LocalDateTime(const LocalDateTime &) noexcept = default;
LocalDateTime & operator= (const LocalDateTime &) noexcept = default;
- LocalDateTime & operator= (time_t time)
- {
- init(time);
- return *this;
- }
-
- operator time_t() const
- {
- return m_year == 0
- ? 0
- : DateLUT::instance().makeDateTime(m_year, m_month, m_day, m_hour, m_minute, m_second);
- }
-
unsigned short year() const { return m_year; }
unsigned char month() const { return m_month; }
unsigned char day() const { return m_day; }
@@ -132,8 +106,30 @@ public:
void second(unsigned char x) { m_second = x; }
LocalDate toDate() const { return LocalDate(m_year, m_month, m_day); }
+ LocalDateTime toStartOfDate() const { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); }
- LocalDateTime toStartOfDate() { return LocalDateTime(m_year, m_month, m_day, 0, 0, 0); }
+ std::string toString() const
+ {
+ std::string s{"0000-00-00 00:00:00"};
+
+ s[0] += m_year / 1000;
+ s[1] += (m_year / 100) % 10;
+ s[2] += (m_year / 10) % 10;
+ s[3] += m_year % 10;
+ s[5] += m_month / 10;
+ s[6] += m_month % 10;
+ s[8] += m_day / 10;
+ s[9] += m_day % 10;
+
+ s[11] += m_hour / 10;
+ s[12] += m_hour % 10;
+ s[14] += m_minute / 10;
+ s[15] += m_minute % 10;
+ s[17] += m_second / 10;
+ s[18] += m_second % 10;
+
+ return s;
+ }
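
toString() above relies on patching a preformatted template string digit by digit; the same trick in isolation:

    #include <cassert>
    #include <string>

    int main()
    {
        /// Start from a "0000" template; adding a digit to the character '0'
        /// yields the corresponding digit character.
        unsigned short year = 2021;
        std::string s{"0000"};
        s[0] += year / 1000;
        s[1] += (year / 100) % 10;
        s[2] += (year / 10) % 10;
        s[3] += year % 10;
        assert(s == "2021");
    }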
bool operator< (const LocalDateTime & other) const
{
@@ -167,14 +163,3 @@ public:
};
static_assert(sizeof(LocalDateTime) == 8);
-
-
-namespace std
-{
-inline string to_string(const LocalDateTime & datetime)
-{
- stringstream str;
- str << datetime;
- return str.str();
-}
-}
diff --git a/base/common/MoveOrCopyIfThrow.h b/base/common/MoveOrCopyIfThrow.h
new file mode 100644
index 00000000000..caa1b51e2bc
--- /dev/null
+++ b/base/common/MoveOrCopyIfThrow.h
@@ -0,0 +1,33 @@
+#pragma once
+
+#include <type_traits>
+
+namespace detail
+{
+ template <typename T, bool is_nothrow_move_assignable = std::is_nothrow_move_assignable_v<T>>
+ struct MoveOrCopyIfThrow;
+
+ template <typename T>
+ struct MoveOrCopyIfThrow<T, true>
+ {
+ void operator()(T && src, T & dst) const
+ {
+ dst = std::forward<T>(src);
+ }
+ };
+
+ template <typename T>
+ struct MoveOrCopyIfThrow<T, false>
+ {
+ void operator()(T && src, T & dst) const
+ {
+ dst = src;
+ }
+ };
+
+ template <typename T>
+ void moveOrCopyIfThrow(T && src, T & dst)
+ {
+ MoveOrCopyIfThrow<T>()(std::forward<T>(src), dst);
+ }
+}
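
A hypothetical usage sketch of the new helper: it moves when the move assignment cannot throw and copies otherwise, so the source stays intact if the assignment throws mid-transfer. The `drain` function is invented for illustration.

    #include <string>
    #include <vector>
    #include <common/MoveOrCopyIfThrow.h>  /// the header added above

    void drain(std::vector<std::string> & from, std::vector<std::string> & to)
    {
        for (auto & item : from)
        {
            std::string dst;
            /// std::string's move assignment is noexcept, so this moves;
            /// a type with a throwing move would be copied instead.
            detail::moveOrCopyIfThrow(std::move(item), dst);
            to.push_back(std::move(dst));
        }
        from.clear();
    }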
diff --git a/base/common/arithmeticOverflow.h b/base/common/arithmeticOverflow.h
index a92fe56b9cb..c170d214636 100644
--- a/base/common/arithmeticOverflow.h
+++ b/base/common/arithmeticOverflow.h
@@ -25,6 +25,12 @@ namespace common
return x - y;
}
+ template <typename T>
+ inline auto NO_SANITIZE_UNDEFINED negateIgnoreOverflow(T x)
+ {
+ return -x;
+ }
+
template <typename T>
inline bool addOverflow(T x, T y, T & res)
{
diff --git a/base/common/defines.h b/base/common/defines.h
index 845a53179ef..ada8245f494 100644
--- a/base/common/defines.h
+++ b/base/common/defines.h
@@ -1,5 +1,20 @@
#pragma once
+/// __has_feature supported only by clang.
+///
+/// But libcxx/libcxxabi overrides it to 0,
+/// thus the checks for __has_feature will be wrong.
+///
+/// NOTE:
+/// - __has_feature cannot be simply undefined,
+/// since this will be broken if some C++ header will be included after
+/// including <common/defines.h>
+/// - it should not have fallback to 0,
+/// since this may create false-positive detection (common problem)
+#if defined(__clang__) && defined(__has_feature)
+# define ch_has_feature __has_feature
+#endif
+
#if defined(_MSC_VER)
# if !defined(likely)
# define likely(x) (x)
@@ -32,8 +47,8 @@
/// Check for presence of address sanitizer
#if !defined(ADDRESS_SANITIZER)
-# if defined(__has_feature)
-# if __has_feature(address_sanitizer)
+# if defined(ch_has_feature)
+# if ch_has_feature(address_sanitizer)
# define ADDRESS_SANITIZER 1
# endif
# elif defined(__SANITIZE_ADDRESS__)
@@ -42,8 +57,8 @@
#endif
#if !defined(THREAD_SANITIZER)
-# if defined(__has_feature)
-# if __has_feature(thread_sanitizer)
+# if defined(ch_has_feature)
+# if ch_has_feature(thread_sanitizer)
# define THREAD_SANITIZER 1
# endif
# elif defined(__SANITIZE_THREAD__)
@@ -52,8 +67,8 @@
#endif
#if !defined(MEMORY_SANITIZER)
-# if defined(__has_feature)
-# if __has_feature(memory_sanitizer)
+# if defined(ch_has_feature)
+# if ch_has_feature(memory_sanitizer)
# define MEMORY_SANITIZER 1
# endif
# elif defined(__MEMORY_SANITIZER__)
@@ -61,6 +76,16 @@
# endif
#endif
+#if !defined(UNDEFINED_BEHAVIOR_SANITIZER)
+# if defined(__has_feature)
+# if __has_feature(undefined_behavior_sanitizer)
+# define UNDEFINED_BEHAVIOR_SANITIZER 1
+# endif
+# elif defined(__UNDEFINED_BEHAVIOR_SANITIZER__)
+# define UNDEFINED_BEHAVIOR_SANITIZER 1
+# endif
+#endif
+
#if defined(ADDRESS_SANITIZER)
# define BOOST_USE_ASAN 1
# define BOOST_USE_UCONTEXT 1
diff --git a/base/common/phdr_cache.cpp b/base/common/phdr_cache.cpp
index 4f6a066adab..49d566dac19 100644
--- a/base/common/phdr_cache.cpp
+++ b/base/common/phdr_cache.cpp
@@ -15,11 +15,11 @@
#endif
#define __msan_unpoison(X, Y) // NOLINT
-#if defined(__has_feature)
-# if __has_feature(memory_sanitizer)
-# undef __msan_unpoison
-# include <sanitizer/msan_interface.h>
-# endif
+#if defined(ch_has_feature)
+# if ch_has_feature(memory_sanitizer)
+# undef __msan_unpoison
+# include <sanitizer/msan_interface.h>
+# endif
#endif
#include <link.h>
diff --git a/base/common/setTerminalEcho.cpp b/base/common/setTerminalEcho.cpp
index 658f27705ba..66db216a235 100644
--- a/base/common/setTerminalEcho.cpp
+++ b/base/common/setTerminalEcho.cpp
@@ -1,45 +1,28 @@
-// https://stackoverflow.com/questions/1413445/reading-a-password-from-stdcin
-
#include <common/setTerminalEcho.h>
#include <common/errnoToString.h>
#include <stdexcept>
#include <cstring>
#include <string>
-
-#ifdef WIN32
-#include <windows.h>
-#else
#include <termios.h>
#include <unistd.h>
-#include <errno.h>
-#endif
+
void setTerminalEcho(bool enable)
{
-#ifdef WIN32
- auto handle = GetStdHandle(STD_INPUT_HANDLE);
- DWORD mode;
- if (!GetConsoleMode(handle, &mode))
- throw std::runtime_error(std::string("setTerminalEcho failed get: ") + std::to_string(GetLastError()));
+ /// Obtain terminal attributes,
+ /// toggle the ECHO flag
+ /// and set them back.
- if (!enable)
- mode &= ~ENABLE_ECHO_INPUT;
- else
- mode |= ENABLE_ECHO_INPUT;
+ struct termios tty{};
- if (!SetConsoleMode(handle, mode))
- throw std::runtime_error(std::string("setTerminalEcho failed set: ") + std::to_string(GetLastError()));
-#else
- struct termios tty;
- if (tcgetattr(STDIN_FILENO, &tty))
+ if (0 != tcgetattr(STDIN_FILENO, &tty))
throw std::runtime_error(std::string("setTerminalEcho failed get: ") + errnoToString(errno));
- if (!enable)
- tty.c_lflag &= ~ECHO;
- else
- tty.c_lflag |= ECHO;
- auto ret = tcsetattr(STDIN_FILENO, TCSANOW, &tty);
- if (ret)
+ if (enable)
+ tty.c_lflag |= ECHO;
+ else
+ tty.c_lflag &= ~ECHO;
+
+ if (0 != tcsetattr(STDIN_FILENO, TCSANOW, &tty))
throw std::runtime_error(std::string("setTerminalEcho failed set: ") + errnoToString(errno));
-#endif
}
diff --git a/base/common/strong_typedef.h b/base/common/strong_typedef.h
index d9850a25c37..77b83bfa6e5 100644
--- a/base/common/strong_typedef.h
+++ b/base/common/strong_typedef.h
@@ -12,6 +12,7 @@ private:
T t;
public:
+ using UnderlyingType = T;
template <class Enable = typename std::is_copy_constructible<T>::type>
explicit StrongTypedef(const T & t_) : t(t_) {}
template <class Enable = typename std::is_move_constructible<T>::type>
diff --git a/base/common/tests/CMakeLists.txt b/base/common/tests/CMakeLists.txt
index b7082ee9900..2a07a94055f 100644
--- a/base/common/tests/CMakeLists.txt
+++ b/base/common/tests/CMakeLists.txt
@@ -1,25 +1,2 @@
-include (${ClickHouse_SOURCE_DIR}/cmake/add_check.cmake)
-
-add_executable (date_lut2 date_lut2.cpp)
-add_executable (date_lut3 date_lut3.cpp)
-add_executable (date_lut_default_timezone date_lut_default_timezone.cpp)
-add_executable (local_date_time_comparison local_date_time_comparison.cpp)
-add_executable (realloc-perf allocator.cpp)
-
-set(PLATFORM_LIBS ${CMAKE_DL_LIBS})
-
-target_link_libraries (date_lut2 PRIVATE common ${PLATFORM_LIBS})
-target_link_libraries (date_lut3 PRIVATE common ${PLATFORM_LIBS})
-target_link_libraries (date_lut_default_timezone PRIVATE common ${PLATFORM_LIBS})
-target_link_libraries (local_date_time_comparison PRIVATE common)
-target_link_libraries (realloc-perf PRIVATE common)
-add_check(local_date_time_comparison)
-
-if(USE_GTEST)
- add_executable(unit_tests_libcommon gtest_json_test.cpp gtest_strong_typedef.cpp gtest_find_symbols.cpp)
- target_link_libraries(unit_tests_libcommon PRIVATE common ${GTEST_MAIN_LIBRARIES} ${GTEST_LIBRARIES})
- add_check(unit_tests_libcommon)
-endif()
-
add_executable (dump_variable dump_variable.cpp)
target_link_libraries (dump_variable PRIVATE clickhouse_common_io)
diff --git a/base/common/tests/allocator.cpp b/base/common/tests/allocator.cpp
deleted file mode 100644
index 03f6228e0f5..00000000000
--- a/base/common/tests/allocator.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-#include <thread>
-#include <vector>
-#include <cstdlib>
-#include <cstring>
-
-
-void thread_func()
-{
- for (size_t i = 0; i < 100; ++i)
- {
- size_t size = 4096;
-
- void * buf = malloc(size);
- if (!buf)
- abort();
- memset(buf, 0, size);
-
- while (size < 1048576)
- {
- size_t next_size = size * 4;
-
- void * new_buf = realloc(buf, next_size);
- if (!new_buf)
- abort();
- buf = new_buf;
-
- memset(reinterpret_cast<char *>(buf) + size, 0, next_size - size);
- size = next_size;
- }
-
- free(buf);
- }
-}
-
-
-int main(int, char **)
-{
- std::vector<std::thread> threads(16);
- for (size_t i = 0; i < 1000; ++i)
- {
- for (auto & thread : threads)
- thread = std::thread(thread_func);
- for (auto & thread : threads)
- thread.join();
- }
- return 0;
-}
diff --git a/base/common/tests/date_lut2.cpp b/base/common/tests/date_lut2.cpp
deleted file mode 100644
index 6dcf5e8adf2..00000000000
--- a/base/common/tests/date_lut2.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-#include <iostream>
-#include <cstring>
-
-#include <common/DateLUT.h>
-
-
-static std::string toString(time_t Value)
-{
- struct tm tm;
- char buf[96];
-
- localtime_r(&Value, &tm);
- snprintf(buf, sizeof(buf), "%04d-%02d-%02d %02d:%02d:%02d",
- tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
-
- return buf;
-}
-
-static time_t orderedIdentifierToDate(unsigned value)
-{
- struct tm tm;
-
- memset(&tm, 0, sizeof(tm));
-
- tm.tm_year = value / 10000 - 1900;
- tm.tm_mon = (value % 10000) / 100 - 1;
- tm.tm_mday = value % 100;
- tm.tm_isdst = -1;
-
- return mktime(&tm);
-}
-
-
-void loop(time_t begin, time_t end, int step)
-{
- const auto & date_lut = DateLUT::instance();
-
- for (time_t t = begin; t < end; t += step)
- std::cout << toString(t)
- << ", " << toString(date_lut.toTime(t))
- << ", " << date_lut.toHour(t)
- << std::endl;
-}
-
-
-int main(int, char **)
-{
- loop(orderedIdentifierToDate(20101031), orderedIdentifierToDate(20101101), 15 * 60);
- loop(orderedIdentifierToDate(20100328), orderedIdentifierToDate(20100330), 15 * 60);
- loop(orderedIdentifierToDate(20141020), orderedIdentifierToDate(20141106), 15 * 60);
-
- return 0;
-}
diff --git a/base/common/tests/date_lut3.cpp b/base/common/tests/date_lut3.cpp
deleted file mode 100644
index 411765d2b2a..00000000000
--- a/base/common/tests/date_lut3.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-#include <iostream>
-#include <cstring>
-
-#include <common/DateLUT.h>
-
-#include <Poco/Exception.h>
-
-
-static std::string toString(time_t Value)
-{
- struct tm tm;
- char buf[96];
-
- localtime_r(&Value, &tm);
- snprintf(buf, sizeof(buf), "%04d-%02d-%02d %02d:%02d:%02d",
- tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
-
- return buf;
-}
-
-static time_t orderedIdentifierToDate(unsigned value)
-{
- struct tm tm;
-
- memset(&tm, 0, sizeof(tm));
-
- tm.tm_year = value / 10000 - 1900;
- tm.tm_mon = (value % 10000) / 100 - 1;
- tm.tm_mday = value % 100;
- tm.tm_isdst = -1;
-
- return mktime(&tm);
-}
-
-
-void loop(time_t begin, time_t end, int step)
-{
- const auto & date_lut = DateLUT::instance();
-
- for (time_t t = begin; t < end; t += step)
- {
- time_t t2 = date_lut.makeDateTime(date_lut.toYear(t), date_lut.toMonth(t), date_lut.toDayOfMonth(t),
- date_lut.toHour(t), date_lut.toMinute(t), date_lut.toSecond(t));
-
- std::string s1 = toString(t);
- std::string s2 = toString(t2);
-
- std::cerr << s1 << ", " << s2 << std::endl;
-
- if (s1 != s2)
- throw Poco::Exception("Test failed.");
- }
-}
-
-
-int main(int, char **)
-{
- loop(orderedIdentifierToDate(20101031), orderedIdentifierToDate(20101101), 15 * 60);
- loop(orderedIdentifierToDate(20100328), orderedIdentifierToDate(20100330), 15 * 60);
-
- return 0;
-}
diff --git a/base/common/tests/date_lut_default_timezone.cpp b/base/common/tests/date_lut_default_timezone.cpp
deleted file mode 100644
index b8e5aa08931..00000000000
--- a/base/common/tests/date_lut_default_timezone.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-#include <iostream>
-#include <common/DateLUT.h>
-#include <Poco/Exception.h>
-
-int main(int, char **)
-{
- try
- {
- const auto & date_lut = DateLUT::instance();
- std::cout << "Detected default timezone: `" << date_lut.getTimeZone() << "'" << std::endl;
- time_t now = time(nullptr);
- std::cout << "Current time: " << date_lut.timeToString(now)
- << ", UTC: " << DateLUT::instance("UTC").timeToString(now) << std::endl;
- }
- catch (const Poco::Exception & e)
- {
- std::cerr << e.displayText() << std::endl;
- return 1;
- }
- catch (std::exception & e)
- {
- std::cerr << "std::exception: " << e.what() << std::endl;
- return 2;
- }
- catch (...)
- {
- std::cerr << "Some exception" << std::endl;
- return 3;
- }
- return 0;
-}
diff --git a/base/common/tests/gtest_json_test.cpp b/base/common/tests/gtest_json_test.cpp
deleted file mode 100644
index 189a1a03d99..00000000000
--- a/base/common/tests/gtest_json_test.cpp
+++ /dev/null
@@ -1,656 +0,0 @@
-#include <vector>
-#include <string>
-#include <exception>
-#include <common/JSON.h>
-
-#include <boost/range/irange.hpp>
-
-using namespace std::literals::string_literals;
-
-#include <gtest/gtest.h>
-
-enum class ResultType
-{
- Return,
- Throw
-};
-
-struct GetStringTestRecord
-{
- const char * input;
- ResultType result_type;
- const char * result;
-};
-
-TEST(JSONSuite, SimpleTest)
-{
- std::vector<GetStringTestRecord> test_data =
- {
- { R"("name")", ResultType::Return, "name" },
- { R"("Вафельница Vitek WX-1102 FL")", ResultType::Return, "Вафельница Vitek WX-1102 FL" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("184509")", ResultType::Return, "184509" },
- { R"("category")", ResultType::Return, "category" },
- { R"("Все для детей/Детская техника/Vitek")", ResultType::Return, "Все для детей/Детская техника/Vitek" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("В наличии")", ResultType::Return, "В наличии" },
- { R"("price")", ResultType::Return, "price" },
- { R"("2390.00")", ResultType::Return, "2390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("Карточка")", ResultType::Return, "Карточка" },
- { R"("position")", ResultType::Return, "position" },
- { R"("detail")", ResultType::Return, "detail" },
- { R"("actionField")", ResultType::Return, "actionField" },
- { R"("list")", ResultType::Return, "list" },
- { R"("http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc")", ResultType::Return, "http://www.techport.ru/q/?t=вафельница&sort=price&sdim=asc" },
- { R"("action")", ResultType::Return, "action" },
- { R"("detail")", ResultType::Return, "detail" },
- { R"("products")", ResultType::Return, "products" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Вафельница Vitek WX-1102 FL")", ResultType::Return, "Вафельница Vitek WX-1102 FL" },
- { R"("id")", ResultType::Return, "id" },
- { R"("184509")", ResultType::Return, "184509" },
- { R"("price")", ResultType::Return, "price" },
- { R"("2390.00")", ResultType::Return, "2390.00" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("Vitek")", ResultType::Return, "Vitek" },
- { R"("category")", ResultType::Return, "category" },
- { R"("Все для детей/Детская техника/Vitek")", ResultType::Return, "Все для детей/Детская техника/Vitek" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("В наличии")", ResultType::Return, "В наличии" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("experiments")", ResultType::Return, "experiments" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("los_portal")", ResultType::Return, "los_portal" },
- { R"("los_level")", ResultType::Return, "los_level" },
- { R"("none")", ResultType::Return, "none" },
- { R"("isAuthorized")", ResultType::Return, "isAuthorized" },
- { R"("isSubscriber")", ResultType::Return, "isSubscriber" },
- { R"("postType")", ResultType::Return, "postType" },
- { R"("Новости")", ResultType::Return, "Новости" },
- { R"("experiments")", ResultType::Return, "experiments" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("los_portal")", ResultType::Return, "los_portal" },
- { R"("los_level")", ResultType::Return, "los_level" },
- { R"("none")", ResultType::Return, "none" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("Электроплита GEFEST Брест ЭПНД 5140-01 0001")", ResultType::Return, "Электроплита GEFEST Брест ЭПНД 5140-01 0001" },
- { R"("price")", ResultType::Return, "price" },
- { R"("currencyCode")", ResultType::Return, "currencyCode" },
- { R"("RUB")", ResultType::Return, "RUB" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("experiments")", ResultType::Return, "experiments" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("los_portal")", ResultType::Return, "los_portal" },
- { R"("los_level")", ResultType::Return, "los_level" },
- { R"("none")", ResultType::Return, "none" },
- { R"("trash_login")", ResultType::Return, "trash_login" },
- { R"("novikoff")", ResultType::Return, "novikoff" },
- { R"("trash_cat_link")", ResultType::Return, "trash_cat_link" },
- { R"("progs")", ResultType::Return, "progs" },
- { R"("trash_parent_link")", ResultType::Return, "trash_parent_link" },
- { R"("content")", ResultType::Return, "content" },
- { R"("trash_posted_parent")", ResultType::Return, "trash_posted_parent" },
- { R"("content.01.2016")", ResultType::Return, "content.01.2016" },
- { R"("trash_posted_cat")", ResultType::Return, "trash_posted_cat" },
- { R"("progs.01.2016")", ResultType::Return, "progs.01.2016" },
- { R"("trash_virus_count")", ResultType::Return, "trash_virus_count" },
- { R"("trash_is_android")", ResultType::Return, "trash_is_android" },
- { R"("trash_is_wp8")", ResultType::Return, "trash_is_wp8" },
- { R"("trash_is_ios")", ResultType::Return, "trash_is_ios" },
- { R"("trash_posted")", ResultType::Return, "trash_posted" },
- { R"("01.2016")", ResultType::Return, "01.2016" },
- { R"("experiments")", ResultType::Return, "experiments" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("los_portal")", ResultType::Return, "los_portal" },
- { R"("los_level")", ResultType::Return, "los_level" },
- { R"("none")", ResultType::Return, "none" },
- { R"("merchantId")", ResultType::Return, "merchantId" },
- { R"("13694_49246")", ResultType::Return, "13694_49246" },
- { R"("cps-source")", ResultType::Return, "cps-source" },
- { R"("wargaming")", ResultType::Return, "wargaming" },
- { R"("cps_provider")", ResultType::Return, "cps_provider" },
- { R"("default")", ResultType::Return, "default" },
- { R"("errorReason")", ResultType::Return, "errorReason" },
- { R"("no errors")", ResultType::Return, "no errors" },
- { R"("scid")", ResultType::Return, "scid" },
- { R"("isAuthPayment")", ResultType::Return, "isAuthPayment" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("rubric")", ResultType::Return, "rubric" },
- { R"("")", ResultType::Return, "" },
- { R"("rubric")", ResultType::Return, "rubric" },
- { R"("Мир")", ResultType::Return, "Мир" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("experiments")", ResultType::Return, "experiments" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("los_portal")", ResultType::Return, "los_portal" },
- { R"("los_level")", ResultType::Return, "los_level" },
- { R"("none")", ResultType::Return, "none" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("__ym")", ResultType::Return, "__ym" },
- { R"("ecommerce")", ResultType::Return, "ecommerce" },
- { R"("impressions")", ResultType::Return, "impressions" },
- { R"("id")", ResultType::Return, "id" },
- { R"("863813")", ResultType::Return, "863813" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Happy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Happy, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("863839")", ResultType::Return, "863839" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Pretty kitten, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("863847")", ResultType::Return, "863847" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Little tiger, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("911480")", ResultType::Return, "911480" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Puppy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Puppy, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("911484")", ResultType::Return, "911484" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Little bears, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Little bears, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("911489")", ResultType::Return, "911489" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж")", ResultType::Return, "Футболка детская 3D Dolphin, возраст 2-4 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("911496")", ResultType::Return, "911496" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Pretty, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Pretty, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("911504")", ResultType::Return, "911504" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Fairytale, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("911508")", ResultType::Return, "911508" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Kittens, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Kittens, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("911512")", ResultType::Return, "911512" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Sunshine, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("911516")", ResultType::Return, "911516" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Dog in bag, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("911520")", ResultType::Return, "911520" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Cute puppy, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("911524")", ResultType::Return, "911524" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Rabbit, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("911528")", ResultType::Return, "911528" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Футболка детская 3D Turtle, возраст 1-2 года, трикотаж")", ResultType::Return, "Футболка детская 3D Turtle, возраст 1-2 года, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("390.00")", ResultType::Return, "390.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("888616")", ResultType::Return, "888616" },
- { R"("name")", ResultType::Return, "name" },
- { "\"3Д Футболка мужская \\\"Collorista\\\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж\"", ResultType::Return, "3Д Футболка мужская \"Collorista\" Светлое завтра р-р XL(52-54), 100% хлопок, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Одежда и обувь/Мужская одежда/Футболки/")", ResultType::Return, "/Одежда и обувь/Мужская одежда/Футболки/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("406.60")", ResultType::Return, "406.60" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("913361")", ResultType::Return, "913361" },
- { R"("name")", ResultType::Return, "name" },
- { R"("3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская World р-р 8-10, 100% хлопок, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("470.00")", ResultType::Return, "470.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("913364")", ResultType::Return, "913364" },
- { R"("name")", ResultType::Return, "name" },
- { R"("3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Force р-р 8-10, 100% хлопок, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("470.00")", ResultType::Return, "470.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("913367")", ResultType::Return, "913367" },
- { R"("name")", ResultType::Return, "name" },
- { R"("3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Winter tale р-р 8-10, 100% хлопок, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("470.00")", ResultType::Return, "470.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("913385")", ResultType::Return, "913385" },
- { R"("name")", ResultType::Return, "name" },
- { R"("3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Moonshine р-р 8-10, 100% хлопок, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("470.00")", ResultType::Return, "470.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("id")", ResultType::Return, "id" },
- { R"("913391")", ResultType::Return, "913391" },
- { R"("name")", ResultType::Return, "name" },
- { R"("3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж")", ResultType::Return, "3Д Футболка детская Shaman р-р 8-10, 100% хлопок, трикотаж" },
- { R"("category")", ResultType::Return, "category" },
- { R"("/Летние товары/Летний текстиль/")", ResultType::Return, "/Летние товары/Летний текстиль/" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("")", ResultType::Return, "" },
- { R"("price")", ResultType::Return, "price" },
- { R"("470.00")", ResultType::Return, "470.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("/retailrocket/")", ResultType::Return, "/retailrocket/" },
- { R"("position")", ResultType::Return, "position" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/")", ResultType::Return, "/911488/futbolka-detskaya-3d-dolphin-vozrast-1-2-goda-trikotazh/" },
- { R"("usertype")", ResultType::Return, "usertype" },
- { R"("visitor")", ResultType::Return, "visitor" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("__ym")", ResultType::Return, "__ym" },
- { R"("ecommerce")", ResultType::Return, "ecommerce" },
- { R"("impressions")", ResultType::Return, "impressions" },
- { R"("experiments")", ResultType::Return, "experiments" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("los_portal")", ResultType::Return, "los_portal" },
- { R"("los_level")", ResultType::Return, "los_level" },
- { R"("none")", ResultType::Return, "none" },
- { R"("experiments")", ResultType::Return, "experiments" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("los_portal")", ResultType::Return, "los_portal" },
- { R"("los_level")", ResultType::Return, "los_level" },
- { R"("none")", ResultType::Return, "none" },
- { R"("experiments")", ResultType::Return, "experiments" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("los_portal")", ResultType::Return, "los_portal" },
- { R"("los_level")", ResultType::Return, "los_level" },
- { R"("none")", ResultType::Return, "none" },
- { R"("experiments")", ResultType::Return, "experiments" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("los_portal")", ResultType::Return, "los_portal" },
- { R"("los_level")", ResultType::Return, "los_level" },
- { R"("none")", ResultType::Return, "none" },
- { R"("experiments")", ResultType::Return, "experiments" },
- { R"("lang")", ResultType::Return, "lang" },
- { R"("ru")", ResultType::Return, "ru" },
- { R"("los_portal")", ResultType::Return, "los_portal" },
- { R"("los_level")", ResultType::Return, "los_level" },
- { R"("none")", ResultType::Return, "none" },
- { R"("__ym")", ResultType::Return, "__ym" },
- { R"("ecommerce")", ResultType::Return, "ecommerce" },
- { R"("currencyCode")", ResultType::Return, "currencyCode" },
- { R"("RUR")", ResultType::Return, "RUR" },
- { R"("impressions")", ResultType::Return, "impressions" },
- { R"("name")", ResultType::Return, "name" },
- { R"("Чайник электрический Mystery MEK-1627, белый")", ResultType::Return, "Чайник электрический Mystery MEK-1627, белый" },
- { R"("brand")", ResultType::Return, "brand" },
- { R"("Mystery")", ResultType::Return, "Mystery" },
- { R"("id")", ResultType::Return, "id" },
- { R"("187180")", ResultType::Return, "187180" },
- { R"("category")", ResultType::Return, "category" },
- { R"("Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery")", ResultType::Return, "Мелкая бытовая техника/Мелкие кухонные приборы/Чайники электрические/Mystery" },
- { R"("variant")", ResultType::Return, "variant" },
- { R"("В наличии")", ResultType::Return, "В наличии" },
- { R"("price")", ResultType::Return, "price" },
- { R"("1630.00")", ResultType::Return, "1630.00" },
- { R"("list")", ResultType::Return, "list" },
- { R"("Карточка")", ResultType::Return, "Карточка" },
- { R"("position")", ResultType::Return, "position" },
- { R"("detail")", ResultType::Return, "detail" },
- { R"("actionField")", ResultType::Return, "actionField" },
- { R"("list")", ResultType::Return, "list" },
- { "\0\"", ResultType::Throw, "JSON: expected \", got \0" },
- { "\"/igrushki/konstruktory\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Творчество/Рисование/Инструменты и кра\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\0t", ResultType::Throw, "JSON: expected \", got \0" },
- { "\"/Хозтовары/Хранение вещей и организа\xD1\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Хозтовары/Товары для стир\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"li\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/734859/samolet-radioupravlyaemyy-istrebitel-rabotaet-o\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/kosmetika-i-parfyum/parfyumeriya/mu\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/ko\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "", ResultType::Throw, "JSON: begin >= end." },
- { "\"/stroitelstvo-i-remont/stroit\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/stroitelstvo-i-remont/stroitelnyy-instrument/av\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/s\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Строительство и ремонт/Строительный инструмент/Изм\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/avto/soputstvuy\0l", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/str\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Отвертка 2 в 1 \\\"TUNDRA basic\\\" 5х75 мм (+,-) \0\xFF", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/stroitelstvo-i-remont/stroitelnyy-instrument/avtoinstrumen\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Мелкая бытовая техника/Мелки\xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Пряжа \\\"Бамбук стрейч\\0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Карандаш чёрнографитны\xD0\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0l", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/1071547/karandash-chernografitnyy-volshebstvo-nv-kruglyy-d-7-2mm-dl-176mm-plast-tuba/\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"ca\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"ca\0e", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/1165424/chipbord-vyrubnoy-dlya-skrapbukinga-malyshi-mikki-maus-disney-bebi\0t", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/posuda/kuhonnye-prinadlezhnosti-i-i\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Канцтовары/Ежедневники и блокн\xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/kanctovary/ezhednevniki-i-blok\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Стакан \xD0\0a", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Набор бумаги для скрапбукинга \\\"Мои первый годик\\\": Микки Маус, Дисней бэби, 12 листов 29.5 х 29.5 см, 160\0\x80", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"c\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Органайзер для хранения аксессуаров, \0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"quantity\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Сменный блок для тетрадей на кольцах А5, 160 листов клетка, офсет \xE2\x84\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Сувениры/Ф\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"\0\"", ResultType::Return, "\0" },
- { "\"\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"va\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"ca\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"В \0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/letnie-tovary/z\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Посудомоечная машина Ha\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Крупная бытов\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Полочная акустическая система Magnat Needl\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"brand\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"pos\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"c\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"var\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Телевизоры и видеотехника/Всё для домашних кинотеатр\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Флеш-диск Transcend JetFlash 620 8GB (TS8GJF62\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Табурет Мег\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"variant\0\x04", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Катал\xD0\0\"", ResultType::Return, "Катал\xD0\0" },
- { "\"К\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Полочная акустическая система Magnat Needl\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"brand\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"pos\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"c\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"17\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/igrushki/razvivayusc\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Ключница \\\"\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Игр\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Игрушки/Игрушки для девочек/Игровые модули дл\xD1\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Крупная бытовая техника/Стиральные машины/С фронт\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\0 ", ResultType::Throw, "JSON: expected \", got \0" },
- { "\"Светодиодная лента SMD3528, 5 м. IP33, 60LED, зеленый, 4,8W/мет\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Сантехника/Мебель для ванных комнат/Стол\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\0o", ResultType::Throw, "JSON: expected \", got \0" },
- { "\"/igrushki/konstruktory\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/posuda/kuhonnye-prinadlezhnosti-i-instrumenty/kuhonnye-pr\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/1290414/komplekt-zhenskiy-dzhemper-plusbryuki-m-254-09-malina-plustemno-siniy-\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Творчество/Рисование/Инструменты и кра\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобильных аккумуляторов/Пуско-зарядные устр\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Строительство и ремонт/Силовая техника/Зарядные устройств\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Строительство и ремонт/Силовая техника/Зарядные устройства для автомобиль\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\0 ", ResultType::Throw, "JSON: expected \", got \0" },
- { "\"/Хозтовары/Хранение вещей и организа\xD1\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Хозтовары/Товары для стир\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"li\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/igrushki/igrus\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/734859/samolet-radioupravlyaemyy-istrebitel-rabotaet-o\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/kosmetika-i-parfyum/parfyumeriya/mu\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/ko\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/avto/avtomobilnyy\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/stroitelstvo-i-remont/stroit\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/stroitelstvo-i-remont/stroitelnyy-instrument/av\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/s\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Строительство и ремонт/Строительный инструмент/Изм\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/avto/soputstvuy\0\"", ResultType::Return, "/avto/soputstvuy\0" },
- { "\"/str\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Отвертка 2 в 1 \\\"TUNDRA basic\\\" 5х75 мм (+,-) \0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/stroitelstvo-i-remont/stroitelnyy-instrument/avtoinstrumen\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Чайник электрический Vitesse\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Мелкая бытовая техника/Мелки\xD0\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Пряжа \\\"Бамбук стрейч\\0о", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Карандаш чёрнографитны\xD0\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0\"", ResultType::Return, "/Творчество/Рукоделие, аппликации/Пряжа и шерсть для \xD0\0" },
- { "\"/1071547/karandash-chernografitnyy-volshebstvo-nv-kruglyy-d-7-2mm-dl-176mm-plast-tuba/\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"ca\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Подаро\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Средство для прочис\xD1\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"i\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/p\0\"", ResultType::Return, "/p\0" },
- { "\"/Сувениры/Магниты, н\xD0\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Дерев\xD0\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/prazdniki/svadba/svadebnaya-c\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Канцт\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Праздники/То\xD0\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"v\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Косметика \xD0\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Спорт и отдых/Настольные игры/Покер, руле\xD1\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"categ\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/retailr\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/retailrocket\0k", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Ежедневник недат А5 140л кл,ляссе,обл пв\0=", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/432809/ezhednevnik-organayzer-sredniy-s-remeshkom-na-knopke-v-oblozhke-kalkulyator-kalendar-do-\0\xD0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/1165424/chipbord-vyrubnoy-dlya-skrapbukinga-malyshi-mikki-maus-disney-bebi\0d", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/posuda/kuhonnye-prinadlezhnosti-i-i\0 ", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/Канцтовары/Ежедневники и блокн\xD0\0o", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"/kanctovary/ezhednevniki-i-blok\00", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Стакан \xD0\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"Набор бумаги для скрапбукинга \\\"Мои первый годик\\\": Микки Маус, Дисней бэби, 12 листов 29.5 х 29.5 см, 160\0\0", ResultType::Throw, "JSON: incorrect syntax (expected end of string, found end of JSON)." },
- { "\"c\0\"", ResultType::Return, "c\0" },
- };
-
- for (auto i : boost::irange(0, 1/*00000*/))
- {
- static_cast<void>(i);
-
- for (auto & r : test_data)
- {
- try
- {
- JSON j(r.input, r.input + strlen(r.input));
-
- ASSERT_EQ(j.getString(), r.result);
- ASSERT_TRUE(r.result_type == ResultType::Return);
- }
- catch (JSONException & e)
- {
- ASSERT_TRUE(r.result_type == ResultType::Throw);
- ASSERT_EQ(e.message(), r.result);
- }
- }
- }
-}
diff --git a/base/common/wide_integer_impl.h b/base/common/wide_integer_impl.h
index a34e757eaa5..456c10a22e4 100644
--- a/base/common/wide_integer_impl.h
+++ b/base/common/wide_integer_impl.h
@@ -249,15 +249,15 @@ struct integer<Bits, Signed>::_impl
return;
}
- const T alpha = t / max_int;
+ const T alpha = t / static_cast<T>(max_int);
- if (alpha <= max_int)
+ if (alpha <= static_cast<T>(max_int))
self = static_cast<uint64_t>(alpha);
else // max(double) / 2^64 will surely contain less than 52 precision bits, so speed up computations.
set_multiplier(self, alpha);
self *= max_int;
- self += static_cast<uint64_t>(t - alpha * max_int); // += b_i
+ self += static_cast<uint64_t>(t - alpha * static_cast<T>(max_int)); // += b_i
}
constexpr static void wide_integer_from_bultin(integer<Bits, Signed> & self, double rhs) noexcept {
@@ -271,11 +271,15 @@ struct integer<Bits, Signed>::_impl
/// As to_Integral does a static_cast to int64_t, it may result in UB.
/// The necessary check here is that long double has enough significant (mantissa) bits to store the
/// int64_t max value precisely.
+
+ //TODO Be compatible with Apple aarch64
+#if not (defined(__APPLE__) && defined(__aarch64__))
static_assert(LDBL_MANT_DIG >= 64,
"On your system long double has less than 64 precision bits,"
"which may result in UB when initializing double from int64_t");
+#endif
- if ((rhs > 0 && rhs < max_int) || (rhs < 0 && rhs > min_int))
+ if ((rhs > 0 && rhs < static_cast<long double>(max_int)) || (rhs < 0 && rhs > static_cast<long double>(min_int)))
{
self = static_cast<int64_t>(rhs);
return;
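The `static_assert`/`#if` pair above guards an `int64_t` → `long double` conversion that is only exact when `long double` carries at least 64 mantissa bits. A minimal standalone sketch of the difference (not part of the patch; all names are local to the example):

```cpp
#include <cfloat>
#include <cstdint>
#include <iostream>
#include <limits>

int main()
{
    std::cout << "LDBL_MANT_DIG = " << LDBL_MANT_DIG << '\n';

    constexpr int64_t max_int = std::numeric_limits<int64_t>::max();

    /// With a 64-bit mantissa (x86 extended precision), 2^63 - 1 and 2^63 - 2
    /// remain distinct after conversion. With a 53-bit mantissa (Apple aarch64,
    /// where long double is a plain double), both round to 2^63: the values
    /// collapse and a static_cast back to int64_t would be out of range (UB),
    /// which is exactly what the static_assert above protects against.
    const bool lossy = static_cast<long double>(max_int)
        == static_cast<long double>(max_int - 1);

    std::cout << (lossy ? "int64_t max is rounded" : "int64_t max is exact") << '\n';
}
```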
diff --git a/base/daemon/CMakeLists.txt b/base/daemon/CMakeLists.txt
index 26d59a57e7f..6ef87db6a61 100644
--- a/base/daemon/CMakeLists.txt
+++ b/base/daemon/CMakeLists.txt
@@ -5,6 +5,11 @@ add_library (daemon
)
target_include_directories (daemon PUBLIC ..)
+
+if (OS_DARWIN AND NOT MAKE_STATIC_LIBRARIES)
+ target_link_libraries (daemon PUBLIC -Wl,-undefined,dynamic_lookup)
+endif()
+
target_link_libraries (daemon PUBLIC loggers PRIVATE clickhouse_common_io clickhouse_common_config common ${EXECINFO_LIBRARIES})
if (USE_SENTRY)
diff --git a/base/daemon/SentryWriter.cpp b/base/daemon/SentryWriter.cpp
index 29430b65983..1b7d0064b99 100644
--- a/base/daemon/SentryWriter.cpp
+++ b/base/daemon/SentryWriter.cpp
@@ -9,6 +9,7 @@
#include
#include
+#include
#include
#include
#include
diff --git a/base/ext/scope_guard_safe.h b/base/ext/scope_guard_safe.h
new file mode 100644
index 00000000000..55140213572
--- /dev/null
+++ b/base/ext/scope_guard_safe.h
@@ -0,0 +1,68 @@
+#pragma once
+
+#include <ext/scope_guard.h>
+#include <common/logger_useful.h>
+#include <Common/MemoryTracker.h>
+
+/// Same as SCOPE_EXIT() but blocks the MEMORY_LIMIT_EXCEEDED errors.
+///
+/// A typical example of SCOPE_EXIT_MEMORY() usage is when the code under it may
+/// do some tiny allocations that may fail under high memory pressure and/or low
+/// max_memory_usage (and related limits).
+///
+/// NOTE: it should be used with caution.
+#define SCOPE_EXIT_MEMORY(...) SCOPE_EXIT( \
+ MemoryTracker::LockExceptionInThread \
+ lock_memory_tracker(VariableContext::Global); \
+ __VA_ARGS__; \
+)
+
+/// Same as SCOPE_EXIT() but catches any exception and logs it via
+/// tryLogCurrentException().
+///
+/// SCOPE_EXIT_SAFE() should be used when an exception from the code under
+/// SCOPE_EXIT() is not "that fatal" and an error message in the log is enough.
+///
+/// A good example is calling CurrentThread::detachQueryIfNotDetached().
+///
+/// An anti-pattern is calling WriteBuffer::finalize() under SCOPE_EXIT_SAFE()
+/// (since finalize() can do a final write, and it is better to fail abnormally
+/// instead of ignoring a write error).
+///
+/// NOTE: it should be used with double caution.
+#define SCOPE_EXIT_SAFE(...) SCOPE_EXIT( \
+ try \
+ { \
+ __VA_ARGS__; \
+ } \
+ catch (...) \
+ { \
+ tryLogCurrentException(__PRETTY_FUNCTION__); \
+ } \
+)
+
+/// Same as SCOPE_EXIT() but:
+/// - block the MEMORY_LIMIT_EXCEEDED errors,
+/// - try/catch/tryLogCurrentException any exceptions.
+///
+/// SCOPE_EXIT_MEMORY_SAFE() can be used when the error can be ignored, and in
+/// addition to SCOPE_EXIT_SAFE() it will also lock MEMORY_LIMIT_EXCEEDED to
+/// avoid such exceptions.
+///
+/// It exists as a separate helper, since you do not always need to lock
+/// MEMORY_LIMIT_EXCEEDED (there are cases when the code under SCOPE_EXIT does
+/// not do any allocations, while LockExceptionInThread increments an atomic
+/// variable).
+///
+/// NOTE: it should be used with triple caution.
+#define SCOPE_EXIT_MEMORY_SAFE(...) SCOPE_EXIT( \
+ try \
+ { \
+ MemoryTracker::LockExceptionInThread \
+ lock_memory_tracker(VariableContext::Global); \
+ __VA_ARGS__; \
+ } \
+ catch (...) \
+ { \
+ tryLogCurrentException(__PRETTY_FUNCTION__); \
+ } \
+)
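A hypothetical usage sketch of the new macros; the surrounding function is illustrative, and `DB::CurrentThread::detachQueryIfNotDetached()` is the cleanup that the header's own comments name as the good example:

```cpp
#include <ext/scope_guard_safe.h>
#include <Common/CurrentThread.h>

void runQueryStep()
{
    /// The cleanup runs on scope exit; if the cleanup itself throws, the
    /// exception is logged via tryLogCurrentException() instead of escaping.
    /// SCOPE_EXIT_MEMORY_SAFE() would additionally suppress
    /// MEMORY_LIMIT_EXCEEDED, for cleanup that may allocate under pressure.
    SCOPE_EXIT_SAFE(
        DB::CurrentThread::detachQueryIfNotDetached();
    );

    // ... code that may throw ...
}
```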
diff --git a/base/glibc-compatibility/CMakeLists.txt b/base/glibc-compatibility/CMakeLists.txt
index 684c6162941..e785e2ab2ce 100644
--- a/base/glibc-compatibility/CMakeLists.txt
+++ b/base/glibc-compatibility/CMakeLists.txt
@@ -1,5 +1,8 @@
if (GLIBC_COMPATIBILITY)
- set (ENABLE_FASTMEMCPY ON)
+ add_subdirectory(memcpy)
+ if(TARGET memcpy)
+ set(MEMCPY_LIBRARY memcpy)
+ endif()
enable_language(ASM)
include(CheckIncludeFile)
@@ -27,13 +30,6 @@ if (GLIBC_COMPATIBILITY)
list(APPEND glibc_compatibility_sources musl/getentropy.c)
endif()
- if (NOT ARCH_ARM)
- # clickhouse_memcpy don't support ARCH_ARM, see https://github.com/ClickHouse/ClickHouse/issues/18951
- add_library (clickhouse_memcpy OBJECT
- ${ClickHouse_SOURCE_DIR}/contrib/FastMemcpy/memcpy_wrapper.c
- )
- endif()
-
# Need to omit frame pointers to match the performance of glibc
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fomit-frame-pointer")
@@ -51,15 +47,16 @@ if (GLIBC_COMPATIBILITY)
target_compile_options(glibc-compatibility PRIVATE -fPIC)
endif ()
- target_link_libraries(global-libs INTERFACE glibc-compatibility)
+ target_link_libraries(global-libs INTERFACE glibc-compatibility ${MEMCPY_LIBRARY})
install(
- TARGETS glibc-compatibility
+ TARGETS glibc-compatibility ${MEMCPY_LIBRARY}
EXPORT global
ARCHIVE DESTINATION lib
)
message (STATUS "Some symbols from glibc will be replaced for compatibility")
+
elseif (YANDEX_OFFICIAL_BUILD)
message (WARNING "Option GLIBC_COMPATIBILITY must be turned on for production builds.")
endif ()
diff --git a/base/glibc-compatibility/memcpy/CMakeLists.txt b/base/glibc-compatibility/memcpy/CMakeLists.txt
new file mode 100644
index 00000000000..133995d9b96
--- /dev/null
+++ b/base/glibc-compatibility/memcpy/CMakeLists.txt
@@ -0,0 +1,8 @@
+if (ARCH_AMD64)
+ add_library(memcpy STATIC memcpy.cpp)
+
+ # We allow including memcpy.h from user code for better inlining.
+ target_include_directories(memcpy PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>)
+
+ target_compile_options(memcpy PRIVATE -fno-builtin-memcpy)
+endif ()
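The `-fno-builtin-memcpy` flag above addresses the self-recursion hazard that memcpy.h below also mentions for `-ftree-loop-distribute-patterns`: the compiler may pattern-match a copy loop back into a `memcpy` call. A sketch of the failure mode (the naive loop body is illustrative, not the shipped implementation):

```cpp
#include <cstddef>

/// A naive byte loop inside the function that *is* memcpy. Without
/// -fno-builtin-memcpy (and with -ftree-loop-distribute-patterns), the
/// compiler may recognize this loop and "optimize" it into a call to
/// memcpy - that is, into an infinite recursion onto itself.
extern "C" void * memcpy(void * dst, const void * src, size_t size)
{
    char * d = static_cast<char *>(dst);
    const char * s = static_cast<const char *>(src);

    for (size_t i = 0; i < size; ++i)
        d[i] = s[i];

    return dst;
}
```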
diff --git a/base/glibc-compatibility/memcpy/memcpy.cpp b/base/glibc-compatibility/memcpy/memcpy.cpp
new file mode 100644
index 00000000000..ec43a2c3649
--- /dev/null
+++ b/base/glibc-compatibility/memcpy/memcpy.cpp
@@ -0,0 +1,6 @@
+#include "memcpy.h"
+
+extern "C" void * memcpy(void * __restrict dst, const void * __restrict src, size_t size)
+{
+ return inline_memcpy(dst, src, size);
+}
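From the caller's side, "include memcpy.h for better inlining" can look like the following sketch; `Row` and `copyRow` are hypothetical:

```cpp
#include <memcpy.h>  /// visible through the memcpy target's PUBLIC include directory

struct Row
{
    char data[24];
};

void copyRow(Row & dst, const Row & src)
{
    /// Calling inline_memcpy directly lets the compiler inline the copy and
    /// drop the "return original dst" instruction that the ABI-level memcpy
    /// has to keep (see the comments inside inline_memcpy below).
    inline_memcpy(dst.data, src.data, sizeof(src.data));
}
```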
diff --git a/base/glibc-compatibility/memcpy/memcpy.h b/base/glibc-compatibility/memcpy/memcpy.h
new file mode 100644
index 00000000000..211d144cecb
--- /dev/null
+++ b/base/glibc-compatibility/memcpy/memcpy.h
@@ -0,0 +1,217 @@
+#include <cstddef>
+
+#include <emmintrin.h>
+
+
+/** Custom memcpy implementation for ClickHouse.
+ * It has the following benefits over using glibc's implementation:
+ * 1. Avoiding dependency on specific version of glibc's symbol, like memcpy@@GLIBC_2.14 for portability.
+ * 2. Avoiding indirect call via PLT due to shared linking, which can be less efficient.
+ * 3. It's possible to include this header and call inline_memcpy directly for better inlining or interprocedural analysis.
+ * 4. Better results on our performance tests on current CPUs: up to 25% on some queries and up to 0.7%..1% on average across all queries.
+ *
+ * Writing our own memcpy is extremely difficult for the following reasons:
+ * 1. The optimal variant depends on the specific CPU model.
+ * 2. The optimal variant depends on the distribution of size arguments.
+ * 3. It depends on the number of threads copying data concurrently.
+ * 4. It also depends on how the calling code is using the copied data and how the different memcpy calls are related to each other.
+ * The vast range of scenarios makes proper testing especially difficult.
+ * When writing our own memcpy there is a risk of overoptimizing it
+ * on non-representative microbenchmarks while making real-world use cases actually worse.
+ *
+ * Most of the benchmarks for memcpy on the internet are wrong.
+ *
+ * Let's look at the details:
+ *
+ * For small sizes, the order of branches in code is important.
+ * There are variants with a specific order of branches (like here or in glibc)
+ * or with a jump table (in asm code see the example from Cosmopolitan libc:
+ * https://github.com/jart/cosmopolitan/blob/de09bec215675e9b0beb722df89c6f794da74f3f/libc/nexgen32e/memcpy.S#L61)
+ * or with a Duff's device in C (see https://github.com/skywind3000/FastMemcpy/)
+ *
+ * It's also important how uneven sizes are copied.
+ * Almost every implementation, including this one, uses two overlapping movs.
+ *
+ * It is important to disable -ftree-loop-distribute-patterns when compiling a memcpy implementation,
+ * otherwise the compiler can replace internal loops with a call to memcpy, which will lead to infinite recursion.
+ *
+ * For larger sizes it's important to choose the instructions used:
+ * - SSE or AVX or AVX-512;
+ * - rep movsb;
+ * Performance will depend on the size threshold, on the CPU model, and on the "erms" flag
+ * ("Enhanced Rep MovS" - it indicates that performance of "rep movsb" is decent for large sizes)
+ * https://stackoverflow.com/questions/43343231/enhanced-rep-movsb-for-memcpy
+ *
+ * Using AVX-512 can be bad due to throttling.
+ * Using AVX can be bad if most code is using SSE due to switching penalty
+ * (it also depends on the usage of "vzeroupper" instruction).
+ * But in some cases AVX gives a win.
+ *
+ * It also depends on how many times the loop will be unrolled.
+ * We are unrolling the loop 8 times (by the number of available registers), but it is not always the best.
+ *
+ * It also depends on the usage of aligned or unaligned loads/stores.
+ * We are using unaligned loads and aligned stores.
+ *
+ * It also depends on the usage of prefetch instructions. It makes sense on some Intel CPUs but can slow down performance on AMD.
+ * Setting up the correct offset for prefetching is non-obvious.
+ *
+ * Non-temporal (cache-bypassing) stores can be used for very large sizes (more than half of the L3 cache).
+ * But the exact threshold is unclear - when doing memcpy from multiple threads the optimal threshold can be lower,
+ * because the L3 cache is shared (and the L2 cache is partially shared).
+ *
+ * A very large memcpy size typically indicates suboptimal (not cache-friendly) algorithms in code or unrealistic scenarios,
+ * so we don't pay attention to using non-temporal stores.
+ *
+ * On recent Intel CPUs, the presence of "erms" makes "rep movsb" the most beneficial,
+ * even compared to non-temporal aligned unrolled stores with the widest registers.
+ *
+ * memcpy can be written in asm, C or C++. The latter can also use inline asm.
+ * An asm implementation can be better for making sure that the compiler won't make the code worse:
+ * it ensures the order of branches, the code layout and the usage of all required registers.
+ * But if it is located in a separate translation unit, inlining will not be possible
+ * (inline asm can be used to overcome this limitation).
+ * Sometimes C or C++ code can be further optimized by the compiler.
+ * For example, clang is capable of replacing SSE intrinsics with AVX code if -mavx is used.
+ *
+ * Please note that the compiler can replace plain code with a call to memcpy and vice versa:
+ * - memcpy with a compile-time known small size is replaced with simple instructions without a call to memcpy;
+ * it is controlled by -fbuiltin-memcpy and can be manually ensured by calling __builtin_memcpy.
+ * This is often used to implement unaligned load/store without undefined behaviour in C++ (see the sketch after this list).
+ * - a loop copying bytes can be recognized and replaced with a call to memcpy;
+ * it is controlled by -ftree-loop-distribute-patterns.
+ * - also note that a loop copying bytes can be unrolled, peeled and vectorized, which will give you
+ * inline code somewhat similar to a decent implementation of memcpy.
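+ *
+ * For example, a common idiom for an unaligned 64-bit load without undefined behaviour
+ * (shown here for illustration only):
+ *
+ *     uint64_t unalignedLoad64(const void * address)
+ *     {
+ *         uint64_t res;
+ *         __builtin_memcpy(&res, address, sizeof(res)); /// compiles to a single mov on x86_64
+ *         return res;
+ *     }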
+ *
+ * This description is up to date as of Mar 2021.
+ *
+ * How to test the memcpy implementation for performance:
+ * 1. Test on real production workload.
+ * 2. For synthetic benchmarks, see utils/memcpy-bench, but make sure you do your best to cover a wide range of scenarios.
+ *
+ * TODO: Add a self-tuning memcpy with a Bayesian bandits algorithm for large sizes.
+ * See https://habr.com/en/company/yandex/blog/457612/
+ */
+
+
+static inline void * inline_memcpy(void * __restrict dst_, const void * __restrict src_, size_t size)
+{
+ /// We will use pointer arithmetic, so char pointers will be used.
+ /// Note that __restrict makes sense (otherwise the compiler will reload data from memory
+ /// instead of using the values in registers, due to possible aliasing).
+ char * __restrict dst = reinterpret_cast<char * __restrict>(dst_);
+ const char * __restrict src = reinterpret_cast<const char * __restrict>(src_);
+
+ /// Standard memcpy returns the original value of dst. It is rarely used but we have to do it.
+ /// If you use memcpy with small but non-constant sizes, you can call inline_memcpy directly
+ /// for inlining and removing this single instruction.
+ void * ret = dst;
+
+tail:
+ /// Small sizes and tails after the loop for large sizes.
+ /// The order of branches is important but in fact the optimal order depends on the distribution of sizes in your application.
+ /// This order of branches is from the disassembly of glibc's code.
+ /// We copy chunks of possibly uneven size with two overlapping movs.
+ /// Example: to copy 5 bytes [0, 1, 2, 3, 4] we will copy tail [1, 2, 3, 4] first and then head [0, 1, 2, 3].
+ if (size <= 16)
+ {
+ if (size >= 8)
+ {
+ /// Chunks of 8..16 bytes.
+ __builtin_memcpy(dst + size - 8, src + size - 8, 8);
+ __builtin_memcpy(dst, src, 8);
+ }
+ else if (size >= 4)
+ {
+ /// Chunks of 4..7 bytes.
+ __builtin_memcpy(dst + size - 4, src + size - 4, 4);
+ __builtin_memcpy(dst, src, 4);
+ }
+ else if (size >= 2)
+ {
+ /// Chunks of 2..3 bytes.
+ __builtin_memcpy(dst + size - 2, src + size - 2, 2);
+ __builtin_memcpy(dst, src, 2);
+ }
+ else if (size >= 1)
+ {
+ /// A single byte.
+ *dst = *src;
+ }
+ /// No bytes remaining.
+ }
+ else
+ {
+ /// Medium and large sizes.
+ if (size <= 128)
+ {
+ /// Medium size, not enough for full loop unrolling.
+
+ /// We will copy the last 16 bytes.
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(dst + size - 16), _mm_loadu_si128(reinterpret_cast<const __m128i *>(src + size - 16)));
+
+ /// Then we will copy every 16 bytes from the beginning in a loop.
+ /// The last loop iteration will possibly overwrite some part of already copied last 16 bytes.
+ /// This is Ok, similar to the code for small sizes above.
+ while (size > 16)
+ {
+ _mm_storeu_si128(reinterpret_cast<__m128i *>(dst), _mm_loadu_si128(reinterpret_cast<const __m128i *>(src)));
+ dst += 16;
+ src += 16;
+ size -= 16;
+ }
+ }
+ else
+ {
+ /// Large size with fully unrolled loop.
+
+ /// Align destination to 16 bytes boundary.
+ size_t padding = (16 - (reinterpret_cast<size_t>(dst) & 15)) & 15;
+
+ /// If not aligned - we will copy first 16 bytes with unaligned stores.
+ if (padding > 0)
+ {
+ __m128i head = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src));
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), head);
+ dst += padding;
+ src += padding;
+ size -= padding;
+ }
+
+ /// Aligned unrolled copy. We will use half of available SSE registers.
+ /// It's not possible to have both src and dst aligned.
+ /// So, we will use aligned stores and unaligned loads.
+ __m128i c0, c1, c2, c3, c4, c5, c6, c7;
+
+ while (size >= 128)
+ {
+ c0 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src) + 0);
+ c1 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src) + 1);
+ c2 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src) + 2);
+ c3 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src) + 3);
+ c4 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src) + 4);
+ c5 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src) + 5);
+ c6 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src) + 6);
+ c7 = _mm_loadu_si128(reinterpret_cast<const __m128i *>(src) + 7);
+ src += 128;
+ _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 0), c0);
+ _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 1), c1);
+ _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 2), c2);
+ _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 3), c3);
+ _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 4), c4);
+ _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 5), c5);
+ _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 6), c6);
+ _mm_store_si128((reinterpret_cast<__m128i*>(dst) + 7), c7);
+ dst += 128;
+
+ size -= 128;
+ }
+
+ /// The last remaining 0..127 bytes will be processed as usual.
+ goto tail;
+ }
+ }
+
+ return ret;
+}
+
diff --git a/base/mysqlxx/Connection.cpp b/base/mysqlxx/Connection.cpp
index 8a15115cb06..2dbbc0c73f3 100644
--- a/base/mysqlxx/Connection.cpp
+++ b/base/mysqlxx/Connection.cpp
@@ -51,10 +51,11 @@ Connection::Connection(
const char* ssl_key,
unsigned timeout,
unsigned rw_timeout,
- bool enable_local_infile)
+ bool enable_local_infile,
+ bool opt_reconnect)
: Connection()
{
- connect(db, server, user, password, port, socket, ssl_ca, ssl_cert, ssl_key, timeout, rw_timeout, enable_local_infile);
+ connect(db, server, user, password, port, socket, ssl_ca, ssl_cert, ssl_key, timeout, rw_timeout, enable_local_infile, opt_reconnect);
}
Connection::Connection(const std::string & config_name)
@@ -80,7 +81,8 @@ void Connection::connect(const char* db,
const char * ssl_key,
unsigned timeout,
unsigned rw_timeout,
- bool enable_local_infile)
+ bool enable_local_infile,
+ bool opt_reconnect)
{
if (is_connected)
disconnect();
@@ -104,9 +106,8 @@ void Connection::connect(const char* db,
if (mysql_options(driver.get(), MYSQL_OPT_LOCAL_INFILE, &enable_local_infile_arg))
throw ConnectionFailed(errorMessage(driver.get()), mysql_errno(driver.get()));
- /// Enables auto-reconnect.
- bool reconnect = true;
- if (mysql_options(driver.get(), MYSQL_OPT_RECONNECT, reinterpret_cast<const char *>(&reconnect)))
+ /// See C API Developer Guide: Automatic Reconnection Control
+ if (mysql_options(driver.get(), MYSQL_OPT_RECONNECT, reinterpret_cast<const char *>(&opt_reconnect)))
throw ConnectionFailed(errorMessage(driver.get()), mysql_errno(driver.get()));
/// Specifies particular ssl key and certificate if it needs
diff --git a/base/mysqlxx/Connection.h b/base/mysqlxx/Connection.h
index ca67db0e0c6..65955136eb1 100644
--- a/base/mysqlxx/Connection.h
+++ b/base/mysqlxx/Connection.h
@@ -14,6 +14,8 @@
/// Disable LOAD DATA LOCAL INFILE because it is insecure
#define MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE false
+/// See https://dev.mysql.com/doc/c-api/5.7/en/c-api-auto-reconnect.html
+#define MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT true
namespace mysqlxx
@@ -76,7 +78,8 @@ public:
const char * ssl_key = "",
unsigned timeout = MYSQLXX_DEFAULT_TIMEOUT,
unsigned rw_timeout = MYSQLXX_DEFAULT_RW_TIMEOUT,
- bool enable_local_infile = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
+ bool enable_local_infile = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE,
+ bool opt_reconnect = MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT);
/// Creates connection. Can be used if Poco::Util::Application is using.
/// All settings will be got from config_name section of configuration.
@@ -96,7 +99,8 @@ public:
const char* ssl_key,
unsigned timeout = MYSQLXX_DEFAULT_TIMEOUT,
unsigned rw_timeout = MYSQLXX_DEFAULT_RW_TIMEOUT,
- bool enable_local_infile = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
+ bool enable_local_infile = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE,
+ bool opt_reconnect = MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT);
void connect(const std::string & config_name)
{
@@ -112,6 +116,7 @@ public:
std::string ssl_cert = cfg.getString(config_name + ".ssl_cert", "");
std::string ssl_key = cfg.getString(config_name + ".ssl_key", "");
bool enable_local_infile = cfg.getBool(config_name + ".enable_local_infile", MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
+ bool opt_reconnect = cfg.getBool(config_name + ".opt_reconnect", MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT);
unsigned timeout =
cfg.getInt(config_name + ".connect_timeout",
@@ -135,7 +140,8 @@ public:
ssl_key.c_str(),
timeout,
rw_timeout,
- enable_local_infile);
+ enable_local_infile,
+ opt_reconnect);
}
/// If MySQL connection was established.
diff --git a/base/mysqlxx/Exception.h b/base/mysqlxx/Exception.h
index eaeb3565af1..48cd0997b94 100644
--- a/base/mysqlxx/Exception.h
+++ b/base/mysqlxx/Exception.h
@@ -26,6 +26,15 @@ struct ConnectionFailed : public Exception
};
+/// Connection to MySQL server was lost
+struct ConnectionLost : public Exception
+{
+ ConnectionLost(const std::string & msg, int code = 0) : Exception(msg, code) {}
+ const char * name() const throw() override { return "mysqlxx::ConnectionLost"; }
+ const char * className() const throw() override { return "mysqlxx::ConnectionLost"; }
+};
+
+
/// Erroneous query.
struct BadQuery : public Exception
{
diff --git a/base/mysqlxx/Pool.cpp b/base/mysqlxx/Pool.cpp
index 2cb3e62db84..386b4544b78 100644
--- a/base/mysqlxx/Pool.cpp
+++ b/base/mysqlxx/Pool.cpp
@@ -10,7 +10,6 @@
#include
-#include
#include
@@ -41,7 +40,9 @@ void Pool::Entry::decrementRefCount()
Pool::Pool(const Poco::Util::AbstractConfiguration & cfg, const std::string & config_name,
unsigned default_connections_, unsigned max_connections_,
const char * parent_config_name_)
- : default_connections(default_connections_), max_connections(max_connections_)
+ : logger(Poco::Logger::get("mysqlxx::Pool"))
+ , default_connections(default_connections_)
+ , max_connections(max_connections_)
{
server = cfg.getString(config_name + ".host");
@@ -78,6 +79,9 @@ Pool::Pool(const Poco::Util::AbstractConfiguration & cfg, const std::string & co
enable_local_infile = cfg.getBool(config_name + ".enable_local_infile",
cfg.getBool(parent_config_name + ".enable_local_infile", MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE));
+
+ opt_reconnect = cfg.getBool(config_name + ".opt_reconnect",
+ cfg.getBool(parent_config_name + ".opt_reconnect", MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT));
}
else
{
@@ -96,6 +100,8 @@ Pool::Pool(const Poco::Util::AbstractConfiguration & cfg, const std::string & co
enable_local_infile = cfg.getBool(
config_name + ".enable_local_infile", MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE);
+
+ opt_reconnect = cfg.getBool(config_name + ".opt_reconnect", MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT);
}
connect_timeout = cfg.getInt(config_name + ".connect_timeout",
@@ -125,20 +131,30 @@ Pool::Entry Pool::get()
initialize();
for (;;)
{
+ logger.trace("(%s): Iterating through existing MySQL connections", getDescription());
+
for (auto & connection : connections)
{
if (connection->ref_count == 0)
return Entry(connection, this);
}
+ logger.trace("(%s): Trying to allocate a new connection.", getDescription());
if (connections.size() < static_cast<size_t>(max_connections))
{
Connection * conn = allocConnection();
if (conn)
return Entry(conn, this);
+
+ logger.trace("(%s): Unable to create a new connection: Allocation failed.", getDescription());
+ }
+ else
+ {
+ logger.trace("(%s): Unable to create a new connection: Max number of connections has been reached.", getDescription());
}
lock.unlock();
+ logger.trace("(%s): Sleeping for %d seconds.", getDescription(), MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
sleepForSeconds(MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
lock.lock();
}
@@ -158,12 +174,13 @@ Pool::Entry Pool::tryGet()
/// Fixme: There is a race condition here b/c we do not synchronize with Pool::Entry's copy-assignment operator
if (connection_ptr->ref_count == 0)
{
- Entry res(connection_ptr, this);
- if (res.tryForceConnected()) /// Tries to reestablish connection as well
- return res;
+ {
+ Entry res(connection_ptr, this);
+ if (res.tryForceConnected()) /// Tries to reestablish connection as well
+ return res;
+ }
- auto & logger = Poco::Util::Application::instance().logger();
- logger.information("Idle connection to mysql server cannot be recovered, dropping it.");
+ logger.debug("(%s): Idle connection to MySQL server cannot be recovered, dropping it.", getDescription());
/// This one is disconnected, cannot be reestablished and so needs to be disposed of.
connection_it = connections.erase(connection_it);
@@ -186,6 +203,8 @@ Pool::Entry Pool::tryGet()
void Pool::removeConnection(Connection* connection)
{
+ logger.trace("(%s): Removing connection.", getDescription());
+
std::lock_guard lock(mutex);
if (connection)
{
@@ -210,8 +229,6 @@ void Pool::Entry::forceConnected() const
if (data == nullptr)
throw Poco::RuntimeException("Tried to access NULL database connection.");
- Poco::Util::Application & app = Poco::Util::Application::instance();
-
bool first = true;
while (!tryForceConnected())
{
@@ -220,7 +237,7 @@ void Pool::Entry::forceConnected() const
else
sleepForSeconds(MYSQLXX_POOL_SLEEP_ON_CONNECT_FAIL);
- app.logger().information("MYSQL: Reconnecting to " + pool->description);
+ pool->logger.debug("Entry: Reconnecting to MySQL server %s", pool->description);
data->conn.connect(
pool->db.c_str(),
pool->server.c_str(),
@@ -233,7 +250,8 @@ void Pool::Entry::forceConnected() const
pool->ssl_key.c_str(),
pool->connect_timeout,
pool->rw_timeout,
- pool->enable_local_infile);
+ pool->enable_local_infile,
+ pool->opt_reconnect);
}
}
@@ -242,18 +260,22 @@ bool Pool::Entry::tryForceConnected() const
{
auto * const mysql_driver = data->conn.getDriver();
const auto prev_connection_id = mysql_thread_id(mysql_driver);
+
+ pool->logger.trace("Entry(connection %lu): sending PING to check if it is alive.", prev_connection_id);
if (data->conn.ping()) /// Attempts to reestablish lost connection
{
const auto current_connection_id = mysql_thread_id(mysql_driver);
if (prev_connection_id != current_connection_id)
{
- auto & logger = Poco::Util::Application::instance().logger();
- logger.information("Connection to mysql server has been reestablished. Connection id changed: %lu -> %lu",
- prev_connection_id, current_connection_id);
+ pool->logger.debug("Entry(connection %lu): Reconnected to MySQL server. Connection id changed: %lu -> %lu",
+ current_connection_id, prev_connection_id, current_connection_id);
}
+
+ pool->logger.trace("Entry(connection %lu): PING ok.", current_connection_id);
return true;
}
+ pool->logger.trace("Entry(connection %lu): PING failed.", prev_connection_id);
return false;
}
@@ -274,15 +296,13 @@ void Pool::initialize()
Pool::Connection * Pool::allocConnection(bool dont_throw_if_failed_first_time)
{
- Poco::Util::Application & app = Poco::Util::Application::instance();
-
- std::unique_ptr<Connection> conn(new Connection);
+ std::unique_ptr<Connection> conn_ptr{new Connection};
try
{
- app.logger().information("MYSQL: Connecting to " + description);
+ logger.debug("Connecting to %s", description);
- conn->conn.connect(
+ conn_ptr->conn.connect(
db.c_str(),
server.c_str(),
user.c_str(),
@@ -294,29 +314,29 @@ Pool::Connection * Pool::allocConnection(bool dont_throw_if_failed_first_time)
ssl_key.c_str(),
connect_timeout,
rw_timeout,
- enable_local_infile);
+ enable_local_infile,
+ opt_reconnect);
}
catch (mysqlxx::ConnectionFailed & e)
{
+ logger.error(e.what());
+
if ((!was_successful && !dont_throw_if_failed_first_time)
|| e.errnum() == ER_ACCESS_DENIED_ERROR
|| e.errnum() == ER_DBACCESS_DENIED_ERROR
|| e.errnum() == ER_BAD_DB_ERROR)
{
- app.logger().error(e.what());
throw;
}
else
{
- app.logger().error(e.what());
return nullptr;
}
}
+ connections.push_back(conn_ptr.get());
was_successful = true;
- auto * connection = conn.release();
- connections.push_back(connection);
- return connection;
+ return conn_ptr.release();
}
}
diff --git a/base/mysqlxx/Pool.h b/base/mysqlxx/Pool.h
index 83b00e0081a..530e2c78cf2 100644
--- a/base/mysqlxx/Pool.h
+++ b/base/mysqlxx/Pool.h
@@ -6,6 +6,8 @@
#include
#include
+#include <Poco/Logger.h>
+
#include
@@ -157,27 +159,29 @@ public:
*/
Pool(const std::string & db_,
const std::string & server_,
- const std::string & user_ = "",
- const std::string & password_ = "",
- unsigned port_ = 0,
+ const std::string & user_,
+ const std::string & password_,
+ unsigned port_,
const std::string & socket_ = "",
unsigned connect_timeout_ = MYSQLXX_DEFAULT_TIMEOUT,
unsigned rw_timeout_ = MYSQLXX_DEFAULT_RW_TIMEOUT,
unsigned default_connections_ = MYSQLXX_POOL_DEFAULT_START_CONNECTIONS,
unsigned max_connections_ = MYSQLXX_POOL_DEFAULT_MAX_CONNECTIONS,
- unsigned enable_local_infile_ = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE)
- : default_connections(default_connections_), max_connections(max_connections_),
- db(db_), server(server_), user(user_), password(password_), port(port_), socket(socket_),
- connect_timeout(connect_timeout_), rw_timeout(rw_timeout_), enable_local_infile(enable_local_infile_) {}
+ unsigned enable_local_infile_ = MYSQLXX_DEFAULT_ENABLE_LOCAL_INFILE,
+ bool opt_reconnect_ = MYSQLXX_DEFAULT_MYSQL_OPT_RECONNECT)
+ : logger(Poco::Logger::get("mysqlxx::Pool")), default_connections(default_connections_),
+ max_connections(max_connections_), db(db_), server(server_), user(user_), password(password_), port(port_), socket(socket_),
+ connect_timeout(connect_timeout_), rw_timeout(rw_timeout_), enable_local_infile(enable_local_infile_),
+ opt_reconnect(opt_reconnect_) {}
Pool(const Pool & other)
- : default_connections{other.default_connections},
+ : logger(other.logger), default_connections{other.default_connections},
max_connections{other.max_connections},
db{other.db}, server{other.server},
user{other.user}, password{other.password},
port{other.port}, socket{other.socket},
connect_timeout{other.connect_timeout}, rw_timeout{other.rw_timeout},
- enable_local_infile{other.enable_local_infile}
+ enable_local_infile{other.enable_local_infile}, opt_reconnect(other.opt_reconnect)
{}
Pool & operator=(const Pool &) = delete;
@@ -201,6 +205,8 @@ public:
void removeConnection(Connection * connection);
protected:
+ Poco::Logger & logger;
+
/// Number of MySQL connections which are created at launch.
unsigned default_connections;
/// Maximum possible number of connections
@@ -231,6 +237,7 @@ private:
std::string ssl_cert;
std::string ssl_key;
bool enable_local_infile;
+ bool opt_reconnect;
/// True if connection was established at least once.
bool was_successful{false};
diff --git a/base/mysqlxx/PoolWithFailover.cpp b/base/mysqlxx/PoolWithFailover.cpp
index 5bee75aab1b..ea2d060e596 100644
--- a/base/mysqlxx/PoolWithFailover.cpp
+++ b/base/mysqlxx/PoolWithFailover.cpp
@@ -1,3 +1,7 @@
+#include <algorithm>
+#include <ctime>
+#include <random>
+#include <thread>
#include
@@ -10,9 +14,12 @@ static bool startsWith(const std::string & s, const char * prefix)
using namespace mysqlxx;
-PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & config_,
- const std::string & config_name_, const unsigned default_connections_,
- const unsigned max_connections_, const size_t max_tries_)
+PoolWithFailover::PoolWithFailover(
+ const Poco::Util::AbstractConfiguration & config_,
+ const std::string & config_name_,
+ const unsigned default_connections_,
+ const unsigned max_connections_,
+ const size_t max_tries_)
: max_tries(max_tries_)
{
shareable = config_.getBool(config_name_ + ".share_connection", false);
@@ -33,6 +40,19 @@ PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & con
std::make_shared(config_, replica_name, default_connections_, max_connections_, config_name_.c_str()));
}
}
+
+ /// PoolWithFailover objects are stored in a cache inside PoolFactory.
+ /// This cache is reset by ExternalDictionariesLoader after every SYSTEM RELOAD DICTIONAR{Y|IES}
+ /// which triggers massive re-constructing of connection pools.
+ /// The state of PRNGs like std::mt19937 is considered to be quite heavy
+ /// thus here we attempt to optimize its construction.
+ static thread_local std::mt19937 rnd_generator(
+ std::hash<std::thread::id>{}(std::this_thread::get_id()) + std::clock());
+ for (auto & [_, replicas] : replicas_by_priority)
+ {
+ if (replicas.size() > 1)
+ std::shuffle(replicas.begin(), replicas.end(), rnd_generator);
+ }
}
else
{
@@ -41,16 +61,38 @@ PoolWithFailover::PoolWithFailover(const Poco::Util::AbstractConfiguration & con
}
}
-PoolWithFailover::PoolWithFailover(const std::string & config_name_, const unsigned default_connections_,
- const unsigned max_connections_, const size_t max_tries_)
- : PoolWithFailover{
- Poco::Util::Application::instance().config(), config_name_,
- default_connections_, max_connections_, max_tries_}
+
+PoolWithFailover::PoolWithFailover(
+ const std::string & config_name_,
+ const unsigned default_connections_,
+ const unsigned max_connections_,
+ const size_t max_tries_)
+ : PoolWithFailover{Poco::Util::Application::instance().config(),
+ config_name_, default_connections_, max_connections_, max_tries_}
{
}
+
+PoolWithFailover::PoolWithFailover(
+ const std::string & database,
+ const RemoteDescription & addresses,
+ const std::string & user,
+ const std::string & password,
+ size_t max_tries_)
+ : max_tries(max_tries_)
+ , shareable(false)
+{
+ /// Replicas have the same priority, but traversed replicas are moved to the end of the queue.
+ for (const auto & [host, port] : addresses)
+ {
+ replicas_by_priority[0].emplace_back(std::make_shared<Pool>(database, host, user, password, port));
+ }
+}
+
+
PoolWithFailover::PoolWithFailover(const PoolWithFailover & other)
- : max_tries{other.max_tries}, shareable{other.shareable}
+ : max_tries{other.max_tries}
+ , shareable{other.shareable}
{
if (shareable)
{
diff --git a/base/mysqlxx/PoolWithFailover.h b/base/mysqlxx/PoolWithFailover.h
index 029fc3ebad3..5154fc3e253 100644
--- a/base/mysqlxx/PoolWithFailover.h
+++ b/base/mysqlxx/PoolWithFailover.h
@@ -11,6 +11,8 @@
namespace mysqlxx
{
/** MySQL connection pool with support of failover.
+ *
+ * For dictionary source:
* Have information about replicas and their priorities.
* Tries to connect to replica in an order of priority. When equal priority, choose replica with maximum time without connections.
*
@@ -68,42 +70,58 @@ namespace mysqlxx
using PoolPtr = std::shared_ptr<Pool>;
using Replicas = std::vector<PoolPtr>;
- /// [priority][index] -> replica.
+ /// [priority][index] -> replica. Highest priority is 0.
using ReplicasByPriority = std::map;
-
ReplicasByPriority replicas_by_priority;
/// Number of connection tries.
size_t max_tries;
/// Mutex for set of replicas.
std::mutex mutex;
-
/// Can the Pool be shared
bool shareable;
public:
using Entry = Pool::Entry;
+ using RemoteDescription = std::vector<std::pair<std::string, uint16_t>>;
/**
- * config_name Name of parameter in configuration file.
+ * * Mysql dictionary source related params:
+ * config_name Name of parameter in configuration file for dictionary source.
+ *
+ * * Mysql storage related parameters:
+ * replicas_description
+ *
+ * * Mutual parameters:
* default_connections Number of connection in pool to each replica at start.
* max_connections Maximum number of connections in pool to each replica.
* max_tries_ Max number of connection tries.
*/
- PoolWithFailover(const std::string & config_name_,
+ PoolWithFailover(
+ const std::string & config_name_,
unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
- PoolWithFailover(const Poco::Util::AbstractConfiguration & config_,
+ PoolWithFailover(
+ const Poco::Util::AbstractConfiguration & config_,
const std::string & config_name_,
unsigned default_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_START_CONNECTIONS,
unsigned max_connections_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_CONNECTIONS,
size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
+ PoolWithFailover(
+ const std::string & database,
+ const RemoteDescription & addresses,
+ const std::string & user,
+ const std::string & password,
+ size_t max_tries_ = MYSQLXX_POOL_WITH_FAILOVER_DEFAULT_MAX_TRIES);
+
PoolWithFailover(const PoolWithFailover & other);
/** Allocates a connection to use. */
Entry get();
};
+
+ using PoolWithFailoverPtr = std::shared_ptr<PoolWithFailover>;
}
diff --git a/base/mysqlxx/Query.cpp b/base/mysqlxx/Query.cpp
index f3485c54edc..d4514c3e988 100644
--- a/base/mysqlxx/Query.cpp
+++ b/base/mysqlxx/Query.cpp
@@ -1,11 +1,16 @@
#if __has_include(<mysql.h>)
+#include <errmsg.h>
#include <mysql.h>
#else
+#include <mysql/errmsg.h>
#include <mysql/mysql.h>
#endif
+#include <Poco/Logger.h>
+
#include
#include
+#include
namespace mysqlxx
@@ -57,8 +62,24 @@ void Query::reset()
void Query::executeImpl()
{
std::string query_string = query_buf.str();
- if (mysql_real_query(conn->getDriver(), query_string.data(), query_string.size()))
- throw BadQuery(errorMessage(conn->getDriver()), mysql_errno(conn->getDriver()));
+
+ MYSQL* mysql_driver = conn->getDriver();
+
+ auto & logger = Poco::Logger::get("mysqlxx::Query");
+ logger.trace("Running MySQL query using connection %lu", mysql_thread_id(mysql_driver));
+ if (mysql_real_query(mysql_driver, query_string.data(), query_string.size()))
+ {
+ const auto err_no = mysql_errno(mysql_driver);
+ switch (err_no)
+ {
+ case CR_SERVER_GONE_ERROR:
+ [[fallthrough]];
+ case CR_SERVER_LOST:
+ throw ConnectionLost(errorMessage(mysql_driver), err_no);
+ default:
+ throw BadQuery(errorMessage(mysql_driver), err_no);
+ }
+ }
}
UseQueryResult Query::use()
diff --git a/base/mysqlxx/tests/CMakeLists.txt b/base/mysqlxx/tests/CMakeLists.txt
index 2cf19d78418..6473a927308 100644
--- a/base/mysqlxx/tests/CMakeLists.txt
+++ b/base/mysqlxx/tests/CMakeLists.txt
@@ -1,5 +1,2 @@
-add_executable (mysqlxx_test mysqlxx_test.cpp)
-target_link_libraries (mysqlxx_test PRIVATE mysqlxx)
-
add_executable (mysqlxx_pool_test mysqlxx_pool_test.cpp)
target_link_libraries (mysqlxx_pool_test PRIVATE mysqlxx)
diff --git a/base/mysqlxx/tests/failover.xml b/base/mysqlxx/tests/failover.xml
deleted file mode 100644
index 73702eabb29..00000000000
--- a/base/mysqlxx/tests/failover.xml
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
- 3306
- root
- Metrica
- qwerty
-
- example02t
- 0
-
-
- example02t
- 3306
- root
- qwerty
- Metrica
- 1
-
-
-
diff --git a/base/mysqlxx/tests/mysqlxx_test.cpp b/base/mysqlxx/tests/mysqlxx_test.cpp
deleted file mode 100644
index c505d34a58d..00000000000
--- a/base/mysqlxx/tests/mysqlxx_test.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-#include <iostream>
-#include <mysqlxx/mysqlxx.h>
-
-
-int main(int, char **)
-{
- try
- {
- mysqlxx::Connection connection("test", "127.0.0.1", "root", "qwerty", 3306);
- std::cerr << "Connected." << std::endl;
-
- {
- mysqlxx::Query query = connection.query();
- query << "SELECT 1 x, '2010-01-01 01:01:01' d";
- mysqlxx::UseQueryResult result = query.use();
- std::cerr << "use() called." << std::endl;
-
- while (mysqlxx::Row row = result.fetch())
- {
- std::cerr << "Fetched row." << std::endl;
- std::cerr << row[0] << ", " << row["x"] << std::endl;
- std::cerr << row[1] << ", " << row["d"]
- << ", " << row[1].getDate()
- << ", " << row[1].getDateTime()
- << ", " << row[1].getDate()
- << ", " << row[1].getDateTime()
- << std::endl
- << row[1].getDate() << ", " << row[1].getDateTime() << std::endl
- << row[1].getDate() << ", " << row[1].getDateTime() << std::endl
- << row[1].getDate() << ", " << row[1].getDateTime() << std::endl
- << row[1].getDate() << ", " << row[1].getDateTime() << std::endl
- ;
-
- time_t t1 = row[0];
- time_t t2 = row[1];
- std::cerr << t1 << ", " << LocalDateTime(t1) << std::endl;
- std::cerr << t2 << ", " << LocalDateTime(t2) << std::endl;
- }
- }
-
- {
- mysqlxx::UseQueryResult result = connection.query("SELECT 'abc\\\\def' x").use();
- mysqlxx::Row row = result.fetch();
- std::cerr << row << std::endl;
- std::cerr << row << std::endl;
- }
-
- {
- /// Copying a Query
- mysqlxx::Query query1 = connection.query("SELECT");
- mysqlxx::Query query2 = query1;
- query2 << " 1";
-
- std::cerr << query1.str() << ", " << query2.str() << std::endl;
- }
-
- {
- /// NULL
- mysqlxx::Null<int> x = mysqlxx::null;
- std::cerr << (x == mysqlxx::null ? "Ok" : "Fail") << std::endl;
- std::cerr << (x == 0 ? "Fail" : "Ok") << std::endl;
- std::cerr << (x.isNull() ? "Ok" : "Fail") << std::endl;
- x = 1;
- std::cerr << (x == mysqlxx::null ? "Fail" : "Ok") << std::endl;
- std::cerr << (x == 0 ? "Fail" : "Ok") << std::endl;
- std::cerr << (x == 1 ? "Ok" : "Fail") << std::endl;
- std::cerr << (x.isNull() ? "Fail" : "Ok") << std::endl;
- }
- }
- catch (const mysqlxx::Exception & e)
- {
- std::cerr << e.code() << ", " << e.message() << std::endl;
- throw;
- }
-
- return 0;
-}
diff --git a/base/readpassphrase/CMakeLists.txt b/base/readpassphrase/CMakeLists.txt
index 574130ce6e3..51b12106eca 100644
--- a/base/readpassphrase/CMakeLists.txt
+++ b/base/readpassphrase/CMakeLists.txt
@@ -4,5 +4,5 @@
add_library(readpassphrase readpassphrase.c)
set_target_properties(readpassphrase PROPERTIES LINKER_LANGUAGE C)
-target_compile_options(readpassphrase PRIVATE -Wno-unused-result -Wno-reserved-id-macro)
+target_compile_options(readpassphrase PRIVATE -Wno-unused-result -Wno-reserved-id-macro -Wno-disabled-macro-expansion)
target_include_directories(readpassphrase PUBLIC .)
diff --git a/base/readpassphrase/readpassphrase.c b/base/readpassphrase/readpassphrase.c
index 9e8097643bb..8a7d3153915 100644
--- a/base/readpassphrase/readpassphrase.c
+++ b/base/readpassphrase/readpassphrase.c
@@ -94,7 +94,7 @@ restart:
if (input != STDIN_FILENO && tcgetattr(input, &oterm) == 0) {
memcpy(&term, &oterm, sizeof(term));
if (!(flags & RPP_ECHO_ON))
- term.c_lflag &= ~(ECHO | ECHONL);
+ term.c_lflag &= ~((unsigned int) (ECHO | ECHONL));
#ifdef VSTATUS
if (term.c_cc[VSTATUS] != _POSIX_VDISABLE)
term.c_cc[VSTATUS] = _POSIX_VDISABLE;
diff --git a/cmake/analysis.cmake b/cmake/analysis.cmake
index 369be295746..267bb34248b 100644
--- a/cmake/analysis.cmake
+++ b/cmake/analysis.cmake
@@ -16,6 +16,10 @@ if (ENABLE_CLANG_TIDY)
set (USE_CLANG_TIDY ON)
+ # clang-tidy requires assertions to guide the analysis
+ # Note that NDEBUG is set implicitly by CMake for non-debug builds
+ set(COMPILER_FLAGS "${COMPILER_FLAGS} -UNDEBUG")
+
# The variable CMAKE_CXX_CLANG_TIDY will be set inside src and base directories with non third-party code.
# set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}")
elseif (FAIL_ON_UNSUPPORTED_OPTIONS_COMBINATION)
diff --git a/cmake/arch.cmake b/cmake/arch.cmake
index 9604ef62b31..60e0346dbbf 100644
--- a/cmake/arch.cmake
+++ b/cmake/arch.cmake
@@ -1,7 +1,7 @@
if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
set (ARCH_AMD64 1)
endif ()
-if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)")
+if (CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*|arm64.*|ARM64.*)")
set (ARCH_AARCH64 1)
endif ()
if (ARCH_AARCH64 OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm")
diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt
index ce92ae203ea..51f4b974161 100644
--- a/cmake/autogenerated_versions.txt
+++ b/cmake/autogenerated_versions.txt
@@ -1,9 +1,9 @@
# These strings are autochanged from release_lib.sh:
-SET(VERSION_REVISION 54448)
+SET(VERSION_REVISION 54451)
SET(VERSION_MAJOR 21)
-SET(VERSION_MINOR 3)
+SET(VERSION_MINOR 6)
SET(VERSION_PATCH 1)
-SET(VERSION_GITHASH ef72ba7349f230321750c13ee63b49a11a7c0adc)
-SET(VERSION_DESCRIBE v21.3.1.1-prestable)
-SET(VERSION_STRING 21.3.1.1)
+SET(VERSION_GITHASH 96fced4c3cf432fb0b401d2ab01f0c56e5f74a96)
+SET(VERSION_DESCRIBE v21.6.1.1-prestable)
+SET(VERSION_STRING 21.6.1.1)
# end of autochange
diff --git a/cmake/darwin/default_libs.cmake b/cmake/darwin/default_libs.cmake
index 79ac675f234..a6ee800d59b 100644
--- a/cmake/darwin/default_libs.cmake
+++ b/cmake/darwin/default_libs.cmake
@@ -1,11 +1,14 @@
set (DEFAULT_LIBS "-nodefaultlibs")
-if (NOT COMPILER_CLANG)
- message (FATAL_ERROR "Darwin build is supported only for Clang")
-endif ()
-
set (DEFAULT_LIBS "${DEFAULT_LIBS} ${COVERAGE_OPTION} -lc -lm -lpthread -ldl")
+if (COMPILER_GCC)
+ set (DEFAULT_LIBS "${DEFAULT_LIBS} -lgcc_eh")
+ if (ARCH_AARCH64)
+ set (DEFAULT_LIBS "${DEFAULT_LIBS} -lgcc")
+ endif ()
+endif ()
+
message(STATUS "Default libraries: ${DEFAULT_LIBS}")
set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
diff --git a/cmake/darwin/toolchain-aarch64.cmake b/cmake/darwin/toolchain-aarch64.cmake
new file mode 100644
index 00000000000..81398111495
--- /dev/null
+++ b/cmake/darwin/toolchain-aarch64.cmake
@@ -0,0 +1,14 @@
+set (CMAKE_SYSTEM_NAME "Darwin")
+set (CMAKE_SYSTEM_PROCESSOR "aarch64")
+set (CMAKE_C_COMPILER_TARGET "aarch64-apple-darwin")
+set (CMAKE_CXX_COMPILER_TARGET "aarch64-apple-darwin")
+set (CMAKE_ASM_COMPILER_TARGET "aarch64-apple-darwin")
+set (CMAKE_OSX_SYSROOT "${CMAKE_CURRENT_LIST_DIR}/../toolchain/darwin-aarch64")
+
+set (CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY) # disable linkage check - it doesn't work in CMake
+
+set (HAS_PRE_1970_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
+set (HAS_PRE_1970_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
+
+set (HAS_POST_2038_EXITCODE "0" CACHE STRING "Result from TRY_RUN" FORCE)
+set (HAS_POST_2038_EXITCODE__TRYRUN_OUTPUT "" CACHE STRING "Output from TRY_RUN" FORCE)
diff --git a/cmake/find/amqpcpp.cmake b/cmake/find/amqpcpp.cmake
index 4191dce26bb..e3eaaf33ddb 100644
--- a/cmake/find/amqpcpp.cmake
+++ b/cmake/find/amqpcpp.cmake
@@ -1,3 +1,8 @@
+if (OS_DARWIN AND COMPILER_GCC)
+ # AMQP-CPP requires libuv which cannot be built with GCC in macOS due to a bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93082
+ set (ENABLE_AMQPCPP OFF CACHE INTERNAL "")
+endif()
+
option(ENABLE_AMQPCPP "Enalbe AMQP-CPP" ${ENABLE_LIBRARIES})
if (NOT ENABLE_AMQPCPP)
diff --git a/cmake/find/base64.cmake b/cmake/find/base64.cmake
index 7427baf9cad..acade11eb2f 100644
--- a/cmake/find/base64.cmake
+++ b/cmake/find/base64.cmake
@@ -1,4 +1,8 @@
-option (ENABLE_BASE64 "Enable base64" ${ENABLE_LIBRARIES})
+if(ARCH_AMD64 OR ARCH_ARM)
+ option (ENABLE_BASE64 "Enable base64" ${ENABLE_LIBRARIES})
+elseif(ENABLE_BASE64)
+ message (${RECONFIGURE_MESSAGE_LEVEL} "base64 library is only supported on x86_64 and aarch64")
+endif()
if (NOT ENABLE_BASE64)
return()
diff --git a/cmake/find/cassandra.cmake b/cmake/find/cassandra.cmake
index 037d6c3f131..ded25a5bf41 100644
--- a/cmake/find/cassandra.cmake
+++ b/cmake/find/cassandra.cmake
@@ -1,3 +1,8 @@
+if (OS_DARWIN AND COMPILER_GCC)
+ # Cassandra requires libuv which cannot be built with GCC in macOS due to a bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93082
+ set (ENABLE_CASSANDRA OFF CACHE INTERNAL "")
+endif()
+
option(ENABLE_CASSANDRA "Enable Cassandra" ${ENABLE_LIBRARIES})
if (NOT ENABLE_CASSANDRA)
diff --git a/cmake/find/ccache.cmake b/cmake/find/ccache.cmake
index d9ccd1a9ac6..986c9cb5fe2 100644
--- a/cmake/find/ccache.cmake
+++ b/cmake/find/ccache.cmake
@@ -32,7 +32,12 @@ if (CCACHE_FOUND AND NOT COMPILER_MATCHES_CCACHE)
if (CCACHE_VERSION VERSION_GREATER "3.2.0" OR NOT CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
message(STATUS "Using ${CCACHE_FOUND} ${CCACHE_VERSION}")
- # debian (debhlpers) set SOURCE_DATE_EPOCH environment variable, that is
+ set (CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_FOUND} ${CMAKE_CXX_COMPILER_LAUNCHER})
+ set (CMAKE_C_COMPILER_LAUNCHER ${CCACHE_FOUND} ${CMAKE_C_COMPILER_LAUNCHER})
+
+ set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND})
+
+ # debian (debhelpers) set SOURCE_DATE_EPOCH environment variable, that is
# filled from the debian/changelog or current time.
#
# - 4.0+ ccache always includes this environment variable into the hash
@@ -48,9 +53,6 @@ if (CCACHE_FOUND AND NOT COMPILER_MATCHES_CCACHE)
message(STATUS "Ignore SOURCE_DATE_EPOCH for ccache")
set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE "env -u SOURCE_DATE_EPOCH ${CCACHE_FOUND}")
set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK "env -u SOURCE_DATE_EPOCH ${CCACHE_FOUND}")
- else()
- set_property (GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_FOUND})
- set_property (GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND})
endif()
else ()
message(${RECONFIGURE_MESSAGE_LEVEL} "Not using ${CCACHE_FOUND} ${CCACHE_VERSION} bug: https://bugzilla.samba.org/show_bug.cgi?id=8118")
diff --git a/cmake/find/datasketches.cmake b/cmake/find/datasketches.cmake
new file mode 100644
index 00000000000..44ef324a9f2
--- /dev/null
+++ b/cmake/find/datasketches.cmake
@@ -0,0 +1,29 @@
+option (ENABLE_DATASKETCHES "Enable DataSketches" ${ENABLE_LIBRARIES})
+
+if (ENABLE_DATASKETCHES)
+
+option (USE_INTERNAL_DATASKETCHES_LIBRARY "Set to FALSE to use system DataSketches library instead of bundled" ${NOT_UNBUNDLED})
+
+if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/CMakeLists.txt")
+ if (USE_INTERNAL_DATASKETCHES_LIBRARY)
+ message(WARNING "submodule contrib/datasketches-cpp is missing. to fix try run: \n git submodule update --init --recursive")
+ endif()
+ set(MISSING_INTERNAL_DATASKETCHES_LIBRARY 1)
+ set(USE_INTERNAL_DATASKETCHES_LIBRARY 0)
+endif()
+
+if (USE_INTERNAL_DATASKETCHES_LIBRARY)
+ set(DATASKETCHES_LIBRARY theta)
+ set(DATASKETCHES_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/common/include" "${ClickHouse_SOURCE_DIR}/contrib/datasketches-cpp/theta/include")
+elseif (NOT MISSING_INTERNAL_DATASKETCHES_LIBRARY)
+ find_library(DATASKETCHES_LIBRARY theta)
+ find_path(DATASKETCHES_INCLUDE_DIR NAMES theta_sketch.hpp PATHS ${DATASKETCHES_INCLUDE_PATHS})
+endif()
+
+if (DATASKETCHES_LIBRARY AND DATASKETCHES_INCLUDE_DIR)
+ set(USE_DATASKETCHES 1)
+endif()
+
+endif()
+
+message (STATUS "Using datasketches=${USE_DATASKETCHES}: ${DATASKETCHES_INCLUDE_DIR} : ${DATASKETCHES_LIBRARY}")
diff --git a/cmake/find/fastops.cmake b/cmake/find/fastops.cmake
index 5ab320bdb7a..1675646654e 100644
--- a/cmake/find/fastops.cmake
+++ b/cmake/find/fastops.cmake
@@ -1,7 +1,7 @@
-if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT OS_DARWIN)
+if(ARCH_AMD64 AND NOT OS_FREEBSD AND NOT OS_DARWIN)
option(ENABLE_FASTOPS "Enable fast vectorized mathematical functions library by Mikhail Parakhin" ${ENABLE_LIBRARIES})
elseif(ENABLE_FASTOPS)
- message (${RECONFIGURE_MESSAGE_LEVEL} "Fastops library is not supported on ARM, FreeBSD and Darwin")
+ message (${RECONFIGURE_MESSAGE_LEVEL} "Fastops library is supported on x86_64 only, and not FreeBSD or Darwin")
endif()
if(NOT ENABLE_FASTOPS)
diff --git a/cmake/find/hdfs3.cmake b/cmake/find/hdfs3.cmake
index 7b385f24e1e..3aab2b612ef 100644
--- a/cmake/find/hdfs3.cmake
+++ b/cmake/find/hdfs3.cmake
@@ -1,4 +1,4 @@
-if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT APPLE AND USE_PROTOBUF)
+if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT APPLE AND USE_PROTOBUF AND NOT ARCH_PPC64LE)
option(ENABLE_HDFS "Enable HDFS" ${ENABLE_LIBRARIES})
elseif(ENABLE_HDFS OR USE_INTERNAL_HDFS3_LIBRARY)
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot use HDFS3 with current configuration")
diff --git a/cmake/find/krb5.cmake b/cmake/find/krb5.cmake
index bd9c8e239cd..49b7462b710 100644
--- a/cmake/find/krb5.cmake
+++ b/cmake/find/krb5.cmake
@@ -5,8 +5,8 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/krb5/README")
set (ENABLE_KRB5 0)
endif ()
-if (NOT CMAKE_SYSTEM_NAME MATCHES "Linux")
- message (WARNING "krb5 disabled in non-Linux environments")
+if (NOT CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT (CMAKE_SYSTEM_NAME MATCHES "Darwin" AND NOT CMAKE_CROSSCOMPILING))
+ message (WARNING "krb5 disabled in non-Linux and non-native-Darwin environments")
set (ENABLE_KRB5 0)
endif ()
diff --git a/cmake/find/ldap.cmake b/cmake/find/ldap.cmake
index 369c1e42e8d..d8baea89429 100644
--- a/cmake/find/ldap.cmake
+++ b/cmake/find/ldap.cmake
@@ -62,8 +62,10 @@ if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY)
if (
( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "x86_64" ) OR
( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "aarch64" ) OR
+ ( "${_system_name}" STREQUAL "linux" AND "${_system_processor}" STREQUAL "ppc64le" ) OR
( "${_system_name}" STREQUAL "freebsd" AND "${_system_processor}" STREQUAL "x86_64" ) OR
- ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" )
+ ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "x86_64" ) OR
+ ( "${_system_name}" STREQUAL "darwin" AND "${_system_processor}" STREQUAL "aarch64" )
)
set (_ldap_supported_platform TRUE)
endif ()
diff --git a/cmake/find/nanodbc.cmake b/cmake/find/nanodbc.cmake
new file mode 100644
index 00000000000..894a2a60bad
--- /dev/null
+++ b/cmake/find/nanodbc.cmake
@@ -0,0 +1,16 @@
+if (NOT ENABLE_ODBC)
+ return ()
+endif ()
+
+if (NOT USE_INTERNAL_NANODBC_LIBRARY)
+ message (FATAL_ERROR "Only the bundled nanodbc library can be used")
+endif ()
+
+if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/CMakeLists.txt")
+ message (FATAL_ERROR "submodule contrib/nanodbc is missing. to fix try run: \n git submodule update --init --recursive")
+endif()
+
+set (NANODBC_LIBRARY nanodbc)
+set (NANODBC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/nanodbc/nanodbc")
+
+message (STATUS "Using nanodbc: ${NANODBC_INCLUDE_DIR} : ${NANODBC_LIBRARY}")
diff --git a/cmake/find/nuraft.cmake b/cmake/find/nuraft.cmake
index 7fa5251946e..4e5258e132f 100644
--- a/cmake/find/nuraft.cmake
+++ b/cmake/find/nuraft.cmake
@@ -11,7 +11,7 @@ if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/NuRaft/CMakeLists.txt")
return()
endif ()
-if (NOT OS_FREEBSD AND NOT OS_DARWIN)
+if (NOT OS_FREEBSD)
set (USE_NURAFT 1)
set (NURAFT_LIBRARY nuraft)
diff --git a/cmake/find/odbc.cmake b/cmake/find/odbc.cmake
index a23f0c831e9..c475e600c0d 100644
--- a/cmake/find/odbc.cmake
+++ b/cmake/find/odbc.cmake
@@ -50,4 +50,6 @@ if (NOT EXTERNAL_ODBC_LIBRARY_FOUND)
set (USE_INTERNAL_ODBC_LIBRARY 1)
endif ()
+set (USE_INTERNAL_NANODBC_LIBRARY 1)
+
message (STATUS "Using unixodbc")
diff --git a/cmake/find/rocksdb.cmake b/cmake/find/rocksdb.cmake
index 968cdb52407..94278a603d7 100644
--- a/cmake/find/rocksdb.cmake
+++ b/cmake/find/rocksdb.cmake
@@ -1,3 +1,7 @@
+if (OS_DARWIN AND ARCH_AARCH64)
+ set (ENABLE_ROCKSDB OFF CACHE INTERNAL "")
+endif()
+
option(ENABLE_ROCKSDB "Enable ROCKSDB" ${ENABLE_LIBRARIES})
if (NOT ENABLE_ROCKSDB)
diff --git a/cmake/find/s3.cmake b/cmake/find/s3.cmake
index 1bbf48fd6b0..1b0c652a31a 100644
--- a/cmake/find/s3.cmake
+++ b/cmake/find/s3.cmake
@@ -1,7 +1,7 @@
-if(NOT OS_FREEBSD AND NOT APPLE AND NOT ARCH_ARM)
+if(NOT OS_FREEBSD AND NOT APPLE)
option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES})
elseif(ENABLE_S3 OR USE_INTERNAL_AWS_S3_LIBRARY)
- message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on ARM, Apple or FreeBSD")
+ message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on Apple or FreeBSD")
endif()
if(NOT ENABLE_S3)
diff --git a/cmake/find/xz.cmake b/cmake/find/xz.cmake
new file mode 100644
index 00000000000..0d19859c6b1
--- /dev/null
+++ b/cmake/find/xz.cmake
@@ -0,0 +1,27 @@
+option (USE_INTERNAL_XZ_LIBRARY "Set to OFF to use system xz (lzma) library instead of bundled" ${NOT_UNBUNDLED})
+
+if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/xz/src/liblzma/api/lzma.h")
+ if(USE_INTERNAL_XZ_LIBRARY)
+ message(WARNING "submodule contrib/xz is missing. to fix try run: \n git submodule update --init --recursive")
+ message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find internal xz (lzma) library")
+ set(USE_INTERNAL_XZ_LIBRARY 0)
+ endif()
+ set(MISSING_INTERNAL_XZ_LIBRARY 1)
+endif()
+
+if (NOT USE_INTERNAL_XZ_LIBRARY)
+ find_library (XZ_LIBRARY lzma)
+ find_path (XZ_INCLUDE_DIR NAMES lzma.h PATHS ${XZ_INCLUDE_PATHS})
+ if (NOT XZ_LIBRARY OR NOT XZ_INCLUDE_DIR)
+ message (${RECONFIGURE_MESSAGE_LEVEL} "Can't find system xz (lzma) library")
+ endif ()
+endif ()
+
+if (XZ_LIBRARY AND XZ_INCLUDE_DIR)
+elseif (NOT MISSING_INTERNAL_XZ_LIBRARY)
+ set (USE_INTERNAL_XZ_LIBRARY 1)
+ set (XZ_LIBRARY liblzma)
+ set (XZ_INCLUDE_DIR ${ClickHouse_SOURCE_DIR}/contrib/xz/src/liblzma/api)
+endif ()
+
+message (STATUS "Using xz (lzma): ${XZ_INCLUDE_DIR} : ${XZ_LIBRARY}")
diff --git a/cmake/linux/default_libs.cmake b/cmake/linux/default_libs.cmake
index d3a727e9cb8..c1e4d450389 100644
--- a/cmake/linux/default_libs.cmake
+++ b/cmake/linux/default_libs.cmake
@@ -6,7 +6,7 @@ set (DEFAULT_LIBS "-nodefaultlibs")
# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
# See https://bugs.llvm.org/show_bug.cgi?id=16404
if (COMPILER_CLANG AND NOT (CMAKE_CROSSCOMPILING AND ARCH_AARCH64))
- execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-${CMAKE_SYSTEM_PROCESSOR}.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
+ execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
else ()
set (BUILTINS_LIBRARY "-lgcc")
endif ()
diff --git a/cmake/tools.cmake b/cmake/tools.cmake
index cc4046d2469..44fc3b3e530 100644
--- a/cmake/tools.cmake
+++ b/cmake/tools.cmake
@@ -75,14 +75,14 @@ if (OS_LINUX AND NOT LINKER_NAME)
endif ()
if (LINKER_NAME)
- set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
- set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
+ if (COMPILER_CLANG AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 12.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 12.0.0))
+ set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LINKER_NAME}")
+ set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} --ld-path=${LINKER_NAME}")
+ else ()
+ set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
+ set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
+ endif ()
message(STATUS "Using custom linker by name: ${LINKER_NAME}")
endif ()
-if (ARCH_PPC64LE)
- if (COMPILER_CLANG OR (COMPILER_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8))
- message(FATAL_ERROR "Only gcc-8 or higher is supported for powerpc architecture")
- endif ()
-endif ()
diff --git a/cmake/warnings.cmake b/cmake/warnings.cmake
index 8122e9ef31e..a85fe8963c7 100644
--- a/cmake/warnings.cmake
+++ b/cmake/warnings.cmake
@@ -11,11 +11,6 @@ if (NOT MSVC)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
endif ()
-if (USE_DEBUG_HELPERS)
- set (INCLUDE_DEBUG_HELPERS "-I${ClickHouse_SOURCE_DIR}/base -include ${ClickHouse_SOURCE_DIR}/src/Core/iostream_debug_helpers.h")
- set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${INCLUDE_DEBUG_HELPERS}")
-endif ()
-
# Add some warnings that are not available even with -Wall -Wextra -Wpedantic.
# Intended for exploration of new compiler warnings that may be found useful.
# Applies to clang only
@@ -176,6 +171,7 @@ elseif (COMPILER_GCC)
add_cxx_compile_options(-Wtrampolines)
# Obvious
add_cxx_compile_options(-Wunused)
+ add_cxx_compile_options(-Wundef)
# Warn if vector operation is not implemented via SIMD capabilities of the architecture
add_cxx_compile_options(-Wvector-operation-performance)
# XXX: libstdc++ has some of these for 3way compare
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index 20b4fad0437..087212ad3b0 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -32,12 +32,12 @@ endif()
set_property(DIRECTORY PROPERTY EXCLUDE_FROM_ALL 1)
+add_subdirectory (abseil-cpp-cmake)
add_subdirectory (antlr4-runtime-cmake)
add_subdirectory (boost-cmake)
add_subdirectory (cctz-cmake)
add_subdirectory (consistent-hashing)
add_subdirectory (dragonbox-cmake)
-add_subdirectory (FastMemcpy)
add_subdirectory (hyperscan-cmake)
add_subdirectory (jemalloc-cmake)
add_subdirectory (libcpuid-cmake)
@@ -47,7 +47,11 @@ add_subdirectory (lz4-cmake)
add_subdirectory (murmurhash)
add_subdirectory (replxx-cmake)
add_subdirectory (unixodbc-cmake)
-add_subdirectory (xz)
+add_subdirectory (nanodbc-cmake)
+
+if (USE_INTERNAL_XZ_LIBRARY)
+ add_subdirectory (xz)
+endif()
add_subdirectory (poco-cmake)
add_subdirectory (croaring-cmake)
@@ -93,14 +97,8 @@ if (USE_INTERNAL_ZLIB_LIBRARY)
add_subdirectory (${INTERNAL_ZLIB_NAME})
# We should use same defines when including zlib.h as used when zlib compiled
target_compile_definitions (zlib PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
- if (TARGET zlibstatic)
- target_compile_definitions (zlibstatic PUBLIC ZLIB_COMPAT WITH_GZFILEOP)
- endif ()
if (ARCH_AMD64 OR ARCH_AARCH64)
target_compile_definitions (zlib PUBLIC X86_64 UNALIGNED_OK)
- if (TARGET zlibstatic)
- target_compile_definitions (zlibstatic PUBLIC X86_64 UNALIGNED_OK)
- endif ()
endif ()
endif ()
@@ -215,15 +213,17 @@ if (USE_EMBEDDED_COMPILER AND USE_INTERNAL_LLVM_LIBRARY)
set (LLVM_ENABLE_RTTI 1 CACHE INTERNAL "")
set (LLVM_ENABLE_PIC 0 CACHE INTERNAL "")
set (LLVM_TARGETS_TO_BUILD "X86;AArch64" CACHE STRING "")
- # Yes it is set globally, but this is not enough, since llvm will add -std=c++11 after default
- # And c++2a cannot be used, due to ambiguous operator !=
- if (COMPILER_GCC OR COMPILER_CLANG)
- set (_CXX_STANDARD "gnu++17")
- else()
- set (_CXX_STANDARD "c++17")
- endif()
- set (LLVM_CXX_STD ${_CXX_STANDARD} CACHE STRING "" FORCE)
+
+ # Need to use C++17 since the compilation is not possible with C++20 currently, due to ambiguous operator != etc.
+ # LLVM project will set its default value for the -std=... but our global setting from CMake will override it.
+ set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
+ set (CMAKE_CXX_STANDARD 17)
+
add_subdirectory (llvm/llvm)
+
+ set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
+ unset (CMAKE_CXX_STANDARD_bak)
+
target_include_directories(LLVMSupport SYSTEM BEFORE PRIVATE ${ZLIB_INCLUDE_DIR})
endif ()
@@ -280,7 +280,14 @@ if (USE_AMQPCPP)
add_subdirectory (amqpcpp-cmake)
endif()
if (USE_CASSANDRA)
+ # Need to use C++17 since the compilation is not possible with C++20 currently.
+ set (CMAKE_CXX_STANDARD_bak ${CMAKE_CXX_STANDARD})
+ set (CMAKE_CXX_STANDARD 17)
+
add_subdirectory (cassandra)
+
+ set (CMAKE_CXX_STANDARD ${CMAKE_CXX_STANDARD_bak})
+ unset (CMAKE_CXX_STANDARD_bak)
endif()
# Should go before:
diff --git a/contrib/FastMemcpy/CMakeLists.txt b/contrib/FastMemcpy/CMakeLists.txt
deleted file mode 100644
index 8efe6d45dff..00000000000
--- a/contrib/FastMemcpy/CMakeLists.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-option (ENABLE_FASTMEMCPY "Enable FastMemcpy library (only internal)" ${ENABLE_LIBRARIES})
-
-if (NOT OS_LINUX OR ARCH_AARCH64)
- set (ENABLE_FASTMEMCPY OFF)
-endif ()
-
-if (ENABLE_FASTMEMCPY)
- set (LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/FastMemcpy)
-
- set (SRCS
- ${LIBRARY_DIR}/FastMemcpy.c
-
- memcpy_wrapper.c
- )
-
- add_library (FastMemcpy ${SRCS})
- target_include_directories (FastMemcpy PUBLIC ${LIBRARY_DIR})
-
- target_compile_definitions(FastMemcpy PUBLIC USE_FASTMEMCPY=1)
-
- message (STATUS "Using FastMemcpy")
-else ()
- add_library (FastMemcpy INTERFACE)
-
- target_compile_definitions(FastMemcpy INTERFACE USE_FASTMEMCPY=0)
-
- message (STATUS "Not using FastMemcpy")
-endif ()
diff --git a/contrib/FastMemcpy/FastMemcpy.c b/contrib/FastMemcpy/FastMemcpy.c
deleted file mode 100644
index 5021bcc7d16..00000000000
--- a/contrib/FastMemcpy/FastMemcpy.c
+++ /dev/null
@@ -1,220 +0,0 @@
-//=====================================================================
-//
-// FastMemcpy.c - skywind3000@163.com, 2015
-//
-// feature:
-// 50% speed up in avg. vs standard memcpy (tested in vc2012/gcc4.9)
-//
-//=====================================================================
-#include
-#include