Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 15:42:02 +00:00)

Commit dcf45e83a9: Merge branch 'master' into update_zlib

.gitignore (vendored, 1 change)
@@ -80,6 +80,7 @@ core
vgcore*

*.deb
*.tar.zst
*.build
*.upload
+*.changes
CHANGELOG.md (51 changes)

@@ -1,6 +1,6 @@
### Table of Contents
**[ClickHouse release v22.9, 2022-09-22](#229)**<br/>
-**[ClickHouse release v22.8, 2022-08-18](#228)**<br/>
+**[ClickHouse release v22.8-lts, 2022-08-18](#228)**<br/>
**[ClickHouse release v22.7, 2022-07-21](#227)**<br/>
**[ClickHouse release v22.6, 2022-06-16](#226)**<br/>
**[ClickHouse release v22.5, 2022-05-19](#225)**<br/>
@@ -10,10 +10,10 @@
**[ClickHouse release v22.1, 2022-01-18](#221)**<br/>
**[Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021/)**<br/>

### <a id="229"></a> ClickHouse release 22.9, 2022-09-22

#### Backward Incompatible Change

* Upgrade from 20.3 and older to 22.9 and newer should be done through an intermediate version if there are any `ReplicatedMergeTree` tables, otherwise server with the new version will not start. [#40641](https://github.com/ClickHouse/ClickHouse/pull/40641) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Remove the functions `accurate_Cast` and `accurate_CastOrNull` (they are different to `accurateCast` and `accurateCastOrNull` by underscore in the name and they are not affected by the value of `cast_keep_nullable` setting). These functions were undocumented, untested, unused, and unneeded. They appeared to be alive due to code generalization. [#40682](https://github.com/ClickHouse/ClickHouse/pull/40682) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add a test to ensure that every new table function will be documented. See [#40649](https://github.com/ClickHouse/ClickHouse/issues/40649). Rename table function `MeiliSearch` to `meilisearch`. [#40709](https://github.com/ClickHouse/ClickHouse/pull/40709) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@@ -21,6 +21,7 @@
* Make interpretation of YAML configs to be more conventional. [#41044](https://github.com/ClickHouse/ClickHouse/pull/41044) ([Vitaly Baranov](https://github.com/vitlibar)).

#### New Feature

* Support `insert_quorum = 'auto'` to use majority number. [#39970](https://github.com/ClickHouse/ClickHouse/pull/39970) ([Sachin](https://github.com/SachinSetiya)).
* Add embedded dashboards to ClickHouse server. This is a demo project about how to achieve 90% results with 1% effort using ClickHouse features. [#40461](https://github.com/ClickHouse/ClickHouse/pull/40461) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Added new settings constraint writability kind `changeable_in_readonly`. [#40631](https://github.com/ClickHouse/ClickHouse/pull/40631) ([Sergei Trifonov](https://github.com/serxa)).
@@ -38,6 +39,7 @@
* Improvement for in-memory data parts: remove completely processed WAL files. [#40592](https://github.com/ClickHouse/ClickHouse/pull/40592) ([Azat Khuzhin](https://github.com/azat)).

#### Performance Improvement

* Implement compression of marks and primary key. Close [#34437](https://github.com/ClickHouse/ClickHouse/issues/34437). [#37693](https://github.com/ClickHouse/ClickHouse/pull/37693) ([zhongyuankai](https://github.com/zhongyuankai)).
* Allow to load marks with threadpool in advance. Regulated by setting `load_marks_asynchronously` (default: 0). [#40821](https://github.com/ClickHouse/ClickHouse/pull/40821) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Virtual filesystem over s3 will use random object names split into multiple path prefixes for better performance on AWS. [#40968](https://github.com/ClickHouse/ClickHouse/pull/40968) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@@ -58,6 +60,7 @@
* Parallel hash JOIN for Float data types might be suboptimal. Make it better. [#41183](https://github.com/ClickHouse/ClickHouse/pull/41183) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Improvement

* During startup and ATTACH call, `ReplicatedMergeTree` tables will be readonly until the ZooKeeper connection is made and the setup is finished. [#40148](https://github.com/ClickHouse/ClickHouse/pull/40148) ([Antonio Andelic](https://github.com/antonio2368)).
* Add `enable_extended_results_for_datetime_functions` option to return results of type Date32 for functions toStartOfYear, toStartOfISOYear, toStartOfQuarter, toStartOfMonth, toStartOfWeek, toMonday and toLastDayOfMonth when argument is Date32 or DateTime64, otherwise results of Date type are returned. For compatibility reasons default value is ‘0’. [#41214](https://github.com/ClickHouse/ClickHouse/pull/41214) ([Roman Vasin](https://github.com/rvasin)).
* For security and stability reasons, CatBoost models are no longer evaluated within the ClickHouse server. Instead, the evaluation is now done in the clickhouse-library-bridge, a separate process that loads the catboost library and communicates with the server process via HTTP. [#40897](https://github.com/ClickHouse/ClickHouse/pull/40897) ([Robert Schulze](https://github.com/rschu1ze)). [#39629](https://github.com/ClickHouse/ClickHouse/pull/39629) ([Robert Schulze](https://github.com/rschu1ze)).
@@ -108,6 +111,7 @@
* Add `has_lightweight_delete` to system.parts. [#41564](https://github.com/ClickHouse/ClickHouse/pull/41564) ([Kseniia Sumarokova](https://github.com/kssenii)).
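  For illustration, a minimal sketch of checking the new column (the database and table names are hypothetical):

  ```sql
  -- Which active parts of a table contain lightweight deletes?
  SELECT name, has_lightweight_delete
  FROM system.parts
  WHERE database = 'default' AND table = 'orders' AND active;
  ```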

#### Build/Testing/Packaging Improvement

* Enforce documentation for every setting. [#40644](https://github.com/ClickHouse/ClickHouse/pull/40644) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Enforce documentation for every current metric. [#40645](https://github.com/ClickHouse/ClickHouse/pull/40645) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Enforce documentation for every profile event counter. Write the documentation where it was missing. [#40646](https://github.com/ClickHouse/ClickHouse/pull/40646) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@@ -217,15 +221,16 @@
* Fix read bytes/rows in X-ClickHouse-Summary with materialized views. [#41586](https://github.com/ClickHouse/ClickHouse/pull/41586) ([Raúl Marín](https://github.com/Algunenano)).
* Fix possible `pipeline stuck` exception for queries with `OFFSET`. The error was found with `enable_optimize_predicate_expression = 0` and always false condition in `WHERE`. Fixes [#41383](https://github.com/ClickHouse/ClickHouse/issues/41383). [#41588](https://github.com/ClickHouse/ClickHouse/pull/41588) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).

-### <a id="228"></a> ClickHouse release 22.8, 2022-08-18
+### <a id="228"></a> ClickHouse release 22.8-lts, 2022-08-18

#### Backward Incompatible Change

* Extended range of `Date32` and `DateTime64` to support dates from the year 1900 to 2299. In previous versions, the supported interval was only from the year 1925 to 2283. The implementation is using the proleptic Gregorian calendar (which is conformant with [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601):2004 (clause 3.2.1 The Gregorian calendar)) instead of accounting for historical transitions from the Julian to the Gregorian calendar. This change affects implementation-specific behavior for out-of-range arguments. E.g. if in previous versions the value of `1899-01-01` was clamped to `1925-01-01`, in the new version it will be clamped to `1900-01-01`. It changes the behavior of rounding with `toStartOfInterval` if you pass `INTERVAL 3 QUARTER` up to one quarter because the intervals are counted from an implementation-specific point of time. Closes [#28216](https://github.com/ClickHouse/ClickHouse/issues/28216), improves [#38393](https://github.com/ClickHouse/ClickHouse/issues/38393). [#39425](https://github.com/ClickHouse/ClickHouse/pull/39425) ([Roman Vasin](https://github.com/rvasin)).
* Now, all relevant dictionary sources respect `remote_url_allow_hosts` setting. It was already done for HTTP, Cassandra, Redis. Added ClickHouse, MongoDB, MySQL, PostgreSQL. Host is checked only for dictionaries created from DDL. [#39184](https://github.com/ClickHouse/ClickHouse/pull/39184) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Make the remote filesystem cache composable, allow not to evict certain files (regarding idx, mrk, ..), delete old cache version. Now it is possible to configure cache over Azure blob storage disk, over Local disk, over StaticWeb disk, etc. This PR is marked backward incompatible because cache configuration changes and in order for cache to work need to update the config file. Old cache will still be used with new configuration. The server will startup fine with the old cache configuration. Closes https://github.com/ClickHouse/ClickHouse/issues/36140. Closes https://github.com/ClickHouse/ClickHouse/issues/37889. ([Kseniia Sumarokova](https://github.com/kssenii)). [#36171](https://github.com/ClickHouse/ClickHouse/pull/36171))

#### New Feature

* Query parameters can be set in interactive mode as `SET param_abc = 'def'` and transferred via the native protocol as settings. [#39906](https://github.com/ClickHouse/ClickHouse/pull/39906) ([Nikita Taranov](https://github.com/nickitat)).
* Quota key can be set in the native protocol ([Yakov Olkhovsky](https://github.com/ClickHouse/ClickHouse/pull/39874)).
* Added a setting `exact_rows_before_limit` (0/1). When enabled, ClickHouse will provide exact value for `rows_before_limit_at_least` statistic, but with the cost that the data before limit will have to be read completely. This closes [#6613](https://github.com/ClickHouse/ClickHouse/issues/6613). [#25333](https://github.com/ClickHouse/ClickHouse/pull/25333) ([kevin wan](https://github.com/MaxWk)).
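  A minimal sketch of the trade-off described above; with the setting enabled, the `rows_before_limit_at_least` field in JSON-style output formats becomes exact at the cost of reading all data before the LIMIT:

  ```sql
  SET exact_rows_before_limit = 1;
  SELECT number
  FROM numbers(1000)
  ORDER BY number
  LIMIT 10
  FORMAT JSON; -- rows_before_limit_at_least now reports the exact value (1000)
  ```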
@@ -240,12 +245,14 @@
* Add new setting schema_inference_hints that allows to specify structure hints in schema inference for specific columns. Closes [#39569](https://github.com/ClickHouse/ClickHouse/issues/39569). [#40068](https://github.com/ClickHouse/ClickHouse/pull/40068) ([Kruglov Pavel](https://github.com/Avogar)).
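  A usage sketch, assuming a local file `data.jsonl` and a `'name type, ...'` hint string:

  ```sql
  DESCRIBE TABLE file('data.jsonl', JSONEachRow)
  SETTINGS schema_inference_hints = 'id UInt64, tags Array(String)';
  ```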

#### Experimental Feature

* Support SQL standard DELETE FROM syntax on merge tree tables and lightweight delete implementation for merge tree families. [#37893](https://github.com/ClickHouse/ClickHouse/pull/37893) ([Jianmei Zhang](https://github.com/zhangjmruc)) ([Alexander Gololobov](https://github.com/davenger)). Note: this new feature does not make ClickHouse an HTAP DBMS.
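  A hedged sketch of the new syntax; in this release lightweight deletes still sit behind an experimental setting, and the table name is hypothetical:

  ```sql
  SET allow_experimental_lightweight_delete = 1;
  -- Marks matching rows as deleted; data is physically removed by later merges.
  DELETE FROM orders WHERE status = 'cancelled';
  ```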

#### Performance Improvement

* Improved memory usage during memory efficient merging of aggregation results. [#39429](https://github.com/ClickHouse/ClickHouse/pull/39429) ([Nikita Taranov](https://github.com/nickitat)).
* Added concurrency control logic to limit total number of concurrent threads created by queries. [#37558](https://github.com/ClickHouse/ClickHouse/pull/37558) ([Sergei Trifonov](https://github.com/serxa)). Add `concurrent_threads_soft_limit parameter` to increase performance in case of high QPS by means of limiting total number of threads for all queries. [#37285](https://github.com/ClickHouse/ClickHouse/pull/37285) ([Roman Vasin](https://github.com/rvasin)).
* Add `SLRU` cache policy for uncompressed cache and marks cache. ([Kseniia Sumarokova](https://github.com/kssenii)). [#34651](https://github.com/ClickHouse/ClickHouse/pull/34651) ([alexX512](https://github.com/alexX512)). Decoupling local cache function and cache algorithm [#38048](https://github.com/ClickHouse/ClickHouse/pull/38048) ([Han Shukai](https://github.com/KinderRiven)).
* Intel® In-Memory Analytics Accelerator (Intel® IAA) is a hardware accelerator available in the upcoming generation of Intel® Xeon® Scalable processors ("Sapphire Rapids"). Its goal is to speed up common operations in analytics like data (de)compression and filtering. ClickHouse gained the new "DeflateQpl" compression codec which utilizes the Intel® IAA offloading technology to provide a high-performance DEFLATE implementation. The codec uses the [Intel® Query Processing Library (QPL)](https://github.com/intel/qpl) which abstracts access to the hardware accelerator, respectively to a software fallback in case the hardware accelerator is not available. DEFLATE provides in general higher compression rates than ClickHouse's LZ4 default codec, and as a result, offers less disk I/O and lower main memory consumption. [#36654](https://github.com/ClickHouse/ClickHouse/pull/36654) ([jasperzhu](https://github.com/jinjunzh)). [#39494](https://github.com/ClickHouse/ClickHouse/pull/39494) ([Robert Schulze](https://github.com/rschu1ze)).
* `DISTINCT` in order with `ORDER BY`: Deduce way to sort based on input stream sort description. Skip sorting if input stream is already sorted. [#38719](https://github.com/ClickHouse/ClickHouse/pull/38719) ([Igor Nikonov](https://github.com/devcrafter)). Improve memory usage (significantly) and query execution time + use `DistinctSortedChunkTransform` for final distinct when `DISTINCT` columns match `ORDER BY` columns, but rename to `DistinctSortedStreamTransform` in `EXPLAIN PIPELINE` → this improves memory usage significantly + remove unnecessary allocations in hot loop in `DistinctSortedChunkTransform`. [#39432](https://github.com/ClickHouse/ClickHouse/pull/39432) ([Igor Nikonov](https://github.com/devcrafter)). Use `DistinctSortedTransform` only when sort description is applicable to DISTINCT columns, otherwise fall back to ordinary DISTINCT implementation + it allows making less checks during `DistinctSortedTransform` execution. [#39528](https://github.com/ClickHouse/ClickHouse/pull/39528) ([Igor Nikonov](https://github.com/devcrafter)). Fix: `DistinctSortedTransform` didn't take advantage of sorting. It never cleared HashSet since clearing_columns were detected incorrectly (always empty). So, it basically worked as ordinary `DISTINCT` (`DistinctTransform`). The fix reduces memory usage significantly. [#39538](https://github.com/ClickHouse/ClickHouse/pull/39538) ([Igor Nikonov](https://github.com/devcrafter)).
* Use local node as first priority to get structure of remote table when executing `cluster` and similar table functions. [#39440](https://github.com/ClickHouse/ClickHouse/pull/39440) ([Mingliang Pan](https://github.com/liangliangpan)).
@@ -256,6 +263,7 @@
* Improve bytes to bits mask transform for SSE/AVX/AVX512. [#39586](https://github.com/ClickHouse/ClickHouse/pull/39586) ([Guo Wangyang](https://github.com/guowangy)).

#### Improvement

* Normalize `AggregateFunction` types and state representations because optimizations like [#35788](https://github.com/ClickHouse/ClickHouse/pull/35788) will treat `count(not null columns)` as `count()`, which might confuses distributed interpreters with the following error : `Conversion from AggregateFunction(count) to AggregateFunction(count, Int64) is not supported`. [#39420](https://github.com/ClickHouse/ClickHouse/pull/39420) ([Amos Bird](https://github.com/amosbird)). The functions with identical states can be used in materialized views interchangeably.
* Rework and simplify the `system.backups` table, remove the `internal` column, allow user to set the ID of operation, add columns `num_files`, `uncompressed_size`, `compressed_size`, `start_time`, `end_time`. [#39503](https://github.com/ClickHouse/ClickHouse/pull/39503) ([Vitaly Baranov](https://github.com/vitlibar)).
* Improved structure of DDL query result table for `Replicated` database (separate columns with shard and replica name, more clear status) - `CREATE TABLE ... ON CLUSTER` queries can be normalized on initiator first if `distributed_ddl_entry_format_version` is set to 3 (default value). It means that `ON CLUSTER` queries may not work if initiator does not belong to the cluster that specified in query. Fixes [#37318](https://github.com/ClickHouse/ClickHouse/issues/37318), [#39500](https://github.com/ClickHouse/ClickHouse/issues/39500) - Ignore `ON CLUSTER` clause if database is `Replicated` and cluster name equals to database name. Related to [#35570](https://github.com/ClickHouse/ClickHouse/issues/35570) - Miscellaneous minor fixes for `Replicated` database engine - Check metadata consistency when starting up `Replicated` database, start replica recovery in case of mismatch of local metadata and metadata in Keeper. Resolves [#24880](https://github.com/ClickHouse/ClickHouse/issues/24880). [#37198](https://github.com/ClickHouse/ClickHouse/pull/37198) ([Alexander Tokmakov](https://github.com/tavplubix)).
@@ -294,6 +302,7 @@
* Add support for LARGE_BINARY/LARGE_STRING with Arrow (Closes [#32401](https://github.com/ClickHouse/ClickHouse/issues/32401)). [#40293](https://github.com/ClickHouse/ClickHouse/pull/40293) ([Josh Taylor](https://github.com/joshuataylor)).

#### Build/Testing/Packaging Improvement

* [ClickFiddle](https://fiddle.clickhouse.com/): A new tool for testing ClickHouse versions in read/write mode (**Igor Baliuk**).
* ClickHouse binary is made self-extracting [#35775](https://github.com/ClickHouse/ClickHouse/pull/35775) ([Yakov Olkhovskiy, Arthur Filatenkov](https://github.com/yakov-olkhovskiy)).
* Update tzdata to 2022b to support the new timezone changes. See https://github.com/google/cctz/pull/226. Chile's 2022 DST start is delayed from September 4 to September 11. Iran plans to stop observing DST permanently, after it falls back on 2022-09-21. There are corrections of the historical time zone of Asia/Tehran in the year 1977: Iran adopted standard time in 1935, not 1946. In 1977 it observed DST from 03-21 23:00 to 10-20 24:00; its 1978 transitions were on 03-24 and 08-05, not 03-20 and 10-20; and its spring 1979 transition was on 05-27, not 03-21 (https://data.iana.org/time-zones/tzdb/NEWS). ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@@ -308,6 +317,7 @@
* Docker: Now entrypoint.sh in docker image creates and executes chown for all folders it found in config for multidisk setup [#17717](https://github.com/ClickHouse/ClickHouse/issues/17717). [#39121](https://github.com/ClickHouse/ClickHouse/pull/39121) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).

#### Bug Fix

* Fix possible segfault in `CapnProto` input format. This bug was found and send through ClickHouse bug-bounty [program](https://github.com/ClickHouse/ClickHouse/issues/38986) by *kiojj*. [#40241](https://github.com/ClickHouse/ClickHouse/pull/40241) ([Kruglov Pavel](https://github.com/Avogar)).
* Fix a very rare case of incorrect behavior of array subscript operator. This closes [#28720](https://github.com/ClickHouse/ClickHouse/issues/28720). [#40185](https://github.com/ClickHouse/ClickHouse/pull/40185) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix insufficient argument check for encryption functions (found by query fuzzer). This closes [#39987](https://github.com/ClickHouse/ClickHouse/issues/39987). [#40194](https://github.com/ClickHouse/ClickHouse/pull/40194) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
@@ -358,16 +368,17 @@
* A fix for reverse DNS resolution. [#40134](https://github.com/ClickHouse/ClickHouse/pull/40134) ([Arthur Passos](https://github.com/arthurpassos)).
* Fix unexpected result of `arrayDifference` for `Array(UInt32)`. [#40211](https://github.com/ClickHouse/ClickHouse/pull/40211) ([Duc Canh Le](https://github.com/canhld94)).

### <a id="227"></a> ClickHouse release 22.7, 2022-07-21

#### Upgrade Notes

* Enable setting `enable_positional_arguments` by default. It allows queries like `SELECT ... ORDER BY 1, 2` where 1, 2 are the references to the select clause. If you need to return the old behavior, disable this setting. [#38204](https://github.com/ClickHouse/ClickHouse/pull/38204) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Disable `format_csv_allow_single_quotes` by default. See [#37096](https://github.com/ClickHouse/ClickHouse/issues/37096). ([Kruglov Pavel](https://github.com/Avogar)).
* `Ordinary` database engine and old storage definition syntax for `*MergeTree` tables are deprecated. By default it's not possible to create new databases with `Ordinary` engine. If `system` database has `Ordinary` engine it will be automatically converted to `Atomic` on server startup. There are settings to keep old behavior (`allow_deprecated_database_ordinary` and `allow_deprecated_syntax_for_merge_tree`), but these settings may be removed in future releases. [#38335](https://github.com/ClickHouse/ClickHouse/pull/38335) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Force rewriting comma join to inner by default (set default value `cross_to_inner_join_rewrite = 2`). To have old behavior set `cross_to_inner_join_rewrite = 1`. [#39326](https://github.com/ClickHouse/ClickHouse/pull/39326) ([Vladimir C](https://github.com/vdimir)). If you will face any incompatibilities, you can turn this setting back.
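  For example (hypothetical tables), a comma join such as the following is now rewritten to an INNER JOIN by default, and setting the value back to 1 restores the old behavior:

  ```sql
  SELECT count()
  FROM t1, t2
  WHERE t1.id = t2.id; -- treated as t1 INNER JOIN t2 ON t1.id = t2.id

  SET cross_to_inner_join_rewrite = 1; -- previous, more permissive behavior
  ```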

#### New Feature

* Support expressions with window functions. Closes [#19857](https://github.com/ClickHouse/ClickHouse/issues/19857). [#37848](https://github.com/ClickHouse/ClickHouse/pull/37848) ([Dmitry Novik](https://github.com/novikd)).
* Add new `direct` join algorithm for `EmbeddedRocksDB` tables, see [#33582](https://github.com/ClickHouse/ClickHouse/issues/33582). [#35363](https://github.com/ClickHouse/ClickHouse/pull/35363) ([Vladimir C](https://github.com/vdimir)).
* Added full sorting merge join algorithm. [#35796](https://github.com/ClickHouse/ClickHouse/pull/35796) ([Vladimir C](https://github.com/vdimir)).
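  A usage sketch, assuming the new algorithm is selected via the `join_algorithm` setting:

  ```sql
  SELECT *
  FROM t1
  JOIN t2 ON t1.key = t2.key
  SETTINGS join_algorithm = 'full_sorting_merge';
  ```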
@@ -395,9 +406,11 @@
* Add `clickhouse-diagnostics` binary to the packages. [#38647](https://github.com/ClickHouse/ClickHouse/pull/38647) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Experimental Feature

* Adds new setting `implicit_transaction` to run standalone queries inside a transaction. It handles both creation and closing (via COMMIT if the query succeeded or ROLLBACK if it didn't) of the transaction automatically. [#38344](https://github.com/ClickHouse/ClickHouse/pull/38344) ([Raúl Marín](https://github.com/Algunenano)).
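  A sketch of how the setting is meant to be used (experimental transactions must also be enabled on the server; the table name is hypothetical):

  ```sql
  SET implicit_transaction = 1;
  -- Each standalone statement now runs inside its own transaction:
  -- committed on success, rolled back on failure.
  INSERT INTO orders VALUES (1, 'new');
  ```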

#### Performance Improvement

* Distinct optimization for sorted columns. Use specialized distinct transformation in case input stream is sorted by column(s) in distinct. Optimization can be applied to pre-distinct, final distinct, or both. Initial implementation by @dimarub2000. [#37803](https://github.com/ClickHouse/ClickHouse/pull/37803) ([Igor Nikonov](https://github.com/devcrafter)).
* Improve performance of `ORDER BY`, `MergeTree` merges, window functions using batch version of `BinaryHeap`. [#38022](https://github.com/ClickHouse/ClickHouse/pull/38022) ([Maksim Kita](https://github.com/kitaisreal)).
* More parallel execution for queries with `FINAL` [#36396](https://github.com/ClickHouse/ClickHouse/pull/36396) ([Nikita Taranov](https://github.com/nickitat)).
@@ -407,7 +420,7 @@
* Improve performance of insertion to columns of type `JSON`. [#38320](https://github.com/ClickHouse/ClickHouse/pull/38320) ([Anton Popov](https://github.com/CurtizJ)).
* Optimized insertion and lookups in the HashTable. [#38413](https://github.com/ClickHouse/ClickHouse/pull/38413) ([Nikita Taranov](https://github.com/nickitat)).
* Fix performance degradation from [#32493](https://github.com/ClickHouse/ClickHouse/issues/32493). [#38417](https://github.com/ClickHouse/ClickHouse/pull/38417) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Improve performance of joining with numeric columns using SIMD instructions. [#37235](https://github.com/ClickHouse/ClickHouse/pull/37235) ([zzachimed](https://github.com/zzachimed)). [#38565](https://github.com/ClickHouse/ClickHouse/pull/38565) ([Maksim Kita](https://github.com/kitaisreal)).
* Norm and Distance functions for arrays speed up 1.2-2 times. [#38740](https://github.com/ClickHouse/ClickHouse/pull/38740) ([Alexander Gololobov](https://github.com/davenger)).
* Add AVX-512 VBMI optimized `copyOverlap32Shuffle` for LZ4 decompression. In other words, LZ4 decompression performance is improved. [#37891](https://github.com/ClickHouse/ClickHouse/pull/37891) ([Guo Wangyang](https://github.com/guowangy)).
* `ORDER BY (a, b)` will use all the same benefits as `ORDER BY a, b`. [#38873](https://github.com/ClickHouse/ClickHouse/pull/38873) ([Igor Nikonov](https://github.com/devcrafter)).
@@ -419,6 +432,7 @@
* The table `system.asynchronous_metric_log` is further optimized for storage space. This closes [#38134](https://github.com/ClickHouse/ClickHouse/issues/38134). See the [YouTube video](https://www.youtube.com/watch?v=0fSp9SF8N8A). [#38428](https://github.com/ClickHouse/ClickHouse/pull/38428) ([Alexey Milovidov](https://github.com/alexey-milovidov)).

#### Improvement

* Support SQL standard CREATE INDEX and DROP INDEX syntax. [#35166](https://github.com/ClickHouse/ClickHouse/pull/35166) ([Jianmei Zhang](https://github.com/zhangjmruc)).
* Send profile events for INSERT queries (previously only SELECT was supported). [#37391](https://github.com/ClickHouse/ClickHouse/pull/37391) ([Azat Khuzhin](https://github.com/azat)).
* Implement in order aggregation (`optimize_aggregation_in_order`) for fully materialized projections. [#37469](https://github.com/ClickHouse/ClickHouse/pull/37469) ([Azat Khuzhin](https://github.com/azat)).
@@ -464,6 +478,7 @@
* Allow to declare `RabbitMQ` queue without default arguments `x-max-length` and `x-overflow`. [#39259](https://github.com/ClickHouse/ClickHouse/pull/39259) ([rnbondarenko](https://github.com/rnbondarenko)).

#### Build/Testing/Packaging Improvement

* Apply Clang Thread Safety Analysis (TSA) annotations to ClickHouse. [#38068](https://github.com/ClickHouse/ClickHouse/pull/38068) ([Robert Schulze](https://github.com/rschu1ze)).
* Adapt universal installation script for FreeBSD. [#39302](https://github.com/ClickHouse/ClickHouse/pull/39302) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Preparation for building on `s390x` platform. [#39193](https://github.com/ClickHouse/ClickHouse/pull/39193) ([Harry Lee](https://github.com/HarryLeeIBM)).
@@ -473,6 +488,7 @@
* Change `all|noarch` packages to architecture-dependent - Fix some documentation for it - Push aarch64|arm64 packages to artifactory and release assets - Fixes [#36443](https://github.com/ClickHouse/ClickHouse/issues/36443). [#38580](https://github.com/ClickHouse/ClickHouse/pull/38580) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Fix rounding for `Decimal128/Decimal256` with more than 19-digits long scale. [#38027](https://github.com/ClickHouse/ClickHouse/pull/38027) ([Igor Nikonov](https://github.com/devcrafter)).
* Fixed crash caused by data race in storage `Hive` (integration table engine). [#38887](https://github.com/ClickHouse/ClickHouse/pull/38887) ([lgbo](https://github.com/lgbo-ustc)).
* Fix crash when executing GRANT ALL ON *.* with ON CLUSTER. It was broken in https://github.com/ClickHouse/ClickHouse/pull/35767. This closes [#38618](https://github.com/ClickHouse/ClickHouse/issues/38618). [#38674](https://github.com/ClickHouse/ClickHouse/pull/38674) ([Vitaly Baranov](https://github.com/vitlibar)).
@@ -529,6 +545,7 @@
### <a id="226"></a> ClickHouse release 22.6, 2022-06-16

#### Backward Incompatible Change

* Remove support for octal number literals in SQL. In previous versions they were parsed as Float64. [#37765](https://github.com/ClickHouse/ClickHouse/pull/37765) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Changes how settings using `seconds` as type are parsed to support floating point values (for example: `max_execution_time=0.5`). Infinity or NaN values will throw an exception. [#37187](https://github.com/ClickHouse/ClickHouse/pull/37187) ([Raúl Marín](https://github.com/Algunenano)).
* Changed format of binary serialization of columns of experimental type `Object`. New format is more convenient to implement by third-party clients. [#37482](https://github.com/ClickHouse/ClickHouse/pull/37482) ([Anton Popov](https://github.com/CurtizJ)).
@@ -537,6 +554,7 @@
* If you run different ClickHouse versions on a cluster with AArch64 CPU or mix AArch64 and amd64 on a cluster, and use distributed queries with GROUP BY multiple keys of fixed-size type that fit in 256 bits but don't fit in 64 bits, and the size of the result is huge, the data will not be fully aggregated in the result of these queries during upgrade. Workaround: upgrade with downtime instead of a rolling upgrade.

#### New Feature

* Add `GROUPING` function. It allows to disambiguate the records in the queries with `ROLLUP`, `CUBE` or `GROUPING SETS`. Closes [#19426](https://github.com/ClickHouse/ClickHouse/issues/19426). [#37163](https://github.com/ClickHouse/ClickHouse/pull/37163) ([Dmitry Novik](https://github.com/novikd)).
* A new codec [FPC](https://userweb.cs.txstate.edu/~burtscher/papers/dcc07a.pdf) algorithm for floating point data compression. [#37553](https://github.com/ClickHouse/ClickHouse/pull/37553) ([Mikhail Guzov](https://github.com/koloshmet)).
* Add new columnar JSON formats: `JSONColumns`, `JSONCompactColumns`, `JSONColumnsWithMetadata`. Closes [#36338](https://github.com/ClickHouse/ClickHouse/issues/36338) Closes [#34509](https://github.com/ClickHouse/ClickHouse/issues/34509). [#36975](https://github.com/ClickHouse/ClickHouse/pull/36975) ([Kruglov Pavel](https://github.com/Avogar)).
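  For illustration, any SELECT can emit the new columnar layout:

  ```sql
  SELECT number AS id, toString(number) AS label
  FROM numbers(3)
  FORMAT JSONColumns;
  -- {"id": [0, 1, 2], "label": ["0", "1", "2"]}
  ```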
@@ -557,11 +575,13 @@
* Added `SYSTEM UNFREEZE` query that deletes the whole backup regardless if the corresponding table is deleted or not. [#36424](https://github.com/ClickHouse/ClickHouse/pull/36424) ([Vadim Volodin](https://github.com/PolyProgrammist)).
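  A usage sketch (the backup name is hypothetical):

  ```sql
  -- Clears the frozen data created by ALTER TABLE ... FREEZE WITH NAME 'nightly',
  -- even if the source table has since been dropped.
  SYSTEM UNFREEZE WITH NAME 'nightly';
  ```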

#### Experimental Feature

* Enables `POPULATE` for `WINDOW VIEW`. [#36945](https://github.com/ClickHouse/ClickHouse/pull/36945) ([vxider](https://github.com/Vxider)).
* `ALTER TABLE ... MODIFY QUERY` support for `WINDOW VIEW`. [#37188](https://github.com/ClickHouse/ClickHouse/pull/37188) ([vxider](https://github.com/Vxider)).
* This PR changes the behavior of the `ENGINE` syntax in `WINDOW VIEW`, to make it like in `MATERIALIZED VIEW`. [#37214](https://github.com/ClickHouse/ClickHouse/pull/37214) ([vxider](https://github.com/Vxider)).

#### Performance Improvement

* Added numerous optimizations for ARM NEON [#38093](https://github.com/ClickHouse/ClickHouse/pull/38093)([Daniel Kutenin](https://github.com/danlark1)), ([Alexandra Pilipyuk](https://github.com/chalice19)) Note: if you run different ClickHouse versions on a cluster with ARM CPU and use distributed queries with GROUP BY multiple keys of fixed-size type that fit in 256 bits but don't fit in 64 bits, the result of the aggregation query will be wrong during upgrade. Workaround: upgrade with downtime instead of a rolling upgrade.
* Improve performance and memory usage for select of subset of columns for formats Native, Protobuf, CapnProto, JSONEachRow, TSKV, all formats with suffixes WithNames/WithNamesAndTypes. Previously while selecting only subset of columns from files in these formats all columns were read and stored in memory. Now only required columns are read. This PR enables setting `input_format_skip_unknown_fields` by default, because otherwise in case of select of subset of columns exception will be thrown. [#37192](https://github.com/ClickHouse/ClickHouse/pull/37192) ([Kruglov Pavel](https://github.com/Avogar)).
* Now more filters can be pushed down for join. [#37472](https://github.com/ClickHouse/ClickHouse/pull/37472) ([Amos Bird](https://github.com/amosbird)).
@@ -592,6 +612,7 @@
* In function: CompressedWriteBuffer::nextImpl(), there is an unnecessary write-copy step that would happen frequently during inserting data. Below shows the differentiation with this patch: - Before: 1. Compress "working_buffer" into "compressed_buffer" 2. write-copy into "out" - After: Directly Compress "working_buffer" into "out". [#37242](https://github.com/ClickHouse/ClickHouse/pull/37242) ([jasperzhu](https://github.com/jinjunzh)).

#### Improvement

* Support types with non-standard defaults in ROLLUP, CUBE, GROUPING SETS. Closes [#37360](https://github.com/ClickHouse/ClickHouse/issues/37360). [#37667](https://github.com/ClickHouse/ClickHouse/pull/37667) ([Dmitry Novik](https://github.com/novikd)).
* Fix stack traces collection on ARM. Closes [#37044](https://github.com/ClickHouse/ClickHouse/issues/37044). Closes [#15638](https://github.com/ClickHouse/ClickHouse/issues/15638). [#37797](https://github.com/ClickHouse/ClickHouse/pull/37797) ([Maksim Kita](https://github.com/kitaisreal)).
* Client will try every IP address returned by DNS resolution until successful connection. [#37273](https://github.com/ClickHouse/ClickHouse/pull/37273) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
@@ -633,6 +654,7 @@
* Add implicit grants with grant option too. For example `GRANT CREATE TABLE ON test.* TO A WITH GRANT OPTION` now allows `A` to execute `GRANT CREATE VIEW ON test.* TO B`. [#38017](https://github.com/ClickHouse/ClickHouse/pull/38017) ([Vitaly Baranov](https://github.com/vitlibar)).
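  The example from this entry, written out as statements:

  ```sql
  GRANT CREATE TABLE ON test.* TO A WITH GRANT OPTION;
  -- User A may now grant the implied privilege as well:
  GRANT CREATE VIEW ON test.* TO B; -- executed by A
  ```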

#### Build/Testing/Packaging Improvement

* Use `clang-14` and LLVM infrastructure version 14 for builds. This closes [#34681](https://github.com/ClickHouse/ClickHouse/issues/34681). [#34754](https://github.com/ClickHouse/ClickHouse/pull/34754) ([Alexey Milovidov](https://github.com/alexey-milovidov)). Note: `clang-14` has [a bug](https://github.com/google/sanitizers/issues/1540) in ThreadSanitizer that makes our CI work worse.
* Allow to drop privileges at startup. This simplifies Docker images. Closes [#36293](https://github.com/ClickHouse/ClickHouse/issues/36293). [#36341](https://github.com/ClickHouse/ClickHouse/pull/36341) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Add docs spellcheck to CI. [#37790](https://github.com/ClickHouse/ClickHouse/pull/37790) ([Vladimir C](https://github.com/vdimir)).
@@ -690,7 +712,6 @@
* Fix possible heap-use-after-free error when reading system.projection_parts and system.projection_parts_columns . This fixes [#37184](https://github.com/ClickHouse/ClickHouse/issues/37184). [#37185](https://github.com/ClickHouse/ClickHouse/pull/37185) ([Amos Bird](https://github.com/amosbird)).
* Fixed `DateTime64` fractional seconds behavior prior to Unix epoch. [#37697](https://github.com/ClickHouse/ClickHouse/pull/37697) ([Andrey Zvonov](https://github.com/zvonand)). [#37039](https://github.com/ClickHouse/ClickHouse/pull/37039) ([李扬](https://github.com/taiyang-li)).

### <a id="225"></a> ClickHouse release 22.5, 2022-05-19

#### Upgrade Notes
@@ -743,7 +764,7 @@
* Implement partial GROUP BY key for optimize_aggregation_in_order. [#35111](https://github.com/ClickHouse/ClickHouse/pull/35111) ([Azat Khuzhin](https://github.com/azat)).

#### Improvement

* Show names of erroneous files in case of parsing errors while executing table functions `file`, `s3` and `url`. [#36314](https://github.com/ClickHouse/ClickHouse/pull/36314) ([Anton Popov](https://github.com/CurtizJ)).
* Allowed to increase the number of threads for executing background operations (merges, mutations, moves and fetches) at runtime if they are specified at top level config. [#36425](https://github.com/ClickHouse/ClickHouse/pull/36425) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Now date time conversion functions that generates time before 1970-01-01 00:00:00 with partial hours/minutes timezones will be saturated to zero instead of overflow. This is the continuation of https://github.com/ClickHouse/ClickHouse/pull/29953 which addresses https://github.com/ClickHouse/ClickHouse/pull/29953#discussion_r800550280 . Mark as improvement because it's implementation defined behavior (and very rare case) and we are allowed to break it. [#36656](https://github.com/ClickHouse/ClickHouse/pull/36656) ([Amos Bird](https://github.com/amosbird)).
@@ -852,7 +873,6 @@
* Fix ALTER DROP COLUMN of nested column with compact parts (i.e. `ALTER TABLE x DROP COLUMN n`, when there is column `n.d`). [#35797](https://github.com/ClickHouse/ClickHouse/pull/35797) ([Azat Khuzhin](https://github.com/azat)).
* Fix substring function range error length when `offset` and `length` is negative constant and `s` is not constant. [#33861](https://github.com/ClickHouse/ClickHouse/pull/33861) ([RogerYK](https://github.com/RogerYK)).

### <a id="224"></a> ClickHouse release 22.4, 2022-04-19

#### Backward Incompatible Change
@@ -1004,8 +1024,7 @@
* Fix mutations in tables with enabled sparse columns. [#35284](https://github.com/ClickHouse/ClickHouse/pull/35284) ([Anton Popov](https://github.com/CurtizJ)).
* Do not delay final part writing by default (fixes possible `Memory limit exceeded` during `INSERT` by adding `max_insert_delayed_streams_for_parallel_write` with default to 1000 for writes to s3 and disabled as before otherwise). [#34780](https://github.com/ClickHouse/ClickHouse/pull/34780) ([Azat Khuzhin](https://github.com/azat)).

-## <a id="223"></a> ClickHouse release v22.3-lts, 2022-03-17
+### <a id="223"></a> ClickHouse release v22.3-lts, 2022-03-17

#### Backward Incompatible Change
@@ -1132,7 +1151,6 @@
* Fix incorrect result of trivial count query when part movement feature is used [#34089](https://github.com/ClickHouse/ClickHouse/issues/34089). [#34385](https://github.com/ClickHouse/ClickHouse/pull/34385) ([nvartolomei](https://github.com/nvartolomei)).
* Fix inconsistency of `max_query_size` limitation in distributed subqueries. [#34078](https://github.com/ClickHouse/ClickHouse/pull/34078) ([Chao Ma](https://github.com/godliness)).

### <a id="222"></a> ClickHouse release v22.2, 2022-02-17

#### Upgrade Notes
@@ -1308,7 +1326,6 @@
* Fix issue [#18206](https://github.com/ClickHouse/ClickHouse/issues/18206). [#33977](https://github.com/ClickHouse/ClickHouse/pull/33977) ([Vitaly Baranov](https://github.com/vitlibar)).
* This PR allows using multiple LDAP storages in the same list of user directories. It worked earlier but was broken because LDAP tests are disabled (they are part of the testflows tests). [#33574](https://github.com/ClickHouse/ClickHouse/pull/33574) ([Vitaly Baranov](https://github.com/vitlibar)).

### <a id="221"></a> ClickHouse release v22.1, 2022-01-18

#### Upgrade Notes
@@ -1335,7 +1352,6 @@
* Add function `decodeURLFormComponent` slightly different to `decodeURLComponent`. Close [#10298](https://github.com/ClickHouse/ClickHouse/issues/10298). [#33451](https://github.com/ClickHouse/ClickHouse/pull/33451) ([SuperDJY](https://github.com/cmsxbc)).
* Allow to split `GraphiteMergeTree` rollup rules for plain/tagged metrics (optional rule_type field). [#33494](https://github.com/ClickHouse/ClickHouse/pull/33494) ([Michail Safronov](https://github.com/msaf1980)).

#### Performance Improvement

* Support moving conditions to `PREWHERE` (setting `optimize_move_to_prewhere`) for tables of `Merge` engine if its all underlying tables supports `PREWHERE`. [#33300](https://github.com/ClickHouse/ClickHouse/pull/33300) ([Anton Popov](https://github.com/CurtizJ)).
@@ -1351,7 +1367,6 @@
* Optimize selecting of MergeTree parts that can be moved between volumes. [#33225](https://github.com/ClickHouse/ClickHouse/pull/33225) ([OnePiece](https://github.com/zhongyuankai)).
* Fix `sparse_hashed` dict performance with sequential keys (wrong hash function). [#32536](https://github.com/ClickHouse/ClickHouse/pull/32536) ([Azat Khuzhin](https://github.com/azat)).

#### Experimental Feature

* Parallel reading from multiple replicas within a shard during distributed query without using sample key. To enable this, set `allow_experimental_parallel_reading_from_replicas = 1` and `max_parallel_replicas` to any number. This closes [#26748](https://github.com/ClickHouse/ClickHouse/issues/26748). [#29279](https://github.com/ClickHouse/ClickHouse/pull/29279) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
@@ -1364,7 +1379,6 @@
* Fix ACL with explicit digit hash in `clickhouse-keeper`: now the behavior consistent with ZooKeeper and generated digest is always accepted. [#33249](https://github.com/ClickHouse/ClickHouse/pull/33249) ([小路](https://github.com/nicelulu)). [#33246](https://github.com/ClickHouse/ClickHouse/pull/33246).
* Fix unexpected projection removal when detaching parts. [#32067](https://github.com/ClickHouse/ClickHouse/pull/32067) ([Amos Bird](https://github.com/amosbird)).

#### Improvement

* Now date time conversion functions that generates time before `1970-01-01 00:00:00` will be saturated to zero instead of overflow. [#29953](https://github.com/ClickHouse/ClickHouse/pull/29953) ([Amos Bird](https://github.com/amosbird)). It also fixes a bug in index analysis if date truncation function would yield result before the Unix epoch.
@@ -1411,7 +1425,6 @@
* Updating `modification_time` for data part in `system.parts` after part movement [#32964](https://github.com/ClickHouse/ClickHouse/issues/32964). [#32965](https://github.com/ClickHouse/ClickHouse/pull/32965) ([save-my-heart](https://github.com/save-my-heart)).
* Potential issue, cannot be exploited: integer overflow may happen in array resize. [#33024](https://github.com/ClickHouse/ClickHouse/pull/33024) ([varadarajkumar](https://github.com/varadarajkumar)).

#### Build/Testing/Packaging Improvement

* Add packages, functional tests and Docker builds for AArch64 (ARM) version of ClickHouse. [#32911](https://github.com/ClickHouse/ClickHouse/pull/32911) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). [#32415](https://github.com/ClickHouse/ClickHouse/pull/32415)
@@ -1426,7 +1439,6 @@
* Inject git information into clickhouse binary file. So we can get source code revision easily from clickhouse binary file. [#33124](https://github.com/ClickHouse/ClickHouse/pull/33124) ([taiyang-li](https://github.com/taiyang-li)).
* Remove obsolete code from ConfigProcessor. Yandex specific code is not used anymore. The code contained one minor defect. This defect was reported by [Mallik Hassan](https://github.com/SadiHassan) in [#33032](https://github.com/ClickHouse/ClickHouse/issues/33032). This closes [#33032](https://github.com/ClickHouse/ClickHouse/issues/33032). [#33026](https://github.com/ClickHouse/ClickHouse/pull/33026) ([alexey-milovidov](https://github.com/alexey-milovidov)).

#### Bug Fix (user-visible misbehavior in official stable or prestable release)

* Several fixes for format parsing. This is relevant if `clickhouse-server` is open for write access to adversary. Specifically crafted input data for `Native` format may lead to reading uninitialized memory or crash. This is relevant if `clickhouse-server` is open for write access to adversary. [#33050](https://github.com/ClickHouse/ClickHouse/pull/33050) ([Heena Bansal](https://github.com/HeenaBansal2009)). Fixed Apache Avro Union type index out of boundary issue in Apache Avro binary format. [#33022](https://github.com/ClickHouse/ClickHouse/pull/33022) ([Harry Lee](https://github.com/HarryLeeIBM)). Fix null pointer dereference in `LowCardinality` data when deserializing `LowCardinality` data in the Native format. [#33021](https://github.com/ClickHouse/ClickHouse/pull/33021) ([Harry Lee](https://github.com/HarryLeeIBM)).
@@ -1485,5 +1497,4 @@
* Fix possible crash (or incorrect result) in case of `LowCardinality` arguments of window function. Fixes [#31114](https://github.com/ClickHouse/ClickHouse/issues/31114). [#31888](https://github.com/ClickHouse/ClickHouse/pull/31888) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix hang up with command `DROP TABLE system.query_log sync`. [#33293](https://github.com/ClickHouse/ClickHouse/pull/33293) ([zhanghuajie](https://github.com/zhanghuajieHIT)).

## [Changelog for 2021](https://clickhouse.com/docs/en/whats-new/changelog/2021)
@@ -151,7 +151,7 @@ public:
    {
        size_t dot_pos = path.rfind('.');
        if (dot_pos != std::string::npos)
-           fd = ::mkstemps(path.data(), path.size() - dot_pos);
+           fd = ::mkstemps(path.data(), static_cast<int>(path.size() - dot_pos));
        else
            fd = ::mkstemp(path.data());
@@ -408,7 +408,7 @@ ReplxxLineReader::ReplxxLineReader(
            // In a simplest case use simple comment.
            commented_line = fmt::format("-- {}", state.text());
        }
-       rx.set_state(replxx::Replxx::State(commented_line.c_str(), commented_line.size()));
+       rx.set_state(replxx::Replxx::State(commented_line.c_str(), static_cast<int>(commented_line.size())));

        return rx.invoke(Replxx::ACTION::COMMIT_LINE, code);
    };
@@ -480,7 +480,7 @@ void ReplxxLineReader::openEditor()
        if (executeCommand(argv) == 0)
        {
            const std::string & new_query = readFile(editor_file.getPath());
-           rx.set_state(replxx::Replxx::State(new_query.c_str(), new_query.size()));
+           rx.set_state(replxx::Replxx::State(new_query.c_str(), static_cast<int>(new_query.size())));
        }
    }
    catch (const std::runtime_error & e)
@@ -526,7 +526,7 @@ void ReplxxLineReader::openInteractiveHistorySearch()
        {
            std::string new_query = readFile(output_file.getPath());
            rightTrim(new_query);
-           rx.set_state(replxx::Replxx::State(new_query.c_str(), new_query.size()));
+           rx.set_state(replxx::Replxx::State(new_query.c_str(), static_cast<int>(new_query.size())));
        }
    }
    catch (const std::runtime_error & e)
@@ -265,7 +265,7 @@ inline size_t hashLessThan16(const char * data, size_t size)

struct CRC32Hash
{
-   size_t operator() (StringRef x) const
+   unsigned operator() (StringRef x) const
    {
        const char * pos = x.data;
        size_t size = x.size;
@@ -275,22 +275,22 @@ struct CRC32Hash

        if (size < 8)
        {
-           return hashLessThan8(x.data, x.size);
+           return static_cast<unsigned>(hashLessThan8(x.data, x.size));
        }

        const char * end = pos + size;
-       size_t res = -1ULL;
+       unsigned res = -1U;

        do
        {
            UInt64 word = unalignedLoad<UInt64>(pos);
-           res = CRC_INT(res, word);
+           res = static_cast<unsigned>(CRC_INT(res, word));

            pos += 8;
        } while (pos + 8 < end);

        UInt64 word = unalignedLoad<UInt64>(end - 8); /// I'm not sure if this is normal.
-       res = CRC_INT(res, word);
+       res = static_cast<unsigned>(CRC_INT(res, word));

        return res;
    }
@@ -302,7 +302,7 @@ struct StringRefHash : CRC32Hash {};

struct CRC32Hash
{
-   size_t operator() (StringRef /* x */) const
+   unsigned operator() (StringRef /* x */) const
    {
        throw std::logic_error{"Not implemented CRC32Hash without SSE"};
    }
@@ -122,7 +122,7 @@ QuotientAndRemainder<N> static inline split(UnsignedOfSize<N> value)
    constexpr DivisionBy10PowN<N> division;

    UnsignedOfSize<N> quotient = (division.multiplier * (UnsignedOfSize<2 * N>(value) + division.add)) >> division.shift;
-   UnsignedOfSize<N / 2> remainder = value - quotient * pow10<UnsignedOfSize<N / 2>>(N);
+   UnsignedOfSize<N / 2> remainder = static_cast<UnsignedOfSize<N / 2>>(value - quotient * pow10<UnsignedOfSize<N / 2>>(N));

    return {quotient, remainder};
}
@@ -1,10 +1,8 @@
#if defined(OS_LINUX)
#    include <sys/syscall.h>
#endif
#include <cstdlib>
#include <unistd.h>
#include <base/safeExit.h>
#include <base/defines.h>

[[noreturn]] void safeExit(int code)
{
@@ -227,6 +227,8 @@ struct integer<Bits, Signed>::_impl
    template <typename T>
    __attribute__((no_sanitize("undefined"))) constexpr static auto to_Integral(T f) noexcept
    {
+       /// NOTE: this can be called with DB::Decimal, and in this case, result
+       /// will be wrong
        if constexpr (std::is_signed_v<T>)
            return static_cast<int64_t>(f);
        else
@@ -61,8 +61,14 @@ elseif (ARCH_AARCH64)
endif ()

elseif (ARCH_PPC64LE)
+   # By Default, build for power8 and up, allow building for power9 and up
    # Note that gcc and clang have support for x86 SSE2 intrinsics when building for PowerPC
-   set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS")
+   option (POWER9 "Build for Power 9 CPU and above" 0)
+   if(POWER9)
+       set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power9 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS")
+   else ()
+       set (COMPILER_FLAGS "${COMPILER_FLAGS} -maltivec -mcpu=power8 -D__SSE2__=1 -DNO_WARN_X86_INTRINSICS")
+   endif ()

elseif (ARCH_AMD64)
    option (ENABLE_SSSE3 "Use SSSE3 instructions on x86_64" 1)
@@ -3,15 +3,15 @@
# This is a workaround for bug in llvm/clang,
# that does not produce .debug_aranges with LTO
#
-# NOTE: this is a temporary solution, that should be removed once [1] will be
-# resolved.
+# NOTE: this is a temporary solution, that should be removed after upgrading to
+# clang-16/llvm-16.
#
-# [1]: https://discourse.llvm.org/t/clang-does-not-produce-full-debug-aranges-section-with-thinlto/64898/8
+# Refs: https://reviews.llvm.org/D133092

# NOTE: only -flto=thin is supported.
# NOTE: it is not possible to check was there -gdwarf-aranges initially or not.
if [[ "$*" =~ -plugin-opt=thinlto ]]; then
-    exec "@LLD_PATH@" -mllvm -generate-arange-section "$@"
+    exec "@LLD_PATH@" -plugin-opt=-generate-arange-section "$@"
else
    exec "@LLD_PATH@" "$@"
fi
@@ -85,7 +85,7 @@ if (SANITIZE)
        # and they have a bunch of flags not halt the program if UIO happend and even to silence that warnings.
        # But for unknown reason that flags don't work with ClickHouse or we don't understand how to properly use them,
        # that's why we often receive reports about UIO. The simplest way to avoid this is just set this flag here.
-       set(UBSAN_FLAGS "${SAN_FLAGS} -fno-sanitize=unsigned-integer-overflow")
+       set(UBSAN_FLAGS "${UBSAN_FLAGS} -fno-sanitize=unsigned-integer-overflow")
    endif()
    if (COMPILER_CLANG)
        set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt")
@@ -117,7 +117,7 @@ endif()
# Archiver

if (COMPILER_GCC)
-    find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-14" "llvm-ar-13" "llvm-ar-12")
+    find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-15" "llvm-ar-14" "llvm-ar-13" "llvm-ar-12")
else ()
    find_program (LLVM_AR_PATH NAMES "llvm-ar-${COMPILER_VERSION_MAJOR}" "llvm-ar")
endif ()
@@ -131,7 +131,7 @@ message(STATUS "Using archiver: ${CMAKE_AR}")
# Ranlib

if (COMPILER_GCC)
-    find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-14" "llvm-ranlib-13" "llvm-ranlib-12")
+    find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-15" "llvm-ranlib-14" "llvm-ranlib-13" "llvm-ranlib-12")
else ()
    find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib-${COMPILER_VERSION_MAJOR}" "llvm-ranlib")
endif ()
@@ -145,7 +145,7 @@ message(STATUS "Using ranlib: ${CMAKE_RANLIB}")
# Install Name Tool

if (COMPILER_GCC)
-    find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool" "llvm-install-name-tool-14" "llvm-install-name-tool-13" "llvm-install-name-tool-12")
+    find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool" "llvm-install-name-tool-15" "llvm-install-name-tool-14" "llvm-install-name-tool-13" "llvm-install-name-tool-12")
else ()
    find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool-${COMPILER_VERSION_MAJOR}" "llvm-install-name-tool")
endif ()
@@ -159,7 +159,7 @@ message(STATUS "Using install-name-tool: ${CMAKE_INSTALL_NAME_TOOL}")
# Objcopy

if (COMPILER_GCC)
-    find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-14" "llvm-objcopy-13" "llvm-objcopy-12" "objcopy")
+    find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-15" "llvm-objcopy-14" "llvm-objcopy-13" "llvm-objcopy-12" "objcopy")
else ()
    find_program (OBJCOPY_PATH NAMES "llvm-objcopy-${COMPILER_VERSION_MAJOR}" "llvm-objcopy" "objcopy")
endif ()
@@ -173,7 +173,7 @@ endif ()
# Strip

if (COMPILER_GCC)
-    find_program (STRIP_PATH NAMES "llvm-strip" "llvm-strip-14" "llvm-strip-13" "llvm-strip-12" "strip")
+    find_program (STRIP_PATH NAMES "llvm-strip" "llvm-strip-15" "llvm-strip-14" "llvm-strip-13" "llvm-strip-12" "strip")
else ()
    find_program (STRIP_PATH NAMES "llvm-strip-${COMPILER_VERSION_MAJOR}" "llvm-strip" "strip")
endif ()
@@ -27,7 +27,6 @@ if (COMPILER_CLANG)
        no_warning(sign-conversion)
        no_warning(implicit-int-conversion)
        no_warning(implicit-int-float-conversion)
-       no_warning(shorten-64-to-32)
        no_warning(ctad-maybe-unsupported) # clang 9+, linux-only
        no_warning(disabled-macro-expansion)
        no_warning(documentation-unknown-command)
contrib/CMakeLists.txt (vendored, 1 change)
@@ -114,6 +114,7 @@ if (ENABLE_TESTS)
endif()

add_contrib (llvm-project-cmake llvm-project)
+add_contrib (libfuzzer-cmake llvm-project)
add_contrib (libxml2-cmake libxml2)
add_contrib (aws-s3-cmake
    aws
contrib/libfuzzer-cmake/CMakeLists.txt (new file, 35 lines)
@@ -0,0 +1,35 @@
set(COMPILER_RT_FUZZER_SRC_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/compiler-rt/lib/fuzzer")

set(FUZZER_SRCS
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerCrossOver.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerDataFlowTrace.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerDriver.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtFunctionsDlsym.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtFunctionsWeak.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtFunctionsWindows.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtraCounters.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtraCountersDarwin.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerExtraCountersWindows.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerFork.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerIO.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerIOPosix.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerIOWindows.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerLoop.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerMerge.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerMutate.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerSHA1.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerTracePC.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtil.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilDarwin.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilFuchsia.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilLinux.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilPosix.cpp"
    "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerUtilWindows.cpp"
)

add_library(_fuzzer_no_main STATIC ${FUZZER_SRCS})
add_library(ch_contrib::fuzzer_no_main ALIAS _fuzzer_no_main)

add_library(_fuzzer STATIC ${FUZZER_SRCS} "${COMPILER_RT_FUZZER_SRC_DIR}/FuzzerMain.cpp")
add_library(ch_contrib::fuzzer ALIAS _fuzzer)
@@ -78,23 +78,13 @@ endif()

include(CheckCCompilerFlag)
if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")
    CHECK_C_COMPILER_FLAG("-mcpu=power9" HAS_POWER9)
    if(HAS_POWER9)
        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mcpu=power9 -mtune=power9")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=power9 -mtune=power9")
    if(POWER9)
        set(HAS_POWER9 1)
        set(HAS_ALTIVEC 1)
    else()
        CHECK_C_COMPILER_FLAG("-mcpu=power8" HAS_POWER8)
        if(HAS_POWER8)
            set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mcpu=power8 -mtune=power8")
            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=power8 -mtune=power8")
        endif(HAS_POWER8)
    endif(HAS_POWER9)
    CHECK_C_COMPILER_FLAG("-maltivec" HAS_ALTIVEC)
    if(HAS_ALTIVEC)
        message(STATUS " HAS_ALTIVEC yes")
        set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maltivec")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -maltivec")
    endif(HAS_ALTIVEC)
        set(HAS_POWER8 1)
        set(HAS_ALTIVEC 1)
    endif(POWER9)
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64")

if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64|arm64|ARM64")
|
@@ -73,7 +73,7 @@ RUN apt-get install binutils-riscv64-linux-gnu

# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
ARG NFPM_VERSION=2.18.1
ARG NFPM_VERSION=2.20.0

RUN arch=${TARGETARCH:-amd64} \
    && curl -Lo /tmp/nfpm.deb "https://github.com/goreleaser/nfpm/releases/download/v${NFPM_VERSION}/nfpm_${arch}.deb" \

@@ -208,6 +208,7 @@ def parse_env_variables(
    cxx = cc.replace("gcc", "g++").replace("clang", "clang++")

    if package_type == "deb":
        # NOTE: This are the env for packages/build script
        result.append("MAKE_DEB=true")
        cmake_flags.append("-DENABLE_TESTS=0")
        cmake_flags.append("-DENABLE_UTILS=0")

@@ -268,6 +269,7 @@ def parse_env_variables(
        result.append('DISTCC_HOSTS="localhost/`nproc`"')

    if additional_pkgs:
        # NOTE: This are the env for packages/build script
        result.append("MAKE_APK=true")
        result.append("MAKE_RPM=true")
        result.append("MAKE_TGZ=true")
|
@@ -36,10 +36,7 @@ RUN arch=${TARGETARCH:-amd64} \

# repo versions doesn't work correctly with C++17
# also we push reports to s3, so we add index.html to subfolder urls
# https://github.com/ClickHouse-Extras/woboq_codebrowser/commit/37e15eaf377b920acb0b48dbe82471be9203f76b
# TODO: remove branch in a few weeks after merge, e.g. in May or June 2022
#
# FIXME: update location of a repo
RUN git clone https://github.com/azat/woboq_codebrowser --branch llvm-15 \
RUN git clone https://github.com/ClickHouse/woboq_codebrowser \
    && cd woboq_codebrowser \
    && cmake . -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=clang\+\+-${LLVM_VERSION} -DCMAKE_C_COMPILER=clang-${LLVM_VERSION} \
    && ninja \
|
31
docker/test/stress/run.sh
Executable file → Normal file
31
docker/test/stress/run.sh
Executable file → Normal file
@ -47,7 +47,6 @@ function install_packages()
|
||||
|
||||
function configure()
|
||||
{
|
||||
export ZOOKEEPER_FAULT_INJECTION=1
|
||||
# install test configs
|
||||
export USE_DATABASE_ORDINARY=1
|
||||
export EXPORT_S3_STORAGE_POLICIES=1
|
||||
@ -203,6 +202,7 @@ quit
|
||||
|
||||
install_packages package_folder
|
||||
|
||||
export ZOOKEEPER_FAULT_INJECTION=1
|
||||
configure
|
||||
|
||||
azurite-blob --blobHost 0.0.0.0 --blobPort 10000 --debug /azurite_log &
|
||||
@ -243,6 +243,7 @@ stop
|
||||
|
||||
# Let's enable S3 storage by default
|
||||
export USE_S3_STORAGE_FOR_MERGE_TREE=1
|
||||
export ZOOKEEPER_FAULT_INJECTION=1
|
||||
configure
|
||||
|
||||
# But we still need default disk because some tables loaded only into it
|
||||
@ -270,10 +271,6 @@ clickhouse-client --query "SELECT 'Server successfully started', 'OK'" >> /test_
|
||||
|| (echo -e 'Server failed to start (see application_errors.txt and clickhouse-server.clean.log)\tFAIL' >> /test_output/test_results.tsv \
|
||||
&& grep -a "<Error>.*Application" /var/log/clickhouse-server/clickhouse-server.log > /test_output/application_errors.txt)
|
||||
|
||||
echo "Get previous release tag"
|
||||
previous_release_tag=$(clickhouse-client --query="SELECT version()" | get_previous_release_tag)
|
||||
echo $previous_release_tag
|
||||
|
||||
stop
|
||||
|
||||
[ -f /var/log/clickhouse-server/clickhouse-server.log ] || echo -e "Server log does not exist\tFAIL"
|
||||
@ -331,6 +328,10 @@ zgrep -Fa " received signal " /test_output/gdb.log > /dev/null \
|
||||
|
||||
echo -e "Backward compatibility check\n"
|
||||
|
||||
echo "Get previous release tag"
|
||||
previous_release_tag=$(clickhouse-client --version | grep -o "[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*" | get_previous_release_tag)
|
||||
echo $previous_release_tag
|
||||
|
||||
echo "Clone previous release repository"
|
||||
git clone https://github.com/ClickHouse/ClickHouse.git --no-tags --progress --branch=$previous_release_tag --no-recurse-submodules --depth=1 previous_release_repository
|
||||
|
||||
@ -375,6 +376,8 @@ else
|
||||
install_packages previous_release_package_folder
|
||||
|
||||
# Start server from previous release
|
||||
# Previous version may not be ready for fault injections
|
||||
export ZOOKEEPER_FAULT_INJECTION=0
|
||||
configure
|
||||
|
||||
# Avoid "Setting s3_check_objects_after_upload is neither a builtin setting..."
|
||||
@ -389,12 +392,23 @@ else
|
||||
|
||||
clickhouse-client --query="SELECT 'Server version: ', version()"
|
||||
|
||||
# Install new package before running stress test because we should use new clickhouse-client and new clickhouse-test
|
||||
# But we should leave old binary in /usr/bin/ for gdb (so it will print sane stacktarces)
|
||||
# Install new package before running stress test because we should use new
|
||||
# clickhouse-client and new clickhouse-test.
|
||||
#
|
||||
# But we should leave old binary in /usr/bin/ and debug symbols in
|
||||
# /usr/lib/debug/usr/bin (if any) for gdb and internal DWARF parser, so it
|
||||
# will print sane stacktraces and also to avoid possible crashes.
|
||||
#
|
||||
# FIXME: those files can be extracted directly from debian package, but
|
||||
# actually better solution will be to use different PATH instead of playing
|
||||
# games with files from packages.
|
||||
mv /usr/bin/clickhouse previous_release_package_folder/
|
||||
mv /usr/lib/debug/usr/bin/clickhouse.debug previous_release_package_folder/
|
||||
install_packages package_folder
|
||||
mv /usr/bin/clickhouse package_folder/
|
||||
mv /usr/lib/debug/usr/bin/clickhouse.debug package_folder/
|
||||
mv previous_release_package_folder/clickhouse /usr/bin/
|
||||
mv previous_release_package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
|
||||
|
||||
mkdir tmp_stress_output
|
||||
|
||||
@ -410,6 +424,8 @@ else
|
||||
|
||||
# Start new server
|
||||
mv package_folder/clickhouse /usr/bin/
|
||||
mv package_folder/clickhouse.debug /usr/lib/debug/usr/bin/clickhouse.debug
|
||||
export ZOOKEEPER_FAULT_INJECTION=1
|
||||
configure
|
||||
start 500
|
||||
clickhouse-client --query "SELECT 'Backward compatibility check: Server successfully started', 'OK'" >> /test_output/test_results.tsv \
|
||||
@ -464,6 +480,7 @@ else
|
||||
-e "[Queue = DB::MergeMutateRuntimeQueue]: Code: 235. DB::Exception: Part" \
|
||||
-e "The set of parts restored in place of" \
|
||||
-e "(ReplicatedMergeTreeAttachThread): Initialization failed. Error" \
|
||||
-e "Code: 269. DB::Exception: Destination table is myself" \
|
||||
/var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
|
||||
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||
|
@ -286,9 +286,7 @@ if __name__ == "__main__":
|
||||
# But right now it should work, since neither hung check, nor 00001_select_1 has GROUP BY.
|
||||
"--client-option",
|
||||
"max_untracked_memory=1Gi",
|
||||
"--client-option",
|
||||
"max_memory_usage_for_user=0",
|
||||
"--client-option",
|
||||
"memory_profiler_step=1Gi",
|
||||
# Use system database to avoid CREATE/DROP DATABASE queries
|
||||
"--database=system",
|
||||
|
@@ -49,27 +49,13 @@ When we calculate some function over columns in a block, we add another column w

Blocks are created for every processed chunk of data. Note that for the same type of calculation, the column names and types remain the same for different blocks, and only column data changes. It is better to split block data from the block header because small block sizes have a high overhead of temporary strings for copying shared_ptrs and column names.

## Block Streams {#block-streams}
## Processors

Block streams are for processing data. We use streams of blocks to read data from somewhere, perform data transformations, or write data to somewhere. `IBlockInputStream` has the `read` method to fetch the next block while available. `IBlockOutputStream` has the `write` method to push the block somewhere.

Streams are responsible for:

1. Reading or writing to a table. The table just returns a stream for reading or writing blocks.
2. Implementing data formats. For example, if you want to output data to a terminal in `Pretty` format, you create a block output stream where you push blocks, and it formats them.
3. Performing data transformations. Let’s say you have `IBlockInputStream` and want to create a filtered stream. You create `FilterBlockInputStream` and initialize it with your stream. Then when you pull a block from `FilterBlockInputStream`, it pulls a block from your stream, filters it, and returns the filtered block to you. Query execution pipelines are represented this way.

There are more sophisticated transformations. For example, when you pull from `AggregatingBlockInputStream`, it reads all data from its source, aggregates it, and then returns a stream of aggregated data for you. Another example: `UnionBlockInputStream` accepts many input sources in the constructor and also a number of threads. It launches multiple threads and reads from multiple sources in parallel.

> Block streams use the “pull” approach to control flow: when you pull a block from the first stream, it consequently pulls the required blocks from nested streams, and the entire execution pipeline will work. Neither “pull” nor “push” is the best solution, because control flow is implicit, and that limits the implementation of various features like simultaneous execution of multiple queries (merging many pipelines together). This limitation could be overcome with coroutines or just running extra threads that wait for each other. We may have more possibilities if we make control flow explicit: if we locate the logic for passing data from one calculation unit to another outside of those calculation units. Read this [article](http://journal.stuffwithstuff.com/2013/01/13/iteration-inside-and-out/) for more thoughts.

We should note that the query execution pipeline creates temporary data at each step. We try to keep block size small enough so that temporary data fits in the CPU cache. With that assumption, writing and reading temporary data is almost free in comparison with other calculations. We could consider an alternative, which is to fuse many operations in the pipeline together. It could make the pipeline as short as possible and remove much of the temporary data, which could be an advantage, but it also has drawbacks. For example, a split pipeline makes it easy to implement caching intermediate data, stealing intermediate data from similar queries running at the same time, and merging pipelines for similar queries.
See the description at [https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/IProcessor.h](https://github.com/ClickHouse/ClickHouse/blob/master/src/Processors/IProcessor.h).

## Formats {#formats}

Data formats are implemented with block streams. There are “presentational” formats only suitable for the output of data to the client, such as `Pretty` format, which provides only `IBlockOutputStream`. And there are input/output formats, such as `TabSeparated` or `JSONEachRow`.

There are also row streams: `IRowInputStream` and `IRowOutputStream`. They allow you to pull/push data by individual rows, not by blocks. And they are only needed to simplify the implementation of row-oriented formats. The wrappers `BlockInputStreamFromRowInputStream` and `BlockOutputStreamFromRowOutputStream` allow you to convert row-oriented streams to regular block-oriented streams.
Data formats are implemented with processors.

## I/O {#io}
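The processor-based pipeline that replaces block streams in this hunk can also be inspected from SQL. As a small editorial illustration (not part of this commit), `EXPLAIN PIPELINE` prints the processors a query would be executed with:

```sql
-- Illustrative only: show the chain of processors ClickHouse builds for a simple aggregation.
EXPLAIN PIPELINE
SELECT number % 10 AS k, count()
FROM numbers(1000000)
GROUP BY k;
```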
@@ -419,6 +419,8 @@ Supported data types: `Int*`, `UInt*`, `Float*`, `Enum`, `Date`, `DateTime`, `St

For `Map` data type client can specify if index should be created for keys or values using [mapKeys](../../../sql-reference/functions/tuple-map-functions.md#mapkeys) or [mapValues](../../../sql-reference/functions/tuple-map-functions.md#mapvalues) function.

There are also special-purpose and experimental indexes to support approximate nearest neighbor (ANN) queries. See [here](annindexes.md) for details.

The following functions can use the filter: [equals](../../../sql-reference/functions/comparison-functions.md), [notEquals](../../../sql-reference/functions/comparison-functions.md), [in](../../../sql-reference/functions/in-functions), [notIn](../../../sql-reference/functions/in-functions), [has](../../../sql-reference/functions/array-functions#hasarr-elem), [hasAny](../../../sql-reference/functions/array-functions#hasany), [hasAll](../../../sql-reference/functions/array-functions#hasall).

Example of index creation for `Map` data type
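The example referenced by the last context line above is outside this hunk; a minimal sketch of such an index (the table and column names here are hypothetical, not taken from the diff) could look like this:

```sql
-- Hypothetical table: a bloom_filter skipping index over the keys of a Map column.
CREATE TABLE map_index_example
(
    id UInt64,
    attrs Map(String, String),
    INDEX attrs_keys_idx mapKeys(attrs) TYPE bloom_filter GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id;
```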
@@ -101,7 +101,7 @@ SELECT count()

FROM uk_price_paid
```

At the time this query was executed, the dataset had 27,450,499 rows. Let's see what the storage size is of the table in ClickHouse:
At the time this query was run, the dataset had 27,450,499 rows. Let's see what the storage size is of the table in ClickHouse:

```sql
SELECT formatReadableSize(total_bytes)

@@ -342,7 +342,7 @@ The result looks like:

## Let's Speed Up Queries Using Projections {#speedup-with-projections}

[Projections](../../sql-reference/statements/alter/projection.md) allow you to improve query speeds by storing pre-aggregated data in whatever format you want. In this example, we create a projection that keeps track of the average price, total price, and count of properties grouped by the year, district and town. At execution time, ClickHouse will use your projection if it thinks the projection can improve the performance fo the query (you don't have to do anything special to use the projection - ClickHouse decides for you when the projection will be useful).
[Projections](../../sql-reference/statements/alter/projection.md) allow you to improve query speeds by storing pre-aggregated data in whatever format you want. In this example, we create a projection that keeps track of the average price, total price, and count of properties grouped by the year, district and town. At query time, ClickHouse will use your projection if it thinks the projection can improve the performance of the query (you don't have to do anything special to use the projection - ClickHouse decides for you when the projection will be useful).

### Build a Projection {#build-projection}
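The projection built in the section that follows is only partially visible in this hunk; a condensed sketch of that kind of projection, against the `uk_price_paid` table the guide uses (treat the exact column list as illustrative), is:

```sql
-- Illustrative: pre-aggregate average price, total price and count by year, district and town.
ALTER TABLE uk_price_paid
    ADD PROJECTION projection_by_year_district_town
    (
        SELECT
            toYear(date),
            district,
            town,
            avg(price),
            sum(price),
            count()
        GROUP BY
            toYear(date),
            district,
            town
    );

-- Populate the projection for data that is already in the table.
ALTER TABLE uk_price_paid MATERIALIZE PROJECTION projection_by_year_district_town;
```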
@@ -271,11 +271,7 @@ Result:

The return type of `toStartOf*`, `toLastDayOfMonth`, `toMonday`, `timeSlot` functions described below is determined by the configuration parameter [enable_extended_results_for_datetime_functions](../../operations/settings/settings#enable-extended-results-for-datetime-functions) which is `0` by default.

Behavior for
* `enable_extended_results_for_datetime_functions = 0`: Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime`. Functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` return `DateTime`. Though these functions can take values of the extended types `Date32` and `DateTime64` as an argument, passing them a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results. In case argument is out of normal range:
  * If the argument is smaller than 1970, the result will be calculated from the argument `1970-01-01 (00:00:00)` instead.
  * If the return type is `DateTime` and the argument is larger than `2106-02-07 08:28:15`, the result will be calculated from the argument `2106-02-07 08:28:15` instead.
  * If the return type is `Date` and the argument is larger than `2149-06-06`, the result will be calculated from the argument `2149-06-06` instead.
  * If `toLastDayOfMonth` is called with an argument greater then `2149-05-31`, the result will be calculated from the argument `2149-05-31` instead.
* `enable_extended_results_for_datetime_functions = 0`: Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime`. Functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` return `DateTime`. Though these functions can take values of the extended types `Date32` and `DateTime64` as an argument, passing them a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results.
* `enable_extended_results_for_datetime_functions = 1`:
  * Functions `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` return `Date` or `DateTime` if their argument is a `Date` or `DateTime`, and they return `Date32` or `DateTime64` if their argument is a `Date32` or `DateTime64`.
  * Functions `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` return `DateTime` if their argument is a `Date` or `DateTime`, and they return `DateTime64` if their argument is a `Date32` or `DateTime64`.
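As an editorial illustration of the behavior documented in this hunk (not part of the diff), the return type of the rounding functions depends on the argument type once the setting is enabled; a quick way to observe this on any server is:

```sql
-- Illustrative: compare return types with extended results enabled.
SELECT
    toTypeName(toStartOfMonth(toDate('2022-09-22'))),   -- Date
    toTypeName(toStartOfMonth(toDate32('2022-09-22')))  -- Date32 when the setting is 1, Date otherwise
SETTINGS enable_extended_results_for_datetime_functions = 1;
```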
@@ -302,25 +298,22 @@ Returns the date.

Rounds down a date or date with time to the first day of the month.
Returns the date.

## toLastDayOfMonth

Rounds up a date or date with time to the last day of the month.
Returns the date.
:::note
The behavior of parsing incorrect dates is implementation specific. ClickHouse may return zero date, throw an exception or do “natural” overflow.
:::

If `toLastDayOfMonth` is called with an argument of type `Date` greater then 2149-05-31, the result will be calculated from the argument 2149-05-31 instead.

## toMonday

Rounds down a date or date with time to the nearest Monday.
As a special case, date arguments `1970-01-01`, `1970-01-02`, `1970-01-03` and `1970-01-04` return date `1970-01-01`.
Returns the date.

## toStartOfWeek(t\[,mode\])

Rounds down a date or date with time to the nearest Sunday or Monday by mode.
Returns the date.
As a special case, date arguments `1970-01-01`, `1970-01-02`, `1970-01-03` and `1970-01-04` (and `1970-01-05` if `mode` is `1`) return date `1970-01-01`.
The `mode` argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used.
The mode argument works exactly like the mode argument to toWeek(). For the single-argument syntax, a mode value of 0 is used.

## toStartOfDay
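A small usage sketch for the rounding functions documented above (illustrative only, not part of the diff):

```sql
-- Illustrative: round the same date to Monday and to the start of the week (mode 1 starts weeks on Monday).
SELECT
    toMonday(toDateTime('2022-09-22 12:00:00')) AS monday,
    toStartOfWeek(toDate('2022-09-22'))         AS week_start_default,  -- mode 0
    toStartOfWeek(toDate('2022-09-22'), 1)      AS week_start_monday;
```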
@@ -671,9 +664,9 @@ Aliases: `dateDiff`, `DATE_DIFF`.

- `quarter`
- `year`

- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md) or [DateTime](../../sql-reference/data-types/datetime.md).
- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).

- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
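For reference, a minimal `dateDiff` call using the newly documented `Date32` and `DateTime64` argument types might look like this (illustrative only):

```sql
-- Illustrative: dateDiff now accepts Date32 and DateTime64 arguments as well.
SELECT
    dateDiff('day', toDate32('2021-12-25'), toDate32('2022-01-01')) AS days,
    dateDiff('hour', toDateTime64('2022-01-01 00:00:00', 3), toDateTime64('2022-01-01 12:30:00', 3)) AS hours;
```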
@@ -1075,7 +1068,7 @@ Example:

SELECT timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600));
SELECT timeSlots(toDateTime('1980-12-12 21:01:02', 'UTC'), toUInt32(600), 299);
SELECT timeSlots(toDateTime64('1980-12-12 21:01:02.1234', 4, 'UTC'), toDecimal64(600.1, 1), toDecimal64(299, 0));
```
```
``` text
┌─timeSlots(toDateTime('2012-01-01 12:20:00'), toUInt32(600))─┐
│ ['2012-01-01 12:00:00','2012-01-01 12:30:00'] │
@@ -1163,7 +1156,7 @@ dateName(date_part, date)

**Arguments**

- `date_part` — Date part. Possible values: 'year', 'quarter', 'month', 'week', 'dayofyear', 'day', 'weekday', 'hour', 'minute', 'second'. [String](../../sql-reference/data-types/string.md).
- `date` — Date. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `date` — Date. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
- `timezone` — Timezone. Optional. [String](../../sql-reference/data-types/string.md).

**Returned value**
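And a short `dateName` call matching the argument list above (illustrative only):

```sql
-- Illustrative: extract named date parts from a DateTime value.
SELECT
    dateName('year', toDateTime('2021-04-14 11:22:33'))    AS year,
    dateName('month', toDateTime('2021-04-14 11:22:33'))   AS month,
    dateName('weekday', toDateTime('2021-04-14 11:22:33')) AS weekday;
```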
@ -571,7 +571,7 @@ Example:
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s,
|
||||
transform(domain(Referer), ['yandex.ru', 'google.ru', 'vkontakte.ru'], ['www.yandex', 'example.com', 'vk.com']) AS s,
|
||||
count() AS c
|
||||
FROM test.hits
|
||||
GROUP BY domain(Referer)
|
||||
|
@ -1,21 +0,0 @@
|
||||
---
|
||||
slug: /en/sql-reference/statements/misc
|
||||
toc_hidden: true
|
||||
sidebar_position: 70
|
||||
---
|
||||
|
||||
# Miscellaneous Statements
|
||||
|
||||
- [ATTACH](../../sql-reference/statements/attach.md)
|
||||
- [CHECK TABLE](../../sql-reference/statements/check-table.md)
|
||||
- [DESCRIBE TABLE](../../sql-reference/statements/describe-table.md)
|
||||
- [DETACH](../../sql-reference/statements/detach.md)
|
||||
- [DROP](../../sql-reference/statements/drop.md)
|
||||
- [EXISTS](../../sql-reference/statements/exists.md)
|
||||
- [KILL](../../sql-reference/statements/kill.md)
|
||||
- [OPTIMIZE](../../sql-reference/statements/optimize.md)
|
||||
- [RENAME](../../sql-reference/statements/rename.md)
|
||||
- [SET](../../sql-reference/statements/set.md)
|
||||
- [SET ROLE](../../sql-reference/statements/set-role.md)
|
||||
- [TRUNCATE](../../sql-reference/statements/truncate.md)
|
||||
- [USE](../../sql-reference/statements/use.md)
|
@ -6,7 +6,7 @@ sidebar_label: Date
|
||||
|
||||
# Date {#data-type-date}
|
||||
|
||||
Дата. Хранится в двух байтах в виде (беззнакового) числа дней, прошедших от 1970-01-01. Позволяет хранить значения от чуть больше, чем начала unix-эпохи до верхнего порога, определяющегося константой на этапе компиляции (сейчас - до 2149 года, последний полностью поддерживаемый год - 2148).
|
||||
Дата. Хранится в двух байтах в виде (беззнакового) числа дней, прошедших от 1970-01-01. Позволяет хранить значения от чуть больше, чем начала unix-эпохи до верхнего порога, определяющегося константой на этапе компиляции (сейчас - до 2106 года, последний полностью поддерживаемый год - 2105).
|
||||
|
||||
Диапазон значений: \[1970-01-01, 2149-06-06\].
|
||||
|
||||
|
@ -272,15 +272,9 @@ SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Tokyo') AS unix_timestamp;
|
||||
|
||||
Поведение для
|
||||
* `enable_extended_results_for_datetime_functions = 0`: Функции `toStartOf*`, `toLastDayOfMonth`, `toMonday` возвращают `Date` или `DateTime`. Функции `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` возвращают `DateTime`. Хотя эти функции могут принимать значения типа `Date32` или `DateTime64` в качестве аргумента, при обработке аргумента вне нормального диапазона значений (`1970` - `2148` для `Date` и `1970-01-01 00:00:00`-`2106-02-07 08:28:15` для `DateTime`) будет получен некорректный результат.
|
||||
В случае если значение аргумента вне нормального диапазона:
|
||||
* `1970-01-01 (00:00:00)` будет возвращён для моментов времени до 1970 года,
|
||||
* `2106-02-07 08:28:15` будет взят в качестве аргумента, если полученный аргумент превосходит данное значение и возвращаемый тип - `DateTime`,
|
||||
* `2149-06-06` будет взят в качестве аргумента, если полученный аргумент превосходит данное значение и возвращаемый тип - `Date`,
|
||||
* `2149-05-31` будет результатом функции `toLastDayOfMonth` при обработке аргумента больше `2149-05-31`.
|
||||
* `enable_extended_results_for_datetime_functions = 1`:
|
||||
* Функции `toStartOfYear`, `toStartOfISOYear`, `toStartOfQuarter`, `toStartOfMonth`, `toStartOfWeek`, `toLastDayOfMonth`, `toMonday` возвращают `Date` или `DateTime` если их аргумент `Date` или `DateTime` и они возвращают `Date32` или `DateTime64` если их аргумент `Date32` или `DateTime64`.
|
||||
* Функции `toStartOfDay`, `toStartOfHour`, `toStartOfFifteenMinutes`, `toStartOfTenMinutes`, `toStartOfFiveMinutes`, `toStartOfMinute`, `timeSlot` возвращают `DateTime` если их аргумент `Date` или `DateTime` и они возвращают `DateTime64` если их аргумент `Date32` или `DateTime64`.
|
||||
|
||||
:::
|
||||
|
||||
## toStartOfYear {#tostartofyear}
|
||||
@ -321,20 +315,20 @@ SELECT toStartOfISOYear(toDate('2017-01-01')) AS ISOYear20170101;
|
||||
Округляет дату или дату-с-временем до последнего числа месяца.
|
||||
Возвращается дата.
|
||||
|
||||
Если `toLastDayOfMonth` вызывается с аргументом типа `Date` большим чем 2149-05-31, то результат будет вычислен от аргумента 2149-05-31.
|
||||
|
||||
:::note "Attention"
|
||||
Возвращаемое значение для некорректных дат зависит от реализации. ClickHouse может вернуть нулевую дату, выбросить исключение, или выполнить «естественное» перетекание дат между месяцами.
|
||||
:::
|
||||
|
||||
## toMonday {#tomonday}
|
||||
|
||||
Округляет дату или дату-с-временем вниз до ближайшего понедельника.
|
||||
Частный случай: для дат `1970-01-01`, `1970-01-02`, `1970-01-03` и `1970-01-04` результатом будет `1970-01-01`.
|
||||
Возвращается дата.
|
||||
|
||||
## toStartOfWeek(t[,mode]) {#tostartofweek}
|
||||
|
||||
Округляет дату или дату со временем до ближайшего воскресенья или понедельника в соответствии с mode.
|
||||
Возвращается дата.
|
||||
Частный случай: для дат `1970-01-01`, `1970-01-02`, `1970-01-03` и `1970-01-04` (и `1970-01-05`, если `mode` равен `1`) результатом будет `1970-01-01`.
|
||||
Аргумент `mode` работает точно так же, как аргумент mode [toWeek()](#toweek). Если аргумент mode опущен, то используется режим 0.
|
||||
Аргумент mode работает точно так же, как аргумент mode [toWeek()](#toweek). Если аргумент mode опущен, то используется режим 0.
|
||||
|
||||
## toStartOfDay {#tostartofday}
|
||||
|
||||
@ -721,9 +715,9 @@ date_diff('unit', startdate, enddate, [timezone])
|
||||
- `quarter`
|
||||
- `year`
|
||||
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md) или [DateTime](../../sql-reference/data-types/datetime.md).
|
||||
- `startdate` — первая дата или дата со временем, которая вычитается из `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md) или [DateTime](../../sql-reference/data-types/datetime.md).
|
||||
- `enddate` — вторая дата или дата со временем, из которой вычитается `startdate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
|
||||
- `timezone` — [часовой пояс](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (необязательно). Если этот аргумент указан, то он применяется как для `startdate`, так и для `enddate`. Если этот аргумент не указан, то используются часовые пояса аргументов `startdate` и `enddate`. Если часовые пояса аргументов `startdate` и `enddate` не совпадают, то результат не определен. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
@ -975,8 +969,7 @@ SELECT now('Europe/Moscow');
|
||||
|
||||
## timeSlots(StartTime, Duration,\[, Size\]) {#timeslotsstarttime-duration-size}
|
||||
Для интервала, начинающегося в `StartTime` и длящегося `Duration` секунд, возвращает массив моментов времени, кратных `Size`. Параметр `Size` указывать необязательно, по умолчанию он равен 1800 секундам (30 минутам) - необязательный параметр.
|
||||
Данная функция может использоваться, например, для анализа количества просмотров страницы за соответствующую сессию.
|
||||
Аргумент `StartTime` может иметь тип `DateTime` или `DateTime64`. В случае, если используется `DateTime`, аргументы `Duration` и `Size` должны иметь тип `UInt32`; Для DateTime64 они должны быть типа `Decimal64`.
|
||||
|
||||
Возвращает массив DateTime/DateTime64 (тип будет совпадать с типом параметра ’StartTime’). Для DateTime64 масштаб(scale) возвращаемой величины может отличаться от масштаба фргумента ’StartTime’ --- результат будет иметь наибольший масштаб среди всех данных аргументов.
|
||||
|
||||
Пример использования:
|
||||
@ -1085,7 +1078,7 @@ dateName(date_part, date)
|
||||
**Аргументы**
|
||||
|
||||
- `date_part` — часть даты. Возможные значения: 'year', 'quarter', 'month', 'week', 'dayofyear', 'day', 'weekday', 'hour', 'minute', 'second'. [String](../../sql-reference/data-types/string.md).
|
||||
- `date` — дата. [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
- `date` — дата. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) или [DateTime64](../../sql-reference/data-types/datetime64.md).
|
||||
- `timezone` — часовой пояс. Необязательный аргумент. [String](../../sql-reference/data-types/string.md).
|
||||
|
||||
**Возвращаемое значение**
|
||||
|
@ -568,7 +568,7 @@ ORDER BY c DESC
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s,
|
||||
transform(domain(Referer), ['yandex.ru', 'google.ru', 'vkontakte.ru'], ['www.yandex', 'example.com', 'vk.com']) AS s,
|
||||
count() AS c
|
||||
FROM test.hits
|
||||
GROUP BY domain(Referer)
|
||||
|
@ -1,21 +0,0 @@
|
||||
---
|
||||
slug: /ru/sql-reference/statements/misc
|
||||
sidebar_position: 41
|
||||
---
|
||||
|
||||
# Прочие виды запросов {#prochie-vidy-zaprosov}
|
||||
|
||||
- [ATTACH](../../sql-reference/statements/attach.md)
|
||||
- [CHECK TABLE](../../sql-reference/statements/check-table.md)
|
||||
- [DESCRIBE TABLE](../../sql-reference/statements/describe-table.md)
|
||||
- [DETACH](../../sql-reference/statements/detach.md)
|
||||
- [DROP](../../sql-reference/statements/drop.md)
|
||||
- [EXISTS](../../sql-reference/statements/exists.md)
|
||||
- [KILL](../../sql-reference/statements/kill.md)
|
||||
- [OPTIMIZE](../../sql-reference/statements/optimize.md)
|
||||
- [RENAME](../../sql-reference/statements/rename.md)
|
||||
- [SET](../../sql-reference/statements/set.md)
|
||||
- [SET ROLE](../../sql-reference/statements/set-role.md)
|
||||
- [TRUNCATE](../../sql-reference/statements/truncate.md)
|
||||
- [USE](../../sql-reference/statements/use.md)
|
||||
|
@ -3,7 +3,7 @@ slug: /zh/sql-reference/data-types/date
|
||||
---
|
||||
# 日期 {#date}
|
||||
|
||||
日期类型,用两个字节存储,表示从 1970-01-01 (无符号) 到当前的日期值。允许存储从 Unix 纪元开始到编译阶段定义的上限阈值常量(目前上限是2149年,但最终完全支持的年份为2148)。最小值输出为1970-01-01。
|
||||
日期类型,用两个字节存储,表示从 1970-01-01 (无符号) 到当前的日期值。允许存储从 Unix 纪元开始到编译阶段定义的上限阈值常量(目前上限是2106年,但最终完全支持的年份为2105)。最小值输出为1970-01-01。
|
||||
|
||||
值的范围: \[1970-01-01, 2149-06-06\]。
|
||||
|
||||
|
@ -237,7 +237,7 @@ ORDER BY c DESC
|
||||
|
||||
``` sql
|
||||
SELECT
|
||||
transform(domain(Referer), ['yandex.ru', 'google.ru', 'vk.com'], ['www.yandex', 'example.com']) AS s,
|
||||
transform(domain(Referer), ['yandex.ru', 'google.ru', 'vkontakte.ru'], ['www.yandex', 'example.com', 'vk.com']) AS s,
|
||||
count() AS c
|
||||
FROM test.hits
|
||||
GROUP BY domain(Referer)
|
||||
|
@ -26,8 +26,10 @@ SOURCE=${SOURCE:-$PKG_ROOT}
|
||||
HELP="${0} [--test] [--rpm] [-h|--help]
|
||||
--test - adds '+test' prefix to version
|
||||
--apk - build APK packages
|
||||
--archlinux - build archlinux packages
|
||||
--rpm - build RPM packages
|
||||
--tgz - build tarball package
|
||||
--deb - build deb package
|
||||
--help - show this help and exit
|
||||
|
||||
Used envs:
|
||||
@ -47,16 +49,21 @@ fi
|
||||
export CLICKHOUSE_VERSION_STRING
|
||||
|
||||
|
||||
|
||||
while [[ $1 == --* ]]
|
||||
do
|
||||
case "$1" in
|
||||
--test )
|
||||
VERSION_POSTFIX+='+test'
|
||||
shift ;;
|
||||
--deb )
|
||||
MAKE_DEB=1
|
||||
shift ;;
|
||||
--apk )
|
||||
MAKE_APK=1
|
||||
shift ;;
|
||||
--archlinux )
|
||||
MAKE_ARCHLINUX=1
|
||||
shift ;;
|
||||
--rpm )
|
||||
MAKE_RPM=1
|
||||
shift ;;
|
||||
@ -131,18 +138,24 @@ CLICKHOUSE_VERSION_STRING+=$VERSION_POSTFIX
|
||||
echo -e "\nCurrent version is $CLICKHOUSE_VERSION_STRING"
|
||||
|
||||
for config in clickhouse*.yaml; do
|
||||
echo "Building deb package for $config"
|
||||
if [ -n "$MAKE_DEB" ] || [ -n "$MAKE_TGZ" ]; then
|
||||
echo "Building deb package for $config"
|
||||
|
||||
# Preserve package path
|
||||
exec 9>&1
|
||||
PKG_PATH=$(nfpm package --target "$OUTPUT_DIR" --config "$config" --packager deb | tee /dev/fd/9)
|
||||
PKG_PATH=${PKG_PATH##*created package: }
|
||||
exec 9>&-
|
||||
# Preserve package path
|
||||
exec 9>&1
|
||||
PKG_PATH=$(nfpm package --target "$OUTPUT_DIR" --config "$config" --packager deb | tee /dev/fd/9)
|
||||
PKG_PATH=${PKG_PATH##*created package: }
|
||||
exec 9>&-
|
||||
fi
|
||||
|
||||
if [ -n "$MAKE_APK" ]; then
|
||||
echo "Building apk package for $config"
|
||||
nfpm package --target "$OUTPUT_DIR" --config "$config" --packager apk
|
||||
fi
|
||||
if [ -n "$MAKE_ARCHLINUX" ]; then
|
||||
echo "Building archlinux package for $config"
|
||||
nfpm package --target "$OUTPUT_DIR" --config "$config" --packager archlinux
|
||||
fi
|
||||
if [ -n "$MAKE_RPM" ]; then
|
||||
echo "Building rpm package for $config"
|
||||
nfpm package --target "$OUTPUT_DIR" --config "$config" --packager rpm
|
||||
|
@ -27,8 +27,8 @@ deb:
|
||||
Source: clickhouse
|
||||
|
||||
contents:
|
||||
- src: root/etc/clickhouse-keeper
|
||||
dst: /etc/clickhouse-keeper
|
||||
- src: root/etc/clickhouse-keeper/keeper_config.xml
|
||||
dst: /etc/clickhouse-keeper/keeper_config.xml
|
||||
type: config
|
||||
- src: root/usr/bin/clickhouse-keeper
|
||||
dst: /usr/bin/clickhouse-keeper
|
||||
|
@ -42,8 +42,11 @@ deb:
|
||||
Source: clickhouse
|
||||
|
||||
contents:
|
||||
- src: root/etc/clickhouse-server
|
||||
dst: /etc/clickhouse-server
|
||||
- src: root/etc/clickhouse-server/config.xml
|
||||
dst: /etc/clickhouse-server/config.xml
|
||||
type: config
|
||||
- src: root/etc/clickhouse-server/users.xml
|
||||
dst: /etc/clickhouse-server/users.xml
|
||||
type: config
|
||||
- src: clickhouse-server.init
|
||||
dst: /etc/init.d/clickhouse-server
|
||||
|
@ -1108,15 +1108,21 @@ void Client::processConfig()
|
||||
else
|
||||
format = config().getString("format", is_interactive ? "PrettyCompact" : "TabSeparated");
|
||||
|
||||
format_max_block_size = config().getInt("format_max_block_size", global_context->getSettingsRef().max_block_size);
|
||||
format_max_block_size = config().getUInt64("format_max_block_size",
|
||||
global_context->getSettingsRef().max_block_size);
|
||||
|
||||
insert_format = "Values";
|
||||
|
||||
/// Setting value from cmd arg overrides one from config
|
||||
if (global_context->getSettingsRef().max_insert_block_size.changed)
|
||||
{
|
||||
insert_format_max_block_size = global_context->getSettingsRef().max_insert_block_size;
|
||||
}
|
||||
else
|
||||
insert_format_max_block_size = config().getInt("insert_format_max_block_size", global_context->getSettingsRef().max_insert_block_size);
|
||||
{
|
||||
insert_format_max_block_size = config().getUInt64("insert_format_max_block_size",
|
||||
global_context->getSettingsRef().max_insert_block_size);
|
||||
}
|
||||
|
||||
ClientInfo & client_info = global_context->getClientInfo();
|
||||
client_info.setInitialQuery();
|
||||
|
@ -19,7 +19,6 @@
|
||||
{host}
|
||||
{port}
|
||||
{user}
|
||||
{database}
|
||||
{display_name}
|
||||
Terminal colors: https://misc.flogisoft.com/bash/tip_colors_and_formatting
|
||||
See also: https://wiki.hackzine.org/development/misc/readline-color-prompt.html
|
||||
|
@ -47,8 +47,8 @@ public:
|
||||
WrappingUInt32 epoch;
|
||||
WrappingUInt32 counter;
|
||||
explicit Zxid(UInt64 _zxid)
|
||||
: epoch(_zxid >> 32)
|
||||
, counter(_zxid)
|
||||
: epoch(static_cast<UInt32>(_zxid >> 32))
|
||||
, counter(static_cast<UInt32>(_zxid))
|
||||
{}
|
||||
|
||||
bool operator<=(const Zxid & other) const
|
||||
|
@ -57,7 +57,7 @@ void DisksApp::addOptions(
|
||||
("config-file,C", po::value<String>(), "Set config file")
|
||||
("disk", po::value<String>(), "Set disk name")
|
||||
("command_name", po::value<String>(), "Name for command to do")
|
||||
("send-logs", "Send logs")
|
||||
("save-logs", "Save logs to a file")
|
||||
("log-level", po::value<String>(), "Logging level")
|
||||
;
|
||||
|
||||
@ -82,10 +82,10 @@ void DisksApp::processOptions()
|
||||
config().setString("config-file", options["config-file"].as<String>());
|
||||
if (options.count("disk"))
|
||||
config().setString("disk", options["disk"].as<String>());
|
||||
if (options.count("send-logs"))
|
||||
config().setBool("send-logs", true);
|
||||
if (options.count("save-logs"))
|
||||
config().setBool("save-logs", true);
|
||||
if (options.count("log-level"))
|
||||
Poco::Logger::root().setLevel(options["log-level"].as<std::string>());
|
||||
config().setString("log-level", options["log-level"].as<String>());
|
||||
}
|
||||
|
||||
void DisksApp::init(std::vector<String> & common_arguments)
|
||||
@ -149,15 +149,6 @@ void DisksApp::parseAndCheckOptions(
|
||||
|
||||
int DisksApp::main(const std::vector<String> & /*args*/)
|
||||
{
|
||||
if (config().has("send-logs"))
|
||||
{
|
||||
auto log_level = config().getString("log-level", "trace");
|
||||
Poco::Logger::root().setLevel(Poco::Logger::parseLevel(log_level));
|
||||
|
||||
auto log_path = config().getString("logger.clickhouse-disks", "/var/log/clickhouse-server/clickhouse-disks.log");
|
||||
Poco::Logger::root().setChannel(Poco::AutoPtr<Poco::FileChannel>(new Poco::FileChannel(log_path)));
|
||||
}
|
||||
|
||||
if (config().has("config-file") || fs::exists(getDefaultConfigFileName()))
|
||||
{
|
||||
String config_path = config().getString("config-file", getDefaultConfigFileName());
|
||||
@ -171,6 +162,20 @@ int DisksApp::main(const std::vector<String> & /*args*/)
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "No config-file specifiged");
|
||||
}
|
||||
|
||||
if (config().has("save-logs"))
|
||||
{
|
||||
auto log_level = config().getString("log-level", "trace");
|
||||
Poco::Logger::root().setLevel(Poco::Logger::parseLevel(log_level));
|
||||
|
||||
auto log_path = config().getString("logger.clickhouse-disks", "/var/log/clickhouse-server/clickhouse-disks.log");
|
||||
Poco::Logger::root().setChannel(Poco::AutoPtr<Poco::FileChannel>(new Poco::FileChannel(log_path)));
|
||||
}
|
||||
else
|
||||
{
|
||||
auto log_level = config().getString("log-level", "none");
|
||||
Poco::Logger::root().setLevel(Poco::Logger::parseLevel(log_level));
|
||||
}
|
||||
|
||||
registerDisks();
|
||||
registerFormats();
|
||||
|
||||
|
@ -893,7 +893,7 @@ namespace
|
||||
if (fs::exists(pid_file))
|
||||
{
|
||||
ReadBufferFromFile in(pid_file.string());
|
||||
UInt64 pid;
|
||||
Int32 pid;
|
||||
if (tryReadIntText(pid, in))
|
||||
{
|
||||
fmt::print("{} file exists and contains pid = {}.\n", pid_file.string(), pid);
|
||||
@ -982,9 +982,9 @@ namespace
|
||||
return 0;
|
||||
}
|
||||
|
||||
UInt64 isRunning(const fs::path & pid_file)
|
||||
int isRunning(const fs::path & pid_file)
|
||||
{
|
||||
UInt64 pid = 0;
|
||||
int pid = 0;
|
||||
|
||||
if (fs::exists(pid_file))
|
||||
{
|
||||
@ -1057,7 +1057,7 @@ namespace
|
||||
if (force && do_not_kill)
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Specified flags are incompatible");
|
||||
|
||||
UInt64 pid = isRunning(pid_file);
|
||||
int pid = isRunning(pid_file);
|
||||
|
||||
if (!pid)
|
||||
return 0;
|
||||
|
@ -45,6 +45,7 @@ if (BUILD_STANDALONE_KEEPER)
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperLogStore.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperServer.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperSnapshotManager.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperSnapshotManagerS3.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStateMachine.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStateManager.cpp
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStorage.cpp
|
||||
|
@ -68,12 +68,12 @@ namespace ErrorCodes
|
||||
namespace
|
||||
{
|
||||
|
||||
int waitServersToFinish(std::vector<DB::ProtocolServerAdapter> & servers, size_t seconds_to_wait)
|
||||
size_t waitServersToFinish(std::vector<DB::ProtocolServerAdapter> & servers, size_t seconds_to_wait)
|
||||
{
|
||||
const int sleep_max_ms = 1000 * seconds_to_wait;
|
||||
const int sleep_one_ms = 100;
|
||||
int sleep_current_ms = 0;
|
||||
int current_connections = 0;
|
||||
const size_t sleep_max_ms = 1000 * seconds_to_wait;
|
||||
const size_t sleep_one_ms = 100;
|
||||
size_t sleep_current_ms = 0;
|
||||
size_t current_connections = 0;
|
||||
for (;;)
|
||||
{
|
||||
current_connections = 0;
|
||||
@ -441,7 +441,7 @@ int Keeper::main(const std::vector<std::string> & /*args*/)
|
||||
main_config_reloader.reset();
|
||||
|
||||
LOG_DEBUG(log, "Waiting for current connections to Keeper to finish.");
|
||||
int current_connections = 0;
|
||||
size_t current_connections = 0;
|
||||
for (auto & server : *servers)
|
||||
{
|
||||
server.stop();
|
||||
|
@ -8,9 +8,10 @@
|
||||
#include <Databases/DatabaseMemory.h>
|
||||
#include <Storages/System/attachSystemTables.h>
|
||||
#include <Storages/System/attachInformationSchemaTables.h>
|
||||
#include <Interpreters/DatabaseCatalog.h>
|
||||
#include <Interpreters/JIT/CompiledExpressionCache.h>
|
||||
#include <Interpreters/ProcessList.h>
|
||||
#include <Interpreters/loadMetadata.h>
|
||||
#include <Interpreters/DatabaseCatalog.h>
|
||||
#include <base/getFQDNOrHostName.h>
|
||||
#include <Common/scope_guard_safe.h>
|
||||
#include <Interpreters/Session.h>
|
||||
@ -546,9 +547,14 @@ void LocalServer::processConfig()
|
||||
|
||||
/// Setting value from cmd arg overrides one from config
|
||||
if (global_context->getSettingsRef().max_insert_block_size.changed)
|
||||
{
|
||||
insert_format_max_block_size = global_context->getSettingsRef().max_insert_block_size;
|
||||
}
|
||||
else
|
||||
insert_format_max_block_size = config().getInt("insert_format_max_block_size", global_context->getSettingsRef().max_insert_block_size);
|
||||
{
|
||||
insert_format_max_block_size = config().getUInt64("insert_format_max_block_size",
|
||||
global_context->getSettingsRef().max_insert_block_size);
|
||||
}
|
||||
|
||||
/// Sets external authenticators config (LDAP, Kerberos).
|
||||
global_context->setExternalAuthenticatorsConfig(config());
|
||||
@ -586,6 +592,18 @@ void LocalServer::processConfig()
|
||||
if (mmap_cache_size)
|
||||
global_context->setMMappedFileCache(mmap_cache_size);
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
/// 128 MB
|
||||
constexpr size_t compiled_expression_cache_size_default = 1024 * 1024 * 128;
|
||||
size_t compiled_expression_cache_size = config().getUInt64("compiled_expression_cache_size", compiled_expression_cache_size_default);
|
||||
|
||||
constexpr size_t compiled_expression_cache_elements_size_default = 10000;
|
||||
size_t compiled_expression_cache_elements_size
|
||||
= config().getUInt64("compiled_expression_cache_elements_size", compiled_expression_cache_elements_size_default);
|
||||
|
||||
CompiledExpressionCacheFactory::instance().init(compiled_expression_cache_size, compiled_expression_cache_elements_size);
|
||||
#endif
|
||||
|
||||
/// Load global settings from default_profile and system_profile.
|
||||
global_context->setDefaultProfiles(config());
|
||||
|
||||
|
@ -279,7 +279,7 @@ Float transformFloatMantissa(Float x, UInt64 seed)
|
||||
constexpr size_t mantissa_num_bits = std::is_same_v<Float, Float32> ? 23 : 52;
|
||||
|
||||
UInt x_uint = bit_cast<UInt>(x);
|
||||
x_uint = feistelNetwork(x_uint, mantissa_num_bits, seed);
|
||||
x_uint = static_cast<UInt>(feistelNetwork(x_uint, mantissa_num_bits, seed));
|
||||
return bit_cast<Float>(x_uint);
|
||||
}
|
||||
|
||||
@ -511,13 +511,13 @@ public:
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
{
|
||||
UInt32 src_datetime = src_data[i];
|
||||
UInt32 src_date = date_lut.toDate(src_datetime);
|
||||
UInt32 src_date = static_cast<UInt32>(date_lut.toDate(src_datetime));
|
||||
|
||||
Int32 src_diff = src_datetime - src_prev_value;
|
||||
Int32 res_diff = transformSigned(src_diff, seed);
|
||||
Int32 res_diff = static_cast<Int32>(transformSigned(src_diff, seed));
|
||||
|
||||
UInt32 new_datetime = res_prev_value + res_diff;
|
||||
UInt32 new_time = new_datetime - date_lut.toDate(new_datetime);
|
||||
UInt32 new_time = new_datetime - static_cast<UInt32>(date_lut.toDate(new_datetime));
|
||||
res_data[i] = src_date + new_time;
|
||||
|
||||
src_prev_value = src_datetime;
|
||||
|
@ -183,7 +183,10 @@ void ODBCColumnsInfoHandler::handleRequest(HTTPServerRequest & request, HTTPServ
|
||||
if (columns.empty())
|
||||
throw Exception("Columns definition was not returned", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
WriteBufferFromHTTPServerResponse out(response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout);
|
||||
WriteBufferFromHTTPServerResponse out(
|
||||
response,
|
||||
request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD,
|
||||
keep_alive_timeout);
|
||||
try
|
||||
{
|
||||
writeStringBinary(columns.toString(), out);
|
||||
|
@ -139,7 +139,7 @@ void ODBCSource::insertValue(
|
||||
readDateTimeText(time, in, assert_cast<const DataTypeDateTime *>(data_type.get())->getTimeZone());
|
||||
if (time < 0)
|
||||
time = 0;
|
||||
assert_cast<ColumnUInt32 &>(column).insertValue(time);
|
||||
assert_cast<ColumnUInt32 &>(column).insertValue(static_cast<UInt32>(time));
|
||||
break;
|
||||
}
|
||||
case ValueType::vtDateTime64:
|
||||
|
@ -228,12 +228,12 @@ catch (...)
|
||||
path));
|
||||
}
|
||||
|
||||
int waitServersToFinish(std::vector<DB::ProtocolServerAdapter> & servers, size_t seconds_to_wait)
|
||||
size_t waitServersToFinish(std::vector<DB::ProtocolServerAdapter> & servers, size_t seconds_to_wait)
|
||||
{
|
||||
const int sleep_max_ms = 1000 * seconds_to_wait;
|
||||
const int sleep_one_ms = 100;
|
||||
int sleep_current_ms = 0;
|
||||
int current_connections = 0;
|
||||
const size_t sleep_max_ms = 1000 * seconds_to_wait;
|
||||
const size_t sleep_one_ms = 100;
|
||||
size_t sleep_current_ms = 0;
|
||||
size_t current_connections = 0;
|
||||
for (;;)
|
||||
{
|
||||
current_connections = 0;
|
||||
@ -933,7 +933,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
else
|
||||
{
|
||||
rlim_t old = rlim.rlim_cur;
|
||||
rlim.rlim_cur = config().getUInt("max_open_files", rlim.rlim_max);
|
||||
rlim.rlim_cur = config().getUInt("max_open_files", static_cast<unsigned>(rlim.rlim_max));
|
||||
int rc = setrlimit(RLIMIT_NOFILE, &rlim);
|
||||
if (rc != 0)
|
||||
LOG_WARNING(log, "Cannot set max number of file descriptors to {}. Try to specify max_open_files according to your system limits. error: {}", rlim.rlim_cur, errnoToString());
|
||||
@ -1507,7 +1507,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
if (!servers_to_start_before_tables.empty())
|
||||
{
|
||||
LOG_DEBUG(log, "Waiting for current connections to servers for tables to finish.");
|
||||
int current_connections = 0;
|
||||
size_t current_connections = 0;
|
||||
for (auto & server : servers_to_start_before_tables)
|
||||
{
|
||||
server.stop();
|
||||
@ -1793,7 +1793,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
|
||||
|
||||
is_cancelled = true;
|
||||
|
||||
int current_connections = 0;
|
||||
size_t current_connections = 0;
|
||||
{
|
||||
std::lock_guard lock(servers_lock);
|
||||
for (auto & server : servers)
|
||||
|
@ -236,7 +236,7 @@ void AllowedClientHosts::IPSubnet::set(const IPAddress & prefix_, const IPAddres
|
||||
|
||||
void AllowedClientHosts::IPSubnet::set(const IPAddress & prefix_, size_t num_prefix_bits)
|
||||
{
|
||||
set(prefix_, IPAddress(num_prefix_bits, prefix_.family()));
|
||||
set(prefix_, IPAddress(static_cast<unsigned>(num_prefix_bits), prefix_.family()));
|
||||
}
|
||||
|
||||
void AllowedClientHosts::IPSubnet::set(const IPAddress & address)
|
||||
|
@ -254,7 +254,7 @@ bool LDAPClient::openConnection()
|
||||
#endif
|
||||
|
||||
{
|
||||
const int search_timeout = params.search_timeout.count();
|
||||
const int search_timeout = static_cast<int>(params.search_timeout.count());
|
||||
diag(ldap_set_option(handle, LDAP_OPT_TIMELIMIT, &search_timeout));
|
||||
}
|
||||
|
||||
|
@ -45,7 +45,8 @@ public:

void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{
assert_cast<ColumnVector<T> &>(to).getData().push_back(this->data(place).rbs.size());
assert_cast<ColumnVector<T> &>(to).getData().push_back(
static_cast<T>(this->data(place).rbs.size()));
}
};

@ -142,7 +143,8 @@ public:

void insertResultInto(AggregateDataPtr __restrict place, IColumn & to, Arena *) const override
{
assert_cast<ColumnVector<T> &>(to).getData().push_back(this->data(place).rbs.size());
assert_cast<ColumnVector<T> &>(to).getData().push_back(
static_cast<T>(this->data(place).rbs.size()));
}
};

@ -426,7 +426,7 @@ public:
return 0;

if (isSmall())
return small.find(x) != small.end();
return small.find(static_cast<T>(x)) != small.end();
else
return rb->contains(static_cast<Value>(x));
}

@ -136,8 +136,8 @@ private:

for (size_t i = 0; i <= size; ++i)
{
previous[i] = i - 1;
next[i] = i + 1;
previous[i] = static_cast<UInt32>(i - 1);
next[i] = static_cast<UInt32>(i + 1);
}

next[size] = 0;

@ -157,7 +157,7 @@ private:
auto quality = [&](UInt32 i) { return points[next[i]].mean - points[i].mean; };

for (size_t i = 0; i + 1 < size; ++i)
queue.push({quality(i), i});
queue.push({quality(static_cast<UInt32>(i)), i});

while (new_size > max_bins && !queue.empty())
{

@ -217,7 +217,7 @@ private:
points[left] = points[right];
}
}
size = left + 1;
size = static_cast<UInt32>(left + 1);
}

public:

@ -540,7 +540,7 @@ public:
/// Assuming to.has()
void changeImpl(StringRef value, Arena * arena)
{
Int32 value_size = value.size;
Int32 value_size = static_cast<Int32>(value.size);

if (value_size <= MAX_SMALL_STRING_SIZE)
{

@ -555,7 +555,7 @@ public:
if (capacity < value_size)
{
/// Don't free large_data here.
capacity = roundUpToPowerOfTwoOrZero(value_size);
capacity = static_cast<Int32>(roundUpToPowerOfTwoOrZero(value_size));
large_data = arena->alloc(capacity);
}

@ -46,7 +46,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile(
if (which.idx == TypeIndex::DateTime64) return std::make_shared<Function<DateTime64, false>>(argument_types, params);

if (which.idx == TypeIndex::Int128) return std::make_shared<Function<Int128, true>>(argument_types, params);
if (which.idx == TypeIndex::UInt128) return std::make_shared<Function<Int128, true>>(argument_types, params);
if (which.idx == TypeIndex::UInt128) return std::make_shared<Function<UInt128, true>>(argument_types, params);
if (which.idx == TypeIndex::Int256) return std::make_shared<Function<Int256, true>>(argument_types, params);
if (which.idx == TypeIndex::UInt256) return std::make_shared<Function<UInt256, true>>(argument_types, params);

@ -40,7 +40,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile(
if (which.idx == TypeIndex::DateTime) return std::make_shared<Function<DataTypeDateTime::FieldType, false>>(argument_types, params);

if (which.idx == TypeIndex::Int128) return std::make_shared<Function<Int128, true>>(argument_types, params);
if (which.idx == TypeIndex::UInt128) return std::make_shared<Function<Int128, true>>(argument_types, params);
if (which.idx == TypeIndex::UInt128) return std::make_shared<Function<UInt128, true>>(argument_types, params);
if (which.idx == TypeIndex::Int256) return std::make_shared<Function<Int256, true>>(argument_types, params);
if (which.idx == TypeIndex::UInt256) return std::make_shared<Function<UInt256, true>>(argument_types, params);

@ -47,7 +47,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile(
if (which.idx == TypeIndex::DateTime64) return std::make_shared<Function<DateTime64, false>>(argument_types, params);

if (which.idx == TypeIndex::Int128) return std::make_shared<Function<Int128, true>>(argument_types, params);
if (which.idx == TypeIndex::UInt128) return std::make_shared<Function<Int128, true>>(argument_types, params);
if (which.idx == TypeIndex::UInt128) return std::make_shared<Function<UInt128, true>>(argument_types, params);
if (which.idx == TypeIndex::Int256) return std::make_shared<Function<Int256, true>>(argument_types, params);
if (which.idx == TypeIndex::UInt256) return std::make_shared<Function<UInt256, true>>(argument_types, params);

@ -46,7 +46,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile(
if (which.idx == TypeIndex::DateTime64) return std::make_shared<Function<DateTime64, false>>(argument_types, params);

if (which.idx == TypeIndex::Int128) return std::make_shared<Function<Int128, true>>(argument_types, params);
if (which.idx == TypeIndex::UInt128) return std::make_shared<Function<Int128, true>>(argument_types, params);
if (which.idx == TypeIndex::UInt128) return std::make_shared<Function<UInt128, true>>(argument_types, params);
if (which.idx == TypeIndex::Int256) return std::make_shared<Function<Int256, true>>(argument_types, params);
if (which.idx == TypeIndex::UInt256) return std::make_shared<Function<UInt256, true>>(argument_types, params);

@ -44,7 +44,7 @@ struct AggregateFunctionRetentionData

void serialize(WriteBuffer & buf) const
{
UInt32 event_value = events.to_ulong();
UInt32 event_value = static_cast<UInt32>(events.to_ulong());
writeBinary(event_value, buf);
}

@ -272,7 +272,7 @@ private:

actions.emplace_back(PatternActionType::SpecificEvent, event_number - 1);
dfa_states.back().transition = DFATransition::SpecificEvent;
dfa_states.back().event = event_number - 1;
dfa_states.back().event = static_cast<uint32_t>(event_number - 1);
dfa_states.emplace_back();
conditions_in_pattern.set(event_number - 1);
}

@ -226,7 +226,7 @@ public:
|
||||
for (UInt8 i = 0; i < events_size; ++i)
|
||||
if (assert_cast<const ColumnVector<UInt8> *>(columns[min_required_args + i])->getData()[row_num])
|
||||
node->events_bitset.set(i);
|
||||
node->event_time = timestamp;
|
||||
node->event_time = static_cast<DataTypeDateTime::FieldType>(timestamp);
|
||||
|
||||
node->can_be_base = assert_cast<const ColumnVector<UInt8> *>(columns[base_cond_column_idx])->getData()[row_num];
|
||||
|
||||
@ -365,7 +365,7 @@ public:
|
||||
/// The first matched event is 0x00000001, the second one is 0x00000002, the third one is 0x00000004, and so on.
|
||||
UInt32 getNextNodeIndex(Data & data) const
|
||||
{
|
||||
const UInt32 unmatched_idx = data.value.size();
|
||||
const UInt32 unmatched_idx = static_cast<UInt32>(data.value.size());
|
||||
|
||||
if (data.value.size() <= events_size)
|
||||
return unmatched_idx;
|
||||
|
@ -165,7 +165,7 @@ private:
|
||||
{
|
||||
for (size_t i = 0; i <= diff_x; ++i)
|
||||
{
|
||||
auto it = data.points.find(min_x_local + i);
|
||||
auto it = data.points.find(static_cast<X>(min_x_local + i));
|
||||
bool found = it != data.points.end();
|
||||
value += getBar(found ? std::round(((it->getMapped() - min_y) / diff_y) * 7) + 1 : 0.0);
|
||||
}
|
||||
@ -173,7 +173,7 @@ private:
|
||||
else
|
||||
{
|
||||
for (size_t i = 0; i <= diff_x; ++i)
|
||||
value += getBar(data.points.has(min_x_local + i) ? 1 : 0);
|
||||
value += getBar(data.points.has(min_x_local + static_cast<X>(i)) ? 1 : 0);
|
||||
}
|
||||
}
|
||||
else
|
||||
@ -202,7 +202,7 @@ private:
|
||||
if (i == bound.first) // is bound
|
||||
{
|
||||
Float64 proportion = bound.second - bound.first;
|
||||
auto it = data.points.find(min_x_local + i);
|
||||
auto it = data.points.find(min_x_local + static_cast<X>(i));
|
||||
bool found = (it != data.points.end());
|
||||
if (found && proportion > 0)
|
||||
new_y = new_y.value_or(0) + it->getMapped() * proportion;
|
||||
@ -229,7 +229,7 @@ private:
|
||||
}
|
||||
else
|
||||
{
|
||||
auto it = data.points.find(min_x_local + i);
|
||||
auto it = data.points.find(min_x_local + static_cast<X>(i));
|
||||
if (it != data.points.end())
|
||||
new_y = new_y.value_or(0) + it->getMapped();
|
||||
}
|
||||
@ -267,8 +267,8 @@ public:
|
||||
if (params.size() == 3)
|
||||
{
|
||||
specified_min_max_x = true;
|
||||
min_x = params.at(1).safeGet<X>();
|
||||
max_x = params.at(2).safeGet<X>();
|
||||
min_x = static_cast<X>(params.at(1).safeGet<X>());
|
||||
max_x = static_cast<X>(params.at(2).safeGet<X>());
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -175,8 +175,9 @@ struct OneAdder
|
||||
{
|
||||
if constexpr (!std::is_same_v<T, String>)
|
||||
{
|
||||
using ValueType = typename decltype(data.set)::value_type;
|
||||
const auto & value = assert_cast<const ColumnVector<T> &>(column).getElement(row_num);
|
||||
data.set.insert(AggregateFunctionUniqTraits<T>::hash(value));
|
||||
data.set.insert(static_cast<ValueType>(AggregateFunctionUniqTraits<T>::hash(value)));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -43,7 +43,7 @@ namespace detail
|
||||
{
|
||||
static Ret hash(UInt128 x)
|
||||
{
|
||||
return sipHash64(x);
|
||||
return static_cast<Ret>(sipHash64(x));
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -40,7 +40,15 @@ struct WelchTTestData : public TTestMoments<Float64>
|
||||
Float64 denominator_x = sx2 * sx2 / (nx * nx * (nx - 1));
|
||||
Float64 denominator_y = sy2 * sy2 / (ny * ny * (ny - 1));
|
||||
|
||||
return numerator / (denominator_x + denominator_y);
|
||||
auto result = numerator / (denominator_x + denominator_y);
|
||||
|
||||
if (result <= 0 || std::isinf(result) || isNaN(result))
|
||||
throw Exception(
|
||||
ErrorCodes::BAD_ARGUMENTS,
|
||||
"Cannot calculate p_value, because the t-distribution \
|
||||
has inappropriate value of degrees of freedom (={}). It should be > 0", result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
std::tuple<Float64, Float64> getResult() const
|
||||
|
@ -53,9 +53,12 @@ String IAggregateFunction::getDescription() const
|
||||
|
||||
bool IAggregateFunction::haveEqualArgumentTypes(const IAggregateFunction & rhs) const
|
||||
{
|
||||
return std::equal(argument_types.begin(), argument_types.end(),
|
||||
rhs.argument_types.begin(), rhs.argument_types.end(),
|
||||
[](const auto & t1, const auto & t2) { return t1->equals(*t2); });
|
||||
return std::equal(
|
||||
argument_types.begin(),
|
||||
argument_types.end(),
|
||||
rhs.argument_types.begin(),
|
||||
rhs.argument_types.end(),
|
||||
[](const auto & t1, const auto & t2) { return t1->equals(*t2); });
|
||||
}
|
||||
|
||||
bool IAggregateFunction::haveSameStateRepresentation(const IAggregateFunction & rhs) const
|
||||
@ -67,11 +70,7 @@ bool IAggregateFunction::haveSameStateRepresentation(const IAggregateFunction &
|
||||
|
||||
bool IAggregateFunction::haveSameStateRepresentationImpl(const IAggregateFunction & rhs) const
|
||||
{
|
||||
bool res = getName() == rhs.getName()
|
||||
&& parameters == rhs.parameters
|
||||
&& haveEqualArgumentTypes(rhs);
|
||||
assert(res == (getStateType()->getName() == rhs.getStateType()->getName()));
|
||||
return res;
|
||||
return getStateType()->equals(*rhs.getStateType());
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -84,7 +84,7 @@ public:
|
||||
if (isNaN(v))
|
||||
return;
|
||||
|
||||
UInt32 hash = intHash64(determinator);
|
||||
UInt32 hash = static_cast<UInt32>(intHash64(determinator));
|
||||
insertImpl(v, hash);
|
||||
sorted = false;
|
||||
++total_values;
|
||||
|
@ -118,7 +118,7 @@ private:
|
||||
|
||||
HashValue hash(Value key) const
|
||||
{
|
||||
return Hash()(key);
|
||||
return static_cast<HashValue>(Hash()(key));
|
||||
}
|
||||
|
||||
/// Delete all values whose hashes do not divide by 2 ^ skip_degree
|
||||
|
@ -32,10 +32,12 @@ void BackupFactory::registerBackupEngine(const String & engine_name, const Creat
}

void registerBackupEnginesFileAndDisk(BackupFactory &);
void registerBackupEngineS3(BackupFactory &);

void registerBackupEngines(BackupFactory & factory)
{
registerBackupEnginesFileAndDisk(factory);
registerBackupEngineS3(factory);
}

BackupFactory::BackupFactory()

375
src/Backups/BackupIO_S3.cpp
Normal file
@ -0,0 +1,375 @@
|
||||
#include <Backups/BackupIO_S3.h>
|
||||
|
||||
#if USE_AWS_S3
|
||||
#include <Common/quoteString.h>
|
||||
#include <Interpreters/threadPoolCallbackRunner.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Storages/StorageS3Settings.h>
|
||||
#include <IO/IOThreadPool.h>
|
||||
#include <IO/ReadBufferFromS3.h>
|
||||
#include <IO/WriteBufferFromS3.h>
|
||||
#include <Poco/Util/AbstractConfiguration.h>
|
||||
#include <aws/core/auth/AWSCredentials.h>
|
||||
#include <aws/s3/S3Client.h>
|
||||
#include <filesystem>
|
||||
|
||||
#include <aws/s3/model/ListObjectsRequest.h>
|
||||
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int S3_ERROR;
|
||||
extern const int LOGICAL_ERROR;
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
std::shared_ptr<Aws::S3::S3Client>
|
||||
makeS3Client(const S3::URI & s3_uri, const String & access_key_id, const String & secret_access_key, const ContextPtr & context)
|
||||
{
|
||||
auto settings = context->getStorageS3Settings().getSettings(s3_uri.uri.toString());
|
||||
|
||||
Aws::Auth::AWSCredentials credentials(access_key_id, secret_access_key);
|
||||
HeaderCollection headers;
|
||||
if (access_key_id.empty())
|
||||
{
|
||||
credentials = Aws::Auth::AWSCredentials(settings.auth_settings.access_key_id, settings.auth_settings.secret_access_key);
|
||||
headers = settings.auth_settings.headers;
|
||||
}
|
||||
|
||||
S3::PocoHTTPClientConfiguration client_configuration = S3::ClientFactory::instance().createClientConfiguration(
|
||||
settings.auth_settings.region,
|
||||
context->getRemoteHostFilter(),
|
||||
static_cast<unsigned>(context->getGlobalContext()->getSettingsRef().s3_max_redirects),
|
||||
context->getGlobalContext()->getSettingsRef().enable_s3_requests_logging,
|
||||
/* for_disk_s3 = */ false);
|
||||
|
||||
client_configuration.endpointOverride = s3_uri.endpoint;
|
||||
client_configuration.maxConnections = static_cast<unsigned>(context->getSettingsRef().s3_max_connections);
|
||||
/// Increase connect timeout
|
||||
client_configuration.connectTimeoutMs = 10 * 1000;
|
||||
/// Requests in backups can be extremely long, set to one hour
|
||||
client_configuration.requestTimeoutMs = 60 * 60 * 1000;
|
||||
|
||||
return S3::ClientFactory::instance().create(
|
||||
client_configuration,
|
||||
s3_uri.is_virtual_hosted_style,
|
||||
credentials.GetAWSAccessKeyId(),
|
||||
credentials.GetAWSSecretKey(),
|
||||
settings.auth_settings.server_side_encryption_customer_key_base64,
|
||||
std::move(headers),
|
||||
settings.auth_settings.use_environment_credentials.value_or(
|
||||
context->getConfigRef().getBool("s3.use_environment_credentials", false)),
|
||||
settings.auth_settings.use_insecure_imds_request.value_or(
|
||||
context->getConfigRef().getBool("s3.use_insecure_imds_request", false)));
|
||||
}
|
||||
|
||||
Aws::Vector<Aws::S3::Model::Object> listObjects(Aws::S3::S3Client & client, const S3::URI & s3_uri, const String & file_name)
|
||||
{
|
||||
Aws::S3::Model::ListObjectsRequest request;
|
||||
request.SetBucket(s3_uri.bucket);
|
||||
request.SetPrefix(fs::path{s3_uri.key} / file_name);
|
||||
request.SetMaxKeys(1);
|
||||
auto outcome = client.ListObjects(request);
|
||||
if (!outcome.IsSuccess())
|
||||
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
|
||||
return outcome.GetResult().GetContents();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
BackupReaderS3::BackupReaderS3(
|
||||
const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_)
|
||||
: s3_uri(s3_uri_)
|
||||
, client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_))
|
||||
, max_single_read_retries(context_->getSettingsRef().s3_max_single_read_retries)
|
||||
, read_settings(context_->getReadSettings())
|
||||
{
|
||||
}
|
||||
|
||||
DataSourceDescription BackupReaderS3::getDataSourceDescription() const
|
||||
{
|
||||
return DataSourceDescription{DataSourceType::S3, s3_uri.endpoint, false, false};
|
||||
}
|
||||
|
||||
|
||||
BackupReaderS3::~BackupReaderS3() = default;
|
||||
|
||||
bool BackupReaderS3::fileExists(const String & file_name)
|
||||
{
|
||||
return !listObjects(*client, s3_uri, file_name).empty();
|
||||
}
|
||||
|
||||
UInt64 BackupReaderS3::getFileSize(const String & file_name)
|
||||
{
|
||||
auto objects = listObjects(*client, s3_uri, file_name);
|
||||
if (objects.empty())
|
||||
throw Exception(ErrorCodes::S3_ERROR, "Object {} must exist");
|
||||
return objects[0].GetSize();
|
||||
}
|
||||
|
||||
std::unique_ptr<SeekableReadBuffer> BackupReaderS3::readFile(const String & file_name)
|
||||
{
|
||||
return std::make_unique<ReadBufferFromS3>(
|
||||
client, s3_uri.bucket, fs::path(s3_uri.key) / file_name, s3_uri.version_id, max_single_read_retries, read_settings);
|
||||
}
|
||||
|
||||
|
||||
BackupWriterS3::BackupWriterS3(
|
||||
const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_)
|
||||
: s3_uri(s3_uri_)
|
||||
, client(makeS3Client(s3_uri_, access_key_id_, secret_access_key_, context_))
|
||||
, max_single_read_retries(context_->getSettingsRef().s3_max_single_read_retries)
|
||||
, read_settings(context_->getReadSettings())
|
||||
, rw_settings(context_->getStorageS3Settings().getSettings(s3_uri.uri.toString()).rw_settings)
|
||||
{
|
||||
rw_settings.updateFromSettingsIfEmpty(context_->getSettingsRef());
|
||||
}
|
||||
|
||||
DataSourceDescription BackupWriterS3::getDataSourceDescription() const
|
||||
{
|
||||
return DataSourceDescription{DataSourceType::S3, s3_uri.endpoint, false, false};
|
||||
}
|
||||
|
||||
bool BackupWriterS3::supportNativeCopy(DataSourceDescription data_source_description) const
|
||||
{
|
||||
return getDataSourceDescription() == data_source_description;
|
||||
}
|
||||
|
||||
|
||||
void BackupWriterS3::copyObjectImpl(
|
||||
const String & src_bucket,
|
||||
const String & src_key,
|
||||
const String & dst_bucket,
|
||||
const String & dst_key,
|
||||
std::optional<Aws::S3::Model::HeadObjectResult> head,
|
||||
std::optional<ObjectAttributes> metadata) const
|
||||
{
|
||||
Aws::S3::Model::CopyObjectRequest request;
|
||||
request.SetCopySource(src_bucket + "/" + src_key);
|
||||
request.SetBucket(dst_bucket);
|
||||
request.SetKey(dst_key);
|
||||
if (metadata)
|
||||
{
|
||||
request.SetMetadata(*metadata);
|
||||
request.SetMetadataDirective(Aws::S3::Model::MetadataDirective::REPLACE);
|
||||
}
|
||||
|
||||
auto outcome = client->CopyObject(request);
|
||||
|
||||
if (!outcome.IsSuccess() && outcome.GetError().GetExceptionName() == "EntityTooLarge")
|
||||
{ // Can't come here with MinIO, MinIO allows single part upload for large objects.
|
||||
copyObjectMultipartImpl(src_bucket, src_key, dst_bucket, dst_key, head, metadata);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!outcome.IsSuccess())
|
||||
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
|
||||
|
||||
}
|
||||
|
||||
Aws::S3::Model::HeadObjectOutcome BackupWriterS3::requestObjectHeadData(const std::string & bucket_from, const std::string & key) const
|
||||
{
|
||||
Aws::S3::Model::HeadObjectRequest request;
|
||||
request.SetBucket(bucket_from);
|
||||
request.SetKey(key);
|
||||
|
||||
return client->HeadObject(request);
|
||||
}
|
||||
|
||||
void BackupWriterS3::copyObjectMultipartImpl(
|
||||
const String & src_bucket,
|
||||
const String & src_key,
|
||||
const String & dst_bucket,
|
||||
const String & dst_key,
|
||||
std::optional<Aws::S3::Model::HeadObjectResult> head,
|
||||
std::optional<ObjectAttributes> metadata) const
|
||||
{
|
||||
if (!head)
|
||||
head = requestObjectHeadData(src_bucket, src_key).GetResult();
|
||||
|
||||
size_t size = head->GetContentLength();
|
||||
|
||||
String multipart_upload_id;
|
||||
|
||||
{
|
||||
Aws::S3::Model::CreateMultipartUploadRequest request;
|
||||
request.SetBucket(dst_bucket);
|
||||
request.SetKey(dst_key);
|
||||
if (metadata)
|
||||
request.SetMetadata(*metadata);
|
||||
|
||||
auto outcome = client->CreateMultipartUpload(request);
|
||||
|
||||
if (!outcome.IsSuccess())
|
||||
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
|
||||
|
||||
multipart_upload_id = outcome.GetResult().GetUploadId();
|
||||
}
|
||||
|
||||
std::vector<String> part_tags;
|
||||
|
||||
size_t upload_part_size = rw_settings.min_upload_part_size;
|
||||
for (size_t position = 0, part_number = 1; position < size; ++part_number, position += upload_part_size)
|
||||
{
|
||||
Aws::S3::Model::UploadPartCopyRequest part_request;
|
||||
part_request.SetCopySource(src_bucket + "/" + src_key);
|
||||
part_request.SetBucket(dst_bucket);
|
||||
part_request.SetKey(dst_key);
|
||||
part_request.SetUploadId(multipart_upload_id);
|
||||
part_request.SetPartNumber(static_cast<int>(part_number));
|
||||
part_request.SetCopySourceRange(fmt::format("bytes={}-{}", position, std::min(size, position + upload_part_size) - 1));
|
||||
|
||||
auto outcome = client->UploadPartCopy(part_request);
|
||||
if (!outcome.IsSuccess())
|
||||
{
|
||||
Aws::S3::Model::AbortMultipartUploadRequest abort_request;
|
||||
abort_request.SetBucket(dst_bucket);
|
||||
abort_request.SetKey(dst_key);
|
||||
abort_request.SetUploadId(multipart_upload_id);
|
||||
client->AbortMultipartUpload(abort_request);
|
||||
// In error case we throw exception later with first error from UploadPartCopy
|
||||
}
|
||||
if (!outcome.IsSuccess())
|
||||
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
|
||||
|
||||
auto etag = outcome.GetResult().GetCopyPartResult().GetETag();
|
||||
part_tags.push_back(etag);
|
||||
}
|
||||
|
||||
{
|
||||
Aws::S3::Model::CompleteMultipartUploadRequest req;
|
||||
req.SetBucket(dst_bucket);
|
||||
req.SetKey(dst_key);
|
||||
req.SetUploadId(multipart_upload_id);
|
||||
|
||||
Aws::S3::Model::CompletedMultipartUpload multipart_upload;
|
||||
for (size_t i = 0; i < part_tags.size(); ++i)
|
||||
{
|
||||
Aws::S3::Model::CompletedPart part;
|
||||
multipart_upload.AddParts(part.WithETag(part_tags[i]).WithPartNumber(static_cast<int>(i) + 1));
|
||||
}
|
||||
|
||||
req.SetMultipartUpload(multipart_upload);
|
||||
|
||||
auto outcome = client->CompleteMultipartUpload(req);
|
||||
|
||||
if (!outcome.IsSuccess())
|
||||
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
|
||||
}
|
||||
}
|
||||
|
||||
void BackupWriterS3::copyFileNative(DiskPtr from_disk, const String & file_name_from, const String & file_name_to)
|
||||
{
|
||||
if (!from_disk)
|
||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot natively copy data to disk without source disk");
|
||||
|
||||
auto objects = from_disk->getStorageObjects(file_name_from);
|
||||
if (objects.size() > 1)
|
||||
{
|
||||
copyFileThroughBuffer(from_disk->readFile(file_name_from), file_name_to);
|
||||
}
|
||||
else
|
||||
{
|
||||
auto object_storage = from_disk->getObjectStorage();
|
||||
std::string source_bucket = object_storage->getObjectsNamespace();
|
||||
auto file_path = fs::path(s3_uri.key) / file_name_to;
|
||||
|
||||
auto head = requestObjectHeadData(source_bucket, objects[0].absolute_path).GetResult();
|
||||
static constexpr int64_t multipart_upload_threashold = 5UL * 1024 * 1024 * 1024;
|
||||
if (head.GetContentLength() >= multipart_upload_threashold)
|
||||
{
|
||||
copyObjectMultipartImpl(
|
||||
source_bucket, objects[0].absolute_path, s3_uri.bucket, file_path, head);
|
||||
}
|
||||
else
|
||||
{
|
||||
copyObjectImpl(
|
||||
source_bucket, objects[0].absolute_path, s3_uri.bucket, file_path, head);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
BackupWriterS3::~BackupWriterS3() = default;
|
||||
|
||||
bool BackupWriterS3::fileExists(const String & file_name)
|
||||
{
|
||||
return !listObjects(*client, s3_uri, file_name).empty();
|
||||
}
|
||||
|
||||
UInt64 BackupWriterS3::getFileSize(const String & file_name)
|
||||
{
|
||||
auto objects = listObjects(*client, s3_uri, file_name);
|
||||
if (objects.empty())
|
||||
throw Exception(ErrorCodes::S3_ERROR, "Object {} must exist");
|
||||
return objects[0].GetSize();
|
||||
}
|
||||
|
||||
bool BackupWriterS3::fileContentsEqual(const String & file_name, const String & expected_file_contents)
|
||||
{
|
||||
if (listObjects(*client, s3_uri, file_name).empty())
|
||||
return false;
|
||||
|
||||
try
|
||||
{
|
||||
auto in = std::make_unique<ReadBufferFromS3>(
|
||||
client, s3_uri.bucket, fs::path(s3_uri.key) / file_name, s3_uri.version_id, max_single_read_retries, read_settings);
|
||||
String actual_file_contents(expected_file_contents.size(), ' ');
|
||||
return (in->read(actual_file_contents.data(), actual_file_contents.size()) == actual_file_contents.size())
|
||||
&& (actual_file_contents == expected_file_contents) && in->eof();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
tryLogCurrentException(__PRETTY_FUNCTION__);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
std::unique_ptr<WriteBuffer> BackupWriterS3::writeFile(const String & file_name)
|
||||
{
|
||||
return std::make_unique<WriteBufferFromS3>(
|
||||
client,
|
||||
s3_uri.bucket,
|
||||
fs::path(s3_uri.key) / file_name,
|
||||
rw_settings,
|
||||
std::nullopt,
|
||||
DBMS_DEFAULT_BUFFER_SIZE,
|
||||
threadPoolCallbackRunner<void>(IOThreadPool::get(), "BackupWriterS3"));
|
||||
}
|
||||
|
||||
void BackupWriterS3::removeFiles(const Strings & file_names)
|
||||
{
|
||||
/// One call of DeleteObjects() cannot remove more than 1000 keys.
|
||||
size_t chunk_size_limit = 1000;
|
||||
|
||||
size_t current_position = 0;
|
||||
while (current_position < file_names.size())
|
||||
{
|
||||
std::vector<Aws::S3::Model::ObjectIdentifier> current_chunk;
|
||||
for (; current_position < file_names.size() && current_chunk.size() < chunk_size_limit; ++current_position)
|
||||
{
|
||||
Aws::S3::Model::ObjectIdentifier obj;
|
||||
obj.SetKey(fs::path(s3_uri.key) / file_names[current_position]);
|
||||
current_chunk.push_back(obj);
|
||||
}
|
||||
|
||||
Aws::S3::Model::Delete delkeys;
|
||||
delkeys.SetObjects(current_chunk);
|
||||
Aws::S3::Model::DeleteObjectsRequest request;
|
||||
request.SetBucket(s3_uri.bucket);
|
||||
request.SetDelete(delkeys);
|
||||
|
||||
auto outcome = client->DeleteObjects(request);
|
||||
if (!outcome.IsSuccess())
|
||||
throw Exception(outcome.GetError().GetMessage(), ErrorCodes::S3_ERROR);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
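
For context: BackupWriterS3::removeFiles above batches keys because a single DeleteObjects call cannot remove more than 1000 keys. A minimal standalone sketch of the same chunking idea, with hypothetical names and a generic send_batch callback standing in for the real S3 request (illustrative only, not part of this commit):

#include <cstddef>
#include <string>
#include <vector>

/// Split `keys` into batches of at most `batch_size` and hand each batch to `send_batch`,
/// e.g. one DeleteObjects request per batch.
template <typename SendBatch>
void removeInChunks(const std::vector<std::string> & keys, size_t batch_size, SendBatch send_batch)
{
    size_t position = 0;
    while (position < keys.size())
    {
        std::vector<std::string> chunk;
        for (; position < keys.size() && chunk.size() < batch_size; ++position)
            chunk.push_back(keys[position]);
        send_batch(chunk);
    }
}
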
|
92
src/Backups/BackupIO_S3.h
Normal file
@ -0,0 +1,92 @@
|
||||
#pragma once
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#if USE_AWS_S3
|
||||
#include <Backups/BackupIO.h>
|
||||
#include <IO/S3Common.h>
|
||||
#include <IO/ReadSettings.h>
|
||||
#include <Storages/StorageS3Settings.h>
|
||||
|
||||
#include <aws/s3/S3Client.h>
|
||||
#include <aws/s3/model/CopyObjectRequest.h>
|
||||
#include <aws/s3/model/ListObjectsV2Request.h>
|
||||
#include <aws/s3/model/HeadObjectRequest.h>
|
||||
#include <aws/s3/model/DeleteObjectRequest.h>
|
||||
#include <aws/s3/model/DeleteObjectsRequest.h>
|
||||
#include <aws/s3/model/CreateMultipartUploadRequest.h>
|
||||
#include <aws/s3/model/CompleteMultipartUploadRequest.h>
|
||||
#include <aws/s3/model/UploadPartCopyRequest.h>
|
||||
#include <aws/s3/model/AbortMultipartUploadRequest.h>
|
||||
#include <aws/s3/model/HeadObjectResult.h>
|
||||
#include <aws/s3/model/ListObjectsV2Result.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
/// Represents a backup stored to AWS S3.
|
||||
class BackupReaderS3 : public IBackupReader
|
||||
{
|
||||
public:
|
||||
BackupReaderS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_);
|
||||
~BackupReaderS3() override;
|
||||
|
||||
bool fileExists(const String & file_name) override;
|
||||
UInt64 getFileSize(const String & file_name) override;
|
||||
std::unique_ptr<SeekableReadBuffer> readFile(const String & file_name) override;
|
||||
DataSourceDescription getDataSourceDescription() const override;
|
||||
|
||||
private:
|
||||
S3::URI s3_uri;
|
||||
std::shared_ptr<Aws::S3::S3Client> client;
|
||||
UInt64 max_single_read_retries;
|
||||
ReadSettings read_settings;
|
||||
};
|
||||
|
||||
|
||||
class BackupWriterS3 : public IBackupWriter
|
||||
{
|
||||
public:
|
||||
BackupWriterS3(const S3::URI & s3_uri_, const String & access_key_id_, const String & secret_access_key_, const ContextPtr & context_);
|
||||
~BackupWriterS3() override;
|
||||
|
||||
bool fileExists(const String & file_name) override;
|
||||
UInt64 getFileSize(const String & file_name) override;
|
||||
bool fileContentsEqual(const String & file_name, const String & expected_file_contents) override;
|
||||
std::unique_ptr<WriteBuffer> writeFile(const String & file_name) override;
|
||||
void removeFiles(const Strings & file_names) override;
|
||||
|
||||
DataSourceDescription getDataSourceDescription() const override;
|
||||
bool supportNativeCopy(DataSourceDescription data_source_description) const override;
|
||||
void copyFileNative(DiskPtr from_disk, const String & file_name_from, const String & file_name_to) override;
|
||||
|
||||
private:
|
||||
|
||||
Aws::S3::Model::HeadObjectOutcome requestObjectHeadData(const std::string & bucket_from, const std::string & key) const;
|
||||
|
||||
void copyObjectImpl(
|
||||
const String & src_bucket,
|
||||
const String & src_key,
|
||||
const String & dst_bucket,
|
||||
const String & dst_key,
|
||||
std::optional<Aws::S3::Model::HeadObjectResult> head = std::nullopt,
|
||||
std::optional<ObjectAttributes> metadata = std::nullopt) const;
|
||||
|
||||
void copyObjectMultipartImpl(
|
||||
const String & src_bucket,
|
||||
const String & src_key,
|
||||
const String & dst_bucket,
|
||||
const String & dst_key,
|
||||
std::optional<Aws::S3::Model::HeadObjectResult> head = std::nullopt,
|
||||
std::optional<ObjectAttributes> metadata = std::nullopt) const;
|
||||
|
||||
S3::URI s3_uri;
|
||||
std::shared_ptr<Aws::S3::S3Client> client;
|
||||
UInt64 max_single_read_retries;
|
||||
ReadSettings read_settings;
|
||||
S3Settings::ReadWriteSettings rw_settings;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
@ -455,6 +455,7 @@ void BackupImpl::createLockFile()
|
||||
assert(uuid);
|
||||
auto out = writer->writeFile(lock_file_name);
|
||||
writeUUIDText(*uuid, *out);
|
||||
out->finalize();
|
||||
}
|
||||
|
||||
bool BackupImpl::checkLockFile(bool throw_if_failed) const
|
||||
|
@ -62,7 +62,6 @@ namespace
|
||||
#define LIST_OF_BACKUP_SETTINGS(M) \
|
||||
M(String, id) \
|
||||
M(String, compression_method) \
|
||||
M(Int64, compression_level) \
|
||||
M(String, password) \
|
||||
M(Bool, structure_only) \
|
||||
M(Bool, async) \
|
||||
@ -72,6 +71,7 @@ namespace
|
||||
M(String, host_id) \
|
||||
M(String, coordination_zk_path) \
|
||||
M(OptionalUUID, backup_uuid)
|
||||
/// M(Int64, compression_level)
|
||||
|
||||
BackupSettings BackupSettings::fromBackupQuery(const ASTBackupQuery & query)
|
||||
{
|
||||
@ -82,6 +82,9 @@ BackupSettings BackupSettings::fromBackupQuery(const ASTBackupQuery & query)
|
||||
const auto & settings = query.settings->as<const ASTSetQuery &>().changes;
|
||||
for (const auto & setting : settings)
|
||||
{
|
||||
if (setting.name == "compression_level")
|
||||
res.compression_level = static_cast<int>(SettingFieldInt64{setting.value}.value);
|
||||
else
|
||||
#define GET_SETTINGS_FROM_BACKUP_QUERY_HELPER(TYPE, NAME) \
|
||||
if (setting.name == #NAME) \
|
||||
res.NAME = SettingField##TYPE{setting.value}.value; \
|
||||
|
129
src/Backups/registerBackupEngineS3.cpp
Normal file
@ -0,0 +1,129 @@
|
||||
#include "config.h"
|
||||
|
||||
#include <Backups/BackupFactory.h>
|
||||
#include <Common/Exception.h>
|
||||
|
||||
#if USE_AWS_S3
|
||||
#include <Backups/BackupIO_S3.h>
|
||||
#include <Backups/BackupImpl.h>
|
||||
#include <IO/Archives/hasRegisteredArchiveFileExtension.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Poco/Util/AbstractConfiguration.h>
|
||||
#include <filesystem>
|
||||
#endif
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
namespace fs = std::filesystem;
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int BAD_ARGUMENTS;
|
||||
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
|
||||
extern const int SUPPORT_IS_DISABLED;
|
||||
}
|
||||
|
||||
#if USE_AWS_S3
|
||||
namespace
|
||||
{
|
||||
String removeFileNameFromURL(String & url)
|
||||
{
|
||||
Poco::URI url2{url};
|
||||
String path = url2.getPath();
|
||||
size_t slash_pos = path.find_last_of('/');
|
||||
String file_name = path.substr(slash_pos + 1);
|
||||
path.resize(slash_pos + 1);
|
||||
url2.setPath(path);
|
||||
url = url2.toString();
|
||||
return file_name;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
void registerBackupEngineS3(BackupFactory & factory)
|
||||
{
|
||||
auto creator_fn = []([[maybe_unused]] const BackupFactory::CreateParams & params) -> std::unique_ptr<IBackup>
|
||||
{
|
||||
#if USE_AWS_S3
|
||||
String backup_name = params.backup_info.toString();
|
||||
const String & id_arg = params.backup_info.id_arg;
|
||||
const auto & args = params.backup_info.args;
|
||||
|
||||
String s3_uri, access_key_id, secret_access_key;
|
||||
|
||||
if (!id_arg.empty())
|
||||
{
|
||||
const auto & config = params.context->getConfigRef();
|
||||
auto config_prefix = "named_collections." + id_arg;
|
||||
|
||||
if (!config.has(config_prefix))
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", id_arg);
|
||||
|
||||
s3_uri = config.getString(config_prefix + ".url");
|
||||
access_key_id = config.getString(config_prefix + ".access_key_id", "");
|
||||
secret_access_key = config.getString(config_prefix + ".secret_access_key", "");
|
||||
|
||||
if (config.has(config_prefix + ".filename"))
|
||||
s3_uri = fs::path(s3_uri) / config.getString(config_prefix + ".filename");
|
||||
|
||||
if (args.size() > 1)
|
||||
throw Exception(
|
||||
"Backup S3 requires 1 or 2 arguments: named_collection, [filename]",
|
||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||
|
||||
if (args.size() == 1)
|
||||
s3_uri = fs::path(s3_uri) / args[0].safeGet<String>();
|
||||
}
|
||||
else
|
||||
{
|
||||
if ((args.size() != 1) && (args.size() != 3))
|
||||
throw Exception(
|
||||
"Backup S3 requires 1 or 3 arguments: url, [access_key_id, secret_access_key]",
|
||||
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH);
|
||||
|
||||
s3_uri = args[0].safeGet<String>();
|
||||
if (args.size() >= 3)
|
||||
{
|
||||
access_key_id = args[1].safeGet<String>();
|
||||
secret_access_key = args[2].safeGet<String>();
|
||||
}
|
||||
}
|
||||
|
||||
BackupImpl::ArchiveParams archive_params;
|
||||
if (hasRegisteredArchiveFileExtension(s3_uri))
|
||||
{
|
||||
if (params.is_internal_backup)
|
||||
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Using archives with backups on clusters is disabled");
|
||||
|
||||
archive_params.archive_name = removeFileNameFromURL(s3_uri);
|
||||
archive_params.compression_method = params.compression_method;
|
||||
archive_params.compression_level = params.compression_level;
|
||||
archive_params.password = params.password;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!params.password.empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Password is not applicable, backup cannot be encrypted");
|
||||
}
|
||||
|
||||
if (params.open_mode == IBackup::OpenMode::READ)
|
||||
{
|
||||
auto reader = std::make_shared<BackupReaderS3>(S3::URI{Poco::URI{s3_uri}}, access_key_id, secret_access_key, params.context);
|
||||
return std::make_unique<BackupImpl>(backup_name, archive_params, params.base_backup_info, reader, params.context);
|
||||
}
|
||||
else
|
||||
{
|
||||
auto writer = std::make_shared<BackupWriterS3>(S3::URI{Poco::URI{s3_uri}}, access_key_id, secret_access_key, params.context);
|
||||
return std::make_unique<BackupImpl>(backup_name, archive_params, params.base_backup_info, writer, params.context, params.is_internal_backup, params.backup_coordination, params.backup_uuid);
|
||||
}
|
||||
#else
|
||||
throw Exception("S3 support is disabled", ErrorCodes::SUPPORT_IS_DISABLED);
|
||||
#endif
|
||||
};
|
||||
|
||||
factory.registerBackupEngine("S3", creator_fn);
|
||||
}
|
||||
|
||||
}
|
@ -43,7 +43,7 @@ private:
|
||||
std::string hostname;
|
||||
size_t port;
|
||||
std::string log_level;
|
||||
size_t max_server_connections;
|
||||
unsigned max_server_connections;
|
||||
size_t http_timeout;
|
||||
|
||||
Poco::Logger * log;
|
||||
|
@ -43,7 +43,7 @@ protected:
|
||||
|
||||
virtual String serviceFileName() const = 0;
|
||||
|
||||
virtual size_t getDefaultPort() const = 0;
|
||||
virtual unsigned getDefaultPort() const = 0;
|
||||
|
||||
virtual bool startBridgeManually() const = 0;
|
||||
|
||||
|
@ -23,7 +23,7 @@ protected:
|
||||
|
||||
String serviceFileName() const override { return serviceAlias(); }
|
||||
|
||||
size_t getDefaultPort() const override { return DEFAULT_PORT; }
|
||||
unsigned getDefaultPort() const override { return DEFAULT_PORT; }
|
||||
|
||||
bool startBridgeManually() const override { return false; }
|
||||
|
||||
|
@ -109,7 +109,7 @@ protected:
|
||||
|
||||
String getName() const override { return BridgeHelperMixin::getName(); }
|
||||
|
||||
size_t getDefaultPort() const override { return DEFAULT_PORT; }
|
||||
unsigned getDefaultPort() const override { return DEFAULT_PORT; }
|
||||
|
||||
String serviceAlias() const override { return BridgeHelperMixin::serviceAlias(); }
|
||||
|
||||
|
@ -1,7 +1,6 @@
|
||||
#include <Client/ClientBase.h>
|
||||
|
||||
#include <iostream>
|
||||
#include <iomanip>
|
||||
#include <filesystem>
|
||||
#include <map>
|
||||
#include <unordered_map>
|
||||
@ -9,7 +8,6 @@
|
||||
#include "config.h"
|
||||
|
||||
#include <Common/DateLUT.h>
|
||||
#include <Common/LocalDate.h>
|
||||
#include <Common/MemoryTracker.h>
|
||||
#include <base/argsToConfig.h>
|
||||
#include <base/LineReader.h>
|
||||
@ -32,7 +30,6 @@
|
||||
#include <Common/clearPasswordFromCommandLine.h>
|
||||
#include <Common/StringUtils/StringUtils.h>
|
||||
#include <Common/filesystemHelpers.h>
|
||||
#include <Common/Config/configReadClient.h>
|
||||
#include <Common/NetException.h>
|
||||
#include <Storages/ColumnsDescription.h>
|
||||
|
||||
@ -70,10 +67,10 @@
|
||||
#include <IO/WriteBufferFromOStream.h>
|
||||
#include <IO/CompressionMethod.h>
|
||||
#include <Client/InternalTextLogs.h>
|
||||
#include <boost/algorithm/string/replace.hpp>
|
||||
#include <IO/ForkWriteBuffer.h>
|
||||
#include <Parsers/Kusto/ParserKQLStatement.h>
|
||||
|
||||
|
||||
namespace fs = std::filesystem;
|
||||
using namespace std::literals;
|
||||
|
||||
@ -340,7 +337,7 @@ ASTPtr ClientBase::parseQuery(const char *& pos, const char * end, bool allow_mu
|
||||
|
||||
|
||||
/// Consumes trailing semicolons and tries to consume the same-line trailing comment.
|
||||
void ClientBase::adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, int max_parser_depth)
|
||||
void ClientBase::adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, uint32_t max_parser_depth)
|
||||
{
|
||||
// We have to skip the trailing semicolon that might be left
|
||||
// after VALUES parsing or just after a normal semicolon-terminated query.
|
||||
@ -553,7 +550,7 @@ try
|
||||
out_file_buf = wrapWriteBufferWithCompressionMethod(
|
||||
std::make_unique<WriteBufferFromFile>(out_file, DBMS_DEFAULT_BUFFER_SIZE, O_WRONLY | O_EXCL | O_CREAT),
|
||||
compression_method,
|
||||
compression_level
|
||||
static_cast<int>(compression_level)
|
||||
);
|
||||
|
||||
if (query_with_output->is_into_outfile_with_stdout)
|
||||
@ -1605,6 +1602,8 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
|
||||
if (this_query_begin >= all_queries_end)
|
||||
return MultiQueryProcessingStage::QUERIES_END;
|
||||
|
||||
unsigned max_parser_depth = static_cast<unsigned>(global_context->getSettingsRef().max_parser_depth);
|
||||
|
||||
// If there are only comments left until the end of file, we just
|
||||
// stop. The parser can't handle this situation because it always
|
||||
// expects that there is some query that it can parse.
|
||||
@ -1614,7 +1613,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
|
||||
// and it makes more sense to treat them as such.
|
||||
{
|
||||
Tokens tokens(this_query_begin, all_queries_end);
|
||||
IParser::Pos token_iterator(tokens, global_context->getSettingsRef().max_parser_depth);
|
||||
IParser::Pos token_iterator(tokens, max_parser_depth);
|
||||
if (!token_iterator.isValid())
|
||||
return MultiQueryProcessingStage::QUERIES_END;
|
||||
}
|
||||
@ -1635,7 +1634,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
|
||||
if (ignore_error)
|
||||
{
|
||||
Tokens tokens(this_query_begin, all_queries_end);
|
||||
IParser::Pos token_iterator(tokens, global_context->getSettingsRef().max_parser_depth);
|
||||
IParser::Pos token_iterator(tokens, max_parser_depth);
|
||||
while (token_iterator->type != TokenType::Semicolon && token_iterator.isValid())
|
||||
++token_iterator;
|
||||
this_query_begin = token_iterator->end;
|
||||
@ -1675,7 +1674,7 @@ MultiQueryProcessingStage ClientBase::analyzeMultiQueryText(
|
||||
// after we have processed the query. But even this guess is
|
||||
// beneficial so that we see proper trailing comments in "echo" and
|
||||
// server log.
|
||||
adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth);
|
||||
adjustQueryEnd(this_query_end, all_queries_end, max_parser_depth);
|
||||
return MultiQueryProcessingStage::EXECUTE_QUERY;
|
||||
}
|
||||
|
||||
@ -1869,7 +1868,9 @@ bool ClientBase::executeMultiQuery(const String & all_queries_text)
|
||||
if (insert_ast && isSyncInsertWithData(*insert_ast, global_context))
|
||||
{
|
||||
this_query_end = insert_ast->end;
|
||||
adjustQueryEnd(this_query_end, all_queries_end, global_context->getSettingsRef().max_parser_depth);
|
||||
adjustQueryEnd(
|
||||
this_query_end, all_queries_end,
|
||||
static_cast<unsigned>(global_context->getSettingsRef().max_parser_depth));
|
||||
}
|
||||
|
||||
// Report error.
|
||||
@ -1925,7 +1926,7 @@ bool ClientBase::processQueryText(const String & text)
|
||||
|
||||
String ClientBase::prompt() const
|
||||
{
|
||||
return boost::replace_all_copy(prompt_by_server_display_name, "{database}", config().getString("database", "default"));
|
||||
return prompt_by_server_display_name;
|
||||
}
|
||||
|
||||
|
||||
@ -2350,7 +2351,7 @@ void ClientBase::init(int argc, char ** argv)
|
||||
if (options.count("print-profile-events"))
|
||||
config().setBool("print-profile-events", true);
|
||||
if (options.count("profile-events-delay-ms"))
|
||||
config().setInt("profile-events-delay-ms", options["profile-events-delay-ms"].as<UInt64>());
|
||||
config().setUInt64("profile-events-delay-ms", options["profile-events-delay-ms"].as<UInt64>());
|
||||
if (options.count("progress"))
|
||||
config().setBool("progress", true);
|
||||
if (options.count("echo"))
|
||||
|
@ -72,7 +72,7 @@ protected:
|
||||
void processParsedSingleQuery(const String & full_query, const String & query_to_execute,
|
||||
ASTPtr parsed_query, std::optional<bool> echo_query_ = {}, bool report_error = false);
|
||||
|
||||
static void adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, int max_parser_depth);
|
||||
static void adjustQueryEnd(const char *& this_query_end, const char * all_queries_end, uint32_t max_parser_depth);
|
||||
ASTPtr parseQuery(const char *& pos, const char * end, bool allow_multi_statements) const;
|
||||
static void setupSignalHandler();
|
||||
|
||||
|
@ -338,7 +338,7 @@ HedgedConnections::ReplicaLocation HedgedConnections::getReadyReplicaLocation(As
|
||||
offset_states[location.offset].replicas[location.index].change_replica_timeout.reset();
|
||||
offset_states[location.offset].replicas[location.index].is_change_replica_timeout_expired = true;
|
||||
offset_states[location.offset].next_replica_in_process = true;
|
||||
offsets_queue.push(location.offset);
|
||||
offsets_queue.push(static_cast<int>(location.offset));
|
||||
ProfileEvents::increment(ProfileEvents::HedgedRequestsChangeReplica);
|
||||
startNewReplica();
|
||||
}
|
||||
|
@ -362,7 +362,7 @@ void HedgedConnectionsFactory::removeReplicaFromEpoll(int index, int fd)
|
||||
timeout_fd_to_replica_index.erase(replicas[index].change_replica_timeout.getDescriptor());
|
||||
}
|
||||
|
||||
int HedgedConnectionsFactory::numberOfProcessingReplicas() const
|
||||
size_t HedgedConnectionsFactory::numberOfProcessingReplicas() const
|
||||
{
|
||||
if (epoll.empty())
|
||||
return 0;
|
||||
@ -381,7 +381,7 @@ HedgedConnectionsFactory::State HedgedConnectionsFactory::setBestUsableReplica(C
|
||||
&& result.is_usable
|
||||
&& !replicas[i].is_ready
|
||||
&& (!skip_replicas_with_two_level_aggregation_incompatibility || !isTwoLevelAggregationIncompatible(&*result.entry)))
|
||||
indexes.push_back(i);
|
||||
indexes.push_back(static_cast<int>(i));
|
||||
}
|
||||
|
||||
if (indexes.empty())
|
||||
|
@ -70,7 +70,7 @@ public:
|
||||
|
||||
const ConnectionTimeouts & getConnectionTimeouts() const { return timeouts; }
|
||||
|
||||
int numberOfProcessingReplicas() const;
|
||||
size_t numberOfProcessingReplicas() const;
|
||||
|
||||
/// Tell Factory to not return connections with two level aggregation incompatibility.
|
||||
void skipReplicasWithTwoLevelAggregationIncompatibility() { skip_replicas_with_two_level_aggregation_incompatibility = true; }
|
||||
|
@ -6,8 +6,6 @@
|
||||
#include <Processors/Executors/PushingAsyncPipelineExecutor.h>
|
||||
#include <Storages/IStorage.h>
|
||||
#include <Core/Protocol.h>
|
||||
#include <DataTypes/DataTypesNumber.h>
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
|
@ -393,24 +393,38 @@ MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForRead
|
||||
Poco::Net::Socket::SocketList write_list;
|
||||
Poco::Net::Socket::SocketList except_list;
|
||||
|
||||
for (const ReplicaState & state : replica_states)
|
||||
{
|
||||
Connection * connection = state.connection;
|
||||
if (connection != nullptr)
|
||||
read_list.push_back(*connection->socket);
|
||||
}
|
||||
|
||||
auto timeout = is_draining ? drain_timeout : receive_timeout;
|
||||
int n = Poco::Net::Socket::select(
|
||||
read_list,
|
||||
write_list,
|
||||
except_list,
|
||||
timeout);
|
||||
int n = 0;
|
||||
|
||||
/// EINTR loop
|
||||
while (true)
|
||||
{
|
||||
read_list.clear();
|
||||
for (const ReplicaState & state : replica_states)
|
||||
{
|
||||
Connection * connection = state.connection;
|
||||
if (connection != nullptr)
|
||||
read_list.push_back(*connection->socket);
|
||||
}
|
||||
|
||||
/// poco returns 0 on EINTR, let's reset errno to ensure that EINTR came from select().
|
||||
errno = 0;
|
||||
|
||||
n = Poco::Net::Socket::select(
|
||||
read_list,
|
||||
write_list,
|
||||
except_list,
|
||||
timeout);
|
||||
if (n <= 0 && errno == EINTR)
|
||||
continue;
|
||||
break;
|
||||
}
|
||||
|
||||
/// We treat any error as timeout for simplicity.
|
||||
/// And we also check if read_list is still empty just in case.
|
||||
if (n <= 0 || read_list.empty())
|
||||
{
|
||||
const auto & addresses = dumpAddressesUnlocked();
|
||||
for (ReplicaState & state : replica_states)
|
||||
{
|
||||
Connection * connection = state.connection;
|
||||
@ -423,7 +437,7 @@ MultiplexedConnections::ReplicaState & MultiplexedConnections::getReplicaForRead
|
||||
throw Exception(ErrorCodes::TIMEOUT_EXCEEDED,
|
||||
"Timeout ({} ms) exceeded while reading from {}",
|
||||
timeout.totalMilliseconds(),
|
||||
dumpAddressesUnlocked());
|
||||
addresses);
|
||||
}
|
||||
}
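
For context: the loop added above retries the select() call when it is interrupted by a signal, resetting errno first because Poco's Socket::select() reports EINTR as a 0 return value, which would otherwise be indistinguishable from a timeout. A minimal sketch of the same retry pattern using plain POSIX select(), where EINTR shows up as -1 instead (hypothetical helper, not part of this commit):

#include <cerrno>
#include <functional>
#include <sys/select.h>
#include <sys/time.h>

/// Retry select() while it is interrupted by a signal. The caller supplies a function
/// that rebuilds the read set on every attempt, because select() may modify it,
/// mirroring how the change above rebuilds read_list inside the loop.
int selectWithEintrRetry(int nfds, const std::function<void(fd_set &)> & fill_read_set, timeval timeout)
{
    while (true)
    {
        fd_set read_fds;
        FD_ZERO(&read_fds);
        fill_read_set(read_fds);

        timeval tv = timeout; /// select() may also modify the timeout
        errno = 0;
        int n = ::select(nfds, &read_fds, nullptr, nullptr, &tv);
        if (n < 0 && errno == EINTR)
            continue;
        return n;
    }
}
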
|
||||
|
||||
|
@ -81,9 +81,9 @@ Field QueryFuzzer::getRandomField(int type)
|
||||
{
|
||||
static constexpr UInt64 scales[] = {0, 1, 2, 10};
|
||||
return DecimalField<Decimal64>(
|
||||
bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values)
|
||||
/ sizeof(*bad_int64_values))],
|
||||
scales[fuzz_rand() % (sizeof(scales) / sizeof(*scales))]);
|
||||
bad_int64_values[fuzz_rand() % (sizeof(bad_int64_values) / sizeof(*bad_int64_values))],
|
||||
static_cast<UInt32>(scales[fuzz_rand() % (sizeof(scales) / sizeof(*scales))])
|
||||
);
|
||||
}
|
||||
default:
|
||||
assert(false);
|
||||
|
@ -277,13 +277,13 @@ void ColumnArray::updateWeakHash32(WeakHash32 & hash) const
|
||||
{
|
||||
/// This row improves hash a little bit according to integration tests.
|
||||
/// It is the same as to use previous hash value as the first element of array.
|
||||
hash_data[i] = intHashCRC32(hash_data[i]);
|
||||
hash_data[i] = static_cast<UInt32>(intHashCRC32(hash_data[i]));
|
||||
|
||||
for (size_t row = prev_offset; row < offsets_data[i]; ++row)
|
||||
/// It is probably not the best way to combine hashes.
|
||||
/// But much better then xor which lead to similar hash for arrays like [1], [1, 1, 1], [1, 1, 1, 1, 1], ...
|
||||
/// Much better implementation - to add offsets as an optional argument to updateWeakHash32.
|
||||
hash_data[i] = intHashCRC32(internal_hash_data[row], hash_data[i]);
|
||||
hash_data[i] = static_cast<UInt32>(intHashCRC32(internal_hash_data[row], hash_data[i]));
|
||||
|
||||
prev_offset = offsets_data[i];
|
||||
}
|
||||
@ -569,8 +569,8 @@ void ColumnArray::expand(const IColumn::Filter & mask, bool inverted)
|
||||
if (mask.size() < offsets_data.size())
|
||||
throw Exception("Mask size should be no less than data size.", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
int index = mask.size() - 1;
|
||||
int from = offsets_data.size() - 1;
|
||||
ssize_t index = mask.size() - 1;
|
||||
ssize_t from = offsets_data.size() - 1;
|
||||
offsets_data.resize(mask.size());
|
||||
UInt64 last_offset = offsets_data[from];
|
||||
while (index >= 0)
|
||||
|
@ -27,8 +27,8 @@ std::shared_ptr<Memory<>> ColumnCompressed::compressBuffer(const void * data, si
|
||||
auto compressed_size = LZ4_compress_default(
|
||||
reinterpret_cast<const char *>(data),
|
||||
compressed.data(),
|
||||
data_size,
|
||||
max_dest_size);
|
||||
static_cast<int>(data_size),
|
||||
static_cast<int>(max_dest_size));
|
||||
|
||||
if (compressed_size <= 0)
|
||||
throw Exception(ErrorCodes::CANNOT_COMPRESS, "Cannot compress column");
|
||||
@ -51,8 +51,8 @@ void ColumnCompressed::decompressBuffer(
|
||||
auto processed_size = LZ4_decompress_safe(
|
||||
reinterpret_cast<const char *>(compressed_data),
|
||||
reinterpret_cast<char *>(decompressed_data),
|
||||
compressed_size,
|
||||
decompressed_size);
|
||||
static_cast<int>(compressed_size),
|
||||
static_cast<int>(decompressed_size));
|
||||
|
||||
if (processed_size <= 0)
|
||||
throw Exception(ErrorCodes::CANNOT_DECOMPRESS, "Cannot decompress column");
|
||||
|
@ -148,7 +148,7 @@ void ColumnConst::updateWeakHash32(WeakHash32 & hash) const
|
||||
size_t data_hash = element_hash.getData()[0];
|
||||
|
||||
for (auto & value : hash.getData())
|
||||
value = intHashCRC32(data_hash, value);
|
||||
value = static_cast<UInt32>(intHashCRC32(data_hash, value));
|
||||
}
|
||||
|
||||
void ColumnConst::compareColumn(
|
||||
|
@ -109,7 +109,7 @@ void ColumnDecimal<T>::updateWeakHash32(WeakHash32 & hash) const
|
||||
|
||||
while (begin < end)
|
||||
{
|
||||
*hash_data = intHashCRC32(*begin, *hash_data);
|
||||
*hash_data = static_cast<UInt32>(intHashCRC32(*begin, *hash_data));
|
||||
++begin;
|
||||
++hash_data;
|
||||
}
|
||||
|
@ -277,8 +277,8 @@ void ColumnFixedString::expand(const IColumn::Filter & mask, bool inverted)
|
||||
if (mask.size() < size())
|
||||
throw Exception("Mask size should be no less than data size.", ErrorCodes::LOGICAL_ERROR);
|
||||
|
||||
int index = mask.size() - 1;
|
||||
int from = size() - 1;
|
||||
ssize_t index = mask.size() - 1;
|
||||
ssize_t from = size() - 1;
|
||||
chars.resize_fill(mask.size() * n, 0);
|
||||
while (index >= 0)
|
||||
{
|
||||
|
@ -46,7 +46,7 @@ namespace
|
||||
|
||||
HashMap<T, T> hash_map;
|
||||
for (auto val : index)
|
||||
hash_map.insert({val, hash_map.size()});
|
||||
hash_map.insert({val, static_cast<T>(hash_map.size())});
|
||||
|
||||
auto res_col = ColumnVector<T>::create();
|
||||
auto & data = res_col->getData();
|
||||
@ -632,7 +632,7 @@ void ColumnLowCardinality::Index::convertPositions()
|
||||
|
||||
/// TODO: Optimize with SSE?
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
new_data[i] = data[i];
|
||||
new_data[i] = static_cast<CurIndexType>(data[i]);
|
||||
|
||||
positions = std::move(new_positions);
|
||||
size_of_type = sizeof(IndexType);
|
||||
@ -717,7 +717,7 @@ void ColumnLowCardinality::Index::insertPositionsRange(const IColumn & column, U
|
||||
positions_data.resize(size + limit);
|
||||
|
||||
for (UInt64 i = 0; i < limit; ++i)
|
||||
positions_data[size + i] = column_data[offset + i];
|
||||
positions_data[size + i] = static_cast<CurIndexType>(column_data[offset + i]);
|
||||
};
|
||||
|
||||
callForType(std::move(copy), size_of_type);
|
||||
@ -789,7 +789,7 @@ void ColumnLowCardinality::Index::updateWeakHash(WeakHash32 & hash, WeakHash32 &
|
||||
auto size = data.size();
|
||||
|
||||
for (size_t i = 0; i < size; ++i)
|
||||
hash_data[i] = intHashCRC32(dict_hash_data[data[i]], hash_data[i]);
|
||||
hash_data[i] = static_cast<UInt32>(intHashCRC32(dict_hash_data[data[i]], hash_data[i]));
|
||||
};
|
||||
|
||||
callForType(std::move(update_weak_hash), size_of_type);
|
||||
|
@ -168,8 +168,8 @@ void ColumnString::expand(const IColumn::Filter & mask, bool inverted)
|
||||
/// We cannot change only offsets, because each string should end with terminating zero byte.
|
||||
/// So, we will insert one zero byte when mask value is zero.
|
||||
|
||||
int index = mask.size() - 1;
|
||||
int from = offsets_data.size() - 1;
|
||||
ssize_t index = mask.size() - 1;
|
||||
ssize_t from = offsets_data.size() - 1;
|
||||
/// mask.size() - offsets_data.size() should be equal to the number of zeros in mask
|
||||
/// (if not, one of exceptions below will throw) and we can calculate the resulting chars size.
|
||||
UInt64 last_offset = offsets_data[from] + (mask.size() - offsets_data.size());
|
||||
|
@ -550,7 +550,7 @@ MutableColumnPtr ColumnUnique<ColumnType>::uniqueInsertRangeImpl(
|
||||
auto insert_key = [&](StringRef ref, ReverseIndex<UInt64, ColumnType> & cur_index) -> MutableColumnPtr
|
||||
{
|
||||
auto inserted_pos = cur_index.insert(ref);
|
||||
positions[num_added_rows] = inserted_pos;
|
||||
positions[num_added_rows] = static_cast<IndexType>(inserted_pos);
|
||||
if (inserted_pos == next_position)
|
||||
return update_position(next_position);
|
||||
|
||||
@ -562,9 +562,9 @@ MutableColumnPtr ColumnUnique<ColumnType>::uniqueInsertRangeImpl(
|
||||
auto row = start + num_added_rows;
|
||||
|
||||
if (null_map && (*null_map)[row])
|
||||
positions[num_added_rows] = getNullValueIndex();
|
||||
positions[num_added_rows] = static_cast<IndexType>(getNullValueIndex());
|
||||
else if (column->compareAt(getNestedTypeDefaultValueIndex(), row, *src_column, 1) == 0)
|
||||
positions[num_added_rows] = getNestedTypeDefaultValueIndex();
|
||||
positions[num_added_rows] = static_cast<IndexType>(getNestedTypeDefaultValueIndex());
|
||||
else
|
||||
{
|
||||
auto ref = src_column->getDataAt(row);
|
||||
@ -576,7 +576,7 @@ MutableColumnPtr ColumnUnique<ColumnType>::uniqueInsertRangeImpl(
|
||||
if (insertion_point == reverse_index.lastInsertionPoint())
|
||||
res = insert_key(ref, *secondary_index);
|
||||
else
|
||||
positions[num_added_rows] = insertion_point;
|
||||
positions[num_added_rows] = static_cast<IndexType>(insertion_point);
|
||||
}
|
||||
else
|
||||
res = insert_key(ref, reverse_index);
|
||||
|
@ -12,12 +12,14 @@
|
||||
#include <Common/RadixSort.h>
|
||||
#include <Common/SipHash.h>
|
||||
#include <Common/WeakHash.h>
|
||||
#include <Common/TargetSpecific.h>
|
||||
#include <Common/assert_cast.h>
|
||||
#include <base/sort.h>
|
||||
#include <base/unaligned.h>
|
||||
#include <base/bit_cast.h>
|
||||
#include <base/scope_guard.h>
|
||||
|
||||
#include <bit>
|
||||
#include <cmath>
|
||||
#include <cstring>
|
||||
|
||||
@ -25,6 +27,10 @@
|
||||
# include <emmintrin.h>
|
||||
#endif
|
||||
|
||||
#if USE_MULTITARGET_CODE
|
||||
# include <immintrin.h>
|
||||
#endif
|
||||
|
||||
#if USE_EMBEDDED_COMPILER
|
||||
#include <DataTypes/Native.h>
|
||||
#include <llvm/IR/IRBuilder.h>
|
||||
@ -84,7 +90,7 @@ void ColumnVector<T>::updateWeakHash32(WeakHash32 & hash) const
|
||||
|
||||
while (begin < end)
|
||||
{
|
||||
*hash_data = hashCRC32(*begin, *hash_data);
|
||||
*hash_data = static_cast<UInt32>(hashCRC32(*begin, *hash_data));
|
||||
++begin;
|
||||
++hash_data;
|
||||
}
|
||||
@ -471,6 +477,128 @@ void ColumnVector<T>::insertRangeFrom(const IColumn & src, size_t start, size_t
|
||||
memcpy(data.data() + old_size, &src_vec.data[start], length * sizeof(data[0]));
|
||||
}
|
||||
|
||||
static inline UInt64 blsr(UInt64 mask)
|
||||
{
|
||||
#ifdef __BMI__
|
||||
return _blsr_u64(mask);
|
||||
#else
|
||||
return mask & (mask-1);
|
||||
#endif
|
||||
}
|
||||
|
||||
DECLARE_DEFAULT_CODE(
|
||||
template <typename T, typename Container, size_t SIMD_ELEMENTS>
|
||||
inline void doFilterAligned(const UInt8 *& filt_pos, const UInt8 *& filt_end_aligned, const T *& data_pos, Container & res_data)
|
||||
{
|
||||
while (filt_pos < filt_end_aligned)
|
||||
{
|
||||
UInt64 mask = bytes64MaskToBits64Mask(filt_pos);
|
||||
|
||||
if (0xffffffffffffffff == mask)
|
||||
{
|
||||
res_data.insert(data_pos, data_pos + SIMD_ELEMENTS);
|
||||
}
|
||||
else
|
||||
{
|
||||
while (mask)
|
||||
{
|
||||
size_t index = std::countr_zero(mask);
|
||||
res_data.push_back(data_pos[index]);
|
||||
mask = blsr(mask);
|
||||
}
|
||||
}
|
||||
|
||||
filt_pos += SIMD_ELEMENTS;
|
||||
data_pos += SIMD_ELEMENTS;
|
||||
}
|
||||
}
|
||||
)

namespace
{
template <typename T, typename Container>
void resize(Container & res_data, size_t reserve_size)
{
#if defined(MEMORY_SANITIZER)
    res_data.resize_fill(reserve_size, static_cast<T>(0)); // MSan doesn't recognize that all allocated memory is written by AVX-512 intrinsics.
#else
    res_data.resize(reserve_size);
#endif
}
}

DECLARE_AVX512VBMI2_SPECIFIC_CODE(
template <size_t ELEMENT_WIDTH>
inline void compressStoreAVX512(const void *src, void *dst, const UInt64 mask)
{
    __m512i vsrc = _mm512_loadu_si512(src);
    if constexpr (ELEMENT_WIDTH == 1)
        _mm512_mask_compressstoreu_epi8(dst, static_cast<__mmask64>(mask), vsrc);
    else if constexpr (ELEMENT_WIDTH == 2)
        _mm512_mask_compressstoreu_epi16(dst, static_cast<__mmask32>(mask), vsrc);
    else if constexpr (ELEMENT_WIDTH == 4)
        _mm512_mask_compressstoreu_epi32(dst, static_cast<__mmask16>(mask), vsrc);
    else if constexpr (ELEMENT_WIDTH == 8)
        _mm512_mask_compressstoreu_epi64(dst, static_cast<__mmask8>(mask), vsrc);
}

template <typename T, typename Container, size_t SIMD_ELEMENTS>
inline void doFilterAligned(const UInt8 *& filt_pos, const UInt8 *& filt_end_aligned, const T *& data_pos, Container & res_data)
{
    static constexpr size_t VEC_LEN = 64; /// AVX512 vector length - 64 bytes
    static constexpr size_t ELEMENT_WIDTH = sizeof(T);
    static constexpr size_t ELEMENTS_PER_VEC = VEC_LEN / ELEMENT_WIDTH;
    static constexpr UInt64 KMASK = 0xffffffffffffffff >> (64 - ELEMENTS_PER_VEC);

    size_t current_offset = res_data.size();
    size_t reserve_size = res_data.size();
    size_t alloc_size = SIMD_ELEMENTS * 2;

    while (filt_pos < filt_end_aligned)
    {
        /// to avoid calling resize too frequently, resize to reserve buffer.
        if (reserve_size - current_offset < SIMD_ELEMENTS)
        {
            reserve_size += alloc_size;
            resize<T>(res_data, reserve_size);
            alloc_size *= 2;
        }

        UInt64 mask = bytes64MaskToBits64Mask(filt_pos);

        if (0xffffffffffffffff == mask)
        {
            for (size_t i = 0; i < SIMD_ELEMENTS; i += ELEMENTS_PER_VEC)
                _mm512_storeu_si512(reinterpret_cast<void *>(&res_data[current_offset + i]),
                    _mm512_loadu_si512(reinterpret_cast<const void *>(data_pos + i)));
            current_offset += SIMD_ELEMENTS;
        }
        else
        {
            if (mask)
            {
                for (size_t i = 0; i < SIMD_ELEMENTS; i += ELEMENTS_PER_VEC)
                {
                    compressStoreAVX512<ELEMENT_WIDTH>(reinterpret_cast<const void *>(data_pos + i),
                        reinterpret_cast<void *>(&res_data[current_offset]), mask & KMASK);
                    current_offset += std::popcount(mask & KMASK);
                    /// prepare mask for next iter, if ELEMENTS_PER_VEC = 64, no next iter
                    if (ELEMENTS_PER_VEC < 64)
                    {
                        mask >>= ELEMENTS_PER_VEC;
                    }
                }
            }
        }

        filt_pos += SIMD_ELEMENTS;
        data_pos += SIMD_ELEMENTS;
    }
    /// resize to the real size.
    res_data.resize(current_offset);
}
)
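The key intrinsic in the AVX-512 path is the masked compress-store: lanes whose mask bit is set are packed contiguously at the destination in ascending lane order, which is why `current_offset` advances by `std::popcount(mask & KMASK)` after each call. A scalar model of that semantic for 32-bit lanes, offered as a hedged sketch (the helper name is made up for illustration, not part of any library):

```cpp
// Illustrative scalar model of a masked compress-store over 16 x 32-bit lanes.
#include <cstddef>
#include <cstdint>

size_t compressStoreScalar32(const uint32_t * src, uint32_t * dst, uint16_t mask)
{
    size_t stored = 0;
    for (size_t lane = 0; lane < 16; ++lane)
        if (mask & (1u << lane))
            dst[stored++] = src[lane];   /// selected lanes land back-to-back
    return stored;                       /// equals the popcount of the mask
}
```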

template <typename T>
ColumnPtr ColumnVector<T>::filter(const IColumn::Filter & filt, ssize_t result_size_hint) const
{
@ -491,36 +619,18 @@ ColumnPtr ColumnVector<T>::filter(const IColumn::Filter & filt, ssize_t result_s
    /** A slightly more optimized version.
    * Based on the assumption that often pieces of consecutive values
    * completely pass or do not pass the filter.
    * Therefore, we will optimistically check the parts of `SIMD_BYTES` values.
    * Therefore, we will optimistically check the parts of `SIMD_ELEMENTS` values.
    */
    static constexpr size_t SIMD_BYTES = 64;
    const UInt8 * filt_end_aligned = filt_pos + size / SIMD_BYTES * SIMD_BYTES;
    static constexpr size_t SIMD_ELEMENTS = 64;
    const UInt8 * filt_end_aligned = filt_pos + size / SIMD_ELEMENTS * SIMD_ELEMENTS;

    while (filt_pos < filt_end_aligned)
    {
        UInt64 mask = bytes64MaskToBits64Mask(filt_pos);

        if (0xffffffffffffffff == mask)
        {
            res_data.insert(data_pos, data_pos + SIMD_BYTES);
        }
        else
        {
            while (mask)
            {
                size_t index = std::countr_zero(mask);
                res_data.push_back(data_pos[index]);
#ifdef __BMI__
                mask = _blsr_u64(mask);
#else
                mask = mask & (mask-1);
#endif
            }
        }

        filt_pos += SIMD_BYTES;
        data_pos += SIMD_BYTES;
    }
#if USE_MULTITARGET_CODE
    static constexpr bool VBMI2_CAPABLE = sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8;
    if (VBMI2_CAPABLE && isArchSupported(TargetArch::AVX512VBMI2))
        TargetSpecific::AVX512VBMI2::doFilterAligned<T, Container, SIMD_ELEMENTS>(filt_pos, filt_end_aligned, data_pos, res_data);
    else
#endif
        TargetSpecific::Default::doFilterAligned<T, Container, SIMD_ELEMENTS>(filt_pos, filt_end_aligned, data_pos, res_data);

    while (filt_pos < filt_end)
    {
@ -7,11 +7,15 @@
#include <base/unaligned.h>
#include <Core/Field.h>
#include <Common/assert_cast.h>
#include <Common/TargetSpecific.h>
#include <Core/TypeId.h>
#include <base/TypeName.h>

#include "config.h"

#if USE_MULTITARGET_CODE
# include <immintrin.h>
#endif

namespace DB
{
@ -391,6 +395,127 @@ protected:
    Container data;
};

DECLARE_DEFAULT_CODE(
template <typename Container, typename Type>
inline void vectorIndexImpl(const Container & data, const PaddedPODArray<Type> & indexes, size_t limit, Container & res_data)
{
    for (size_t i = 0; i < limit; ++i)
        res_data[i] = data[indexes[i]];
}
);

DECLARE_AVX512VBMI_SPECIFIC_CODE(
template <typename Container, typename Type>
inline void vectorIndexImpl(const Container & data, const PaddedPODArray<Type> & indexes, size_t limit, Container & res_data)
{
    static constexpr UInt64 MASK64 = 0xffffffffffffffff;
    const size_t limit64 = limit & ~63;
    size_t pos = 0;
    size_t data_size = data.size();

    auto data_pos = reinterpret_cast<const UInt8 *>(data.data());
    auto indexes_pos = reinterpret_cast<const UInt8 *>(indexes.data());
    auto res_pos = reinterpret_cast<UInt8 *>(res_data.data());

    if (limit == 0)
        return; /// nothing to do, just return

    if (data_size <= 64)
    {
        /// one single mask load for table size <= 64
        __mmask64 last_mask = MASK64 >> (64 - data_size);
        __m512i table1 = _mm512_maskz_loadu_epi8(last_mask, data_pos);

        /// 64 bytes table lookup using one single permutexvar_epi8
        while (pos < limit64)
        {
            __m512i vidx = _mm512_loadu_epi8(indexes_pos + pos);
            __m512i out = _mm512_permutexvar_epi8(vidx, table1);
            _mm512_storeu_epi8(res_pos + pos, out);
            pos += 64;
        }
        /// tail handling
        if (limit > limit64)
        {
            __mmask64 tail_mask = MASK64 >> (limit64 + 64 - limit);
            __m512i vidx = _mm512_maskz_loadu_epi8(tail_mask, indexes_pos + pos);
            __m512i out = _mm512_permutexvar_epi8(vidx, table1);
            _mm512_mask_storeu_epi8(res_pos + pos, tail_mask, out);
        }
    }
    else if (data_size <= 128)
    {
        /// table size (64, 128] requires 2 zmm load
        __mmask64 last_mask = MASK64 >> (128 - data_size);
        __m512i table1 = _mm512_loadu_epi8(data_pos);
        __m512i table2 = _mm512_maskz_loadu_epi8(last_mask, data_pos + 64);

        /// 128 bytes table lookup using one single permute2xvar_epi8
        while (pos < limit64)
        {
            __m512i vidx = _mm512_loadu_epi8(indexes_pos + pos);
            __m512i out = _mm512_permutex2var_epi8(table1, vidx, table2);
            _mm512_storeu_epi8(res_pos + pos, out);
            pos += 64;
        }
        if (limit > limit64)
        {
            __mmask64 tail_mask = MASK64 >> (limit64 + 64 - limit);
            __m512i vidx = _mm512_maskz_loadu_epi8(tail_mask, indexes_pos + pos);
            __m512i out = _mm512_permutex2var_epi8(table1, vidx, table2);
            _mm512_mask_storeu_epi8(res_pos + pos, tail_mask, out);
        }
    }
    else
    {
        if (data_size > 256)
        {
            /// byte index will not exceed 256 boundary.
            data_size = 256;
        }

        __m512i table1 = _mm512_loadu_epi8(data_pos);
        __m512i table2 = _mm512_loadu_epi8(data_pos + 64);
        __m512i table3, table4;
        if (data_size <= 192)
        {
            /// only 3 tables need to load if size <= 192
            __mmask64 last_mask = MASK64 >> (192 - data_size);
            table3 = _mm512_maskz_loadu_epi8(last_mask, data_pos + 128);
            table4 = _mm512_setzero_si512();
        }
        else
        {
            __mmask64 last_mask = MASK64 >> (256 - data_size);
            table3 = _mm512_loadu_epi8(data_pos + 128);
            table4 = _mm512_maskz_loadu_epi8(last_mask, data_pos + 192);
        }

        /// 256 bytes table lookup can use: 2 permute2xvar_epi8 plus 1 blender with MSB
        while (pos < limit64)
        {
            __m512i vidx = _mm512_loadu_epi8(indexes_pos + pos);
            __m512i tmp1 = _mm512_permutex2var_epi8(table1, vidx, table2);
            __m512i tmp2 = _mm512_permutex2var_epi8(table3, vidx, table4);
            __mmask64 msb = _mm512_movepi8_mask(vidx);
            __m512i out = _mm512_mask_blend_epi8(msb, tmp1, tmp2);
            _mm512_storeu_epi8(res_pos + pos, out);
            pos += 64;
        }
        if (limit > limit64)
        {
            __mmask64 tail_mask = MASK64 >> (limit64 + 64 - limit);
            __m512i vidx = _mm512_maskz_loadu_epi8(tail_mask, indexes_pos + pos);
            __m512i tmp1 = _mm512_permutex2var_epi8(table1, vidx, table2);
            __m512i tmp2 = _mm512_permutex2var_epi8(table3, vidx, table4);
            __mmask64 msb = _mm512_movepi8_mask(vidx);
            __m512i out = _mm512_mask_blend_epi8(msb, tmp1, tmp2);
            _mm512_mask_storeu_epi8(res_pos + pos, tail_mask, out);
        }
    }
}
);
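The VBMI variant treats the column as a byte table of at most 256 entries (a `UInt8` index can never address more) and uses `_mm512_permutexvar_epi8` / `_mm512_permutex2var_epi8` as 64- and 128-byte shuffles, blending two shuffle results by the index MSB for the 256-byte tier. A scalar model of the single-table case, offered as an illustrative sketch only (the function name is hypothetical):

```cpp
// Illustrative scalar model of what one _mm512_permutexvar_epi8 does per block
// of 64 indexes: every output byte is table[idx & 63].
#include <array>
#include <cstddef>
#include <cstdint>

void permuteBytes64(const std::array<uint8_t, 64> & table,
                    const uint8_t * idx, uint8_t * out, size_t n)
{
    for (size_t i = 0; i < n; ++i)
        out[i] = table[idx[i] & 63];   /// low 6 bits of the index select the entry
}
```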

template <typename T>
template <typename Type>
ColumnPtr ColumnVector<T>::indexImpl(const PaddedPODArray<Type> & indexes, size_t limit) const
@ -399,8 +524,18 @@ ColumnPtr ColumnVector<T>::indexImpl(const PaddedPODArray<Type> & indexes, size_

    auto res = this->create(limit);
    typename Self::Container & res_data = res->getData();
    for (size_t i = 0; i < limit; ++i)
        res_data[i] = data[indexes[i]];
#if USE_MULTITARGET_CODE
    if constexpr (sizeof(T) == 1 && sizeof(Type) == 1)
    {
        /// VBMI optimization only applicable for (U)Int8 types
        if (isArchSupported(TargetArch::AVX512VBMI))
        {
            TargetSpecific::AVX512VBMI::vectorIndexImpl<Container, Type>(data, indexes, limit, res_data);
            return res;
        }
    }
#endif
    TargetSpecific::Default::vectorIndexImpl<Container, Type>(data, indexes, limit, res_data);

    return res;
}
@ -22,8 +22,8 @@ void expandDataByMask(PaddedPODArray<T> & data, const PaddedPODArray<UInt8> & ma
    if (mask.size() < data.size())
        throw Exception("Mask size should be no less than data size.", ErrorCodes::LOGICAL_ERROR);

    int from = data.size() - 1;
    int index = mask.size() - 1;
    ssize_t from = data.size() - 1;
    ssize_t index = mask.size() - 1;
    data.resize(mask.size());
    while (index >= 0)
    {
@ -317,7 +317,7 @@ int checkShortCircuitArguments(const ColumnsWithTypeAndName & arguments)
    for (size_t i = 0; i != arguments.size(); ++i)
    {
        if (checkAndGetShortCircuitArgument(arguments[i].column))
            last_short_circuit_argument_index = i;
            last_short_circuit_argument_index = static_cast<int>(i);
    }

    return last_short_circuit_argument_index;
159
src/Columns/tests/gtest_column_vector.cpp
Normal file
159
src/Columns/tests/gtest_column_vector.cpp
Normal file
@ -0,0 +1,159 @@
#include <limits>
#include <typeinfo>
#include <vector>
#include <Columns/ColumnsNumber.h>
#include <Common/randomSeed.h>
#include <gtest/gtest.h>

using namespace DB;

static pcg64 rng(randomSeed());
static constexpr int error_code = 12345;
static constexpr size_t TEST_RUNS = 500;
static constexpr size_t MAX_ROWS = 10000;
static const std::vector<size_t> filter_ratios = {1, 2, 5, 11, 32, 64, 100, 1000};
static const size_t K = filter_ratios.size();

template <typename T>
static MutableColumnPtr createColumn(size_t n)
{
    auto column = ColumnVector<T>::create();
    auto & values = column->getData();

    for (size_t i = 0; i < n; ++i)
    {
        values.push_back(i);
    }

    return column;
}

bool checkFilter(const PaddedPODArray<UInt8> &flit, const IColumn & src, const IColumn & dst)
{
    size_t n = flit.size();
    size_t dst_size = dst.size();
    size_t j = 0; /// index of dest
    for (size_t i = 0; i < n; ++i)
    {
        if (flit[i] != 0)
        {
            if ((dst_size <= j) || (src.compareAt(i, j, dst, 0) != 0))
                return false;
            j++;
        }
    }
    return dst_size == j; /// filtered size check
}

template <typename T>
static void testFilter()
{
    auto test_case = [&](size_t rows, size_t filter_ratio)
    {
        auto vector_column = createColumn<T>(rows);
        PaddedPODArray<UInt8> flit(rows);
        for (size_t i = 0; i < rows; ++i)
            flit[i] = rng() % filter_ratio == 0;
        auto res_column = vector_column->filter(flit, -1);

        if (!checkFilter(flit, *vector_column, *res_column))
            throw Exception(error_code, "VectorColumn filter failure, type: {}", typeid(T).name());
    };

    try
    {
        for (size_t i = 0; i < TEST_RUNS; ++i)
        {
            size_t rows = rng() % MAX_ROWS + 1;
            size_t filter_ratio = filter_ratios[rng() % K];

            test_case(rows, filter_ratio);
        }
    }
    catch (const Exception & e)
    {
        FAIL() << e.displayText();
    }
}

TEST(ColumnVector, Filter)
{
    testFilter<UInt8>();
    testFilter<Int16>();
    testFilter<UInt32>();
    testFilter<Int64>();
    testFilter<UInt128>();
    testFilter<Int256>();
    testFilter<Float32>();
    testFilter<Float64>();
    testFilter<UUID>();
}

template <typename T>
static MutableColumnPtr createIndexColumn(size_t limit, size_t rows)
{
    auto column = ColumnVector<T>::create();
    auto & values = column->getData();
    auto max = std::numeric_limits<T>::max();
    limit = limit > max ? max : limit;

    for (size_t i = 0; i < rows; ++i)
    {
        T val = rng() % limit;
        values.push_back(val);
    }

    return column;
}

template <typename T, typename IndexType>
static void testIndex()
{
    static const std::vector<size_t> column_sizes = {64, 128, 196, 256, 512};

    auto test_case = [&](size_t rows, size_t index_rows, size_t limit)
    {
        auto vector_column = createColumn<T>(rows);
        auto index_column = createIndexColumn<IndexType>(rows, index_rows);
        auto res_column = vector_column->index(*index_column, limit);
        if (limit == 0)
            limit = index_column->size();

        /// check results
        if (limit != res_column->size())
            throw Exception(error_code, "ColumnVector index size not match to limit: {} {}", typeid(T).name(), typeid(IndexType).name());
        for (size_t i = 0; i < limit; ++i)
        {
            /// vector_column data is the same as index, so indexed column's value will equals to index_column.
            if (res_column->get64(i) != index_column->get64(i))
                throw Exception(error_code, "ColumnVector index fail: {} {}", typeid(T).name(), typeid(IndexType).name());
        }
    };

    try
    {
        test_case(0, 0, 0); /// test for zero length index
        for (size_t i = 0; i < TEST_RUNS; ++i)
        {
            /// make sure rows distribute in (column_sizes[r-1], colulmn_sizes[r]]
            size_t row_idx = rng() % column_sizes.size();
            size_t row_base = row_idx > 0 ? column_sizes[row_idx - 1] : 0;
            size_t rows = row_base + (rng() % (column_sizes[row_idx] - row_base) + 1);
            size_t index_rows = rng() % MAX_ROWS + 1;

            test_case(rows, index_rows, 0);
            test_case(rows, index_rows, static_cast<size_t>(0.5 * index_rows));
        }
    }
    catch (const Exception & e)
    {
        FAIL() << e.displayText();
    }
}

TEST(ColumnVector, Index)
{
    testIndex<UInt8, UInt8>();
    testIndex<UInt16, UInt8>();
    testIndex<UInt16, UInt16>();
}
@ -164,7 +164,7 @@ TEST(WeakHash32, ColumnVectorU32)

    for (int idx [[maybe_unused]] : {1, 2})
    {
        for (uint64_t i = 0; i < 65536; ++i)
        for (uint32_t i = 0; i < 65536; ++i)
            data.push_back(i << 16u);
    }

@ -181,7 +181,7 @@ TEST(WeakHash32, ColumnVectorI32)

    for (int idx [[maybe_unused]] : {1, 2})
    {
        for (int64_t i = -32768; i < 32768; ++i)
        for (int32_t i = -32768; i < 32768; ++i)
            data.push_back(i << 16); //-V610
    }

@ -240,7 +240,7 @@ TEST(WeakHash32, ColumnVectorU128)
            val.items[0] = i << 32u;
            val.items[1] = i << 32u;
            data.push_back(val);
            eq_data.push_back(i);
            eq_data.push_back(static_cast<UInt32>(i));
        }
    }

@ -274,7 +274,7 @@ TEST(WeakHash32, ColumnDecimal32)

    for (int idx [[maybe_unused]] : {1, 2})
    {
        for (int64_t i = -32768; i < 32768; ++i)
        for (int32_t i = -32768; i < 32768; ++i)
            data.push_back(i << 16); //-V610
    }

@ -326,7 +326,7 @@ TEST(WeakHash32, ColumnString1)

    for (int idx [[maybe_unused]] : {1, 2})
    {
        for (int64_t i = 0; i < 65536; ++i)
        for (int32_t i = 0; i < 65536; ++i)
        {
            data.push_back(i);
            auto str = std::to_string(i);
@ -359,7 +359,7 @@ TEST(WeakHash32, ColumnString2)
    {
        size_t max_size = 3000;
        char letter = 'a';
        for (int64_t i = 0; i < 65536; ++i)
        for (int32_t i = 0; i < 65536; ++i)
        {
            data.push_back(i);
            size_t s = (i % max_size) + 1;
@ -401,7 +401,7 @@ TEST(WeakHash32, ColumnString3)
        char letter = 'a';
        for (int64_t i = 0; i < 65536; ++i)
        {
            data.push_back(i);
            data.push_back(static_cast<UInt32>(i));
            size_t s = (i % max_size) + 1;
            std::string str(s,'\0');
            str[0] = letter;
@ -430,7 +430,7 @@ TEST(WeakHash32, ColumnFixedString)
        char letter = 'a';
        for (int64_t i = 0; i < 65536; ++i)
        {
            data.push_back(i);
            data.push_back(static_cast<UInt32>(i));
            size_t s = (i % max_size) + 1;
            std::string str(s, letter);
            col->insertData(str.data(), str.size());
@ -471,7 +471,7 @@ TEST(WeakHash32, ColumnArray)
        UInt32 cur = 0;
        for (int64_t i = 0; i < 65536; ++i)
        {
            eq_data.push_back(i);
            eq_data.push_back(static_cast<UInt32>(i));
            size_t s = (i % max_size) + 1;

            cur_off += s;
@ -505,9 +505,9 @@ TEST(WeakHash32, ColumnArray2)
    UInt64 cur_off = 0;
    for (int idx [[maybe_unused]] : {1, 2})
    {
        for (int64_t i = 0; i < 1000; ++i)
        for (int32_t i = 0; i < 1000; ++i)
        {
            for (size_t j = 0; j < 1000; ++j)
            for (uint32_t j = 0; j < 1000; ++j)
            {
                eq_data.push_back(i * 1000 + j);

@ -556,7 +556,7 @@ TEST(WeakHash32, ColumnArrayArray)
        UInt32 cur = 1;
        for (int64_t i = 0; i < 3000; ++i)
        {
            eq_data.push_back(i);
            eq_data.push_back(static_cast<UInt32>(i));
            size_t s = (i % max_size) + 1;

            cur_off2 += s;
@ -667,7 +667,7 @@ TEST(WeakHash32, ColumnTupleUInt64UInt64)
        {
            data1.push_back(l);
            data2.push_back(i << 32u);
            eq.push_back(l * 65536 + i);
            eq.push_back(static_cast<UInt32>(l * 65536 + i));
        }
    }

@ -695,7 +695,7 @@ TEST(WeakHash32, ColumnTupleUInt64String)

        size_t max_size = 3000;
        char letter = 'a';
        for (int64_t i = 0; i < 65536; ++i)
        for (int32_t i = 0; i < 65536; ++i)
        {
            data1.push_back(l);
            eq.push_back(l * 65536 + i);
@ -737,7 +737,7 @@ TEST(WeakHash32, ColumnTupleUInt64FixedString)
        for (int64_t i = 0; i < 65536; ++i)
        {
            data1.push_back(l);
            eq.push_back(l * 65536 + i);
            eq.push_back(static_cast<Int32>(l * 65536 + i));

            size_t s = (i % max_size) + 1;
            std::string str(s, letter);
@ -778,7 +778,7 @@ TEST(WeakHash32, ColumnTupleUInt64Array)
        auto l = idx % 2;

        UInt32 cur = 0;
        for (int64_t i = 0; i < 65536; ++i)
        for (int32_t i = 0; i < 65536; ++i)
        {
            data1.push_back(l);
            eq_data.push_back(l * 65536 + i);
@ -65,7 +65,7 @@ public:
private:
    using Small = SmallSet<Key, small_set_size_max>;
    using Medium = HashContainer;
    using Large = HyperLogLogCounter<K, Hash, HashValueType, DenominatorType, BiasEstimator, mode>;
    using Large = HyperLogLogCounter<K, Key, Hash, HashValueType, DenominatorType, BiasEstimator, mode>;

public:
    CombinedCardinalityEstimator()
@ -82,6 +82,7 @@ inline bool cpuid(UInt32 op, UInt32 * res) noexcept /// NOLINT
    OP(AVX512BW) \
    OP(AVX512VL) \
    OP(AVX512VBMI) \
    OP(AVX512VBMI2) \
    OP(PREFETCHWT1) \
    OP(SHA) \
    OP(ADX) \
@ -302,6 +303,11 @@ bool haveAVX512VBMI() noexcept
    return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ecx >> 1) & 1u);
}

bool haveAVX512VBMI2() noexcept
{
    return haveAVX512F() && ((CpuInfo(0x7, 0).registers.ecx >> 6) & 1u);
}
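The new check reads CPUID leaf 7, sub-leaf 0, ECX bit 6, which is the documented AVX-512 VBMI2 feature bit (VBMI itself sits in bit 1 of the same register). A standalone sketch of the same query using GCC/Clang's `<cpuid.h>` helper on x86; note that the project code additionally requires AVX512F, and a production check would normally also verify OS support for ZMM state:

```cpp
// Illustrative sketch (x86, GCC/Clang): query the AVX-512 VBMI2 feature bit.
#include <cpuid.h>

bool haveAVX512VBMI2Standalone()
{
    unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
    if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
        return false;              /// CPUID leaf 7 not available
    return (ecx >> 6) & 1u;        /// CPUID.(EAX=7,ECX=0):ECX[6] = AVX512_VBMI2
}
```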

bool haveRDRAND() noexcept
{
    return CpuInfo(0x0).registers.eax >= 0x7 && ((CpuInfo(0x1).registers.ecx >> 30) & 1u);
Some files were not shown because too many files have changed in this diff.